Merge pull request #226 from zhongyehong/master

rpc
This commit is contained in:
zhong yehong 2017-05-08 16:14:38 +08:00 committed by GitHub
commit ff1c562b79
14 changed files with 175 additions and 117 deletions

View File

@ -116,7 +116,7 @@ do_start_master () {
# recovery : start cluster and recover status from etcd and global directory
# Default is "recovery"
../tools/nginx_config.sh
$DOCKLET_HOME/tools/nginx_config.sh
start-stop-daemon --start --oknodo --background --pidfile $PIDFILE_MASTER --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_MASTER -- $DAEMON_OPTS_MASTER
log_end_msg $?

View File

@ -21,6 +21,7 @@ from log import logger
import os
import http.server, cgi, json, sys, shutil
import xmlrpc.client
from socketserver import ThreadingMixIn
import nodemgr, vclustermgr, etcdlib, network, imagemgr, notificationmgr
import userManager,beansapplicationmgr
@ -103,6 +104,11 @@ def beans_check(func):
return wrapper
@app.route("/isalive/", methods = ['POST'])
@login_required
def isalive(user, beans, form):
return json.dumps({'success':'true'})
@app.route("/cluster/create/", methods=['POST'])
@login_required
@beans_check
@ -479,7 +485,7 @@ def listphynodes_monitor(user, beans, form):
global G_nodemgr
logger.info("handle request: monitor/listphynodes/")
res = {}
res['allnodes'] = G_nodemgr.get_allnodes()
res['allnodes'] = G_nodemgr.get_nodeips()
return json.dumps({'success':'true', 'monitor':res})
@app.route("/billing/beans/", methods=['POST'])

View File

@ -19,6 +19,7 @@ design:
from configparser import ConfigParser
from io import StringIO
import os,sys,subprocess,time,re,datetime,threading,random
import xmlrpc.client
from log import logger
import env
@ -311,10 +312,11 @@ class ImageMgr():
logger.info("only root can update base image")
#vclustermgr.stop_allclusters()
#vclustermgr.detach_allclusters()
workers = vclustermgr.nodemgr.get_rpcs()
workers = vclustermgr.nodemgr.get_nodeips()
logger.info("update base image in all workers")
for worker in workers:
worker.update_basefs(image)
workerrpc = xmlrpc.client.ServerProxy("http://%s:%s" % (worker, env.getenv("WORKER_PORT")))
workerrpc.update_basefs(image)
logger.info("update base image success")
#vclustermgr.mount_allclusters()
#logger.info("mount all cluster success")

View File

@ -20,6 +20,7 @@ Design:Monitor mainly consists of three parts: Collectors, Master_Collector and
import subprocess,re,os,etcdlib,psutil,math,sys
import time,threading,json,traceback,platform
import env
import xmlrpc.client
from datetime import datetime
from model import db,VNode,History
@ -547,12 +548,13 @@ class Master_Collector(threading.Thread):
while not self.thread_stop:
for worker in monitor_hosts.keys():
monitor_hosts[worker]['running'] = False
workers = self.nodemgr.get_rpcs()
workers = self.nodemgr.get_nodeips()
for worker in workers:
try:
ip = self.nodemgr.rpc_to_ip(worker)
ip = worker
workerrpc = xmlrpc.client.ServerProxy("http://%s:%s" % (worker, env.getenv("WORKER_PORT")))
# fetch data
info = list(eval(worker.workerFetchInfo(self.master_ip)))
info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
#logger.info(info[0])
# store data in monitor_hosts and monitor_vnodes
monitor_hosts[ip] = info[0]
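Because the proxy is now built inside the try block, an unreachable worker costs one skipped sample rather than a permanently broken cached client. The loop shape in isolation (a sketch; workerFetchInfo and the port come from the diff):

import xmlrpc.client

def collect_once(nodeips, master_ip, workerport):
    stats = {}
    for ip in nodeips:
        try:
            rpc = xmlrpc.client.ServerProxy("http://%s:%s" % (ip, workerport))
            stats[ip] = rpc.workerFetchInfo(master_ip)  # raises if the worker is down
        except Exception as err:
            print("worker %s unreachable: %s" % (ip, err))  # skip it, poll the rest
    return stats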

View File

@ -46,9 +46,6 @@ class NodeMgr(object):
logger.error("docklet-br not found")
sys.exit(1)
# init rpc list
self.rpcs = []
# get allnodes
self.allnodes = self._nodelist_etcd("allnodes")
self.runnodes = []
@ -58,8 +55,6 @@ class NodeMgr(object):
if node['value'] == 'ok':
logger.info ("running node %s" % nodeip)
self.runnodes.append(nodeip)
self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s" % (nodeip, self.workerport)))
logger.info ("add %s:%s in rpc client list" % (nodeip, self.workerport))
logger.info ("all nodes are: %s" % self.allnodes)
logger.info ("run nodes are: %s" % self.runnodes)
@ -69,7 +64,8 @@ class NodeMgr(object):
self.thread_watchnewnode = threading.Thread(target=self._watchnewnode)
self.thread_watchnewnode.start()
# wait for all nodes joins
while(True):
# while(True):
for i in range(10):
allin = True
for node in self.allnodes:
if node not in self.runnodes:
@ -78,7 +74,7 @@ class NodeMgr(object):
if allin:
logger.info("all nodes necessary joins ...")
break
time.sleep(0.05)
time.sleep(1)
logger.info ("run nodes are: %s" % self.runnodes)
@ -130,10 +126,6 @@ class NodeMgr(object):
self.etcd.setkey("machines/allnodes/"+nodeip, "ok")
logger.debug ("all nodes are: %s" % self.allnodes)
logger.debug ("run nodes are: %s" % self.runnodes)
rpccl = xmlrpc.client.ServerProxy("http://%s:%s" % (nodeip, self.workerport))
self.rpcs.append(rpccl)
logger.info ("add %s:%s in rpc client list" %
(nodeip, self.workerport))
elif node['value'] == 'ok':
etcd_runip.append(nodeip)
new_runnodes = []
@ -144,32 +136,12 @@ class NodeMgr(object):
#print(self.runnodes)
#print(etcd_runip)
#print(self.rpcs)
self.rpcs.remove(self.ip_to_rpc(nodeip))
else:
new_runnodes.append(nodeip)
self.runnodes = new_runnodes
self.runnodes = etcd_runip
# get all run nodes' IP addr
def get_nodeips(self):
return self.allnodes
return self.runnodes
def get_rpcs(self):
return self.rpcs
def get_onerpc(self):
return self.rpcs[random.randint(0, len(self.rpcs)-1)]
def rpc_to_ip(self, rpcclient):
try:
return self.runnodes[self.rpcs.index(rpcclient)]
except:
return None
def ip_to_rpc(self, nodeip):
try:
return self.rpcs[self.runnodes.index(nodeip)]
except:
return None
def get_allnodes(self):
return self.allnodes
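Net effect on NodeMgr: the rpcs list and the get_rpcs/get_onerpc/rpc_to_ip/ip_to_rpc/get_allnodes helpers are deleted, and get_nodeips switches from allnodes to runnodes. The surviving surface, as this diff reads (a sketch):

class NodeMgr:
    def __init__(self):
        self.allnodes = []  # every node registered under machines/allnodes
        self.runnodes = []  # nodes whose machines/runnodes key is currently 'ok'

    def get_nodeips(self):
        # the one lookup callers still need; they build their own RPC proxies
        return self.runnodes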

View File

@ -2,6 +2,7 @@
import os, random, json, sys, imagemgr
import datetime
import xmlrpc.client
from log import logger
import env
@ -85,7 +86,7 @@ class VclusterMgr(object):
return [False, "cluster:%s already exists" % clustername]
clustersize = int(self.defaultsize)
logger.info ("starting cluster %s with %d containers for %s" % (clustername, int(clustersize), username))
workers = self.nodemgr.get_rpcs()
workers = self.nodemgr.get_nodeips()
image_json = json.dumps(image)
groupname = json.loads(user_info)["data"]["group"]
if (len(workers) == 0):
@ -114,7 +115,6 @@ class VclusterMgr(object):
proxy_server_ip = ""
containers = []
for i in range(0, clustersize):
onework = workers[random.randint(0, len(workers)-1)]
if self.distributedgw == "True" and i == 0 and not self.networkmgr.has_usrgw(username):
[success,message] = self.networkmgr.setup_usrgw(username, self.nodemgr, onework)
if not success:
@ -125,13 +125,15 @@ class VclusterMgr(object):
lxc_name = username + "-" + str(clusterid) + "-" + str(i)
hostname = "host-"+str(i)
logger.info ("create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, image-%s" % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, image_json))
[success,message] = onework.create_container(lxc_name, proxy_server_ip, username, json.dumps(setting) , clustername, str(clusterid), str(i), hostname, ips[i], gateway, str(vlanid), image_json)
workerip = workers[random.randint(0, len(workers)-1)]
oneworker = xmlrpc.client.ServerProxy("http://%s:%s" % (workerip, env.getenv("WORKER_PORT")))
[success,message] = oneworker.create_container(lxc_name, proxy_server_ip, username, json.dumps(setting) , clustername, str(clusterid), str(i), hostname, ips[i], gateway, str(vlanid), image_json)
if success is False:
logger.info("container create failed, so vcluster create failed")
return [False, message]
logger.info("container create success")
hosts = hosts + ips[i].split("/")[0] + "\t" + hostname + "\t" + hostname + "."+clustername + "\n"
containers.append({ 'containername':lxc_name, 'hostname':hostname, 'ip':ips[i], 'host':self.nodemgr.rpc_to_ip(onework), 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting })
containers.append({ 'containername':lxc_name, 'hostname':hostname, 'ip':ips[i], 'host':workerip, 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting })
hostfile = open(hostpath, 'w')
hostfile.write(hosts)
hostfile.close()
@ -145,7 +147,7 @@ class VclusterMgr(object):
def scale_out_cluster(self,clustername,username,image,user_info, setting):
if not self.is_cluster(clustername,username):
return [False, "cluster:%s not found" % clustername]
workers = self.nodemgr.get_rpcs()
workers = self.nodemgr.get_nodeips()
if (len(workers) == 0):
logger.warning("no workers to start containers, scale out failed")
return [False, "no workers are running"]
@ -162,24 +164,25 @@ class VclusterMgr(object):
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts"
cid = clusterinfo['nextcid']
onework = workers[random.randint(0, len(workers)-1)]
workerip = workers[random.randint(0, len(workers)-1)]
oneworker = xmlrpc.client.ServerProxy("http://%s:%s" % (workerip, env.getenv("WORKER_PORT")))
lxc_name = username + "-" + str(clusterid) + "-" + str(cid)
hostname = "host-" + str(cid)
proxy_server_ip = clusterinfo['proxy_server_ip']
[success, message] = onework.create_container(lxc_name, proxy_server_ip, username, json.dumps(setting), clustername, clusterid, str(cid), hostname, ip, gateway, str(vlanid), image_json)
[success, message] = oneworker.create_container(lxc_name, proxy_server_ip, username, json.dumps(setting), clustername, clusterid, str(cid), hostname, ip, gateway, str(vlanid), image_json)
if success is False:
logger.info("create container failed, so scale out failed")
return [False, message]
if clusterinfo['status'] == "running":
onework.start_container(lxc_name)
onework.start_services(lxc_name, ["ssh"]) # TODO: need fix
oneworker.start_container(lxc_name)
oneworker.start_services(lxc_name, ["ssh"]) # TODO: need fix
logger.info("scale out success")
hostfile = open(hostpath, 'a')
hostfile.write(ip.split("/")[0] + "\t" + hostname + "\t" + hostname + "." + clustername + "\n")
hostfile.close()
clusterinfo['nextcid'] = int(clusterinfo['nextcid']) + 1
clusterinfo['size'] = int(clusterinfo['size']) + 1
clusterinfo['containers'].append({'containername':lxc_name, 'hostname':hostname, 'ip':ip, 'host':self.nodemgr.rpc_to_ip(onework), 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting})
clusterinfo['containers'].append({'containername':lxc_name, 'hostname':hostname, 'ip':ip, 'host':workerip, 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting})
clusterfile = open(clusterpath, 'w')
clusterfile.write(json.dumps(clusterinfo))
clusterfile.close()
@ -226,8 +229,8 @@ class VclusterMgr(object):
for container in containers:
if container['containername'] == containername:
logger.info("container: %s found" % containername)
onework = self.nodemgr.ip_to_rpc(container['host'])
onework.create_image(username,imagetmp,containername)
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
worker.create_image(username,imagetmp,containername)
fimage = container['image']
logger.info("image: %s created" % imagetmp)
break
@ -236,10 +239,10 @@ class VclusterMgr(object):
for container in containers:
if container['containername'] != containername:
logger.info("container: %s now flush" % container['containername'])
onework = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
#t = threading.Thread(target=onework.flush_container,args=(username,imagetmp,container['containername']))
#threads.append(t)
onework.flush_container(username,imagetmp,container['containername'])
worker.flush_container(username,imagetmp,container['containername'])
container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
container['image'] = fimage
logger.info("thread for container: %s has been prepared" % container['containername'])
@ -269,10 +272,10 @@ class VclusterMgr(object):
for container in containers:
if container['containername'] == containername:
logger.info("container: %s found" % containername)
onework = self.nodemgr.ip_to_rpc(container['host'])
if onework is None:
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
res = onework.create_image(username,imagename,containername,description,imagenum)
res = worker.create_image(username,imagename,containername,description,imagenum)
container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
container['image'] = imagename
break
@ -293,7 +296,7 @@ class VclusterMgr(object):
return [False, "cluster is still running, you need to stop it and then delete"]
ips = []
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
worker.delete_container(container['containername'])
@ -320,7 +323,7 @@ class VclusterMgr(object):
new_containers = []
for container in info['containers']:
if container['containername'] == containername:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
worker.delete_container(containername)
@ -375,7 +378,7 @@ class VclusterMgr(object):
except:
return [False, "start cluster failed with setting proxy failed"]
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
worker.start_container(container['containername'])
@ -390,7 +393,7 @@ class VclusterMgr(object):
if not status:
return [False, "cluster not found"]
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
worker.mount_container(container['containername'])
@ -416,7 +419,7 @@ class VclusterMgr(object):
return [False, "start cluster failed with setting proxy failed"]
# recover containers of this cluster
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
worker.recover_container(container['containername'])
@ -435,7 +438,7 @@ class VclusterMgr(object):
else:
proxytool.delete_route("/" + info['proxy_server_ip'] + '/go/'+username+'/'+clustername)
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
worker.stop_container(container['containername'])
@ -453,7 +456,7 @@ class VclusterMgr(object):
if info['status'] == 'running':
return [False, 'cluster is running, please stop it first']
for container in info['containers']:
worker = self.nodemgr.ip_to_rpc(container['host'])
worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
if worker is None:
return [False, "The worker can't be found or has been stopped."]
worker.detach_container(container['containername'])
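One caveat with this file: xmlrpc.client.ServerProxy never returns None, so the `if worker is None` guards above can no longer fire; a dead worker now surfaces as an exception at call time instead. A small helper could remove the dozen copies of the construction and restore a meaningful missing-host check (a sketch, not part of the commit):

import xmlrpc.client

def get_worker_rpc(host_ip, worker_port):
    # return None for an unknown host so callers' None checks stay useful
    if not host_ip:
        return None
    return xmlrpc.client.ServerProxy("http://%s:%s" % (host_ip, worker_port))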

View File

@ -182,12 +182,12 @@ class Worker(object):
def sendheartbeat(self):
while(True):
# send a heartbeat package periodically (interval set below)
time.sleep(1)
time.sleep(20)
[status, value] = self.etcd.getkey("machines/runnodes/"+self.addr)
if status:
# master knows this worker, so start sending heartbeat packages
if value=='ok':
self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 2)
self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 60)
else:
logger.error("get key %s failed, master crashed or initialized. restart worker please." % self.addr)
sys.exit(1)
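The heartbeat loosens from a 1 s beat with a 2 s TTL to a 20 s beat with a 60 s TTL: a worker now survives two missed beats before etcd expires its key, at the cost of slower dead-worker detection. The timing contract as a sketch (setkey's signature is taken from the diff; the stop event is an assumption):

import time

HEARTBEAT_INTERVAL = 20  # seconds between key refreshes
KEY_TTL = 60             # etcd drops the key after this much silence

def heartbeat_loop(etcd, addr, stop):
    # stop is a threading.Event; TTL = 3 intervals tolerates 2 lost beats
    while not stop.is_set():
        time.sleep(HEARTBEAT_INTERVAL)
        etcd.setkey("machines/runnodes/" + addr, "ok", ttl=KEY_TTL)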

View File

@ -5,14 +5,18 @@ NGINX_PORT=8080
PROXY_PORT=8000
WEB_PORT=8888
NGINX_CONF=/etc/nginx
DOCKLET_CONF=../conf
toolsdir=${0%/*}
DOCKLET_TOOLS=$(cd $toolsdir; pwd)
DOCKLET_HOME=${DOCKLET_TOOLS%/*}
DOCKLET_CONF=$DOCKLET_HOME/conf
. $DOCKLET_CONF/docklet.conf
NGINX_CONF=${NGINX_CONF}/sites-enabled
echo "copy nginx_docklet.conf to nginx config path..."
cp ../conf/nginx_docklet.conf ${NGINX_CONF}/
cp $DOCKLET_CONF/nginx_docklet.conf ${NGINX_CONF}/
sed -i "s/%MASTER_IP/${MASTER_IP}/g" ${NGINX_CONF}/nginx_docklet.conf
sed -i "s/%NGINX_PORT/${NGINX_PORT}/g" ${NGINX_CONF}/nginx_docklet.conf
if [ "${DISTRIBUTED_GATEWAY}" = "True" ]

View File

@ -24,22 +24,6 @@
<div>
{% block content %}
<ul class="nav nav-tabs" role="tablist" id="myTabs">
{% for master in allimages %}
{% if loop.index == 1 %}
<li role="presentation" class="active"><a href="#{{master.split("@")[1]}}" data-toggle="tab" aria-controls="{{master.split("@")[1]}}">{{master.split("@")[1]}}</a></li>
{% else %}
<li role="presentation"><a href="#{{master.split("@")[1]}}" data-toggle="tab" aria-controls="{{master.split("@")[1]}}">{{master.split("@")[1]}}</a></li>
{% endif %}
{% endfor %}
</ul>
<div id="myTabContent" class="tab-content">
{% for master in allimages %}
{% if loop.index == 1 %}
<div role="tabpanel" class="tab-pane active" aria-labelledby="{{master.split("@")[1]}}" id="{{master.split("@")[1]}}">
{% else %}
<div role="tabpanel" class="tab-pane" aria-labelledby="{{master.split("@")[1]}}" id="{{master.split("@")[1]}}">
{% endif %}
<div class="row">
<div class="col-lg-12">
<div class="box box-info">
@ -53,16 +37,24 @@
</div>
</div>
<div class="box-body">
<form id="form" class="form-horizontal" action="/workspace/add/{{master.split("@")[0]}}/" method="POST">
<form id="form" class="form-horizontal" action="/workspace/add/{{masterips[0].split("@")[0]}}/" method="POST">
<div class="form-group"><label class="col-sm-2 control-label">Workspace Name</label>
<div class="col-sm-10"><input type="text" class="form-control" name="clusterName" id="clusterName"></div>
</div>
<div class="hr-line-dashed"></div>
<br/>
<div class="form-group"><label class="col-sm-2 control-label">Center Choose</label>
<div class="col-sm-10"><select id="masterselector" class="form-control">
{% for master in masterips %}
<option value="{{master.split("@")[0]}}">{{master.split("@")[1]}}</option>
{% endfor %}
</select></div>
</div>
<br/>
<div class="form-group"><label class="col-sm-2 control-label">Image Choose</label>
<div class="col-sm-10">
<table class="table table-striped table-bordered table-hover table-image" >
<table id="imagetable" class="table table-striped table-bordered table-hover table-image" >
<thead>
<tr>
<th>ImageName</th>
@ -80,22 +72,22 @@
<td>A base image for you</td>
<td><div class="i-checks"><label><input type="radio" name="image" value="base_base_base" checked="checked"></label></div></td>
</tr>
{% for image in allimages[master]['private'] %}
{% for image in images['private'] %}
<tr>
<td>{{image['name']}}</td>
<td>private</td>
<td>{{user}}</td>
<td><a href="/image/description/{{image['name']}}_{{user}}_private/{{master.split("@")[1]}}/" target="_blank">{{image['description']}}</a></td>
<td><a href="/image/description/{{image['name']}}_{{user}}_private/{{masterips[0].split("@")[1]}}/" target="_blank">{{image['description']}}</a></td>
<td><div class="i-checks"><label><input type="radio" name="image" value="{{image['name']}}_{{user}}_private"></label></div></td>
</tr>
{% endfor %}
{% for p_user,p_images in allimages[master]['public'].items() %}
{% for p_user,p_images in images['public'].items() %}
{% for image in p_images %}
<tr>
<td>{{image['name']}}</td>
<td>public</td>
<td>{{p_user}}</td>
<td><a href="/image/description/{{image['name']}}_{{p_user}}_public/{{master.split("@")[1]}}/" target="_blank">{{image['description']}}</a></td>
<td><a href="/image/description/{{image['name']}}_{{p_user}}_public/{{masterips[0].split("@")[1]}}/" target="_blank">{{image['description']}}</a></td>
<td><div class="i-checks"><label><input type="radio" name="image" value="{{image['name']}}_{{p_user}}_public"></label></div></td>
</tr>
{% endfor %}
@ -106,16 +98,16 @@
</div>
<div class="hr-line-dashed"></div>
<div class="panel-group" id="accordion_{{master.split("@")[1]}}">
<div class="panel-group" id="accordion">
<div class="panel panel-default">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-panel="#accordion_{{master.split("@")[1]}}" href="#collapseOne_{{master.split("@")[1]}}">
<a data-toggle="collapse" data-panel="#accordion" href="#collapseOne">
show advanced options
</a>
</h4>
</div>
<div id="collapseOne_{{master.split("@")[1]}}" class="panel-collapse collapse">
<div id="collapseOne" class="panel-collapse collapse">
<div class="panel-body">
<div class="form-group">
<label class="col-sm-2 control-label">CPU</label>
@ -150,9 +142,7 @@
</div>
</div>
</div>
</div>
{% endfor %}
</div>
{% endblock %}
@ -174,14 +164,57 @@
<script src="http://cdn.bootcss.com/datatables-tabletools/2.1.5/js/TableTools.min.js"></script>
<script>
$(document).ready(function(){
$(".table-image").DataTable();
$(".table-image").attr("style","");
$('#myTabs a').click(function (e) {
e.preventDefault();
$(this).tab('show');
})
});
<script type="text/javascript">
$("select#masterselector").change(function() {
var masterip=$(this).children('option:selected').val();
console.log(masterip);
document.getElementById("form").action="/workspace/add/"+masterip+"/";
var host = window.location.host;
$.post("http://"+host+"/image/list/"+masterip+"/",{},function(data){
var images = data.images;
var imagehtml =
"<thread>"
+"<tr>"
+"<th>ImageName</th>"
+"<th>Type</th>"
+"<th>Owner</th>"
+"<th>Description</th>"
+"<th>Choose</th>"
+"</tr>"
+"</thead>"
+"<tbody>"
+"<tr>"
+"<td>base</td>"
+"<td>public</td>"
+"<td>docklet</td>"
+"<td>A base image for you</td>"
+'<td><div class="i-checks"><label><input type="radio" name="image" value="base_base_base" checked="checked"></label></div></td>'
+"</tr>";
for(var index in images.private) {
var image = images.private[index];
imagehtml +=
"<tr>"
+"<td>"+image.name+"</td>"
+"<td>private</td>"
+"<td>{{user}}</td>"
+'<td><a href="/image/description/' + image.name + "_{{user}}_private/" + masterip + '/" target="_blank">' + image.description + '</a></td>'
+'<td><div class="i-checks"><label><input type="radio" name="image" value="' + image.name + '_{{user}}_private"><label></div></td>'
+"</tr>";
}
for(var p_user in images.public) {
for(var index in images.public[p_user]) {
var image = images.public[p_user][index];
imagehtml +=
"<tr>"
+"<td>"+image.name+"</td>"
+"<td>public</td>"
+"<td>" + p_user + "</td>"
+'<td><a href="/image/description/' + image.name + "_" + p_user + "_public/" + masterip + '/" target="_blank">' + image.description + '</a></td>'
+'<td><div class="i-checks"><label><input type="radio" name="image" value="' + image.name + "_" + p_user + '_public"></label></div></td>'
+"</tr>";
}
}
$("#imagetable").html(imagehtml);
},"json");
});
</script>
{% endblock %}

View File

@ -34,17 +34,17 @@
{% for clustername, clusterinfo in allclusters[master].items() %}
<div class="row">
<div class="col-md-12">
<div class="box box-info">
<div class="box box-info collapsed-box">
<div class="box-header with-border">
<h3 class="box-title">WorkSpace Name: {{ clustername }} <strong>@ {{master.split("@")[1]}}</strong></h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-plus"></i>
</button>
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
</div>
</div>
<div class="box-body">
<div class="box-body" style="display:none">
<div class="row">
<div class="col-md-12">
<div class="box box-info">
@ -258,16 +258,16 @@
{% endfor %}
<div class="row">
<div class="col-lg-12">
<div class="box box-info">
<div class="box box-info collapsed-box">
<div class="box-header with-border">
<h3 class="box-title">Image Info</h3>
<div class="box-tools pull-right">
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-plus"></i>
</button>
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
</div>
</div>
<div class="box-body">
<div class="box-body" style="display:none">
<table class="table table-striped table-bordered table-hover table-image" >
<thead>

View File

@ -16,7 +16,22 @@
{% endblock %}
{% block content %}
<ul class="nav nav-tabs" role="tablist" id="myTabs">
{% for master in allmachines %}
{% if loop.index == 1%}
<li role="presentation" class="active"><a href="#{{master.split("@")[1]}}" data-toggle="tab" aria-controls="{{master.split("@")[1]}}">{{master.split("@")[1]}}</a></li>
{% else %}
<li role="presentation"><a href="#{{master.split("@")[1]}}" data-toggle="tab" aria-controls="{{master.split("@")[1]}}">{{master.split("@")[1]}}</a></li>
{% endif %}
{% endfor %}
</ul>
<div id="myTabContent" class="tab-content">
{% for master in allmachines %}
{% if loop.index == 1 %}
<div role="tabpanel" class="tab-pane active" aria-labelledby="{{master.split("@")[1]}}" id="{{master.split("@")[1]}}">
{% else %}
<div role="tabpanel" class="tab-pane" aria-labelledby="{{master.split("@")[1]}}" id="{{master.split("@")[1]}}">
{% endif %}
<div class="row">
<div class="col-md-12">
<div class="box box-info">
@ -75,6 +90,7 @@
</div>
</div>
</div>
</div>
{% endfor %}
{% endblock %}
@ -85,6 +101,7 @@
var MB = 1024;
$.post(url+"/status/"+masterip+"/",{},function(data){
console.log(data);
var status = data.monitor.status;
if(status == 'RUNNING')
{

View File

@ -226,6 +226,18 @@ def deleteproxy(clustername,masterip):
deleteproxyView.masterip = masterip
return deleteproxyView.as_view()
@app.route("/image/list/<masterip>/", methods=['POST'])
@login_required
def image_list(masterip):
data = {
"user": session['username']
}
path = request.path[:request.path.rfind("/")]
path = path[:path.rfind("/")+1]
result = dockletRequest.post(path, data, masterip)
logger.debug("image" + str(type(result)))
return json.dumps(result)
@app.route("/image/description/<image>/<masterip>/", methods=['GET'])
@login_required
def descriptionImage(image,masterip):
@ -316,6 +328,7 @@ def monitor_request(comid,infotype,masterip):
path = path[:path.rfind("/")+1]
logger.debug(path + "_____" + masterip)
result = dockletRequest.post(path, data, masterip)
logger.debug("monitor" + str(type(result)))
return json.dumps(result)
@app.route("/beans/application/", methods=['GET'])
@ -632,4 +645,4 @@ if __name__ == '__main__':
elif opt in ("-p", "--port"):
webport = int(arg)
app.run(host = webip, port = webport, threaded=True,)
app.run(host = webip, port = webport, threaded=True)
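In the new /image/list/<masterip>/ route above, the two rfind slices strip the trailing masterip segment so the request forwarded to the master uses the bare resource path. The transformation in isolation (a sketch):

def strip_master_segment(path):
    # '/image/list/10.0.0.2/' -> '/image/list/'
    path = path[:path.rfind("/")]       # '/image/list/10.0.0.2'
    return path[:path.rfind("/") + 1]   # '/image/list/'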

View File

@ -10,10 +10,8 @@ class addClusterView(normalView):
@classmethod
def get(self):
result = dockletRequest.post_to_all("/image/list/")
allimages={}
for master in result:
allimages[master] = result[master].get("images")
masterips = dockletRequest.post_to_all()
images = dockletRequest.post("/image/list/",{},masterips[0].split("@")[0]).get("images")
result = dockletRequest.post("/user/usageQuery/")
quota = result.get("quota")
usage = result.get("usage")
@ -48,7 +46,7 @@ class addClusterView(normalView):
'disk': defaultdisk
}
if (result):
return self.render(self.template_path, user = session['username'], allimages = allimages, quota = quota, usage = usage, defaultsetting = defaultsetting)
return self.render(self.template_path, user = session['username'], masterips = masterips, images = images, quota = quota, usage = usage, defaultsetting = defaultsetting)
else:
self.error()

View File

@ -59,7 +59,15 @@ class dockletRequest():
@classmethod
def post_to_all(self, url = '/', data={}):
if (url == '/'):
return masterips
res = []
for masterip in masterips:
try:
requests.post("http://"+getip(masterip)+":"+master_port+"/isalive/",data=data)
except Exception as e:
logger.debug(e)
continue
res.append(masterip)
return res
data = dict(data)
data['token'] = session['token']
logger.info("Docklet Request: user = %s data = %s, url = %s"%(session['username'], data, url))