Merge branch 'master' into network-ip-check

leebaok 2016-05-06 16:56:53 +08:00
commit 5bbbe705b4
19 changed files with 724 additions and 616 deletions

View File

@ -3,7 +3,6 @@
[ $(id -u) != '0' ] && echo "root is needed" && exit 1
# get some path of docklet
bindir=${0%/*}
# $bindir may be something like /opt/docklet/src/../sbin
# use the command below to normalize $bindir to an absolute path

View File

@ -43,8 +43,6 @@ PIDFILE=$RUN_DIR/$DAEMON_NAME.pid
###########
pre_start () {
log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
[ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global
[ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local
[ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users
@ -81,6 +79,7 @@ pre_start () {
do_start() {
pre_start
log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
log_end_msg $?
}
@ -93,6 +92,7 @@ do_stop () {
case "$1" in
start)
do_start
@ -115,7 +115,6 @@ case "$1" in
status)
status_of_proc -p $PIDFILE "$DAEMON" "$DAEMON_NAME" && exit 0 || exit $?
;;
*)
echo "Usage: $DAEMON_NAME {start|stop|restart|status}"
exit 1

View File

@ -198,5 +198,4 @@ class Client(object):
else:
return [False, 'you are not lock holder']
else:
return [False, 'no one holds this lock']

File diff suppressed because it is too large

View File

@ -68,7 +68,7 @@ class Container_Collector(threading.Thread):
else:
self.cpu_quota[container_name] = tmp/100000.0
quota = {'cpu':self.cpu_quota[container_name],'memory':self.mem_quota[container_name]}
logger.info(quota)
#logger.info(quota)
self.etcdser.setkey('/vnodes/%s/quota'%(container_name),quota)
else:
logger.error("Cant't find config file %s"%(confpath))
@ -114,7 +114,7 @@ class Container_Collector(threading.Thread):
if(self.collect_containerinfo(container)):
countR += 1
except Exception as err:
#pass
logger.warning(traceback.format_exc())
logger.warning(err)
containers_num = len(containers)-1
concnt = {}
@ -195,16 +195,20 @@ class Collector(threading.Thread):
diskval = {}
diskval['device'] = part.device
diskval['mountpoint'] = part.mountpoint
usage = psutil.disk_usage(part.mountpoint)
diskval['total'] = usage.total
diskval['used'] = usage.used
diskval['free'] = usage.free
diskval['percent'] = usage.percent
if(part.mountpoint.startswith('/opt/docklet/local/volume')):
names = re.split('/',part.mountpoint)
container = names[len(names)-1]
self.vetcdser.setkey('/%s/disk_use'%(container), diskval)
setval.append(diskval)
try:
usage = psutil.disk_usage(part.mountpoint)
diskval['total'] = usage.total
diskval['used'] = usage.used
diskval['free'] = usage.free
diskval['percent'] = usage.percent
if(part.mountpoint.startswith('/opt/docklet/local/volume')):
names = re.split('/',part.mountpoint)
container = names[len(names)-1]
self.vetcdser.setkey('/%s/disk_use'%(container), diskval)
setval.append(diskval)
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
self.etcdser.setkey('/diskinfo', setval)
#print(output)
#print(diskparts)

View File

@ -13,7 +13,7 @@ import env
# 2. update node list when new node joins
# ETCD table :
# machines/allnodes -- all nodes in docklet, for recovery
# machines/runnodes -- run nodes of this startup
##############################################
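# For reference, a sketch of the machines/ keyspace as it is used below
# (states inferred from this file and worker.py; an assumption, not a spec):
#   machines/allnodes/<ip> = "ok"            -- every node ever registered, kept for recovery
#   machines/runnodes/<ip> = "waiting"       -- worker asks to join
#                          | "init-new"      -- master: unknown worker, do a fresh init
#                          | "init-recovery" -- master: known worker, recover old state
#                          | "work"          -- worker finished init
#                          | "ok"            -- master accepted; worker refreshes it with ttl=2s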
class NodeMgr(object):
def __init__(self, networkmgr, etcdclient, addr, mode):
@ -22,6 +22,7 @@ class NodeMgr(object):
self.networkmgr = networkmgr
self.etcd = etcdclient
self.mode = mode
self.workerport = env.getenv('WORKER_PORT')
# initialize the network
logger.info ("initialize network")
@ -45,21 +46,29 @@ class NodeMgr(object):
logger.error("docklet-br not found")
sys.exit(1)
# get allnodes
self.allnodes = self._nodelist_etcd("allnodes")
self.runnodes = self._nodelist_etcd("runnodes")
logger.info ("all nodes are: %s" % self.allnodes)
logger.info ("run nodes are: %s" % self.runnodes)
if len(self.runnodes)>0:
logger.error ("init runnodes is not null, need to be clean")
sys.exit(1)
# init rpc list
self.rpcs = []
# get allnodes
self.allnodes = self._nodelist_etcd("allnodes")
self.runnodes = []
[status, runlist] = self.etcd.listdir("machines/runnodes")
for node in runlist:
nodeip = node['key'].rsplit('/',1)[1]
if node['value'] == 'ok':
logger.info ("running node %s" % nodeip)
self.runnodes.append(nodeip)
self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s" % (nodeip, self.workerport)))
logger.info ("add %s:%s in rpc client list" % (nodeip, self.workerport))
logger.info ("all nodes are: %s" % self.allnodes)
logger.info ("run nodes are: %s" % self.runnodes)
# start new thread to watch whether a new node joins
logger.info ("start thread to watch new nodes ...")
self.thread_watchnewnode = threading.Thread(target=self._watchnewnode)
self.thread_watchnewnode.start()
# wait for all nodes to join
while(True):
allin = True
for node in self.allnodes:
@ -73,7 +82,7 @@ class NodeMgr(object):
logger.info ("run nodes are: %s" % self.runnodes)
# get nodes list from etcd table
def _nodelist_etcd(self, which):
if which == "allnodes" or which == "runnodes":
[status, nodeinfo]=self.etcd.listdir("machines/"+which)
@ -86,36 +95,16 @@ class NodeMgr(object):
# thread target : watch whether a new node joins
def _watchnewnode(self):
workerport = env.getenv('WORKER_PORT')
while(True):
time.sleep(0.1)
[status, runlist] = self.etcd.listdir("machines/runnodes")
if not status:
logger.warning ("get runnodes list failed from etcd ")
logger.warning ("get runnodes list failed from etcd ")
continue
for node in runlist:
nodeip = node['key'].rsplit('/',1)[1]
if node['value']=='waiting':
logger.info ("%s want to joins, call it to init first" % nodeip)
# 'docklet-br' of worker do not need IP Addr. Not need to allocate an IP to it
#if nodeip != self.addr:
# [status, result] = self.networkmgr.acquire_sysips_cidr()
# self.networkmgr.printpools()
# if not status:
# logger.error("no IP for worker bridge, please check network system pool")
# continue
# bridgeip = result[0]
# self.etcd.setkey("network/workbridge", bridgeip)
if nodeip in self.allnodes:
######## HERE MAYBE NEED TO FIX ###############
# here we must use "machines/runnodes/"+nodeip;
# we cannot use node['key'] because it is an absolute
# path and the etcd client appends the given path to
# its prefix, which would double the prefix
###############################################
self.etcd.setkey("machines/runnodes/"+nodeip, "init-"+self.mode)
else:
self.etcd.setkey('machines/runnodes/'+nodeip, "init-new")
elif node['value']=='work':
logger.info ("new node %s joins" % nodeip)
# setup GRE tunnels for new nodes
@ -127,18 +116,18 @@ class NodeMgr(object):
logger.debug("GRE for %s already exists, reuse it" % nodeip)
else:
netcontrol.setup_gre('docklet-br', nodeip)
self.runnodes.append(nodeip)
self.etcd.setkey("machines/runnodes/"+nodeip, "ok")
if nodeip not in self.allnodes:
self.allnodes.append(nodeip)
self.etcd.setkey("machines/allnodes/"+nodeip, "ok")
logger.debug ("all nodes are: %s" % self.allnodes)
logger.debug ("run nodes are: %s" % self.runnodes)
self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s"
% (nodeip, workerport)))
logger.info ("add %s:%s in rpc client list" %
(nodeip, workerport))
if nodeip not in self.runnodes:
self.runnodes.append(nodeip)
if nodeip not in self.allnodes:
self.allnodes.append(nodeip)
self.etcd.setkey("machines/allnodes/"+nodeip, "ok")
logger.debug ("all nodes are: %s" % self.allnodes)
logger.debug ("run nodes are: %s" % self.runnodes)
self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s"
% (nodeip, self.workerport)))
logger.info ("add %s:%s in rpc client list" %
(nodeip, self.workerport))
# get all run nodes' IP addr
def get_nodeips(self):
return self.allnodes
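On the caveat flagged in _watchnewnode above: the etcd client joins its cluster-name prefix to whatever path it is given (see generatekey in worker.py below), so passing the absolute node['key'] back into setkey would double the prefix. A minimal sketch of that failure mode, using a hypothetical wrapper rather than the project's etcdlib:

class EtcdWrapper:
    # hypothetical stand-in that mimics the prefixing described above
    def __init__(self, prefix):
        self.prefix = prefix                          # e.g. "/mycluster"
    def setkey(self, path, value):
        fullkey = self.prefix + '/' + path            # path is always appended
        print("write %s = %s" % (fullkey, value))

etcd = EtcdWrapper("/mycluster")
etcd.setkey("machines/runnodes/10.0.0.2", "ok")
# -> /mycluster/machines/runnodes/10.0.0.2                  (correct)
etcd.setkey("/mycluster/machines/runnodes/10.0.0.2", "ok")
# -> /mycluster//mycluster/machines/runnodes/10.0.0.2       (prefix doubled, wrong)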

View File

@ -158,7 +158,7 @@ class userManager:
if not os.path.exists(fspath+"/global/sys/quotainfo"):
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotas = {}
quotas['default'] = 'fundation'
quotas['quotainfo'] = []
quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'})
quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB , e.g. 4000'})
@ -170,7 +170,6 @@ class userManager:
quotafile.write(json.dumps(quotas))
quotafile.close()
def auth_local(self, username, password):
password = hashlib.sha512(password.encode('utf-8')).hexdigest()
@ -403,7 +402,7 @@ class userManager:
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"groupinfo": group,
"groupinfo": group,
},
}
return result
@ -415,8 +414,8 @@ class userManager:
Modify information for oneself
'''
form = kwargs['newValue']
name = form.getvalue('name', None)
value = form.getvalue('value', None)
name = form.get('name', None)
value = form.get('value', None)
if (name == None or value == None):
result = {'success': 'false'}
return result
@ -543,13 +542,13 @@ class userManager:
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == kwargs['newValue'].getvalue('groupname',None):
if group['name'] == kwargs['newValue'].get('groupname',None):
form = kwargs['newValue']
for key in form.keys():
if key == "groupname" or key == "token":
pass
else:
group['quotas'][key] = form.getvalue(key)
group['quotas'][key] = form.get(key)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
@ -564,28 +563,28 @@ class userManager:
will send an e-mail when status is changed from 'applying' to 'normal'
Usage: modify(newValue = dict_from_form, cur_user = token_from_auth)
'''
user_modify = User.query.filter_by(username = kwargs['newValue'].getvalue('username', None)).first()
user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first()
if (user_modify == None):
return {"success":'false', "reason":"User does not exist"}
#try:
form = kwargs['newValue']
user_modify.truename = form.getvalue('truename', '')
user_modify.e_mail = form.getvalue('e_mail', '')
user_modify.department = form.getvalue('department', '')
user_modify.student_number = form.getvalue('student_number', '')
user_modify.tel = form.getvalue('tel', '')
user_modify.user_group = form.getvalue('group', '')
user_modify.auth_method = form.getvalue('auth_method', '')
if (user_modify.status == 'applying' and form.getvalue('status', '') == 'normal'):
user_modify.truename = form.get('truename', '')
user_modify.e_mail = form.get('e_mail', '')
user_modify.department = form.get('department', '')
user_modify.student_number = form.get('student_number', '')
user_modify.tel = form.get('tel', '')
user_modify.user_group = form.get('group', '')
user_modify.auth_method = form.get('auth_method', '')
if (user_modify.status == 'applying' and form.get('status', '') == 'normal'):
send_activated_email(user_modify.e_mail, user_modify.username)
user_modify.status = form.getvalue('status', '')
if (form.getvalue('Chpassword', '') == 'Yes'):
new_password = form.getvalue('password','no_password')
user_modify.status = form.get('status', '')
if (form.get('Chpassword', '') == 'Yes'):
new_password = form.get('password','no_password')
new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
user_modify.password = new_password
#self.chpassword(cur_user = user_modify, password = form.getvalue('password','no_password'))
#self.chpassword(cur_user = user_modify, password = form.get('password','no_password'))
db.session.commit()
return {"success":'true'}
@ -610,7 +609,7 @@ class userManager:
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
user_new.user_group = quotas['default']
user_new.avatar = 'default.png'
return user_new
@ -643,9 +642,9 @@ class userManager:
@administration_required
def quotaadd(*args, **kwargs):
form = kwargs.get('form')
quotaname = form.getvalue("quotaname")
default_value = form.getvalue("default_value")
hint = form.getvalue("hint")
quotaname = form.get("quotaname")
default_value = form.get("default_value")
hint = form.get("hint")
if (quotaname == None):
return { "success":'false', "reason": "Empty quota name"}
if (default_value == None):
@ -670,7 +669,7 @@ class userManager:
@administration_required
def groupadd(*args, **kwargs):
form = kwargs.get('form')
groupname = form.getvalue("groupname")
groupname = form.get("groupname")
if (groupname == None):
return {"success":'false', "reason": "Empty group name"}
groupfile = open(fspath+"/global/sys/quota",'r')
@ -684,13 +683,13 @@ class userManager:
if key == "groupname" or key == "token":
pass
else:
group['quotas'][key] = form.getvalue(key)
group['quotas'][key] = form.get(key)
groups.append(group)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
@administration_required
def groupdel(*args, **kwargs):
name = kwargs.get('name', None)
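A note on the getvalue -> get changes throughout this file: cgi.FieldStorage exposes getvalue(name, default), while the dict-like form object used after this change (e.g. a plain dict or a werkzeug MultiDict) exposes get(name, default). A minimal sketch of the two access styles, with hypothetical field values:

# dict-style access, as used after this change
form = {"username": "alice", "tel": "123"}
username = form.get("username", None)     # -> "alice"
group = form.get("group", "")             # missing key -> default ""

# the older cgi.FieldStorage style, for comparison:
#   import cgi
#   form = cgi.FieldStorage()
#   username = form.getvalue("username", None)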

View File

@ -12,6 +12,7 @@ from log import logger
import xmlrpc.server, sys, time
from socketserver import ThreadingMixIn
import threading
import etcdlib, network, container
from nettools import netcontrol
import monitor
@ -32,6 +33,11 @@ from lvmtool import *
# start rpc service
##################################################################
# imitate etcdlib to generate the full etcd key manually
def generatekey(path):
clustername = env.getenv("CLUSTER_NAME")
return '/'+clustername+'/'+path
class ThreadXMLRPCServer(ThreadingMixIn,xmlrpc.server.SimpleXMLRPCServer):
pass
@ -48,30 +54,31 @@ class Worker(object):
self.master = self.etcd.getkey("service/master")[1]
self.mode=None
# register self to master
self.etcd.setkey("machines/runnodes/"+self.addr, "waiting")
for f in range (0, 3):
[status, value] = self.etcd.getkey("machines/runnodes/"+self.addr)
if not value.startswith("init"):
# master wakes up every 0.1s to check registrations
logger.debug("worker %s registering to master failed %d \
times, sleep %fs" % (self.addr, f+1, 0.1))
time.sleep(0.1)
else:
break
if value.startswith("init"):
# check token to check global directory
[status, token_1] = self.etcd.getkey("token")
tokenfile = open(self.fspath+"/global/token", 'r')
token_2 = tokenfile.readline().strip()
if token_1 != token_2:
logger.error("check token failed, global directory is not a shared filesystem")
sys.exit(1)
[status, key] = self.etcd.getkey("machines/runnodes/"+self.addr)
if status:
self.key = generatekey("machines/allnodes/"+self.addr)
else:
logger.error ("worker register in machines/runnodes failed, maybe master not start")
logger.error("get key failed. %s" % node)
sys.exit(1)
logger.info ("worker registered in master and checked the token")
# check token to check global directory
[status, token_1] = self.etcd.getkey("token")
tokenfile = open(self.fspath+"/global/token", 'r')
token_2 = tokenfile.readline().strip()
if token_1 != token_2:
logger.error("check token failed, global directory is not a shared filesystem")
sys.exit(1)
logger.info ("worker registered and checked the token")
# worker searches machines/allnodes to decide how to init
value = 'init-new'
[status, alllist] = self.etcd.listdir("machines/allnodes")
for node in alllist:
if node['key'] == self.key:
value = 'init-recovery'
break
logger.info("worker start in "+value+" mode")
Containers = container.Container(self.addr, etcdclient)
if value == 'init-new':
@ -146,10 +153,25 @@ class Worker(object):
# start service of worker
def start(self):
self.etcd.setkey("machines/runnodes/"+self.addr, "work")
self.thread_sendheartbeat = threading.Thread(target=self.sendheartbeat)
self.thread_sendheartbeat.start()
# start serving for rpc
logger.info ("begins to work")
self.rpcserver.serve_forever()
# send heartbeat packets to stay alive in etcd, ttl=2s
def sendheartbeat(self):
while(True):
# send a heartbeat packet every 1s
time.sleep(1)
[status, value] = self.etcd.getkey("machines/runnodes/"+self.addr)
if status:
# master has acknowledged the worker, so we start sending heartbeats
if value=='ok':
self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 2)
else:
logger.error("get key %s failed, master crashed or initialized. restart worker please." % self.addr)
sys.exit(1)
if __name__ == '__main__':
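The liveness scheme added above is a standard TTL-refresh pattern: the worker rewrites machines/runnodes/<ip> with a 2s TTL every 1s, so one missed refresh is tolerated but a dead worker's key expires and drops out of the master's runnodes listing. A self-contained sketch of the pattern against a hypothetical in-memory store (not the project's etcdlib):

import time, threading

class TTLStore:
    # hypothetical in-memory stand-in for etcd with per-key TTL
    def __init__(self):
        self.data = {}                                # key -> (value, expire_at)
    def setkey(self, key, value, ttl=None):
        expire = time.time() + ttl if ttl else None
        self.data[key] = (value, expire)
    def getkey(self, key):
        value, expire = self.data.get(key, (None, None))
        if value is None or (expire is not None and time.time() > expire):
            return [False, None]                      # missing or expired
        return [True, value]

def sendheartbeat(store, key):
    # refresh every 1s with ttl=2s; stop when the key is gone (master reset)
    while store.getkey(key)[0]:
        store.setkey(key, "ok", ttl=2)
        time.sleep(1)

store = TTLStore()
store.setkey("machines/runnodes/10.0.0.2", "ok", ttl=2)
threading.Thread(target=sendheartbeat,
                 args=(store, "machines/runnodes/10.0.0.2"), daemon=True).start()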

View File

@ -158,12 +158,12 @@ var host = window.location.host;
var node_name = $("#node_name").html();
var url = "http://" + host + "/monitor/vnodes/" + node_name;
plot_graph($("#mem-chart"),url + "/mem_use",processMemData,getMemY);
plot_graph($("#cpu-chart"),url + "/cpu_use",processCpuData,getCpuY);
plot_graph($("#mem-chart"),url + "/mem_use/",processMemData,getMemY);
plot_graph($("#cpu-chart"),url + "/cpu_use/",processCpuData,getCpuY);
function processDiskData()
{
$.post(url+"/disk_use",{},function(data){
$.post(url+"/disk_use/",{},function(data){
var diskuse = data.monitor.disk_use;
var usedp = diskuse.percent;
var total = diskuse.total/1024.0/1024.0;

View File

@ -190,8 +190,8 @@ var host = window.location.host;
var com_ip = $("#com_ip").html();
var url = "http://" + host + "/monitor/hosts/"+com_ip;
plot_graph($("#mem-chart"), url + "/meminfo",processMemData,getMemY);
plot_graph($("#cpu-chart"), url + "/cpuinfo",processCpuData,getCpuY);
plot_graph($("#mem-chart"), url + "/meminfo/",processMemData,getMemY);
plot_graph($("#cpu-chart"), url + "/cpuinfo/",processCpuData,getCpuY);
//plot_graph($("#disk-chart"), url + "/diskinfo",processDiskData,getDiskY);
$.post(url+"/diskinfo",{user:"root",key:"unias"},processDiskData,"json");
$.post(url+"/diskinfo/",{user:"root",key:"unias"},processDiskData,"json");

View File

@ -78,7 +78,7 @@
{
var MB = 1024;
$.post(url+"/status",{},function(data){
$.post(url+"/status/",{},function(data){
var status = data.monitor.status;
if(status == 'RUNNING')
{
@ -95,7 +95,7 @@
tmp.html("Stopped");
}
$.post(url+"/containers",{},function(data){
$.post(url+"/containers/",{},function(data){
var containers = data.monitor.containers;
$("#"+index+"_contotal").html(containers.total);
$("#"+index+"_conrunning").html(containers.running);
@ -109,20 +109,20 @@
return;
}
$.post(url+"/cpuinfo",{},function(data){
$.post(url+"/cpuinfo/",{},function(data){
var idle = data.monitor.cpuinfo.idle;
var usedp = (100 - idle).toFixed(2);
$("#"+index+"_cpu").html(String(usedp)+"%");
},"json");
$.post(url+"/meminfo",{},function(data){
$.post(url+"/meminfo/",{},function(data){
var used = data.monitor.meminfo.used;
var total = data.monitor.meminfo.total;
var usedp = String(((used/total)*100).toFixed(2))+"%";
$("#"+index+"_mem").html(usedp);
},"json");
$.post(url+"/diskinfo",{},function(data){
$.post(url+"/diskinfo/",{},function(data){
var val = data.monitor.diskinfo;
var usedp = val[0].percent;
$("#"+index+"_disk").html(String(usedp)+"%");

View File

@ -85,7 +85,7 @@
function update(url,index)
{
$.post(url+"/basic_info",{},function(data){
$.post(url+"/basic_info/",{},function(data){
var state = data.monitor.basic_info.State;
if(state == 'RUNNING')
{
@ -109,13 +109,13 @@
return;
}
$.post(url+"/cpu_use",{},function(data){
$.post(url+"/cpu_use/",{},function(data){
var val = data.monitor.cpu_use.val;
var unit = data.monitor.cpu_use.unit;
$("#"+index+"_cpu").html(val +" "+ unit);
},"json");
$.post(url+"/mem_use",{},function(data){
$.post(url+"/mem_use/",{},function(data){
var val = data.monitor.mem_use.val;
var unit = data.monitor.mem_use.unit
$("#"+index+"_mem").html(val+" "+unit);

View File

@ -122,7 +122,7 @@
function update(url,index)
{
$.post(url+"/basic_info",{},function(data){
$.post(url+"/basic_info/",{},function(data){
var state = data.monitor.basic_info.State;
if(state == 'RUNNING')
{
@ -146,7 +146,7 @@
return;
}
$.post(url+"/cpu_use",{},function(data){
$.post(url+"/cpu_use/",{},function(data){
var usedp = data.monitor.cpu_use.usedp;
var quota = data.monitor.cpu_use.quota.cpu;
var quotaout = "("+quota;
@ -157,7 +157,7 @@
$("#"+index+"_cpu").html((usedp/0.01).toFixed(2)+"%<br/>"+quotaout);
},"json");
$.post(url+"/mem_use",{},function(data){
$.post(url+"/mem_use/",{},function(data){
var usedp = data.monitor.mem_use.usedp;
var unit = data.monitor.mem_use.unit;
var quota = data.monitor.mem_use.quota.memory/1024.0;
@ -166,7 +166,7 @@
$("#"+index+"_mem").html((usedp/0.01).toFixed(2)+"%<br/>"+out);
},"json");
$.post(url+"/disk_use",{},function(data){
$.post(url+"/disk_use/",{},function(data){
var diskuse = data.monitor.disk_use;
var usedp = diskuse.percent;
var total = diskuse.total/1024.0/1024.0;

View File

@ -265,8 +265,8 @@ def statusRealtime(vcluster_name,node_name):
statusRealtimeView.node_name = node_name
return statusRealtimeView.as_view()
@app.route("/monitor/hosts/<comid>/<infotype>", methods=['POST'])
@app.route("/monitor/vnodes/<comid>/<infotype>", methods=['POST'])
@app.route("/monitor/hosts/<comid>/<infotype>/", methods=['POST'])
@app.route("/monitor/vnodes/<comid>/<infotype>/", methods=['POST'])
@login_required
def monitor_request(comid,infotype):
data = {
@ -475,4 +475,4 @@ if __name__ == '__main__':
elif opt in ("-p", "--port"):
webport = int(arg)
app.run(host = webip, port = webport, debug = True, threaded=True)
app.run(host = webip, port = webport, threaded=True)
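The trailing slashes added to these routes lean on Flask/werkzeug slash handling: a rule that ends in '/' also answers the unslashed URL by redirecting to the canonical slashed form, whereas an unslashed rule returns 404 when a slash is appended. The JS callers above are updated to post to the slashed URLs directly, since a redirected POST may not replay its body. A minimal sketch of the slashed rule (hypothetical handler body):

import json
from flask import Flask

app = Flask(__name__)

# requests to /monitor/hosts/<comid>/<infotype> (no slash) are redirected here
@app.route("/monitor/hosts/<comid>/<infotype>/", methods=['POST'])
def monitor_request_sketch(comid, infotype):
    return json.dumps({"comid": comid, "infotype": infotype})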

View File

@ -18,19 +18,19 @@ class adminView(normalView):
class groupaddView(normalView):
@classmethod
def post(self):
dockletRequest.post('/user/groupadd', request.form)
dockletRequest.post('/user/groupadd/', request.form)
return redirect('/admin/')
class quotaaddView(normalView):
@classmethod
def post(self):
dockletRequest.post('/user/quotaadd', request.form)
dockletRequest.post('/user/quotaadd/', request.form)
return redirect('/admin/')
class chdefaultView(normalView):
@classmethod
def post(self):
dockletRequest.post('/user/chdefault', request.form)
dockletRequest.post('/user/chdefault/', request.form)
return redirect('/admin/')
class groupdelView(normalView):
@ -39,9 +39,9 @@ class groupdelView(normalView):
data = {
"name" : self.groupname,
}
dockletRequest.post('/user/groupdel', data)
dockletRequest.post('/user/groupdel/', data)
return redirect('/admin/')
@classmethod
def get(self):
return self.post()

View File

@ -72,7 +72,7 @@ class loginView(normalView):
else:
return redirect('/login/')
else:
self.error()
return redirect('/login/')
class logoutView(normalView):

View File

@ -40,7 +40,7 @@ class statusRealtimeView(normalView):
data = {
"user": session['username'],
}
result = dockletRequest.post('/monitor/vnodes/%s/basic_info'%(self.node_name), data)
result = dockletRequest.post('/monitor/vnodes/%s/basic_info/'%(self.node_name), data)
basic_info = result.get('monitor').get('basic_info')
return self.render(self.template_path, node_name = self.node_name, user = session['username'], container = basic_info)
@ -53,11 +53,11 @@ class hostsRealtimeView(normalView):
data = {
"user": session['username'],
}
result = dockletRequest.post('/monitor/hosts/%s/cpuconfig'%(self.com_ip), data)
result = dockletRequest.post('/monitor/hosts/%s/cpuconfig/'%(self.com_ip), data)
proc = result.get('monitor').get('cpuconfig')
result = dockletRequest.post('/monitor/hosts/%s/osinfo'%(self.com_ip), data)
result = dockletRequest.post('/monitor/hosts/%s/osinfo/'%(self.com_ip), data)
osinfo = result.get('monitor').get('osinfo')
result = dockletRequest.post('/monitor/hosts/%s/diskinfo'%(self.com_ip), data)
result = dockletRequest.post('/monitor/hosts/%s/diskinfo/'%(self.com_ip), data)
diskinfos = result.get('monitor').get('diskinfo')
return self.render(self.template_path, com_ip = self.com_ip, user = session['username'],processors = proc, OSinfo = osinfo, diskinfos = diskinfos)
@ -71,13 +71,13 @@ class hostsConAllView(normalView):
data = {
"user": session['username'],
}
result = dockletRequest.post('/monitor/hosts/%s/containerslist'%(self.com_ip), data)
result = dockletRequest.post('/monitor/hosts/%s/containerslist/'%(self.com_ip), data)
containers = result.get('monitor').get('containerslist')
containerslist = []
for container in containers:
result = dockletRequest.post('/monitor/vnodes/%s/basic_info'%(container), data)
result = dockletRequest.post('/monitor/vnodes/%s/basic_info/'%(container), data)
basic_info = result.get('monitor').get('basic_info')
result = dockletRequest.post('/monitor/vnodes/%s/owner'%(container), data)
result = dockletRequest.post('/monitor/vnodes/%s/owner/'%(container), data)
owner = result.get('monitor')
basic_info['owner'] = owner
containerslist.append(basic_info)
@ -91,14 +91,14 @@ class hostsView(normalView):
data = {
"user": session['username'],
}
result = dockletRequest.post('/monitor/listphynodes', data)
result = dockletRequest.post('/monitor/listphynodes/', data)
iplist = result.get('monitor').get('allnodes')
machines = []
for ip in iplist:
containers = {}
result = dockletRequest.post('/monitor/hosts/%s/containers'%(ip), data)
result = dockletRequest.post('/monitor/hosts/%s/containers/'%(ip), data)
containers = result.get('monitor').get('containers')
result = dockletRequest.post('/monitor/hosts/%s/status'%(ip), data)
result = dockletRequest.post('/monitor/hosts/%s/status/'%(ip), data)
status = result.get('monitor').get('status')
machines.append({'ip':ip,'containers':containers, 'status':status})
#print(machines)
@ -112,9 +112,9 @@ class monitorUserAllView(normalView):
data = {
"user": session['username'],
}
result = dockletRequest.post('/monitor/listphynodes', data)
result = dockletRequest.post('/monitor/listphynodes/', data)
userslist = [{'name':'root'},{'name':'libao'}]
for user in userslist:
result = dockletRequest.post('/monitor/user/%s/clustercnt'%(user['name']), data)
result = dockletRequest.post('/monitor/user/%s/clustercnt/'%(user['name']), data)
user['clustercnt'] = result.get('monitor').get('clustercnt')
return self.render(self.template_path, userslist = userslist, user = session['username'])

View File

@ -16,5 +16,5 @@ class userActivateView(normalView):
@classmethod
def post(self):
dockletRequest.post('/register', request.form)
dockletRequest.post('/register/', request.form)
return redirect('/logout/')

View File

@ -19,34 +19,32 @@ class userlistView(normalView):
class useraddView(normalView):
@classmethod
def post(self):
dockletRequest.post('/user/add', request.form)
dockletRequest.post('/user/add/', request.form)
return redirect('/user/list/')
class userdataView(normalView):
@classmethod
def get(self):
return json.dumps(dockletRequest.post('/user/data', request.form))
return json.dumps(dockletRequest.post('/user/data/', request.form))
@classmethod
def post(self):
return json.dumps(dockletRequest.post('/user/data', request.form))
return json.dumps(dockletRequest.post('/user/data/', request.form))
class userqueryView(normalView):
@classmethod
def get(self):
return json.dumps(dockletRequest.post('/user/query', request.form))
return json.dumps(dockletRequest.post('/user/query/', request.form))
@classmethod
def post(self):
return json.dumps(dockletRequest.post('/user/query', request.form))
return json.dumps(dockletRequest.post('/user/query/', request.form))
class usermodifyView(normalView):
@classmethod
def post(self):
try:
dockletRequest.post('/user/modify', request.form)
dockletRequest.post('/user/modify/', request.form)
except:
return self.render('user/mailservererror.html')
return redirect('/user/list/')