From e313f50b5ec4b92f9e86d2ba2fe8eee464f43cba Mon Sep 17 00:00:00 2001 From: ooooo Date: Mon, 18 Apr 2016 18:16:19 +0800 Subject: [PATCH 01/19] add some comment --- bin/docklet-master | 3 ++- src/httprest.py | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bin/docklet-master b/bin/docklet-master index 0d7c489..8203025 100755 --- a/bin/docklet-master +++ b/bin/docklet-master @@ -3,8 +3,9 @@ [ $(id -u) != '0' ] && echo "root is needed" && exit 1 # get some path of docklet - +echo $0 bindir=${0%/*} +echo "bindir=$bindir" # $bindir maybe like /opt/docklet/src/../sbin # use command below to make $bindir in normal absolute path DOCKLET_BIN=$(cd $bindir; pwd) diff --git a/src/httprest.py b/src/httprest.py index 56d6898..1e6dcb3 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -6,6 +6,7 @@ # must first init loadenv import tools, env +# default CONFIG=/opt/docklet/local/docklet-running.conf config = env.getenv("CONFIG") tools.loadenv(config) @@ -23,6 +24,7 @@ import userManager import monitor import guest_control, threading +#default EXTERNAL_LOGIN=False external_login = env.getenv('EXTERNAL_LOGIN') if (external_login == 'TRUE'): from userDependence import external_auth @@ -520,6 +522,7 @@ if __name__ == '__main__': etcdclient.clean() else: etcdclient.createdir("") + # token is saved at fs_path/golbal/token token = tools.gen_token() tokenfile = open(fs_path+"/global/token", 'w') tokenfile.write(token) @@ -583,7 +586,7 @@ if __name__ == '__main__': masterport = env.getenv('MASTER_PORT') logger.info("using MASTER_PORT %d", int(masterport)) -# server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) + # server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) server = ThreadingHttpServer((masterip, int(masterport)), DockletHttpHandler) logger.info("starting master server") server.serve_forever() From 56242920c48e7b224af4819c4896412a1c0d2a0e Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 20 Apr 2016 14:17:12 
+0800 Subject: [PATCH 02/19] test --- bin/docklet-master | 2 -- bin/docklet-worker | 7 ++++- src/etcdlib.py | 11 +++++++- src/httprest.py | 3 --- src/nodemgr.py | 66 ++++++++++++++++++++-------------------------- src/stopworker.py | 13 +++++++++ src/worker.py | 43 +++++++++++++++--------------- 7 files changed, 79 insertions(+), 66 deletions(-) create mode 100755 src/stopworker.py diff --git a/bin/docklet-master b/bin/docklet-master index 954dbda..861bfd3 100755 --- a/bin/docklet-master +++ b/bin/docklet-master @@ -3,9 +3,7 @@ [ $(id -u) != '0' ] && echo "root is needed" && exit 1 # get some path of docklet -echo $0 bindir=${0%/*} -echo "bindir=$bindir" # $bindir maybe like /opt/docklet/src/../sbin # use command below to make $bindir in normal absolute path DOCKLET_BIN=$(cd $bindir; pwd) diff --git a/bin/docklet-worker b/bin/docklet-worker index 6e95a84..5ca5fe4 100755 --- a/bin/docklet-worker +++ b/bin/docklet-worker @@ -33,6 +33,7 @@ DAEMON_USER=root # settings for docklet worker DAEMON=$DOCKLET_LIB/worker.py +STOP_DEAMON=$DOCKLET_LIB/stopworker.py DAEMON_NAME=docklet-worker DAEMON_OPTS= # The process ID of the script when it runs is stored here: @@ -43,7 +44,6 @@ PIDFILE=$RUN_DIR/$DAEMON_NAME.pid ########### pre_start () { - log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX" [ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global [ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local @@ -81,6 +81,7 @@ pre_start () { do_start() { pre_start + log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX" start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS log_end_msg $? } @@ -89,6 +90,10 @@ do_stop () { log_daemon_msg "Stopping $DAEMON_NAME daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE --retry 10 log_end_msg $? 
+ log_daemon_msg "Change $DAEMON_NAME daemon state" + pre_start + start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $STOP_DAEMON -- $DAEMON_OPTS + log_end_msg $? } diff --git a/src/etcdlib.py b/src/etcdlib.py index 757c6a2..af15937 100755 --- a/src/etcdlib.py +++ b/src/etcdlib.py @@ -199,4 +199,13 @@ class Client(object): return [False, 'you are not lock holder'] else: return [False, 'no one holds this lock'] - + + def getnode(self, key): + key = key.strip("/") + out = dorequest(self.keysurl+key) + if 'action' not in out: + return [False, "key not found"] + elif 'dir' in out: + return [False, dirname+" is a directory"] + else: + return [True, {"key":out['node']['key'], 'value':out['node']['value']}] \ No newline at end of file diff --git a/src/httprest.py b/src/httprest.py index 6fbfc4f..60f8b42 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -565,9 +565,6 @@ if __name__ == '__main__': etcdclient.setkey("service/mode", mode) if etcdclient.isdir("_lock")[0]: etcdclient.deldir("_lock") - if etcdclient.isdir("machines/runnodes")[0]: - etcdclient.deldir("machines/runnodes") - etcdclient.createdir("machines/runnodes") G_usermgr = userManager.userManager('root') clusternet = env.getenv("CLUSTER_NET") diff --git a/src/nodemgr.py b/src/nodemgr.py index ffd1a09..3bcd22e 100755 --- a/src/nodemgr.py +++ b/src/nodemgr.py @@ -22,6 +22,7 @@ class NodeMgr(object): self.networkmgr = networkmgr self.etcd = etcdclient self.mode = mode + self.workerport = env.getenv('WORKER_PORT') # initialize the network logger.info ("initialize network") @@ -45,16 +46,24 @@ class NodeMgr(object): logger.error("docklet-br not found") sys.exit(1) - # get allnodes - self.allnodes = self._nodelist_etcd("allnodes") - self.runnodes = self._nodelist_etcd("runnodes") - logger.info ("all nodes are: %s" % self.allnodes) - logger.info ("run nodes are: %s" % self.runnodes) - if len(self.runnodes)>0: - logger.error ("init 
runnodes is not null, need to be clean") - sys.exit(1) # init rpc list self.rpcs = [] + + # get allnodes + self.allnodes = self._nodelist_etcd("allnodes") + self.runnodes = [] + [status, runlist] = self.etcd.listdir("machines/runnodes") + for node in runlist: + nodeip = node['key'].rsplit('/',1)[1] + if node['value'] == 'ok': + logger.info ("running node %s" % nodeip) + self.runnodes.append(nodeip) + self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s" % (nodeip, self.workerport))) + logger.info ("add %s:%s in rpc client list" % (nodeip, self.workerport)) + + logger.info ("all nodes are: %s" % self.allnodes) + logger.info ("run nodes are: %s" % self.runnodes) + # start new thread to watch whether a new node joins logger.info ("start thread to watch new nodes ...") self.thread_watchnewnode = threading.Thread(target=self._watchnewnode) @@ -86,7 +95,6 @@ class NodeMgr(object): # thread target : watch whether a new node joins def _watchnewnode(self): - workerport = env.getenv('WORKER_PORT') while(True): time.sleep(0.1) [status, runlist] = self.etcd.listdir("machines/runnodes") @@ -97,25 +105,6 @@ class NodeMgr(object): nodeip = node['key'].rsplit('/',1)[1] if node['value']=='waiting': logger.info ("%s want to joins, call it to init first" % nodeip) - # 'docklet-br' of worker do not need IP Addr. 
Not need to allocate an IP to it - #if nodeip != self.addr: - # [status, result] = self.networkmgr.acquire_sysips_cidr() - # self.networkmgr.printpools() - # if not status: - # logger.error("no IP for worker bridge, please check network system pool") - # continue - # bridgeip = result[0] - # self.etcd.setkey("network/workbridge", bridgeip) - if nodeip in self.allnodes: - ######## HERE MAYBE NEED TO FIX ############### - # here we must use "machines/runnodes/nodeip" - # we cannot use node['key'], node['key'] is absolute - # path, etcd client will append the path to prefix, - # which is wrong - ############################################### - self.etcd.setkey("machines/runnodes/"+nodeip, "init-"+self.mode) - else: - self.etcd.setkey('machines/runnodes/'+nodeip, "init-new") elif node['value']=='work': logger.info ("new node %s joins" % nodeip) # setup GRE tunnels for new nodes @@ -127,17 +116,18 @@ class NodeMgr(object): logger.debug("GRE for %s already exists, reuse it" % nodeip) else: netcontrol.setup_gre('docklet-br', nodeip) - self.runnodes.append(nodeip) self.etcd.setkey("machines/runnodes/"+nodeip, "ok") - if nodeip not in self.allnodes: - self.allnodes.append(nodeip) - self.etcd.setkey("machines/allnodes/"+nodeip, "ok") - logger.debug ("all nodes are: %s" % self.allnodes) - logger.debug ("run nodes are: %s" % self.runnodes) - self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s" - % (nodeip, workerport))) - logger.info ("add %s:%s in rpc client list" % - (nodeip, workerport)) + if nodeip not in self.runnodes: + self.runnodes.append(nodeip) + if nodeip not in self.allnodes: + self.allnodes.append(nodeip) + self.etcd.setkey("machines/allnodes/"+nodeip, "ok") + logger.debug ("all nodes are: %s" % self.allnodes) + logger.debug ("run nodes are: %s" % self.runnodes) + self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s" + % (nodeip, self.workerport))) + logger.info ("add %s:%s in rpc client list" % + (nodeip, self.workerport)) # get all run nodes' IP addr 
def get_nodeips(self): diff --git a/src/stopworker.py b/src/stopworker.py new file mode 100755 index 0000000..b73e69e --- /dev/null +++ b/src/stopworker.py @@ -0,0 +1,13 @@ +#!/usr/bin/python3 +import env,tools +config = env.getenv("CONFIG") +tools.loadenv(config) +import etcdlib, network + +if __name__ == '__main__': + etcdaddr = env.getenv("ETCD") + clustername = env.getenv("CLUSTER_NAME") + etcdclient = etcdlib.Client(etcdaddr, prefix = clustername) + net_dev = env.getenv("NETWORK_DEVICE") + ipaddr = network.getip(net_dev) + etcdclient.setkey("machines/runnodes/"+ipaddr, "stop") \ No newline at end of file diff --git a/src/worker.py b/src/worker.py index 87ebb33..47ee1f3 100755 --- a/src/worker.py +++ b/src/worker.py @@ -48,30 +48,31 @@ class Worker(object): self.master = self.etcd.getkey("service/master")[1] self.mode=None - # register self to master self.etcd.setkey("machines/runnodes/"+self.addr, "waiting") - for f in range (0, 3): - [status, value] = self.etcd.getkey("machines/runnodes/"+self.addr) - if not value.startswith("init"): - # master wakesup every 0.1s to check register - logger.debug("worker % register to master failed %d \ - time, sleep %fs" % (self.addr, f+1, 0.1)) - time.sleep(0.1) - else: - break - - if value.startswith("init"): - # check token to check global directory - [status, token_1] = self.etcd.getkey("token") - tokenfile = open(self.fspath+"/global/token", 'r') - token_2 = tokenfile.readline().strip() - if token_1 != token_2: - logger.error("check token failed, global directory is not a shared filesystem") - sys.exit(1) + [status, node] = self.etcd.getnode("machines/runnodes/"+self.addr) + if status: + self.key = node['key'] else: - logger.error ("worker register in machines/runnodes failed, maybe master not start") + logger.error("get key failed. 
%s" % node) sys.exit(1) - logger.info ("worker registered in master and checked the token") + + # check token to check global directory + [status, token_1] = self.etcd.getkey("token") + tokenfile = open(self.fspath+"/global/token", 'r') + token_2 = tokenfile.readline().strip() + if token_1 != token_2: + logger.error("check token failed, global directory is not a shared filesystem") + sys.exit(1) + logger.info ("worker registered and checked the token") + + # worker itself to judge how to init + value = 'init-new' + [status, runlist] = self.etcd.listdir("machines/runnodes") + for node in runlist: + if node['key'] == self.key: + value = 'init-recovery' + break + logger.info("worker start in "+value+" mode") Containers = container.Container(self.addr, etcdclient) if value == 'init-new': From 839ea2313598314c063a1069c13d61ef9b411ffa Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 20 Apr 2016 14:50:53 +0800 Subject: [PATCH 03/19] separate master and worker --- bin/docklet-worker | 19 ++++++++++++++----- src/stopworker.py | 2 +- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/bin/docklet-worker b/bin/docklet-worker index 5ca5fe4..7af7dab 100755 --- a/bin/docklet-worker +++ b/bin/docklet-worker @@ -44,7 +44,6 @@ PIDFILE=$RUN_DIR/$DAEMON_NAME.pid ########### pre_start () { - [ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global [ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local [ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users @@ -86,18 +85,25 @@ do_start() { log_end_msg $? } +do_changestage () { + RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf + export CONFIG=$RUNNING_CONFIG + log_daemon_msg "Change $DAEMON_NAME daemon state" + cmd=$(python3 ../src/stopworker.py) + log_end_msg $? +} + do_stop () { log_daemon_msg "Stopping $DAEMON_NAME daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE --retry 10 log_end_msg $? 
- log_daemon_msg "Change $DAEMON_NAME daemon state" - pre_start - start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $STOP_DAEMON -- $DAEMON_OPTS - log_end_msg $? + + do_changestage } + case "$1" in start) do_start @@ -121,6 +127,9 @@ case "$1" in status_of_proc -p $PIDFILE "$DAEMON" "$DAEMON_NAME" && exit 0 || exit $? ;; + change) + do_changestage + ;; *) echo "Usage: $DAEMON_NAME {start|stop|restart|status}" exit 1 diff --git a/src/stopworker.py b/src/stopworker.py index b73e69e..13e92ba 100755 --- a/src/stopworker.py +++ b/src/stopworker.py @@ -10,4 +10,4 @@ if __name__ == '__main__': etcdclient = etcdlib.Client(etcdaddr, prefix = clustername) net_dev = env.getenv("NETWORK_DEVICE") ipaddr = network.getip(net_dev) - etcdclient.setkey("machines/runnodes/"+ipaddr, "stop") \ No newline at end of file + etcdclient.deldir("machines/runnodes/"+ipaddr) \ No newline at end of file From c56c3d52db90b43e2977d8330df13afca009ece2 Mon Sep 17 00:00:00 2001 From: Peidong Liu Date: Wed, 27 Apr 2016 15:59:33 +0800 Subject: [PATCH 04/19] Now backend is flask-styled fix a bug that will cause 404 in /login/ --- src/httprest.py | 945 +++++++++++++++-------------- src/httprest_old.py | 610 +++++++++++++++++++ src/nodemgr.py | 16 +- src/userManager.py | 50 +- web/webViews/admin.py | 8 +- web/webViews/authenticate/login.py | 2 +- web/webViews/monitor.py | 24 +- web/webViews/user/userActivate.py | 2 +- web/webViews/user/userlist.py | 14 +- 9 files changed, 1163 insertions(+), 508 deletions(-) create mode 100755 src/httprest_old.py diff --git a/src/httprest.py b/src/httprest.py index 0053be7..99ec0f7 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -4,6 +4,8 @@ # because some modules need variables when import # for example, userManager/model.py +from flask import Flask, request + # must first init loadenv import tools, env config = env.getenv("CONFIG") @@ -27,475 +29,520 @@ external_login = 
env.getenv('EXTERNAL_LOGIN') if (external_login == 'TRUE'): from userDependence import external_auth -class DockletHttpHandler(http.server.BaseHTTPRequestHandler): - def response(self, code, output): - self.send_response(code) - self.send_header("Content-type", "application/json") - self.end_headers() - # wfile/rfile are in byte/binary encoded. need to recode - self.wfile.write(json.dumps(output).encode('ascii')) - self.wfile.write("\n".encode('ascii')) - # do not wfile.close() - # because self.handle_one_request will call wfile.flush after calling do_* - # and self.handle_one_request will close this wfile after timeout automatically - # (see /usr/lib/python3.4/http/server.py handle_one_request function) - #self.wfile.close() +app = Flask(__name__) - # override log_request to not print default request log - # we use the log info by ourselves in our style - def log_request(code = '-', size = '-'): - pass +from functools import wraps - def do_PUT(self): - self.response(400, {'success':'false', 'message':'Not supported methond'}) - def do_GET(self): - self.response(400, {'success':'false', 'message':'Not supported methond'}) - - def do_DELETE(self): - self.response(400, {'success':'false', 'message':'Not supported methond'}) - - # handler POST request - def do_POST(self): - global G_vclustermgr +def login_required(func): + @wraps(func) + def wrapper(*args, **kwargs): global G_usermgr - #logger.info ("get request, header content:\n%s" % self.headers) - #logger.info ("read request content:\n%s" % self.rfile.read(int(self.headers["Content-Length"]))) - logger.info ("get request, path: %s" % self.path) - # for test - if self.path == '/test': - logger.info ("return welcome for test") - self.response(200, {'success':'true', 'message':'welcome to docklet'}) - return [True, 'test ok'] - - # check for not null content - if 'Content-Length' not in self.headers: - logger.info ("request content is null") - self.response(401, {'success':'false', 'message':'request content is 
null'}) - return [False, 'content is null'] - - # auth the user - # cgi.FieldStorage need fp/headers/environ. (see /usr/lib/python3.4/cgi.py) - form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,environ={'REQUEST_METHOD':'POST'}) - cmds = self.path.strip('/').split('/') - if cmds[0] == 'register' and form.getvalue('activate', None) == None: - logger.info ("handle request : user register") - username = form.getvalue('username', '') - password = form.getvalue('password', '') - email = form.getvalue('email', '') - description = form.getvalue('description','') - if (username == '' or password == '' or email == ''): - self.response(500, {'success':'false'}) - newuser = G_usermgr.newuser() - newuser.username = form.getvalue('username') - newuser.password = form.getvalue('password') - newuser.e_mail = form.getvalue('email') - newuser.student_number = form.getvalue('studentnumber') - newuser.department = form.getvalue('department') - newuser.nickname = form.getvalue('truename') - newuser.truename = form.getvalue('truename') - newuser.description = form.getvalue('description') - newuser.status = "init" - newuser.auth_method = "local" - result = G_usermgr.register(user = newuser) - self.response(200, result) - return [True, "register succeed"] - if cmds[0] == 'login': - logger.info ("handle request : user login") - user = form.getvalue("user") - key = form.getvalue("key") - if user == None or key == None: - self.response(401, {'success':'false', 'message':'user or key is null'}) - return [False, "auth failed"] - auth_result = G_usermgr.auth(user, key) - if auth_result['success'] == 'false': - self.response(401, {'success':'false', 'message':'auth failed'}) - return [False, "auth failed"] - self.response(200, {'success':'true', 'action':'login', 'data': auth_result['data']}) - return [True, "auth succeeded"] - if cmds[0] == 'external_login': - logger.info ("handle request : external user login") - try: - result = G_usermgr.auth_external(form) - self.response(200, 
result) - return result - except: - result = {'success': 'false', 'reason': 'Something wrong happened when auth an external account'} - self.response(200, result) - return result - - token = form.getvalue("token") - if token == None: - self.response(401, {'success':'false', 'message':'user or key is null'}) - return [False, "auth failed"] + logger.info ("get request, path: %s" % request.path) + token = request.form.get("token", None) + if (token == None): + return {'success':'false', 'message':'user or key is null'} cur_user = G_usermgr.auth_token(token) - if cur_user == None: - self.response(401, {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) - return [False, "auth failed"] - + if (cur_user == None): + return {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'} + return func(cur_user, cur_user.username, request.form, *args, **kwargs) + + return wrapper + +@app.route("/login/", methods=['POST']) +def login(): + global G_usermgr + logger.info ("handle request : user login") + user = request.form.get("user", None) + key = request.form.get("key", None) + if user == None or key == None: + return json.dumps({'success':'false', 'message':'user or key is null'}) + auth_result = G_usermgr.auth(user, key) + if auth_result['success'] == 'false': + return json.dumps({'success':'false', 'message':'auth failed'}) + return json.dumps({'success':'true', 'action':'login', 'data': auth_result['data']}) + +@app.route("/external_login/", methods=['POST']) +def external_login(): + global G_usermgr + logger.info ("handle request : external user login") + try: + result = G_usermgr.auth_external(request.form) + return json.dumps(result) + except: + result = {'success': 'false', 'reason': 'Something wrong happened when auth an external account'} + return json.dumps(result) + +@app.route("/register/", methods=['POST']) +def register(): + global G_usermgr + if request.form.get('activate', None) == None: + logger.info ("handle 
request : user register") + username = request.form.get('username', '') + password = request.form.get('password', '') + email = request.form.get('email', '') + description = request.form.get('description','') + if (username == '' or password == '' or email == ''): + return json.dumps({'success':'false'}) + newuser = G_usermgr.newuser() + newuser.username = request.form.get('username') + newuser.password = request.form.get('password') + newuser.e_mail = request.form.get('email') + newuser.student_number = request.form.get('studentnumber') + newuser.department = request.form.get('department') + newuser.nickname = request.form.get('truename') + newuser.truename = request.form.get('truename') + newuser.description = request.form.get('description') + newuser.status = "init" + newuser.auth_method = "local" + result = G_usermgr.register(user = newuser) + return json.dumps(result) + else: + logger.info ("handle request, user activating") + token = request.form.get("token", None) + if (token == None): + return json.dumps({'success':'false', 'message':'user or key is null'}) + cur_user = G_usermgr.auth_token(token) + if (cur_user == None): + return json.dumps({'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) + newuser = G_usermgr.newuser() + newuser.username = cur_user.username + newuser.nickname = cur_user.truename + newuser.status = 'applying' + newuser.user_group = cur_user.user_group + newuser.auth_method = cur_user.auth_method + newuser.e_mail = form.get('email','') + newuser.student_number = form.get('studentnumber', '') + newuser.department = form.get('department', '') + newuser.truename = form.get('truename', '') + newuser.tel = form.get('tel', '') + newuser.description = form.get('description', '') + result = G_usermgr.register(user = newuser) + userManager.send_remind_activating_email(newuser.username) + return json.dumps(result) - user = cur_user.username - # parse the url and get to do actions - # /cluster/list - # /cluster/create 
& clustername - # /cluster/start & clustername - # /cluster/stop & clustername - # /cluster/delete & clustername - # /cluster/info & clustername +@app.route("/cluster/create/", methods=['POST']) +@login_required +def create_cluster(cur_user, user, form): + global G_usermgr + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + image = {} + image['name'] = form.get("imagename", None) + image['type'] = form.get("imagetype", None) + image['owner'] = form.get("imageowner", None) + user_info = G_usermgr.selfQuery(cur_user = cur_user) + user_info = json.dumps(user_info) + logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name'])) + [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info) + if status: + return json.dumps({'success':'true', 'action':'create cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'create cluster', 'message':result}) + +@app.route("/cluster/scaleout/", methods=['POST']) +@login_required +def scaleout_cluster(cur_user, user, form): + global G_usermgr + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info("handle request : scale out %s" % clustername) + image = {} + image['name'] = form.get("imagename", None) + image['type'] = form.get("imagetype", None) + image['owner'] = form.get("imageowner", None) + logger.debug("imagename:" + image['name']) + logger.debug("imagetype:" + image['type']) + logger.debug("imageowner:" + image['owner']) + user_info = G_usermgr.selfQuery(cur_user = cur_user) + user_info = json.dumps(user_info) + [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info) + if status: + return json.dumps({'success':'true', 'action':'scale out', 'message':result}) + 
else: + return json.dumps({'success':'false', 'action':'scale out', 'message':result}) + +@app.route("/cluster/scalein/", methods=['POST']) +@login_required +def scalein_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info("handle request : scale in %s" % clustername) + containername = form.get("containername", None) + [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername) + if status: + return json.dumps({'success':'true', 'action':'scale in', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'scale in', 'message':result}) + +@app.route("/cluster/start/", methods=['POST']) +@login_required +def start_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : start cluster %s" % clustername) + [status, result] = G_vclustermgr.start_cluster(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'start cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'start cluster', 'message':result}) + +@app.route("/cluster/stop/", methods=['POST']) +@login_required +def stop_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : start cluster %s" % clustername) + [status, result] = G_vclustermgr.stop_cluster(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'stop cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'stop cluster', 'message':result}) + +@app.route("/cluster/delete/", 
methods=['POST']) +@login_required +def delete_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : delete cluster %s" % clustername) + [status, result] = G_vclustermgr.delete_cluster(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'delete cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'delete cluster', 'message':result}) + +@app.route("/cluster/info/", methods=['POST']) +@login_required +def info_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : info cluster %s" % clustername) + [status, result] = G_vclustermgr.get_clusterinfo(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'info cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'info cluster', 'message':result}) + +@app.route("/cluster/list/", methods=['POST']) +@login_required +def list_cluster(cur_user, user, form): + global G_vclustermgr + logger.info ("handle request : list clusters for %s" % user) + [status, clusterlist] = G_vclustermgr.list_clusters(user) + if status: + return json.dumps({'success':'true', 'action':'list cluster', 'clusters':clusterlist}) + else: + return json.dumps({'success':'false', 'action':'list cluster', 'message':clusterlist}) + +@app.route("/cluster/flush/", methods=['POST']) +@login_required +def flush_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + from_lxc = form.get('from_lxc', None) + 
G_vclustermgr.flush_cluster(user,clustername,from_lxc) + return json.dumps({'success':'true', 'action':'flush'}) + +@app.route("/cluster/save/", methods=['POST']) +@login_required +def save_cluster(cur_user, user, form): + global G_vclustermgr + global G_usermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + + imagename = form.get("image", None) + description = form.get("description", None) + containername = form.get("containername", None) + isforce = form.get("isforce", None) + if not isforce == "true": + [status,message] = G_vclustermgr.image_check(user,imagename) + if not status: + return json.dumps({'success':'false','reason':'exists', 'message':message}) + + user_info = G_usermgr.selfQuery(cur_user = cur_user) + [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"]) + if status: + logger.info("image has been saved") + return json.dumps({'success':'true', 'action':'save'}) + else: + logger.debug(message) + return json.dumps({'success':'false', 'reason':'exceed', 'message':message}) - if cmds[0] == 'cluster': - clustername = form.getvalue('clustername') - # check for 'clustername' : all actions except 'list' need 'clustername' - if (cmds[1] != 'list') and clustername == None: - self.response(401, {'success':'false', 'message':'clustername is null'}) - return [False, "clustername is null"] - if cmds[1] == 'create': - image = {} - image['name'] = form.getvalue("imagename") - image['type'] = form.getvalue("imagetype") - image['owner'] = form.getvalue("imageowner") - user_info = G_usermgr.selfQuery(cur_user = cur_user) - user_info = json.dumps(user_info) - logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name'])) - [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info) - if status: - self.response(200, 
{'success':'true', 'action':'create cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'create cluster', 'message':result}) - elif cmds[1] == 'scaleout': - logger.info("handle request : scale out %s" % clustername) - image = {} - image['name'] = form.getvalue("imagename") - image['type'] = form.getvalue("imagetype") - image['owner'] = form.getvalue("imageowner") - logger.debug("imagename:" + image['name']) - logger.debug("imagetype:" + image['type']) - logger.debug("imageowner:" + image['owner']) - user_info = G_usermgr.selfQuery(cur_user = cur_user) - user_info = json.dumps(user_info) - [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info) - if status: - self.response(200, {'success':'true', 'action':'scale out', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'scale out', 'message':result}) - elif cmds[1] == 'scalein': - logger.info("handle request : scale in %s" % clustername) - containername = form.getvalue("containername") - [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername) - if status: - self.response(200, {'success':'true', 'action':'scale in', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'scale in', 'message':result}) - elif cmds[1] == 'start': - logger.info ("handle request : start cluster %s" % clustername) - [status, result] = G_vclustermgr.start_cluster(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'start cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'start cluster', 'message':result}) - elif cmds[1] == 'stop': - logger.info ("handle request : stop cluster %s" % clustername) - [status, result] = G_vclustermgr.stop_cluster(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'stop cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'stop cluster', 
'message':result}) - elif cmds[1] == 'delete': - logger.info ("handle request : delete cluster %s" % clustername) - [status, result] = G_vclustermgr.delete_cluster(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'delete cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'delete cluster', 'message':result}) - elif cmds[1] == 'info': - logger.info ("handle request : info cluster %s" % clustername) - [status, result] = G_vclustermgr.get_clusterinfo(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'info cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'info cluster', 'message':result}) - elif cmds[1] == 'list': - logger.info ("handle request : list clusters for %s" % user) - [status, clusterlist] = G_vclustermgr.list_clusters(user) - if status: - self.response(200, {'success':'true', 'action':'list cluster', 'clusters':clusterlist}) - else: - self.response(400, {'success':'false', 'action':'list cluster', 'message':clusterlist}) +@app.route("/image/list/", methods=['POST']) +@login_required +def list_image(cur_user, user, form): + global G_imagemgr + images = G_imagemgr.list_images(user) + return json.dumps({'success':'true', 'images': images}) - elif cmds[1] == 'flush': - from_lxc = form.getvalue('from_lxc') - G_vclustermgr.flush_cluster(user,clustername,from_lxc) - self.response(200, {'success':'true', 'action':'flush'}) +@app.route("/image/description/", methods=['POST']) +@login_required +def description_image(cur_user, user, form): + global G_imagemgr + image = {} + image['name'] = form.get("imagename", None) + image['type'] = form.get("imagetype", None) + image['owner'] = form.get("imageowner", None) + description = G_imagemgr.get_image_description(user,image) + return json.dumps({'success':'true', 'message':description}) - elif cmds[1] == 'save': - imagename = form.getvalue("image") - description = 
form.getvalue("description") - containername = form.getvalue("containername") - isforce = form.getvalue("isforce") - if not isforce == "true": - [status,message] = G_vclustermgr.image_check(user,imagename) - if not status: - self.response(200, {'success':'false','reason':'exists', 'message':message}) - return [False, "image already exists"] - user_info = G_usermgr.selfQuery(cur_user = cur_user) - [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"]) - if status: - logger.info("image has been saved") - self.response(200, {'success':'true', 'action':'save'}) - else: - logger.debug(message) - self.response(200, {'success':'false', 'reason':'exceed', 'message':message}) +@app.route("/image/share/", methods=['POST']) +@login_required +def share_image(cur_user, user, form): + global G_imagemgr + image = form.get('image', None) + G_imagemgr.shareImage(user,image) + return json.dumps({'success':'true', 'action':'share'}) - else: - logger.warning ("request not supported ") - self.response(400, {'success':'false', 'message':'not supported request'}) +@app.route("/image/unshare/", methods=['POST']) +@login_required +def unshare_image(cur_user, user, form): + global G_imagemgr + image = form.get('image', None) + G_imagemgr.unshareImage(user,image) + return json.dumps({'success':'true', 'action':'unshare'}) - # Request for Image - elif cmds[0] == 'image': - if cmds[1] == 'list': - images = G_imagemgr.list_images(user) - self.response(200, {'success':'true', 'images': images}) - elif cmds[1] == 'description': - image = {} - image['name'] = form.getvalue("imagename") - image['type'] = form.getvalue("imagetype") - image['owner'] = form.getvalue("imageowner") - description = G_imagemgr.get_image_description(user,image) - self.response(200, {'success':'true', 'message':description}) - elif cmds[1] == 'share': - image = form.getvalue('image') - G_imagemgr.shareImage(user,image) - self.response(200, 
{'success':'true', 'action':'share'}) - elif cmds[1] == 'unshare': - image = form.getvalue('image') - G_imagemgr.unshareImage(user,image) - self.response(200, {'success':'true', 'action':'unshare'}) - elif cmds[1] == 'delete': - image = form.getvalue('image') - G_imagemgr.removeImage(user,image) - self.response(200, {'success':'true', 'action':'delete'}) - else: - logger.warning("request not supported ") - self.response(400, {'success':'false', 'message':'not supported request'}) +@app.route("/image/delete/", methods=['POST']) +@login_required +def delete_image(cur_user, user, form): + global G_imagemgr + image = form.get('image', None) + G_imagemgr.removeImage(user,image) + return json.dumps({'success':'true', 'action':'delete'}) - # Add Proxy - elif cmds[0] == 'addproxy': - logger.info ("handle request : add proxy") - proxy_ip = form.getvalue("ip") - proxy_port = form.getvalue("port") - clustername = form.getvalue("clustername") - [status, message] = G_vclustermgr.addproxy(user,clustername,proxy_ip,proxy_port) - if status is True: - self.response(200, {'success':'true', 'action':'addproxy'}) - else: - self.response(400, {'success':'false', 'message': message}) - # Delete Proxy - elif cmds[0] == 'deleteproxy': - logger.info ("handle request : delete proxy") - clustername = form.getvalue("clustername") - G_vclustermgr.deleteproxy(user,clustername) - self.response(200, {'success':'true', 'action':'deleteproxy'}) +@app.route("/addproxy/", methods=['POST']) +@login_required +def addproxy(cur_user, user, form): + global G_vclustermgr + logger.info ("handle request : add proxy") + proxy_ip = form.get("ip", None) + proxy_port = form.get("port", None) + clustername = form.get("clustername", None) + [status, message] = G_vclustermgr.addproxy(user,clustername,proxy_ip,proxy_port) + if status is True: + return json.dumps({'success':'true', 'action':'addproxy'}) + else: + return json.dumps({'success':'false', 'message': message}) - # Request for Monitor - elif cmds[0] == 
'monitor': - logger.info("handle request: monitor") - res = {} - if cmds[1] == 'hosts': - com_id = cmds[2] - fetcher = monitor.Fetcher(etcdaddr,G_clustername,com_id) - if cmds[3] == 'meminfo': - res['meminfo'] = fetcher.get_meminfo() - elif cmds[3] == 'cpuinfo': - res['cpuinfo'] = fetcher.get_cpuinfo() - elif cmds[3] == 'cpuconfig': - res['cpuconfig'] = fetcher.get_cpuconfig() - elif cmds[3] == 'diskinfo': - res['diskinfo'] = fetcher.get_diskinfo() - elif cmds[3] == 'osinfo': - res['osinfo'] = fetcher.get_osinfo() - elif cmds[3] == 'containers': - res['containers'] = fetcher.get_containers() - elif cmds[3] == 'status': - res['status'] = fetcher.get_status() - elif cmds[3] == 'containerslist': - res['containerslist'] = fetcher.get_containerslist() - elif cmds[3] == 'containersinfo': - res = [] - conlist = fetcher.get_containerslist() - for container in conlist: - ans = {} - confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) - ans = confetcher.get_basic_info(container) - ans['cpu_use'] = confetcher.get_cpu_use(container) - ans['mem_use'] = confetcher.get_mem_use(container) - res.append(ans) - else: - self.response(400, {'success':'false', 'message':'not supported request'}) - return +@app.route("/deleteproxy/", methods=['POST']) +@login_required +def deleteproxy(cur_user, user, form): + global G_vclustermgr + logger.info ("handle request : delete proxy") + clustername = form.get("clustername", None) + G_vclustermgr.deleteproxy(user,clustername) + return json.dumps({'success':'true', 'action':'deleteproxy'}) - self.response(200, {'success':'true', 'monitor':res}) - elif cmds[1] == 'vnodes': - fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) - if cmds[3] == 'cpu_use': - res['cpu_use'] = fetcher.get_cpu_use(cmds[2]) - elif cmds[3] == 'mem_use': - res['mem_use'] = fetcher.get_mem_use(cmds[2]) - elif cmds[3] == 'disk_use': - res['disk_use'] = fetcher.get_disk_use(cmds[2]) - elif cmds[3] == 'basic_info': - res['basic_info'] = 
fetcher.get_basic_info(cmds[2]) - elif cmds[3] == 'owner': - names = cmds[2].split('-') - result = G_usermgr.query(username = names[0], cur_user = cur_user) - if result['success'] == 'false': - res['username'] = "" - res['truename'] = "" - else: - res['username'] = result['data']['username'] - res['truename'] = result['data']['truename'] - else: - res = "Unspported Method!" - self.response(200, {'success':'true', 'monitor':res}) - elif cmds[1] == 'user': - if cmds[2] == 'quotainfo': - user_info = G_usermgr.selfQuery(cur_user = cur_user) - quotainfo = user_info['data']['groupinfo'] - self.response(200, {'success':'true', 'quotainfo':quotainfo}) - '''if not user == 'root': - self.response(400, {'success':'false', 'message':'Root Required'}) - if cmds[3] == 'clustercnt': - flag = True - clutotal = 0 - clurun = 0 - contotal = 0 - conrun = 0 - [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2]) - if status: - for clustername in clusterlist: - clutotal += 1 - [status2, result] = G_vclustermgr.get_clusterinfo(clustername, cmds[2]) - if status2: - contotal += result['size'] - if result['status'] == 'running': - clurun += 1 - conrun += result['size'] - else: - flag = False - if flag: - res = {} - res['clutotal'] = clutotal - res['clurun'] = clurun - res['contotal'] = contotal - res['conrun'] = conrun - self.response(200, {'success':'true', 'monitor':{'clustercnt':res}}) - else: - self.response(200, {'success':'false','message':clusterlist}) - elif cmds[3] == 'cluster': - if cmds[4] == 'list': - [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2]) - if status: - self.response(200, {'success':'true', 'monitor':{'clusters':clusterlist}}) - else: - self.response(400, {'success':'false', 'message':clusterlist}) - elif cmds[4] == 'info': - clustername = form.getvalue('clustername') - logger.info ("handle request : info cluster %s" % clustername) - [status, result] = G_vclustermgr.get_clusterinfo(clustername, user) - if status: - self.response(200, 
{'success':'true', 'monitor':{'info':result}}) - else: - self.response(200, {'success':'false','message':result}) - else: - self.response(400, {'success':'false', 'message':'not supported request'})''' +@app.route("/monitor/hosts/<com_id>/<issue>/", methods=['POST']) +@login_required +def hosts_monitor(cur_user, user, form, com_id, issue): + global G_clustername - elif cmds[1] == 'listphynodes': - res['allnodes'] = G_nodemgr.get_allnodes() - self.response(200, {'success':'true', 'monitor':res}) - # Request for User - elif cmds[0] == 'user': - logger.info("handle request: user") - if cmds[1] == 'modify': - #user = G_usermgr.query(username = form.getvalue("username"), cur_user = cur_user).get('token', None) - result = G_usermgr.modify(newValue = form, cur_user = cur_user) - self.response(200, result) - if cmds[1] == 'groupModify': - result = G_usermgr.groupModify(newValue = form, cur_user = cur_user) - self.response(200, result) - if cmds[1] == 'query': - result = G_usermgr.query(ID = form.getvalue("ID"), cur_user = cur_user) - if (result.get('success', None) == None or result.get('success', None) == "false"): - self.response(301,result) - else: - result = G_usermgr.queryForDisplay(user = result['token']) - self.response(200,result) + logger.info("handle request: monitor/hosts") + res = {} + fetcher = monitor.Fetcher(etcdaddr,G_clustername,com_id) + if issue == 'meminfo': + res['meminfo'] = fetcher.get_meminfo() + elif issue == 'cpuinfo': + res['cpuinfo'] = fetcher.get_cpuinfo() + elif issue == 'cpuconfig': + res['cpuconfig'] = fetcher.get_cpuconfig() + elif issue == 'diskinfo': + res['diskinfo'] = fetcher.get_diskinfo() + elif issue == 'osinfo': + res['osinfo'] = fetcher.get_osinfo() + elif issue == 'containers': + res['containers'] = fetcher.get_containers() + elif issue == 'status': + res['status'] = fetcher.get_status() + elif issue == 'containerslist': + res['containerslist'] = fetcher.get_containerslist() + elif issue == 'containersinfo': + res = [] + conlist = 
fetcher.get_containerslist() + for container in conlist: + ans = {} + confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) + ans = confetcher.get_basic_info(container) + ans['cpu_use'] = confetcher.get_cpu_use(container) + ans['mem_use'] = confetcher.get_mem_use(container) + res.append(ans) + else: + return json.dumps({'success':'false', 'message':'not supported request'}) - elif cmds[1] == 'add': - user = G_usermgr.newuser(cur_user = cur_user) - user.username = form.getvalue('username') - user.password = form.getvalue('password') - user.e_mail = form.getvalue('e_mail', '') - user.status = "normal" - result = G_usermgr.register(user = user, cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupadd': - result = G_usermgr.groupadd(form = form, cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'quotaadd': - result = G_usermgr.quotaadd(form = form, cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupdel': - result = G_usermgr.groupdel(name = form.getvalue('name', None), cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'data': - logger.info("handle request: user/data") - result = G_usermgr.userList(cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupNameList': - result = G_usermgr.groupListName(cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupList': - result = G_usermgr.groupList(cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupQuery': - result = G_usermgr.groupQuery(name = form.getvalue("name"), cur_user = cur_user) - if (result.get('success', None) == None or result.get('success', None) == "false"): - self.response(301,result) - else: - self.response(200,result) - elif cmds[1] == 'selfQuery': - result = G_usermgr.selfQuery(cur_user = cur_user) - self.response(200,result) - elif cmds[1] == 'selfModify': - result = G_usermgr.selfModify(cur_user = cur_user, newValue = form) - self.response(200,result) - elif 
cmds[0] == 'register' : - #activate - logger.info("handle request: user/activate") - newuser = G_usermgr.newuser() - newuser.username = cur_user.username - newuser.nickname = cur_user.truename - newuser.status = 'applying' - newuser.user_group = cur_user.user_group - newuser.auth_method = cur_user.auth_method - newuser.e_mail = form.getvalue('email','') - newuser.student_number = form.getvalue('studentnumber', '') - newuser.department = form.getvalue('department', '') - newuser.truename = form.getvalue('truename', '') - newuser.tel = form.getvalue('tel', '') - newuser.description = form.getvalue('description', '') - result = G_usermgr.register(user = newuser) - userManager.send_remind_activating_email(newuser.username) - self.response(200,result) + return json.dumps({'success':'true', 'monitor':res}) + + +@app.route("/monitor/vnodes/<com_id>/<issue>/", methods=['POST']) +@login_required +def vnodes_monitor(cur_user, user, form, com_id, issue): + global G_clustername + logger.info("handle request: monitor/vnodes") + res = {} + fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) + if issue == 'cpu_use': + res['cpu_use'] = fetcher.get_cpu_use(com_id) + elif issue == 'mem_use': + res['mem_use'] = fetcher.get_mem_use(com_id) + elif issue == 'disk_use': + res['disk_use'] = fetcher.get_disk_use(com_id) + elif issue == 'basic_info': + res['basic_info'] = fetcher.get_basic_info(com_id) + elif issue == 'owner': + names = com_id.split('-') + result = G_usermgr.query(username = names[0], cur_user = cur_user) + if result['success'] == 'false': + res['username'] = "" + res['truename'] = "" else: - logger.warning ("request not supported ") - self.response(400, {'success':'false', 'message':'not supported request'}) + res['username'] = result['data']['username'] + res['truename'] = result['data']['truename'] + else: + res = "Unspported Method!" 
+ return json.dumps({'success':'true', 'monitor':res}) + + +@app.route("/monitor/user/quotainfo/", methods=['POST']) +@login_required +def user_quotainfo_monitor(cur_user, user, form): + global G_usermgr + logger.info("handle request: monitor/user/quotainfo/") + user_info = G_usermgr.selfQuery(cur_user = cur_user) + quotainfo = user_info['data']['groupinfo'] + return json.dumps({'success':'true', 'quotainfo':quotainfo}) + +@app.route("/monitor/listphynodes/", methods=['POST']) +@login_required +def listphynodes_monitor(cur_user, user, form): + global G_nodemgr + logger.info("handle request: monitor/listphynodes/") + res = {} + res['allnodes'] = G_nodemgr.get_allnodes() + return json.dumps({'success':'true', 'monitor':res}) + + +@app.route("/user/modify/", methods=['POST']) +@login_required +def modify_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/modify/") + result = G_usermgr.modify(newValue = form, cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/groupModify/", methods=['POST']) +@login_required +def groupModify_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupModify/") + result = G_usermgr.groupModify(newValue = form, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/query/", methods=['POST']) +@login_required +def query_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/query/") + result = G_usermgr.query(ID = form.get("ID"), cur_user = cur_user) + if (result.get('success', None) == None or result.get('success', None) == "false"): + return json.dumps(result) + else: + result = G_usermgr.queryForDisplay(user = result['token']) + return json.dumps(result) + + +@app.route("/user/add/", methods=['POST']) +@login_required +def add_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/add/") + user = G_usermgr.newuser(cur_user = cur_user) + user.username = form.get('username', None) + 
user.password = form.get('password', None) + user.e_mail = form.get('e_mail', '') + user.status = "normal" + result = G_usermgr.register(user = user, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/groupadd/", methods=['POST']) +@login_required +def groupadd_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupadd/") + result = G_usermgr.groupadd(form = form, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/quotaadd/", methods=['POST']) +@login_required +def quotaadd_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/quotaadd/") + result = G_usermgr.quotaadd(form = form, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/groupdel/", methods=['POST']) +@login_required +def groupdel_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupdel/") + result = G_usermgr.groupdel(name = form.get('name', None), cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/data/", methods=['POST']) +@login_required +def data_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/data/") + result = G_usermgr.userList(cur_user = cur_user) + return json.dumps(result) + -class ThreadingHttpServer(ThreadingMixIn, http.server.HTTPServer): - pass if __name__ == '__main__': + logger.info('Start Flask...:') + try: + secret_key_file = open(env.getenv('FS_PREFIX') + '/local/httprest_secret_key.txt') + app.secret_key = secret_key_file.read() + secret_key_file.close() + except: + from base64 import b64encode + from os import urandom + secret_key = urandom(24) + secret_key = b64encode(secret_key).decode('utf-8') + app.secret_key = secret_key + secret_key_file = open(env.getenv('FS_PREFIX') + '/local/httprest_secret_key.txt', 'w') + secret_key_file.write(secret_key) + secret_key_file.close() + + os.environ['APP_KEY'] = app.secret_key + runcmd = sys.argv[0] + app.runpath = 
runcmd.rsplit('/', 1)[0] + + global G_nodemgr global G_vclustermgr global G_usermgr @@ -605,6 +652,6 @@ if __name__ == '__main__': logger.info("using MASTER_PORT %d", int(masterport)) # server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) - server = ThreadingHttpServer((masterip, int(masterport)), DockletHttpHandler) logger.info("starting master server") - server.serve_forever() + + app.run(host = masterip, port = masterport, debug = True, threaded=True) diff --git a/src/httprest_old.py b/src/httprest_old.py new file mode 100755 index 0000000..e856532 --- /dev/null +++ b/src/httprest_old.py @@ -0,0 +1,610 @@ +#!/usr/bin/python3 + +# load environment variables in the beginning +# because some modules need variables when import +# for example, userManager/model.py + +# must first init loadenv +import tools, env +config = env.getenv("CONFIG") +tools.loadenv(config) + +# second init logging +# must import logger after initlogging, ugly +from log import initlogging +initlogging("docklet-master") +from log import logger + +import os +import http.server, cgi, json, sys, shutil +from socketserver import ThreadingMixIn +import nodemgr, vclustermgr, etcdlib, network, imagemgr +import userManager +import monitor +import guest_control, threading + +external_login = env.getenv('EXTERNAL_LOGIN') +if (external_login == 'TRUE'): + from userDependence import external_auth + +class DockletHttpHandler(http.server.BaseHTTPRequestHandler): + def response(self, code, output): + self.send_response(code) + self.send_header("Content-type", "application/json") + self.end_headers() + # wfile/rfile are in byte/binary encoded. 
need to recode + self.wfile.write(json.dumps(output).encode('ascii')) + self.wfile.write("\n".encode('ascii')) + # do not wfile.close() + # because self.handle_one_request will call wfile.flush after calling do_* + # and self.handle_one_request will close this wfile after timeout automatically + # (see /usr/lib/python3.4/http/server.py handle_one_request function) + #self.wfile.close() + + # override log_request to not print default request log + # we use the log info by ourselves in our style + def log_request(code = '-', size = '-'): + pass + + def do_PUT(self): + self.response(400, {'success':'false', 'message':'Not supported methond'}) + + def do_GET(self): + self.response(400, {'success':'false', 'message':'Not supported methond'}) + + def do_DELETE(self): + self.response(400, {'success':'false', 'message':'Not supported methond'}) + + # handler POST request + def do_POST(self): + global G_vclustermgr + global G_usermgr + #logger.info ("get request, header content:\n%s" % self.headers) + #logger.info ("read request content:\n%s" % self.rfile.read(int(self.headers["Content-Length"]))) + logger.info ("get request, path: %s" % self.path) + # for test + if self.path == '/test': + logger.info ("return welcome for test") + self.response(200, {'success':'true', 'message':'welcome to docklet'}) + return [True, 'test ok'] + + # check for not null content + if 'Content-Length' not in self.headers: + logger.info ("request content is null") + self.response(401, {'success':'false', 'message':'request content is null'}) + return [False, 'content is null'] + + # auth the user + # cgi.FieldStorage need fp/headers/environ. 
(see /usr/lib/python3.4/cgi.py) + form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,environ={'REQUEST_METHOD':'POST'}) + cmds = self.path.strip('/').split('/') + if cmds[0] == 'register' and form.getvalue('activate', None) == None: + logger.info ("handle request : user register") + username = form.getvalue('username', '') + password = form.getvalue('password', '') + email = form.getvalue('email', '') + description = form.getvalue('description','') + if (username == '' or password == '' or email == ''): + self.response(500, {'success':'false'}) + newuser = G_usermgr.newuser() + newuser.username = form.getvalue('username') + newuser.password = form.getvalue('password') + newuser.e_mail = form.getvalue('email') + newuser.student_number = form.getvalue('studentnumber') + newuser.department = form.getvalue('department') + newuser.nickname = form.getvalue('truename') + newuser.truename = form.getvalue('truename') + newuser.description = form.getvalue('description') + newuser.status = "init" + newuser.auth_method = "local" + result = G_usermgr.register(user = newuser) + self.response(200, result) + return [True, "register succeed"] + if cmds[0] == 'login': + logger.info ("handle request : user login") + user = form.getvalue("user") + key = form.getvalue("key") + if user == None or key == None: + self.response(401, {'success':'false', 'message':'user or key is null'}) + return [False, "auth failed"] + auth_result = G_usermgr.auth(user, key) + if auth_result['success'] == 'false': + self.response(401, {'success':'false', 'message':'auth failed'}) + return [False, "auth failed"] + self.response(200, {'success':'true', 'action':'login', 'data': auth_result['data']}) + return [True, "auth succeeded"] + if cmds[0] == 'external_login': + logger.info ("handle request : external user login") + try: + result = G_usermgr.auth_external(form) + self.response(200, result) + return result + except: + result = {'success': 'false', 'reason': 'Something wrong happened when auth 
an external account'} + self.response(200, result) + return result + + token = form.getvalue("token") + if token == None: + self.response(401, {'success':'false', 'message':'user or key is null'}) + return [False, "auth failed"] + cur_user = G_usermgr.auth_token(token) + if cur_user == None: + self.response(401, {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) + return [False, "auth failed"] + + + + user = cur_user.username + # parse the url and get to do actions + # /cluster/list + # /cluster/create & clustername + # /cluster/start & clustername + # /cluster/stop & clustername + # /cluster/delete & clustername + # /cluster/info & clustername + + + if cmds[0] == 'cluster': + clustername = form.getvalue('clustername') + # check for 'clustername' : all actions except 'list' need 'clustername' + if (cmds[1] != 'list') and clustername == None: + self.response(401, {'success':'false', 'message':'clustername is null'}) + return [False, "clustername is null"] + if cmds[1] == 'create': + image = {} + image['name'] = form.getvalue("imagename") + image['type'] = form.getvalue("imagetype") + image['owner'] = form.getvalue("imageowner") + user_info = G_usermgr.selfQuery(cur_user = cur_user) + user_info = json.dumps(user_info) + logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name'])) + [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info) + if status: + self.response(200, {'success':'true', 'action':'create cluster', 'message':result}) + else: + self.response(200, {'success':'false', 'action':'create cluster', 'message':result}) + elif cmds[1] == 'scaleout': + logger.info("handle request : scale out %s" % clustername) + image = {} + image['name'] = form.getvalue("imagename") + image['type'] = form.getvalue("imagetype") + image['owner'] = form.getvalue("imageowner") + logger.debug("imagename:" + image['name']) + logger.debug("imagetype:" + image['type']) + 
logger.debug("imageowner:" + image['owner']) + user_info = G_usermgr.selfQuery(cur_user = cur_user) + user_info = json.dumps(user_info) + [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info) + if status: + self.response(200, {'success':'true', 'action':'scale out', 'message':result}) + else: + self.response(200, {'success':'false', 'action':'scale out', 'message':result}) + elif cmds[1] == 'scalein': + logger.info("handle request : scale in %s" % clustername) + containername = form.getvalue("containername") + [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername) + if status: + self.response(200, {'success':'true', 'action':'scale in', 'message':result}) + else: + self.response(200, {'success':'false', 'action':'scale in', 'message':result}) + elif cmds[1] == 'start': + logger.info ("handle request : start cluster %s" % clustername) + [status, result] = G_vclustermgr.start_cluster(clustername, user) + if status: + self.response(200, {'success':'true', 'action':'start cluster', 'message':result}) + else: + self.response(200, {'success':'false', 'action':'start cluster', 'message':result}) + elif cmds[1] == 'stop': + logger.info ("handle request : stop cluster %s" % clustername) + [status, result] = G_vclustermgr.stop_cluster(clustername, user) + if status: + self.response(200, {'success':'true', 'action':'stop cluster', 'message':result}) + else: + self.response(200, {'success':'false', 'action':'stop cluster', 'message':result}) + elif cmds[1] == 'delete': + logger.info ("handle request : delete cluster %s" % clustername) + [status, result] = G_vclustermgr.delete_cluster(clustername, user) + if status: + self.response(200, {'success':'true', 'action':'delete cluster', 'message':result}) + else: + self.response(200, {'success':'false', 'action':'delete cluster', 'message':result}) + elif cmds[1] == 'info': + logger.info ("handle request : info cluster %s" % clustername) + [status, result] = 
G_vclustermgr.get_clusterinfo(clustername, user) + if status: + self.response(200, {'success':'true', 'action':'info cluster', 'message':result}) + else: + self.response(200, {'success':'false', 'action':'info cluster', 'message':result}) + elif cmds[1] == 'list': + logger.info ("handle request : list clusters for %s" % user) + [status, clusterlist] = G_vclustermgr.list_clusters(user) + if status: + self.response(200, {'success':'true', 'action':'list cluster', 'clusters':clusterlist}) + else: + self.response(400, {'success':'false', 'action':'list cluster', 'message':clusterlist}) + + elif cmds[1] == 'flush': + from_lxc = form.getvalue('from_lxc') + G_vclustermgr.flush_cluster(user,clustername,from_lxc) + self.response(200, {'success':'true', 'action':'flush'}) + + elif cmds[1] == 'save': + imagename = form.getvalue("image") + description = form.getvalue("description") + containername = form.getvalue("containername") + isforce = form.getvalue("isforce") + if not isforce == "true": + [status,message] = G_vclustermgr.image_check(user,imagename) + if not status: + self.response(200, {'success':'false','reason':'exists', 'message':message}) + return [False, "image already exists"] + user_info = G_usermgr.selfQuery(cur_user = cur_user) + [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"]) + if status: + logger.info("image has been saved") + self.response(200, {'success':'true', 'action':'save'}) + else: + logger.debug(message) + self.response(200, {'success':'false', 'reason':'exceed', 'message':message}) + + else: + logger.warning ("request not supported ") + self.response(400, {'success':'false', 'message':'not supported request'}) + + # Request for Image + elif cmds[0] == 'image': + if cmds[1] == 'list': + images = G_imagemgr.list_images(user) + self.response(200, {'success':'true', 'images': images}) + elif cmds[1] == 'description': + image = {} + image['name'] = 
form.getvalue("imagename") + image['type'] = form.getvalue("imagetype") + image['owner'] = form.getvalue("imageowner") + description = G_imagemgr.get_image_description(user,image) + self.response(200, {'success':'true', 'message':description}) + elif cmds[1] == 'share': + image = form.getvalue('image') + G_imagemgr.shareImage(user,image) + self.response(200, {'success':'true', 'action':'share'}) + elif cmds[1] == 'unshare': + image = form.getvalue('image') + G_imagemgr.unshareImage(user,image) + self.response(200, {'success':'true', 'action':'unshare'}) + elif cmds[1] == 'delete': + image = form.getvalue('image') + G_imagemgr.removeImage(user,image) + self.response(200, {'success':'true', 'action':'delete'}) + else: + logger.warning("request not supported ") + self.response(400, {'success':'false', 'message':'not supported request'}) + + # Add Proxy + elif cmds[0] == 'addproxy': + logger.info ("handle request : add proxy") + proxy_ip = form.getvalue("ip") + proxy_port = form.getvalue("port") + clustername = form.getvalue("clustername") + [status, message] = G_vclustermgr.addproxy(user,clustername,proxy_ip,proxy_port) + if status is True: + self.response(200, {'success':'true', 'action':'addproxy'}) + else: + self.response(400, {'success':'false', 'message': message}) + # Delete Proxy + elif cmds[0] == 'deleteproxy': + logger.info ("handle request : delete proxy") + clustername = form.getvalue("clustername") + G_vclustermgr.deleteproxy(user,clustername) + self.response(200, {'success':'true', 'action':'deleteproxy'}) + + # Request for Monitor + elif cmds[0] == 'monitor': + logger.info("handle request: monitor") + res = {} + if cmds[1] == 'hosts': + com_id = cmds[2] + fetcher = monitor.Fetcher(etcdaddr,G_clustername,com_id) + if cmds[3] == 'meminfo': + res['meminfo'] = fetcher.get_meminfo() + elif cmds[3] == 'cpuinfo': + res['cpuinfo'] = fetcher.get_cpuinfo() + elif cmds[3] == 'cpuconfig': + res['cpuconfig'] = fetcher.get_cpuconfig() + elif cmds[3] == 'diskinfo': + 
res['diskinfo'] = fetcher.get_diskinfo() + elif cmds[3] == 'osinfo': + res['osinfo'] = fetcher.get_osinfo() + elif cmds[3] == 'containers': + res['containers'] = fetcher.get_containers() + elif cmds[3] == 'status': + res['status'] = fetcher.get_status() + elif cmds[3] == 'containerslist': + res['containerslist'] = fetcher.get_containerslist() + elif cmds[3] == 'containersinfo': + res = [] + conlist = fetcher.get_containerslist() + for container in conlist: + ans = {} + confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) + ans = confetcher.get_basic_info(container) + ans['cpu_use'] = confetcher.get_cpu_use(container) + ans['mem_use'] = confetcher.get_mem_use(container) + res.append(ans) + else: + self.response(400, {'success':'false', 'message':'not supported request'}) + return + + self.response(200, {'success':'true', 'monitor':res}) + elif cmds[1] == 'vnodes': + fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) + if cmds[3] == 'cpu_use': + res['cpu_use'] = fetcher.get_cpu_use(cmds[2]) + elif cmds[3] == 'mem_use': + res['mem_use'] = fetcher.get_mem_use(cmds[2]) + elif cmds[3] == 'disk_use': + res['disk_use'] = fetcher.get_disk_use(cmds[2]) + elif cmds[3] == 'basic_info': + res['basic_info'] = fetcher.get_basic_info(cmds[2]) + elif cmds[3] == 'owner': + names = cmds[2].split('-') + result = G_usermgr.query(username = names[0], cur_user = cur_user) + if result['success'] == 'false': + res['username'] = "" + res['truename'] = "" + else: + res['username'] = result['data']['username'] + res['truename'] = result['data']['truename'] + else: + res = "Unspported Method!" 
+ self.response(200, {'success':'true', 'monitor':res}) + elif cmds[1] == 'user': + if cmds[2] == 'quotainfo': + user_info = G_usermgr.selfQuery(cur_user = cur_user) + quotainfo = user_info['data']['groupinfo'] + self.response(200, {'success':'true', 'quotainfo':quotainfo}) + '''if not user == 'root': + self.response(400, {'success':'false', 'message':'Root Required'}) + if cmds[3] == 'clustercnt': + flag = True + clutotal = 0 + clurun = 0 + contotal = 0 + conrun = 0 + [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2]) + if status: + for clustername in clusterlist: + clutotal += 1 + [status2, result] = G_vclustermgr.get_clusterinfo(clustername, cmds[2]) + if status2: + contotal += result['size'] + if result['status'] == 'running': + clurun += 1 + conrun += result['size'] + else: + flag = False + if flag: + res = {} + res['clutotal'] = clutotal + res['clurun'] = clurun + res['contotal'] = contotal + res['conrun'] = conrun + self.response(200, {'success':'true', 'monitor':{'clustercnt':res}}) + else: + self.response(200, {'success':'false','message':clusterlist}) + elif cmds[3] == 'cluster': + if cmds[4] == 'list': + [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2]) + if status: + self.response(200, {'success':'true', 'monitor':{'clusters':clusterlist}}) + else: + self.response(400, {'success':'false', 'message':clusterlist}) + elif cmds[4] == 'info': + clustername = form.getvalue('clustername') + logger.info ("handle request : info cluster %s" % clustername) + [status, result] = G_vclustermgr.get_clusterinfo(clustername, user) + if status: + self.response(200, {'success':'true', 'monitor':{'info':result}}) + else: + self.response(200, {'success':'false','message':result}) + else: + self.response(400, {'success':'false', 'message':'not supported request'})''' + + elif cmds[1] == 'listphynodes': + res['allnodes'] = G_nodemgr.get_allnodes() + self.response(200, {'success':'true', 'monitor':res}) + # Request for User + elif cmds[0] == 'user': + 
logger.info("handle request: user") + if cmds[1] == 'modify': + #user = G_usermgr.query(username = form.getvalue("username"), cur_user = cur_user).get('token', None) + result = G_usermgr.modify(newValue = form, cur_user = cur_user) + self.response(200, result) + if cmds[1] == 'groupModify': + result = G_usermgr.groupModify(newValue = form, cur_user = cur_user) + self.response(200, result) + if cmds[1] == 'query': + result = G_usermgr.query(ID = form.getvalue("ID"), cur_user = cur_user) + if (result.get('success', None) == None or result.get('success', None) == "false"): + self.response(301,result) + else: + result = G_usermgr.queryForDisplay(user = result['token']) + self.response(200,result) + + elif cmds[1] == 'add': + user = G_usermgr.newuser(cur_user = cur_user) + user.username = form.getvalue('username') + user.password = form.getvalue('password') + user.e_mail = form.getvalue('e_mail', '') + user.status = "normal" + result = G_usermgr.register(user = user, cur_user = cur_user) + self.response(200, result) + elif cmds[1] == 'groupadd': + result = G_usermgr.groupadd(form = form, cur_user = cur_user) + self.response(200, result) + elif cmds[1] == 'quotaadd': + result = G_usermgr.quotaadd(form = form, cur_user = cur_user) + self.response(200, result) + elif cmds[1] == 'groupdel': + result = G_usermgr.groupdel(name = form.getvalue('name', None), cur_user = cur_user) + self.response(200, result) + elif cmds[1] == 'data': + logger.info("handle request: user/data") + result = G_usermgr.userList(cur_user = cur_user) + self.response(200, result) + elif cmds[1] == 'groupNameList': + result = G_usermgr.groupListName(cur_user = cur_user) + self.response(200, result) + elif cmds[1] == 'groupList': + result = G_usermgr.groupList(cur_user = cur_user) + self.response(200, result) + elif cmds[1] == 'groupQuery': + result = G_usermgr.groupQuery(name = form.getvalue("name"), cur_user = cur_user) + if (result.get('success', None) == None or result.get('success', None) == 
"false"): + self.response(301,result) + else: + self.response(200,result) + elif cmds[1] == 'selfQuery': + result = G_usermgr.selfQuery(cur_user = cur_user) + self.response(200,result) + elif cmds[1] == 'selfModify': + result = G_usermgr.selfModify(cur_user = cur_user, newValue = form) + self.response(200,result) + elif cmds[0] == 'register' : + #activate + logger.info("handle request: user/activate") + newuser = G_usermgr.newuser() + newuser.username = cur_user.username + newuser.nickname = cur_user.truename + newuser.status = 'applying' + newuser.user_group = cur_user.user_group + newuser.auth_method = cur_user.auth_method + newuser.e_mail = form.getvalue('email','') + newuser.student_number = form.getvalue('studentnumber', '') + newuser.department = form.getvalue('department', '') + newuser.truename = form.getvalue('truename', '') + newuser.tel = form.getvalue('tel', '') + newuser.description = form.getvalue('description', '') + result = G_usermgr.register(user = newuser) + userManager.send_remind_activating_email(newuser.username) + self.response(200,result) + else: + logger.warning ("request not supported ") + self.response(400, {'success':'false', 'message':'not supported request'}) + +class ThreadingHttpServer(ThreadingMixIn, http.server.HTTPServer): + pass + +if __name__ == '__main__': + global G_nodemgr + global G_vclustermgr + global G_usermgr + global etcdclient + global G_networkmgr + global G_clustername + # move 'tools.loadenv' to the beginning of this file + + fs_path = env.getenv("FS_PREFIX") + logger.info("using FS_PREFIX %s" % fs_path) + + etcdaddr = env.getenv("ETCD") + logger.info("using ETCD %s" % etcdaddr) + + G_clustername = env.getenv("CLUSTER_NAME") + logger.info("using CLUSTER_NAME %s" % G_clustername) + + # get network interface + net_dev = env.getenv("NETWORK_DEVICE") + logger.info("using NETWORK_DEVICE %s" % net_dev) + + ipaddr = network.getip(net_dev) + if ipaddr==False: + logger.error("network device is not correct") + sys.exit(1) + 
else: + logger.info("using ipaddr %s" % ipaddr) + + # init etcdlib client + try: + etcdclient = etcdlib.Client(etcdaddr, prefix = G_clustername) + except Exception: + logger.error ("connect etcd failed, maybe etcd address not correct...") + sys.exit(1) + mode = 'recovery' + if len(sys.argv) > 1 and sys.argv[1] == "new": + mode = 'new' + + # do some initialization for mode: new/recovery + if mode == 'new': + # clean and initialize the etcd table + if etcdclient.isdir(""): + etcdclient.clean() + else: + etcdclient.createdir("") + token = tools.gen_token() + tokenfile = open(fs_path+"/global/token", 'w') + tokenfile.write(token) + tokenfile.write("\n") + tokenfile.close() + etcdclient.setkey("token", token) + etcdclient.setkey("service/master", ipaddr) + etcdclient.setkey("service/mode", mode) + etcdclient.createdir("machines/allnodes") + etcdclient.createdir("machines/runnodes") + etcdclient.setkey("vcluster/nextid", "1") + # clean all users vclusters files : FS_PREFIX/global/users//clusters/ + usersdir = fs_path+"/global/users/" + for user in os.listdir(usersdir): + shutil.rmtree(usersdir+user+"/clusters") + shutil.rmtree(usersdir+user+"/hosts") + os.mkdir(usersdir+user+"/clusters") + os.mkdir(usersdir+user+"/hosts") + else: + # check whether cluster exists + if not etcdclient.isdir("")[0]: + logger.error ("cluster not exists, you should use mode:new ") + sys.exit(1) + # initialize the etcd table for recovery + token = tools.gen_token() + tokenfile = open(fs_path+"/global/token", 'w') + tokenfile.write(token) + tokenfile.write("\n") + tokenfile.close() + etcdclient.setkey("token", token) + etcdclient.setkey("service/master", ipaddr) + etcdclient.setkey("service/mode", mode) + if etcdclient.isdir("_lock")[0]: + etcdclient.deldir("_lock") + if etcdclient.isdir("machines/runnodes")[0]: + etcdclient.deldir("machines/runnodes") + etcdclient.createdir("machines/runnodes") + + G_usermgr = userManager.userManager('root') + clusternet = env.getenv("CLUSTER_NET") + 
logger.info("using CLUSTER_NET %s" % clusternet) + + G_networkmgr = network.NetworkMgr(clusternet, etcdclient, mode) + G_networkmgr.printpools() + + # start NodeMgr and NodeMgr will wait for all nodes to start ... + G_nodemgr = nodemgr.NodeMgr(G_networkmgr, etcdclient, addr = ipaddr, mode=mode) + logger.info("nodemgr started") + G_vclustermgr = vclustermgr.VclusterMgr(G_nodemgr, G_networkmgr, etcdclient, ipaddr, mode) + logger.info("vclustermgr started") + G_imagemgr = imagemgr.ImageMgr() + logger.info("imagemgr started") + Guest_control = guest_control.Guest(G_vclustermgr,G_nodemgr) + logger.info("guest control started") + threading.Thread(target=Guest_control.work, args=()).start() + + logger.info("startting to listen on: ") + masterip = env.getenv('MASTER_IP') + logger.info("using MASTER_IP %s", masterip) + + masterport = env.getenv('MASTER_PORT') + logger.info("using MASTER_PORT %d", int(masterport)) + +# server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) + server = ThreadingHttpServer((masterip, int(masterport)), DockletHttpHandler) + logger.info("starting master server") + server.serve_forever() diff --git a/src/nodemgr.py b/src/nodemgr.py index ffd1a09..f45e8f7 100755 --- a/src/nodemgr.py +++ b/src/nodemgr.py @@ -13,7 +13,7 @@ import env # 2. 
update node list when new node joins # ETCD table : # machines/allnodes -- all nodes in docklet, for recovery -# machines/runnodes -- run nodes of this start up +# machines/runnodes -- run nodes of this start up ############################################## class NodeMgr(object): def __init__(self, networkmgr, etcdclient, addr, mode): @@ -53,13 +53,13 @@ class NodeMgr(object): if len(self.runnodes)>0: logger.error ("init runnodes is not null, need to be clean") sys.exit(1) - # init rpc list + # init rpc list self.rpcs = [] # start new thread to watch whether a new node joins logger.info ("start thread to watch new nodes ...") self.thread_watchnewnode = threading.Thread(target=self._watchnewnode) self.thread_watchnewnode.start() - # wait for all nodes joins + # wait for all nodes joins while(True): allin = True for node in self.allnodes: @@ -73,7 +73,7 @@ class NodeMgr(object): logger.info ("run nodes are: %s" % self.runnodes) - # get nodes list from etcd table + # get nodes list from etcd table def _nodelist_etcd(self, which): if which == "allnodes" or which == "runnodes": [status, nodeinfo]=self.etcd.listdir("machines/"+which) @@ -91,7 +91,7 @@ class NodeMgr(object): time.sleep(0.1) [status, runlist] = self.etcd.listdir("machines/runnodes") if not status: - logger.warning ("get runnodes list failed from etcd ") + logger.warning ("get runnodes list failed from etcd ") continue for node in runlist: nodeip = node['key'].rsplit('/',1)[1] @@ -109,8 +109,8 @@ class NodeMgr(object): if nodeip in self.allnodes: ######## HERE MAYBE NEED TO FIX ############### # here we must use "machines/runnodes/nodeip" - # we cannot use node['key'], node['key'] is absolute - # path, etcd client will append the path to prefix, + # we cannot use node['key'], node['key'] is absolute + # path, etcd client will append the path to prefix, # which is wrong ############################################### self.etcd.setkey("machines/runnodes/"+nodeip, "init-"+self.mode) @@ -138,7 +138,7 @@ class 
NodeMgr(object): % (nodeip, workerport))) logger.info ("add %s:%s in rpc client list" % (nodeip, workerport)) - + # get all run nodes' IP addr def get_nodeips(self): return self.allnodes diff --git a/src/userManager.py b/src/userManager.py index d306353..72264dc 100755 --- a/src/userManager.py +++ b/src/userManager.py @@ -166,7 +166,7 @@ class userManager: quotas.append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'}) quotafile.write(json.dumps(quotas)) quotafile.close() - + def auth_local(self, username, password): password = hashlib.sha512(password.encode('utf-8')).hexdigest() @@ -399,7 +399,7 @@ class userManager: "tel" : user.tel, "register_date" : "%s"%(user.register_date), "group" : user.user_group, - "groupinfo": group, + "groupinfo": group, }, } return result @@ -411,8 +411,8 @@ class userManager: Modify informantion for oneself ''' form = kwargs['newValue'] - name = form.getvalue('name', None) - value = form.getvalue('value', None) + name = form.get('name', None) + value = form.get('value', None) if (name == None or value == None): result = {'success': 'false'} return result @@ -524,13 +524,13 @@ class userManager: groups = json.loads(groupfile.read()) groupfile.close() for group in groups: - if group['name'] == kwargs['newValue'].getvalue('groupname',None): + if group['name'] == kwargs['newValue'].get('groupname',None): form = kwargs['newValue'] for key in form.keys(): if key == "groupname" or key == "token": pass else: - group['quotas'][key] = form.getvalue(key) + group['quotas'][key] = form.get(key) groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() @@ -545,28 +545,28 @@ class userManager: will send an e-mail when status is changed from 'applying' to 'normal' Usage: modify(newValue = dict_from_form, cur_user = token_from_auth) ''' - user_modify = User.query.filter_by(username = kwargs['newValue'].getvalue('username', None)).first() + user_modify = User.query.filter_by(username 
= kwargs['newValue'].get('username', None)).first() if (user_modify == None): return {"success":'false', "reason":"User does not exist"} #try: form = kwargs['newValue'] - user_modify.truename = form.getvalue('truename', '') - user_modify.e_mail = form.getvalue('e_mail', '') - user_modify.department = form.getvalue('department', '') - user_modify.student_number = form.getvalue('student_number', '') - user_modify.tel = form.getvalue('tel', '') - user_modify.user_group = form.getvalue('group', '') - user_modify.auth_method = form.getvalue('auth_method', '') - if (user_modify.status == 'applying' and form.getvalue('status', '') == 'normal'): + user_modify.truename = form.get('truename', '') + user_modify.e_mail = form.get('e_mail', '') + user_modify.department = form.get('department', '') + user_modify.student_number = form.get('student_number', '') + user_modify.tel = form.get('tel', '') + user_modify.user_group = form.get('group', '') + user_modify.auth_method = form.get('auth_method', '') + if (user_modify.status == 'applying' and form.get('status', '') == 'normal'): send_activated_email(user_modify.e_mail, user_modify.username) - user_modify.status = form.getvalue('status', '') - if (form.getvalue('Chpassword', '') == 'Yes'): - new_password = form.getvalue('password','no_password') + user_modify.status = form.get('status', '') + if (form.get('Chpassword', '') == 'Yes'): + new_password = form.get('password','no_password') new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest() user_modify.password = new_password - #self.chpassword(cur_user = user_modify, password = form.getvalue('password','no_password')) + #self.chpassword(cur_user = user_modify, password = form.get('password','no_password')) db.session.commit() return {"success":'true'} @@ -621,9 +621,9 @@ class userManager: @administration_required def quotaadd(*args, **kwargs): form = kwargs.get('form') - quotaname = form.getvalue("quotaname") - default_value = form.getvalue("default_value") - 
hint = form.getvalue("hint") + quotaname = form.get("quotaname") + default_value = form.get("default_value") + hint = form.get("hint") if (quotaname == None): return { "success":'false', "reason": "Empty quota name"} if (default_value == None): @@ -648,7 +648,7 @@ class userManager: @administration_required def groupadd(*args, **kwargs): form = kwargs.get('form') - groupname = form.getvalue("groupname") + groupname = form.get("groupname") if (groupname == None): return {"success":'false', "reason": "Empty group name"} groupfile = open(fspath+"/global/sys/quota",'r') @@ -662,13 +662,13 @@ class userManager: if key == "groupname" or key == "token": pass else: - group['quotas'][key] = form.getvalue(key) + group['quotas'][key] = form.get(key) groups.append(group) groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() return {"success":'true'} - + @administration_required def groupdel(*args, **kwargs): name = kwargs.get('name', None) diff --git a/web/webViews/admin.py b/web/webViews/admin.py index a5f5188..4145f2f 100755 --- a/web/webViews/admin.py +++ b/web/webViews/admin.py @@ -17,13 +17,13 @@ class adminView(normalView): class groupaddView(normalView): @classmethod def post(self): - dockletRequest.post('/user/groupadd', request.form) + dockletRequest.post('/user/groupadd/', request.form) return redirect('/admin/') class quotaaddView(normalView): @classmethod def post(self): - dockletRequest.post('/user/quotaadd', request.form) + dockletRequest.post('/user/quotaadd/', request.form) return redirect('/admin/') class groupdelView(normalView): @@ -32,9 +32,9 @@ class groupdelView(normalView): data = { "name" : self.groupname, } - dockletRequest.post('/user/groupdel', data) + dockletRequest.post('/user/groupdel/', data) return redirect('/admin/') - + @classmethod def get(self): return self.post() diff --git a/web/webViews/authenticate/login.py b/web/webViews/authenticate/login.py index 898a710..66bc314 100755 --- 
a/web/webViews/authenticate/login.py +++ b/web/webViews/authenticate/login.py @@ -72,7 +72,7 @@ class loginView(normalView): else: return redirect('/login/') else: - self.error() + return redirect('/login/') class logoutView(normalView): diff --git a/web/webViews/monitor.py b/web/webViews/monitor.py index 6a48939..e4a655c 100755 --- a/web/webViews/monitor.py +++ b/web/webViews/monitor.py @@ -40,7 +40,7 @@ class statusRealtimeView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/vnodes/%s/basic_info'%(self.node_name), data) + result = dockletRequest.post('/monitor/vnodes/%s/basic_info/'%(self.node_name), data) basic_info = result.get('monitor').get('basic_info') return self.render(self.template_path, node_name = self.node_name, user = session['username'], container = basic_info) @@ -53,11 +53,11 @@ class hostsRealtimeView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/hosts/%s/cpuconfig'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/cpuconfig/'%(self.com_ip), data) proc = result.get('monitor').get('cpuconfig') - result = dockletRequest.post('/monitor/hosts/%s/osinfo'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/osinfo/'%(self.com_ip), data) osinfo = result.get('monitor').get('osinfo') - result = dockletRequest.post('/monitor/hosts/%s/diskinfo'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/diskinfo/'%(self.com_ip), data) diskinfos = result.get('monitor').get('diskinfo') return self.render(self.template_path, com_ip = self.com_ip, user = session['username'],processors = proc, OSinfo = osinfo, diskinfos = diskinfos) @@ -71,13 +71,13 @@ class hostsConAllView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/hosts/%s/containerslist'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/containerslist/'%(self.com_ip), data) containers = 
result.get('monitor').get('containerslist') containerslist = [] for container in containers: - result = dockletRequest.post('/monitor/vnodes/%s/basic_info'%(container), data) + result = dockletRequest.post('/monitor/vnodes/%s/basic_info/'%(container), data) basic_info = result.get('monitor').get('basic_info') - result = dockletRequest.post('/monitor/vnodes/%s/owner'%(container), data) + result = dockletRequest.post('/monitor/vnodes/%s/owner/'%(container), data) owner = result.get('monitor') basic_info['owner'] = owner containerslist.append(basic_info) @@ -91,14 +91,14 @@ class hostsView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/listphynodes', data) + result = dockletRequest.post('/monitor/listphynodes/', data) iplist = result.get('monitor').get('allnodes') machines = [] for ip in iplist: containers = {} - result = dockletRequest.post('/monitor/hosts/%s/containers'%(ip), data) + result = dockletRequest.post('/monitor/hosts/%s/containers/'%(ip), data) containers = result.get('monitor').get('containers') - result = dockletRequest.post('/monitor/hosts/%s/status'%(ip), data) + result = dockletRequest.post('/monitor/hosts/%s/status/'%(ip), data) status = result.get('monitor').get('status') machines.append({'ip':ip,'containers':containers, 'status':status}) #print(machines) @@ -112,9 +112,9 @@ class monitorUserAllView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/listphynodes', data) + result = dockletRequest.post('/monitor/listphynodes/', data) userslist = [{'name':'root'},{'name':'libao'}] for user in userslist: - result = dockletRequest.post('/monitor/user/%s/clustercnt'%(user['name']), data) + result = dockletRequest.post('/monitor/user/%s/clustercnt/'%(user['name']), data) user['clustercnt'] = result.get('monitor').get('clustercnt') return self.render(self.template_path, userslist = userslist, user = session['username']) diff --git a/web/webViews/user/userActivate.py 
b/web/webViews/user/userActivate.py index c8fe2b1..d047634 100644 --- a/web/webViews/user/userActivate.py +++ b/web/webViews/user/userActivate.py @@ -16,5 +16,5 @@ class userActivateView(normalView): @classmethod def post(self): - dockletRequest.post('/register', request.form) + dockletRequest.post('/register/', request.form) return redirect('/logout/') diff --git a/web/webViews/user/userlist.py b/web/webViews/user/userlist.py index b2a8a9e..1f9b93a 100644 --- a/web/webViews/user/userlist.py +++ b/web/webViews/user/userlist.py @@ -19,34 +19,32 @@ class userlistView(normalView): class useraddView(normalView): @classmethod def post(self): - dockletRequest.post('/user/add', request.form) + dockletRequest.post('/user/add/', request.form) return redirect('/user/list/') class userdataView(normalView): @classmethod def get(self): - return json.dumps(dockletRequest.post('/user/data', request.form)) + return json.dumps(dockletRequest.post('/user/data/', request.form)) @classmethod def post(self): - return json.dumps(dockletRequest.post('/user/data', request.form)) + return json.dumps(dockletRequest.post('/user/data/', request.form)) class userqueryView(normalView): @classmethod def get(self): - return json.dumps(dockletRequest.post('/user/query', request.form)) + return json.dumps(dockletRequest.post('/user/query/', request.form)) @classmethod def post(self): - return json.dumps(dockletRequest.post('/user/query', request.form)) + return json.dumps(dockletRequest.post('/user/query/', request.form)) class usermodifyView(normalView): @classmethod def post(self): try: - dockletRequest.post('/user/modify', request.form) + dockletRequest.post('/user/modify/', request.form) except: return self.render('user/mailservererror.html') return redirect('/user/list/') - - From a1c166bb091b139aa8297a697e7a5f8c9d739d21 Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 27 Apr 2016 16:21:04 +0800 Subject: [PATCH 05/19] use heartbeat package to keep worker alive in order to separate worker and 
master --- bin/docklet-worker | 10 ---------- src/stopworker.py | 13 ------------- src/worker.py | 20 +++++++++++++++++++- 3 files changed, 19 insertions(+), 24 deletions(-) delete mode 100755 src/stopworker.py diff --git a/bin/docklet-worker b/bin/docklet-worker index 7af7dab..b98098c 100755 --- a/bin/docklet-worker +++ b/bin/docklet-worker @@ -85,20 +85,10 @@ do_start() { log_end_msg $? } -do_changestage () { - RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf - export CONFIG=$RUNNING_CONFIG - log_daemon_msg "Change $DAEMON_NAME daemon state" - cmd=$(python3 ../src/stopworker.py) - log_end_msg $? -} - do_stop () { log_daemon_msg "Stopping $DAEMON_NAME daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE --retry 10 log_end_msg $? - - do_changestage } diff --git a/src/stopworker.py b/src/stopworker.py deleted file mode 100755 index 13e92ba..0000000 --- a/src/stopworker.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/python3 -import env,tools -config = env.getenv("CONFIG") -tools.loadenv(config) -import etcdlib, network - -if __name__ == '__main__': - etcdaddr = env.getenv("ETCD") - clustername = env.getenv("CLUSTER_NAME") - etcdclient = etcdlib.Client(etcdaddr, prefix = clustername) - net_dev = env.getenv("NETWORK_DEVICE") - ipaddr = network.getip(net_dev) - etcdclient.deldir("machines/runnodes/"+ipaddr) \ No newline at end of file diff --git a/src/worker.py b/src/worker.py index 47ee1f3..b206e96 100755 --- a/src/worker.py +++ b/src/worker.py @@ -12,6 +12,7 @@ from log import logger import xmlrpc.server, sys, time from socketserver import ThreadingMixIn +import threading import etcdlib, network, container from nettools import netcontrol import monitor @@ -65,7 +66,7 @@ class Worker(object): sys.exit(1) logger.info ("worker registered and checked the token") - # worker itself to judge how to init + # worker search all run nodes to judge how to init value = 'init-new' [status, runlist] = self.etcd.listdir("machines/runnodes") for 
node in runlist: @@ -147,9 +148,26 @@ class Worker(object): # start service of worker def start(self): self.etcd.setkey("machines/runnodes/"+self.addr, "work") + self.thread_sendheartbeat = threading.Thread(target=self.sendheartbeat) + self.thread_sendheartbeat.start() # start serving for rpc logger.info ("begins to work") self.rpcserver.serve_forever() + + # send heardbeat package to keep alive in etcd, ttl=2s + def sendheartbeat(self): + while(true): + # check send heartbeat package every 1s + time.sleep(1) + [status, value] = self.etcd.getkey("machines/runnodes/"+self.addr) + if status: + # master has know the worker so we start send heartbeat package + if value=='ok': + self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 2) + else: + logger.error("get key failed. %s" % node) + sys.exit(1) + if __name__ == '__main__': From 25ba87423dda4bef42ec49eb67acec96ce21817a Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 27 Apr 2016 16:36:32 +0800 Subject: [PATCH 06/19] fix a little bug --- src/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/worker.py b/src/worker.py index b206e96..05d8892 100755 --- a/src/worker.py +++ b/src/worker.py @@ -156,7 +156,7 @@ class Worker(object): # send heardbeat package to keep alive in etcd, ttl=2s def sendheartbeat(self): - while(true): + while(True): # check send heartbeat package every 1s time.sleep(1) [status, value] = self.etcd.getkey("machines/runnodes/"+self.addr) From c2c2adaa39f933f2d2bcefc9d02f36f8049c5373 Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 27 Apr 2016 16:43:11 +0800 Subject: [PATCH 07/19] delete unused var --- bin/docklet-worker | 1 - 1 file changed, 1 deletion(-) diff --git a/bin/docklet-worker b/bin/docklet-worker index b98098c..fd76b00 100755 --- a/bin/docklet-worker +++ b/bin/docklet-worker @@ -33,7 +33,6 @@ DAEMON_USER=root # settings for docklet worker DAEMON=$DOCKLET_LIB/worker.py -STOP_DEAMON=$DOCKLET_LIB/stopworker.py DAEMON_NAME=docklet-worker DAEMON_OPTS= # The 
process ID of the script when it runs is stored here: From 27413711534c5c800021f3776fbcd2dc0b8c603b Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 27 Apr 2016 16:54:47 +0800 Subject: [PATCH 08/19] delete unused var --- bin/docklet-worker | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bin/docklet-worker b/bin/docklet-worker index fd76b00..77ab93a 100755 --- a/bin/docklet-worker +++ b/bin/docklet-worker @@ -115,10 +115,6 @@ case "$1" in status) status_of_proc -p $PIDFILE "$DAEMON" "$DAEMON_NAME" && exit 0 || exit $? ;; - - change) - do_changestage - ;; *) echo "Usage: $DAEMON_NAME {start|stop|restart|status}" exit 1 From 718d70d3579d342fa3177c93ada9e3e4010cdea5 Mon Sep 17 00:00:00 2001 From: zhuyj17 Date: Sat, 30 Apr 2016 16:45:48 +0800 Subject: [PATCH 09/19] Using flask to display monitor info. --- src/httprest.py | 14 +++++++------- src/monitor.py | 2 +- web/static/js/plot_monitor.js | 6 +++--- web/static/js/plot_monitorReal.js | 6 +++--- web/templates/monitor/hosts.html | 10 +++++----- web/templates/monitor/hostsConAll.html | 6 +++--- web/templates/monitor/status.html | 8 ++++---- web/web.py | 4 ++-- 8 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/httprest.py b/src/httprest.py index 99ec0f7..97258d5 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -393,23 +393,23 @@ def hosts_monitor(cur_user, user, form, com_id, issue): return json.dumps({'success':'true', 'monitor':res}) -@app.route("/monitor/vnodes///", methods=['POST']) +@app.route("/monitor/vnodes///", methods=['POST']) @login_required -def vnodes_monitor(cur_user, user, form, com_id, issue): +def vnodes_monitor(cur_user, user, form, con_id, issue): global G_clustername logger.info("handle request: monitor/vnodes") res = {} fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) if issue == 'cpu_use': - res['cpu_use'] = fetcher.get_cpu_use(cmds[2]) + res['cpu_use'] = fetcher.get_cpu_use(con_id) elif issue == 'mem_use': - res['mem_use'] = fetcher.get_mem_use(cmds[2]) + 
res['mem_use'] = fetcher.get_mem_use(con_id) elif issue == 'disk_use': - res['disk_use'] = fetcher.get_disk_use(cmds[2]) + res['disk_use'] = fetcher.get_disk_use(con_id) elif issue == 'basic_info': - res['basic_info'] = fetcher.get_basic_info(cmds[2]) + res['basic_info'] = fetcher.get_basic_info(con_id) elif issue == 'owner': - names = com_id.split('-') + names = con_id.split('-') result = G_usermgr.query(username = names[0], cur_user = cur_user) if result['success'] == 'false': res['username'] = "" diff --git a/src/monitor.py b/src/monitor.py index f8917a8..23f84a3 100755 --- a/src/monitor.py +++ b/src/monitor.py @@ -68,7 +68,7 @@ class Container_Collector(threading.Thread): else: self.cpu_quota[container_name] = tmp/100000.0 quota = {'cpu':self.cpu_quota[container_name],'memory':self.mem_quota[container_name]} - logger.info(quota) + #logger.info(quota) self.etcdser.setkey('/vnodes/%s/quota'%(container_name),quota) else: logger.error("Cant't find config file %s"%(confpath)) diff --git a/web/static/js/plot_monitor.js b/web/static/js/plot_monitor.js index 7391d64..53fcc3b 100755 --- a/web/static/js/plot_monitor.js +++ b/web/static/js/plot_monitor.js @@ -158,12 +158,12 @@ var host = window.location.host; var node_name = $("#node_name").html(); var url = "http://" + host + "/monitor/vnodes/" + node_name; -plot_graph($("#mem-chart"),url + "/mem_use",processMemData,getMemY); -plot_graph($("#cpu-chart"),url + "/cpu_use",processCpuData,getCpuY); +plot_graph($("#mem-chart"),url + "/mem_use/",processMemData,getMemY); +plot_graph($("#cpu-chart"),url + "/cpu_use/",processCpuData,getCpuY); function processDiskData() { - $.post(url+"/disk_use",{},function(data){ + $.post(url+"/disk_use/",{},function(data){ var diskuse = data.monitor.disk_use; var usedp = diskuse.percent; var total = diskuse.total/1024.0/1024.0; diff --git a/web/static/js/plot_monitorReal.js b/web/static/js/plot_monitorReal.js index 6237fd2..b85cc6a 100755 --- a/web/static/js/plot_monitorReal.js +++ 
b/web/static/js/plot_monitorReal.js @@ -190,8 +190,8 @@ var host = window.location.host; var com_ip = $("#com_ip").html(); var url = "http://" + host + "/monitor/hosts/"+com_ip; -plot_graph($("#mem-chart"), url + "/meminfo",processMemData,getMemY); -plot_graph($("#cpu-chart"), url + "/cpuinfo",processCpuData,getCpuY); +plot_graph($("#mem-chart"), url + "/meminfo/",processMemData,getMemY); +plot_graph($("#cpu-chart"), url + "/cpuinfo/",processCpuData,getCpuY); //plot_graph($("#disk-chart"), url + "/diskinfo",processDiskData,getDiskY); -$.post(url+"/diskinfo",{user:"root",key:"unias"},processDiskData,"json"); +$.post(url+"/diskinfo/",{user:"root",key:"unias"},processDiskData,"json"); diff --git a/web/templates/monitor/hosts.html b/web/templates/monitor/hosts.html index afbe6af..c080940 100644 --- a/web/templates/monitor/hosts.html +++ b/web/templates/monitor/hosts.html @@ -78,7 +78,7 @@ { var MB = 1024; - $.post(url+"/status",{},function(data){ + $.post(url+"/status/",{},function(data){ var status = data.monitor.status; if(status == 'RUNNING') { @@ -95,7 +95,7 @@ tmp.html("Stopped"); } - $.post(url+"/containers",{},function(data){ + $.post(url+"/containers/",{},function(data){ var containers = data.monitor.containers; $("#"+index+"_contotal").html(containers.total); $("#"+index+"_conrunning").html(containers.running); @@ -109,20 +109,20 @@ return; } - $.post(url+"/cpuinfo",{},function(data){ + $.post(url+"/cpuinfo/",{},function(data){ var idle = data.monitor.cpuinfo.idle; var usedp = (100 - idle).toFixed(2); $("#"+index+"_cpu").html(String(usedp)+"%"); },"json"); - $.post(url+"/meminfo",{},function(data){ + $.post(url+"/meminfo/",{},function(data){ var used = data.monitor.meminfo.used; var total = data.monitor.meminfo.total; var usedp = String(((used/total)*100).toFixed(2))+"%"; $("#"+index+"_mem").html(usedp); },"json"); - $.post(url+"/diskinfo",{},function(data){ + $.post(url+"/diskinfo/",{},function(data){ var val = data.monitor.diskinfo; var usedp = 
val[0].percent; $("#"+index+"_disk").html(String(usedp)+"%"); diff --git a/web/templates/monitor/hostsConAll.html b/web/templates/monitor/hostsConAll.html index c2302f5..8c72631 100644 --- a/web/templates/monitor/hostsConAll.html +++ b/web/templates/monitor/hostsConAll.html @@ -85,7 +85,7 @@ function update(url,index) { - $.post(url+"/basic_info",{},function(data){ + $.post(url+"/basic_info/",{},function(data){ var state = data.monitor.basic_info.State; if(state == 'RUNNING') { @@ -109,13 +109,13 @@ return; } - $.post(url+"/cpu_use",{},function(data){ + $.post(url+"/cpu_use/",{},function(data){ var val = data.monitor.cpu_use.val; var unit = data.monitor.cpu_use.unit; $("#"+index+"_cpu").html(val +" "+ unit); },"json"); - $.post(url+"/mem_use",{},function(data){ + $.post(url+"/mem_use/",{},function(data){ var val = data.monitor.mem_use.val; var unit = data.monitor.mem_use.unit $("#"+index+"_mem").html(val+" "+unit); diff --git a/web/templates/monitor/status.html b/web/templates/monitor/status.html index 133a390..e8c1dac 100644 --- a/web/templates/monitor/status.html +++ b/web/templates/monitor/status.html @@ -122,7 +122,7 @@ function update(url,index) { - $.post(url+"/basic_info",{},function(data){ + $.post(url+"/basic_info/",{},function(data){ var state = data.monitor.basic_info.State; if(state == 'RUNNING') { @@ -146,7 +146,7 @@ return; } - $.post(url+"/cpu_use",{},function(data){ + $.post(url+"/cpu_use/",{},function(data){ var usedp = data.monitor.cpu_use.usedp; var quota = data.monitor.cpu_use.quota.cpu; var quotaout = "("+quota; @@ -157,7 +157,7 @@ $("#"+index+"_cpu").html((usedp/0.01).toFixed(2)+"%
"+quotaout); },"json"); - $.post(url+"/mem_use",{},function(data){ + $.post(url+"/mem_use/",{},function(data){ var usedp = data.monitor.mem_use.usedp; var unit = data.monitor.mem_use.unit; var quota = data.monitor.mem_use.quota.memory/1024.0; @@ -166,7 +166,7 @@ $("#"+index+"_mem").html((usedp/0.01).toFixed(2)+"%
"+out); },"json"); - $.post(url+"/disk_use",{},function(data){ + $.post(url+"/disk_use/",{},function(data){ var diskuse = data.monitor.disk_use; var usedp = diskuse.percent; var total = diskuse.total/1024.0/1024.0; diff --git a/web/web.py b/web/web.py index 5d041b6..a7954ee 100755 --- a/web/web.py +++ b/web/web.py @@ -265,8 +265,8 @@ def statusRealtime(vcluster_name,node_name): statusRealtimeView.node_name = node_name return statusRealtimeView.as_view() -@app.route("/monitor/hosts//", methods=['POST']) -@app.route("/monitor/vnodes//", methods=['POST']) +@app.route("/monitor/hosts///", methods=['POST']) +@app.route("/monitor/vnodes///", methods=['POST']) @login_required def monitor_request(comid,infotype): data = { From a1fb4b0aee2e005c3b29fdac5da9c9f564f04f84 Mon Sep 17 00:00:00 2001 From: Peidong Liu Date: Tue, 3 May 2016 15:03:28 +0800 Subject: [PATCH 10/19] Test the code --- src/httprest.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/src/httprest.py b/src/httprest.py index 97258d5..32a672e 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -520,6 +520,46 @@ def data_user(cur_user, user, form): result = G_usermgr.userList(cur_user = cur_user) return json.dumps(result) +@app.route("/user/groupNameList/", methods=['POST']) +@login_required +def groupNameList_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupNameList/") + result = G_usermgr.groupListName(cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/groupList/", methods=['POST']) +@login_required +def groupList_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupList/") + result = G_usermgr.groupList(cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/groupQuery/", methods=['POST']) +@login_required +def groupQuery_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupQuery/") + result = G_usermgr.groupQuery(name = 
form.getvalue("name"), cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/selfQuery/", methods=['POST']) +@login_required +def selfQuery_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/selfQuery/") + result = G_usermgr.selfQuery(cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/selfModify/", methods=['POST']) +@login_required +def selfModify_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/selfModify/") + result = G_usermgr.selfModify(cur_user = cur_user, newValue = form) + return json.dumps(result) if __name__ == '__main__': From 2fc5a7916375077c1d9f6bf2054821900331292e Mon Sep 17 00:00:00 2001 From: Peidong Liu Date: Wed, 27 Apr 2016 15:59:33 +0800 Subject: [PATCH 11/19] Now backend is flask-styled fix a bug that will cause 404 in /login/ --- src/httprest.py | 996 ++++++++++++++++------------- src/nodemgr.py | 16 +- src/userManager.py | 53 +- web/webViews/admin.py | 8 +- web/webViews/authenticate/login.py | 2 +- web/webViews/monitor.py | 24 +- web/webViews/user/userActivate.py | 2 +- web/webViews/user/userlist.py | 14 +- 8 files changed, 602 insertions(+), 513 deletions(-) diff --git a/src/httprest.py b/src/httprest.py index 69cd05b..a2c7bb6 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -4,6 +4,8 @@ # because some modules need variables when import # for example, userManager/model.py +from flask import Flask, request + # must first init loadenv import tools, env config = env.getenv("CONFIG") @@ -27,478 +29,568 @@ external_login = env.getenv('EXTERNAL_LOGIN') if (external_login == 'TRUE'): from userDependence import external_auth -class DockletHttpHandler(http.server.BaseHTTPRequestHandler): - def response(self, code, output): - self.send_response(code) - self.send_header("Content-type", "application/json") - self.end_headers() - # wfile/rfile are in byte/binary encoded. 
need to recode - self.wfile.write(json.dumps(output).encode('ascii')) - self.wfile.write("\n".encode('ascii')) - # do not wfile.close() - # because self.handle_one_request will call wfile.flush after calling do_* - # and self.handle_one_request will close this wfile after timeout automatically - # (see /usr/lib/python3.4/http/server.py handle_one_request function) - #self.wfile.close() +app = Flask(__name__) - # override log_request to not print default request log - # we use the log info by ourselves in our style - def log_request(code = '-', size = '-'): - pass +from functools import wraps - def do_PUT(self): - self.response(400, {'success':'false', 'message':'Not supported methond'}) - def do_GET(self): - self.response(400, {'success':'false', 'message':'Not supported methond'}) - - def do_DELETE(self): - self.response(400, {'success':'false', 'message':'Not supported methond'}) - - # handler POST request - def do_POST(self): - global G_vclustermgr +def login_required(func): + @wraps(func) + def wrapper(*args, **kwargs): global G_usermgr - #logger.info ("get request, header content:\n%s" % self.headers) - #logger.info ("read request content:\n%s" % self.rfile.read(int(self.headers["Content-Length"]))) - logger.info ("get request, path: %s" % self.path) - # for test - if self.path == '/test': - logger.info ("return welcome for test") - self.response(200, {'success':'true', 'message':'welcome to docklet'}) - return [True, 'test ok'] - - # check for not null content - if 'Content-Length' not in self.headers: - logger.info ("request content is null") - self.response(401, {'success':'false', 'message':'request content is null'}) - return [False, 'content is null'] - - # auth the user - # cgi.FieldStorage need fp/headers/environ. 
(see /usr/lib/python3.4/cgi.py) - form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,environ={'REQUEST_METHOD':'POST'}) - cmds = self.path.strip('/').split('/') - if cmds[0] == 'register' and form.getvalue('activate', None) == None: - logger.info ("handle request : user register") - username = form.getvalue('username', '') - password = form.getvalue('password', '') - email = form.getvalue('email', '') - description = form.getvalue('description','') - if (username == '' or password == '' or email == ''): - self.response(500, {'success':'false'}) - newuser = G_usermgr.newuser() - newuser.username = form.getvalue('username') - newuser.password = form.getvalue('password') - newuser.e_mail = form.getvalue('email') - newuser.student_number = form.getvalue('studentnumber') - newuser.department = form.getvalue('department') - newuser.nickname = form.getvalue('truename') - newuser.truename = form.getvalue('truename') - newuser.description = form.getvalue('description') - newuser.status = "init" - newuser.auth_method = "local" - result = G_usermgr.register(user = newuser) - self.response(200, result) - return [True, "register succeed"] - if cmds[0] == 'login': - logger.info ("handle request : user login") - user = form.getvalue("user") - key = form.getvalue("key") - if user == None or key == None: - self.response(401, {'success':'false', 'message':'user or key is null'}) - return [False, "auth failed"] - auth_result = G_usermgr.auth(user, key) - if auth_result['success'] == 'false': - self.response(401, {'success':'false', 'message':'auth failed'}) - return [False, "auth failed"] - self.response(200, {'success':'true', 'action':'login', 'data': auth_result['data']}) - return [True, "auth succeeded"] - if cmds[0] == 'external_login': - logger.info ("handle request : external user login") - try: - result = G_usermgr.auth_external(form) - self.response(200, result) - return result - except: - result = {'success': 'false', 'reason': 'Something wrong happened when auth 
an external account'} - self.response(200, result) - return result - - token = form.getvalue("token") - if token == None: - self.response(401, {'success':'false', 'message':'user or key is null'}) - return [False, "auth failed"] + logger.info ("get request, path: %s" % request.path) + token = request.form.get("token", None) + if (token == None): + return {'success':'false', 'message':'user or key is null'} cur_user = G_usermgr.auth_token(token) - if cur_user == None: - self.response(401, {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) - return [False, "auth failed"] - + if (cur_user == None): + return {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'} + return func(cur_user, cur_user.username, request.form, *args, **kwargs) + + return wrapper + +@app.route("/login/", methods=['POST']) +def login(): + global G_usermgr + logger.info ("handle request : user login") + user = request.form.get("user", None) + key = request.form.get("key", None) + if user == None or key == None: + return json.dumps({'success':'false', 'message':'user or key is null'}) + auth_result = G_usermgr.auth(user, key) + if auth_result['success'] == 'false': + return json.dumps({'success':'false', 'message':'auth failed'}) + return json.dumps({'success':'true', 'action':'login', 'data': auth_result['data']}) + +@app.route("/external_login/", methods=['POST']) +def external_login(): + global G_usermgr + logger.info ("handle request : external user login") + try: + result = G_usermgr.auth_external(request.form) + return json.dumps(result) + except: + result = {'success': 'false', 'reason': 'Something wrong happened when auth an external account'} + return json.dumps(result) + +@app.route("/register/", methods=['POST']) +def register(): + global G_usermgr + if request.form.get('activate', None) == None: + logger.info ("handle request : user register") + username = request.form.get('username', '') + password = request.form.get('password', 
'') + email = request.form.get('email', '') + description = request.form.get('description','') + if (username == '' or password == '' or email == ''): + return json.dumps({'success':'false'}) + newuser = G_usermgr.newuser() + newuser.username = request.form.get('username') + newuser.password = request.form.get('password') + newuser.e_mail = request.form.get('email') + newuser.student_number = request.form.get('studentnumber') + newuser.department = request.form.get('department') + newuser.nickname = request.form.get('truename') + newuser.truename = request.form.get('truename') + newuser.description = request.form.get('description') + newuser.status = "init" + newuser.auth_method = "local" + result = G_usermgr.register(user = newuser) + return json.dumps(result) + else: + logger.info ("handle request, user activating") + token = request.form.get("token", None) + if (token == None): + return json.dumps({'success':'false', 'message':'user or key is null'}) + cur_user = G_usermgr.auth_token(token) + if (cur_user == None): + return json.dumps({'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) + newuser = G_usermgr.newuser() + newuser.username = cur_user.username + newuser.nickname = cur_user.truename + newuser.status = 'applying' + newuser.user_group = cur_user.user_group + newuser.auth_method = cur_user.auth_method + newuser.e_mail = form.get('email','') + newuser.student_number = form.get('studentnumber', '') + newuser.department = form.get('department', '') + newuser.truename = form.get('truename', '') + newuser.tel = form.get('tel', '') + newuser.description = form.get('description', '') + result = G_usermgr.register(user = newuser) + userManager.send_remind_activating_email(newuser.username) + return json.dumps(result) - user = cur_user.username - # parse the url and get to do actions - # /cluster/list - # /cluster/create & clustername - # /cluster/start & clustername - # /cluster/stop & clustername - # /cluster/delete & clustername - 
# /cluster/info & clustername +@app.route("/cluster/create/", methods=['POST']) +@login_required +def create_cluster(cur_user, user, form): + global G_usermgr + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + image = {} + image['name'] = form.get("imagename", None) + image['type'] = form.get("imagetype", None) + image['owner'] = form.get("imageowner", None) + user_info = G_usermgr.selfQuery(cur_user = cur_user) + user_info = json.dumps(user_info) + logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name'])) + [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info) + if status: + return json.dumps({'success':'true', 'action':'create cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'create cluster', 'message':result}) + +@app.route("/cluster/scaleout/", methods=['POST']) +@login_required +def scaleout_cluster(cur_user, user, form): + global G_usermgr + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info("handle request : scale out %s" % clustername) + image = {} + image['name'] = form.get("imagename", None) + image['type'] = form.get("imagetype", None) + image['owner'] = form.get("imageowner", None) + logger.debug("imagename:" + image['name']) + logger.debug("imagetype:" + image['type']) + logger.debug("imageowner:" + image['owner']) + user_info = G_usermgr.selfQuery(cur_user = cur_user) + user_info = json.dumps(user_info) + [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info) + if status: + return json.dumps({'success':'true', 'action':'scale out', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'scale out', 'message':result}) + 
+@app.route("/cluster/scalein/", methods=['POST']) +@login_required +def scalein_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info("handle request : scale in %s" % clustername) + containername = form.get("containername", None) + [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername) + if status: + return json.dumps({'success':'true', 'action':'scale in', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'scale in', 'message':result}) + +@app.route("/cluster/start/", methods=['POST']) +@login_required +def start_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : start cluster %s" % clustername) + [status, result] = G_vclustermgr.start_cluster(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'start cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'start cluster', 'message':result}) + +@app.route("/cluster/stop/", methods=['POST']) +@login_required +def stop_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : start cluster %s" % clustername) + [status, result] = G_vclustermgr.stop_cluster(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'stop cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'stop cluster', 'message':result}) + +@app.route("/cluster/delete/", methods=['POST']) +@login_required +def delete_cluster(cur_user, user, form): + global G_vclustermgr 
+ clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : delete cluster %s" % clustername) + [status, result] = G_vclustermgr.delete_cluster(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'delete cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'delete cluster', 'message':result}) + +@app.route("/cluster/info/", methods=['POST']) +@login_required +def info_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + logger.info ("handle request : info cluster %s" % clustername) + [status, result] = G_vclustermgr.get_clusterinfo(clustername, user) + if status: + return json.dumps({'success':'true', 'action':'info cluster', 'message':result}) + else: + return json.dumps({'success':'false', 'action':'info cluster', 'message':result}) + +@app.route("/cluster/list/", methods=['POST']) +@login_required +def list_cluster(cur_user, user, form): + global G_vclustermgr + logger.info ("handle request : list clusters for %s" % user) + [status, clusterlist] = G_vclustermgr.list_clusters(user) + if status: + return json.dumps({'success':'true', 'action':'list cluster', 'clusters':clusterlist}) + else: + return json.dumps({'success':'false', 'action':'list cluster', 'message':clusterlist}) + +@app.route("/cluster/flush/", methods=['POST']) +@login_required +def flush_cluster(cur_user, user, form): + global G_vclustermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + from_lxc = form.get('from_lxc', None) + G_vclustermgr.flush_cluster(user,clustername,from_lxc) + return json.dumps({'success':'true', 'action':'flush'}) + +@app.route("/cluster/save/", 
methods=['POST']) +@login_required +def save_cluster(cur_user, user, form): + global G_vclustermgr + global G_usermgr + clustername = form.get('clustername', None) + if (clustername == None): + return json.dumps({'success':'false', 'message':'clustername is null'}) + + imagename = form.get("image", None) + description = form.get("description", None) + containername = form.get("containername", None) + isforce = form.get("isforce", None) + if not isforce == "true": + [status,message] = G_vclustermgr.image_check(user,imagename) + if not status: + return json.dumps({'success':'false','reason':'exists', 'message':message}) + + user_info = G_usermgr.selfQuery(cur_user = cur_user) + [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"]) + if status: + logger.info("image has been saved") + return json.dumps({'success':'true', 'action':'save'}) + else: + logger.debug(message) + return json.dumps({'success':'false', 'reason':'exceed', 'message':message}) - if cmds[0] == 'cluster': - clustername = form.getvalue('clustername') - # check for 'clustername' : all actions except 'list' need 'clustername' - if (cmds[1] != 'list') and clustername == None: - self.response(401, {'success':'false', 'message':'clustername is null'}) - return [False, "clustername is null"] - if cmds[1] == 'create': - image = {} - image['name'] = form.getvalue("imagename") - image['type'] = form.getvalue("imagetype") - image['owner'] = form.getvalue("imageowner") - user_info = G_usermgr.selfQuery(cur_user = cur_user) - user_info = json.dumps(user_info) - logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name'])) - [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info) - if status: - self.response(200, {'success':'true', 'action':'create cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'create cluster', 
'message':result}) - elif cmds[1] == 'scaleout': - logger.info("handle request : scale out %s" % clustername) - image = {} - image['name'] = form.getvalue("imagename") - image['type'] = form.getvalue("imagetype") - image['owner'] = form.getvalue("imageowner") - logger.debug("imagename:" + image['name']) - logger.debug("imagetype:" + image['type']) - logger.debug("imageowner:" + image['owner']) - user_info = G_usermgr.selfQuery(cur_user = cur_user) - user_info = json.dumps(user_info) - [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info) - if status: - self.response(200, {'success':'true', 'action':'scale out', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'scale out', 'message':result}) - elif cmds[1] == 'scalein': - logger.info("handle request : scale in %s" % clustername) - containername = form.getvalue("containername") - [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername) - if status: - self.response(200, {'success':'true', 'action':'scale in', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'scale in', 'message':result}) - elif cmds[1] == 'start': - logger.info ("handle request : start cluster %s" % clustername) - [status, result] = G_vclustermgr.start_cluster(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'start cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'start cluster', 'message':result}) - elif cmds[1] == 'stop': - logger.info ("handle request : stop cluster %s" % clustername) - [status, result] = G_vclustermgr.stop_cluster(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'stop cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'stop cluster', 'message':result}) - elif cmds[1] == 'delete': - logger.info ("handle request : delete cluster %s" % clustername) - [status, result] = 
G_vclustermgr.delete_cluster(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'delete cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'delete cluster', 'message':result}) - elif cmds[1] == 'info': - logger.info ("handle request : info cluster %s" % clustername) - [status, result] = G_vclustermgr.get_clusterinfo(clustername, user) - if status: - self.response(200, {'success':'true', 'action':'info cluster', 'message':result}) - else: - self.response(200, {'success':'false', 'action':'info cluster', 'message':result}) - elif cmds[1] == 'list': - logger.info ("handle request : list clusters for %s" % user) - [status, clusterlist] = G_vclustermgr.list_clusters(user) - if status: - self.response(200, {'success':'true', 'action':'list cluster', 'clusters':clusterlist}) - else: - self.response(400, {'success':'false', 'action':'list cluster', 'message':clusterlist}) +@app.route("/image/list/", methods=['POST']) +@login_required +def list_image(cur_user, user, form): + global G_imagemgr + images = G_imagemgr.list_images(user) + return json.dumps({'success':'true', 'images': images}) - elif cmds[1] == 'flush': - from_lxc = form.getvalue('from_lxc') - G_vclustermgr.flush_cluster(user,clustername,from_lxc) - self.response(200, {'success':'true', 'action':'flush'}) +@app.route("/image/description/", methods=['POST']) +@login_required +def description_image(cur_user, user, form): + global G_imagemgr + image = {} + image['name'] = form.get("imagename", None) + image['type'] = form.get("imagetype", None) + image['owner'] = form.get("imageowner", None) + description = G_imagemgr.get_image_description(user,image) + return json.dumps({'success':'true', 'message':description}) - elif cmds[1] == 'save': - imagename = form.getvalue("image") - description = form.getvalue("description") - containername = form.getvalue("containername") - isforce = form.getvalue("isforce") - if not isforce == "true": - [status,message] 
= G_vclustermgr.image_check(user,imagename) - if not status: - self.response(200, {'success':'false','reason':'exists', 'message':message}) - return [False, "image already exists"] - user_info = G_usermgr.selfQuery(cur_user = cur_user) - [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"]) - if status: - logger.info("image has been saved") - self.response(200, {'success':'true', 'action':'save'}) - else: - logger.debug(message) - self.response(200, {'success':'false', 'reason':'exceed', 'message':message}) +@app.route("/image/share/", methods=['POST']) +@login_required +def share_image(cur_user, user, form): + global G_imagemgr + image = form.getvalue('image') + G_imagemgr.shareImage(user,image) + return json.dumps({'success':'true', 'action':'share'}) - else: - logger.warning ("request not supported ") - self.response(400, {'success':'false', 'message':'not supported request'}) +@app.route("/image/unshare/", methods=['POST']) +@login_required +def unshare_image(cur_user, user, form): + global G_imagemgr + image = form.get('image', None) + G_imagemgr.unshareImage(user,image) + return json.dumps({'success':'true', 'action':'unshare'}) - # Request for Image - elif cmds[0] == 'image': - if cmds[1] == 'list': - images = G_imagemgr.list_images(user) - self.response(200, {'success':'true', 'images': images}) - elif cmds[1] == 'description': - image = {} - image['name'] = form.getvalue("imagename") - image['type'] = form.getvalue("imagetype") - image['owner'] = form.getvalue("imageowner") - description = G_imagemgr.get_image_description(user,image) - self.response(200, {'success':'true', 'message':description}) - elif cmds[1] == 'share': - image = form.getvalue('image') - G_imagemgr.shareImage(user,image) - self.response(200, {'success':'true', 'action':'share'}) - elif cmds[1] == 'unshare': - image = form.getvalue('image') - G_imagemgr.unshareImage(user,image) - self.response(200, 
{'success':'true', 'action':'unshare'}) - elif cmds[1] == 'delete': - image = form.getvalue('image') - G_imagemgr.removeImage(user,image) - self.response(200, {'success':'true', 'action':'delete'}) - else: - logger.warning("request not supported ") - self.response(400, {'success':'false', 'message':'not supported request'}) +@app.route("/image/delete/", methods=['POST']) +@login_required +def delete_image(cur_user, user, form): + global G_imagemgr + image = form.get('image', None) + G_imagemgr.removeImage(user,image) + return json.dumps({'success':'true', 'action':'delete'}) - # Add Proxy - elif cmds[0] == 'addproxy': - logger.info ("handle request : add proxy") - proxy_ip = form.getvalue("ip") - proxy_port = form.getvalue("port") - clustername = form.getvalue("clustername") - [status, message] = G_vclustermgr.addproxy(user,clustername,proxy_ip,proxy_port) - if status is True: - self.response(200, {'success':'true', 'action':'addproxy'}) - else: - self.response(400, {'success':'false', 'message': message}) - # Delete Proxy - elif cmds[0] == 'deleteproxy': - logger.info ("handle request : delete proxy") - clustername = form.getvalue("clustername") - G_vclustermgr.deleteproxy(user,clustername) - self.response(200, {'success':'true', 'action':'deleteproxy'}) +@app.route("/addproxy/", methods=['POST']) +@login_required +def addproxy(cur_user, user, form): + global G_vclustermgr + logger.info ("handle request : add proxy") + proxy_ip = form.get("ip", None) + proxy_port = form.get("port", None) + clustername = form.get("clustername", None) + [status, message] = G_vclustermgr.addproxy(user,clustername,proxy_ip,proxy_port) + if status is True: + return json.dumps({'success':'true', 'action':'addproxy'}) + else: + return json.dumps({'success':'false', 'message': message}) - # Request for Monitor - elif cmds[0] == 'monitor': - logger.info("handle request: monitor") - res = {} - if cmds[1] == 'hosts': - com_id = cmds[2] - fetcher = 
monitor.Fetcher(etcdaddr,G_clustername,com_id) - if cmds[3] == 'meminfo': - res['meminfo'] = fetcher.get_meminfo() - elif cmds[3] == 'cpuinfo': - res['cpuinfo'] = fetcher.get_cpuinfo() - elif cmds[3] == 'cpuconfig': - res['cpuconfig'] = fetcher.get_cpuconfig() - elif cmds[3] == 'diskinfo': - res['diskinfo'] = fetcher.get_diskinfo() - elif cmds[3] == 'osinfo': - res['osinfo'] = fetcher.get_osinfo() - elif cmds[3] == 'containers': - res['containers'] = fetcher.get_containers() - elif cmds[3] == 'status': - res['status'] = fetcher.get_status() - elif cmds[3] == 'containerslist': - res['containerslist'] = fetcher.get_containerslist() - elif cmds[3] == 'containersinfo': - res = [] - conlist = fetcher.get_containerslist() - for container in conlist: - ans = {} - confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) - ans = confetcher.get_basic_info(container) - ans['cpu_use'] = confetcher.get_cpu_use(container) - ans['mem_use'] = confetcher.get_mem_use(container) - res.append(ans) - else: - self.response(400, {'success':'false', 'message':'not supported request'}) - return +@app.route("/deleteproxy/", methods=['POST']) +@login_required +def deleteproxy(cur_user, user, form): + global G_vclustermgr + logger.info ("handle request : delete proxy") + clustername = form.get("clustername", None) + G_vclustermgr.deleteproxy(user,clustername) + self.response(200, {'success':'true', 'action':'deleteproxy'}) - self.response(200, {'success':'true', 'monitor':res}) - elif cmds[1] == 'vnodes': - fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) - if cmds[3] == 'cpu_use': - res['cpu_use'] = fetcher.get_cpu_use(cmds[2]) - elif cmds[3] == 'mem_use': - res['mem_use'] = fetcher.get_mem_use(cmds[2]) - elif cmds[3] == 'disk_use': - res['disk_use'] = fetcher.get_disk_use(cmds[2]) - elif cmds[3] == 'basic_info': - res['basic_info'] = fetcher.get_basic_info(cmds[2]) - elif cmds[3] == 'owner': - names = cmds[2].split('-') - result = G_usermgr.query(username = names[0], 
cur_user = cur_user) - if result['success'] == 'false': - res['username'] = "" - res['truename'] = "" - else: - res['username'] = result['data']['username'] - res['truename'] = result['data']['truename'] - else: - res = "Unspported Method!" - self.response(200, {'success':'true', 'monitor':res}) - elif cmds[1] == 'user': - if cmds[2] == 'quotainfo': - user_info = G_usermgr.selfQuery(cur_user = cur_user) - quotainfo = user_info['data']['groupinfo'] - self.response(200, {'success':'true', 'quotainfo':quotainfo}) - '''if not user == 'root': - self.response(400, {'success':'false', 'message':'Root Required'}) - if cmds[3] == 'clustercnt': - flag = True - clutotal = 0 - clurun = 0 - contotal = 0 - conrun = 0 - [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2]) - if status: - for clustername in clusterlist: - clutotal += 1 - [status2, result] = G_vclustermgr.get_clusterinfo(clustername, cmds[2]) - if status2: - contotal += result['size'] - if result['status'] == 'running': - clurun += 1 - conrun += result['size'] - else: - flag = False - if flag: - res = {} - res['clutotal'] = clutotal - res['clurun'] = clurun - res['contotal'] = contotal - res['conrun'] = conrun - self.response(200, {'success':'true', 'monitor':{'clustercnt':res}}) - else: - self.response(200, {'success':'false','message':clusterlist}) - elif cmds[3] == 'cluster': - if cmds[4] == 'list': - [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2]) - if status: - self.response(200, {'success':'true', 'monitor':{'clusters':clusterlist}}) - else: - self.response(400, {'success':'false', 'message':clusterlist}) - elif cmds[4] == 'info': - clustername = form.getvalue('clustername') - logger.info ("handle request : info cluster %s" % clustername) - [status, result] = G_vclustermgr.get_clusterinfo(clustername, user) - if status: - self.response(200, {'success':'true', 'monitor':{'info':result}}) - else: - self.response(200, {'success':'false','message':result}) - else: - self.response(400, 
{'success':'false', 'message':'not supported request'})''' +@app.route("/monitor/hosts///", methods=['POST']) +@login_required +def hosts_monitor(cur_user, user, form, com_id, issue): + global G_clustername - elif cmds[1] == 'listphynodes': - res['allnodes'] = G_nodemgr.get_allnodes() - self.response(200, {'success':'true', 'monitor':res}) - # Request for User - elif cmds[0] == 'user': - logger.info("handle request: user") - if cmds[1] == 'modify': - #user = G_usermgr.query(username = form.getvalue("username"), cur_user = cur_user).get('token', None) - result = G_usermgr.modify(newValue = form, cur_user = cur_user) - self.response(200, result) - if cmds[1] == 'groupModify': - result = G_usermgr.groupModify(newValue = form, cur_user = cur_user) - self.response(200, result) - if cmds[1] == 'query': - result = G_usermgr.query(ID = form.getvalue("ID"), cur_user = cur_user) - if (result.get('success', None) == None or result.get('success', None) == "false"): - self.response(301,result) - else: - result = G_usermgr.queryForDisplay(user = result['token']) - self.response(200,result) + logger.info("handle request: monitor/hosts") + res = {} + fetcher = monitor.Fetcher(etcdaddr,G_clustername,com_id) + if issue == 'meminfo': + res['meminfo'] = fetcher.get_meminfo() + elif issue == 'cpuinfo': + res['cpuinfo'] = fetcher.get_cpuinfo() + elif issue == 'cpuconfig': + res['cpuconfig'] = fetcher.get_cpuconfig() + elif issue == 'diskinfo': + res['diskinfo'] = fetcher.get_diskinfo() + elif issue == 'osinfo': + res['osinfo'] = fetcher.get_osinfo() + elif issue == 'containers': + res['containers'] = fetcher.get_containers() + elif issue == 'status': + res['status'] = fetcher.get_status() + elif issue == 'containerslist': + res['containerslist'] = fetcher.get_containerslist() + elif issue == 'containersinfo': + res = [] + conlist = fetcher.get_containerslist() + for container in conlist: + ans = {} + confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) + ans = 
confetcher.get_basic_info(container) + ans['cpu_use'] = confetcher.get_cpu_use(container) + ans['mem_use'] = confetcher.get_mem_use(container) + res.append(ans) + else: + return json.dumps({'success':'false', 'message':'not supported request'}) - elif cmds[1] == 'add': - user = G_usermgr.newuser(cur_user = cur_user) - user.username = form.getvalue('username') - user.password = form.getvalue('password') - user.e_mail = form.getvalue('e_mail', '') - user.status = "normal" - result = G_usermgr.register(user = user, cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupadd': - result = G_usermgr.groupadd(form = form, cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'quotaadd': - result = G_usermgr.quotaadd(form = form, cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'chdefault': - result = G_usermgr.change_default_group(form = form, cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupdel': - result = G_usermgr.groupdel(name = form.getvalue('name', None), cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'data': - logger.info("handle request: user/data") - result = G_usermgr.userList(cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupNameList': - result = G_usermgr.groupListName(cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupList': - result = G_usermgr.groupList(cur_user = cur_user) - self.response(200, result) - elif cmds[1] == 'groupQuery': - result = G_usermgr.groupQuery(name = form.getvalue("name"), cur_user = cur_user) - if (result.get('success', None) == None or result.get('success', None) == "false"): - self.response(301,result) - else: - self.response(200,result) - elif cmds[1] == 'selfQuery': - result = G_usermgr.selfQuery(cur_user = cur_user) - self.response(200,result) - elif cmds[1] == 'selfModify': - result = G_usermgr.selfModify(cur_user = cur_user, newValue = form) - self.response(200,result) - elif cmds[0] 
== 'register' : - #activate - logger.info("handle request: user/activate") - newuser = G_usermgr.newuser() - newuser.username = cur_user.username - newuser.nickname = cur_user.truename - newuser.status = 'applying' - newuser.user_group = cur_user.user_group - newuser.auth_method = cur_user.auth_method - newuser.e_mail = form.getvalue('email','') - newuser.student_number = form.getvalue('studentnumber', '') - newuser.department = form.getvalue('department', '') - newuser.truename = form.getvalue('truename', '') - newuser.tel = form.getvalue('tel', '') - newuser.description = form.getvalue('description', '') - result = G_usermgr.register(user = newuser) - userManager.send_remind_activating_email(newuser.username) - self.response(200,result) + return json.dumps({'success':'true', 'monitor':res}) + + +@app.route("/monitor/vnodes///", methods=['POST']) +@login_required +def vnodes_monitor(cur_user, user, form, con_id, issue): + global G_clustername + logger.info("handle request: monitor/vnodes") + res = {} + fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) + if issue == 'cpu_use': + res['cpu_use'] = fetcher.get_cpu_use(con_id) + elif issue == 'mem_use': + res['mem_use'] = fetcher.get_mem_use(con_id) + elif issue == 'disk_use': + res['disk_use'] = fetcher.get_disk_use(con_id) + elif issue == 'basic_info': + res['basic_info'] = fetcher.get_basic_info(con_id) + elif issue == 'owner': + names = con_id.split('-') + result = G_usermgr.query(username = names[0], cur_user = cur_user) + if result['success'] == 'false': + res['username'] = "" + res['truename'] = "" else: - logger.warning ("request not supported ") - self.response(400, {'success':'false', 'message':'not supported request'}) + res['username'] = result['data']['username'] + res['truename'] = result['data']['truename'] + else: + res = "Unspported Method!" 
+ return json.dumps({'success':'true', 'monitor':res}) + + +@app.route("/monitor/user/quotainfo/", methods=['POST']) +@login_required +def user_quotainfo_monitor(cur_user, user, form): + global G_usermgr + logger.info("handle request: monitor/user/quotainfo/") + user_info = G_usermgr.selfQuery(cur_user = cur_user) + quotainfo = user_info['data']['groupinfo'] + return json.dumps({'success':'true', 'quotainfo':quotainfo}) + +@app.route("/monitor/listphynodes/", methods=['POST']) +@login_required +def listphynodes_monitor(cur_user, user, form): + global G_nodemgr + logger.info("handle request: monitor/listphynodes/") + res = {} + res['allnodes'] = G_nodemgr.get_allnodes() + return json.dumps({'success':'true', 'monitor':res}) + + +@app.route("/user/modify/", methods=['POST']) +@login_required +def modify_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/modify/") + result = G_usermgr.modify(newValue = form, cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/groupModify/", methods=['POST']) +@login_required +def groupModify_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupModify/") + result = G_usermgr.groupModify(newValue = form, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/query/", methods=['POST']) +@login_required +def query_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/query/") + result = G_usermgr.query(ID = form.get("ID"), cur_user = cur_user) + if (result.get('success', None) == None or result.get('success', None) == "false"): + return json.dumps(result) + else: + result = G_usermgr.queryForDisplay(user = result['token']) + return json.dumps(result) + + +@app.route("/user/add/", methods=['POST']) +@login_required +def add_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/add/") + user = G_usermgr.newuser(cur_user = cur_user) + user.username = form.get('username', None) + 
user.password = form.get('password', None) + user.e_mail = form.get('e_mail', '') + user.status = "normal" + result = G_usermgr.register(user = user, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/groupadd/", methods=['POST']) +@login_required +def groupadd_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupadd/") + result = G_usermgr.groupadd(form = form, cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/chdefault/", methods=['POST']) +@login_required +def chdefault(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/chdefault/") + result = G_usermgr.change_default_group(form = form, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/quotaadd/", methods=['POST']) +@login_required +def quotaadd_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/quotaadd/") + result = G_usermgr.quotaadd(form = form, cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/groupdel/", methods=['POST']) +@login_required +def groupdel_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupdel/") + result = G_usermgr.groupdel(name = form.get('name', None), cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/data/", methods=['POST']) +@login_required +def data_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/data/") + result = G_usermgr.userList(cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/groupNameList/", methods=['POST']) +@login_required +def groupNameList_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupNameList/") + result = G_usermgr.groupListName(cur_user = cur_user) + return json.dumps(result) + + +@app.route("/user/groupList/", methods=['POST']) +@login_required +def groupList_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: 
user/groupList/") + result = G_usermgr.groupList(cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/groupQuery/", methods=['POST']) +@login_required +def groupQuery_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/groupQuery/") + result = G_usermgr.groupQuery(name = form.getvalue("name"), cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/selfQuery/", methods=['POST']) +@login_required +def selfQuery_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/selfQuery/") + result = G_usermgr.selfQuery(cur_user = cur_user) + return json.dumps(result) + +@app.route("/user/selfModify/", methods=['POST']) +@login_required +def selfModify_user(cur_user, user, form): + global G_usermgr + logger.info("handle request: user/selfModify/") + result = G_usermgr.selfModify(cur_user = cur_user, newValue = form) + return json.dumps(result) -class ThreadingHttpServer(ThreadingMixIn, http.server.HTTPServer): - pass if __name__ == '__main__': + logger.info('Start Flask...:') + try: + secret_key_file = open(env.getenv('FS_PREFIX') + '/local/httprest_secret_key.txt') + app.secret_key = secret_key_file.read() + secret_key_file.close() + except: + from base64 import b64encode + from os import urandom + secret_key = urandom(24) + secret_key = b64encode(secret_key).decode('utf-8') + app.secret_key = secret_key + secret_key_file = open(env.getenv('FS_PREFIX') + '/local/httprest_secret_key.txt', 'w') + secret_key_file.write(secret_key) + secret_key_file.close() + + os.environ['APP_KEY'] = app.secret_key + runcmd = sys.argv[0] + app.runpath = runcmd.rsplit('/', 1)[0] + + global G_nodemgr global G_vclustermgr global G_usermgr @@ -608,6 +700,6 @@ if __name__ == '__main__': logger.info("using MASTER_PORT %d", int(masterport)) # server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) - server = ThreadingHttpServer((masterip, int(masterport)), DockletHttpHandler) 
logger.info("starting master server") - server.serve_forever() + + app.run(host = masterip, port = masterport, debug = True, threaded=True) diff --git a/src/nodemgr.py b/src/nodemgr.py index ffd1a09..f45e8f7 100755 --- a/src/nodemgr.py +++ b/src/nodemgr.py @@ -13,7 +13,7 @@ import env # 2. update node list when new node joins # ETCD table : # machines/allnodes -- all nodes in docklet, for recovery -# machines/runnodes -- run nodes of this start up +# machines/runnodes -- run nodes of this start up ############################################## class NodeMgr(object): def __init__(self, networkmgr, etcdclient, addr, mode): @@ -53,13 +53,13 @@ class NodeMgr(object): if len(self.runnodes)>0: logger.error ("init runnodes is not null, need to be clean") sys.exit(1) - # init rpc list + # init rpc list self.rpcs = [] # start new thread to watch whether a new node joins logger.info ("start thread to watch new nodes ...") self.thread_watchnewnode = threading.Thread(target=self._watchnewnode) self.thread_watchnewnode.start() - # wait for all nodes joins + # wait for all nodes joins while(True): allin = True for node in self.allnodes: @@ -73,7 +73,7 @@ class NodeMgr(object): logger.info ("run nodes are: %s" % self.runnodes) - # get nodes list from etcd table + # get nodes list from etcd table def _nodelist_etcd(self, which): if which == "allnodes" or which == "runnodes": [status, nodeinfo]=self.etcd.listdir("machines/"+which) @@ -91,7 +91,7 @@ class NodeMgr(object): time.sleep(0.1) [status, runlist] = self.etcd.listdir("machines/runnodes") if not status: - logger.warning ("get runnodes list failed from etcd ") + logger.warning ("get runnodes list failed from etcd ") continue for node in runlist: nodeip = node['key'].rsplit('/',1)[1] @@ -109,8 +109,8 @@ class NodeMgr(object): if nodeip in self.allnodes: ######## HERE MAYBE NEED TO FIX ############### # here we must use "machines/runnodes/nodeip" - # we cannot use node['key'], node['key'] is absolute - # path, etcd client will 
append the path to prefix, + # we cannot use node['key'], node['key'] is absolute + # path, etcd client will append the path to prefix, # which is wrong ############################################### self.etcd.setkey("machines/runnodes/"+nodeip, "init-"+self.mode) @@ -138,7 +138,7 @@ class NodeMgr(object): % (nodeip, workerport))) logger.info ("add %s:%s in rpc client list" % (nodeip, workerport)) - + # get all run nodes' IP addr def get_nodeips(self): return self.allnodes diff --git a/src/userManager.py b/src/userManager.py index 1137c61..74b9c11 100755 --- a/src/userManager.py +++ b/src/userManager.py @@ -158,7 +158,7 @@ class userManager: if not os.path.exists(fspath+"/global/sys/quotainfo"): quotafile = open(fspath+"/global/sys/quotainfo",'w') quotas = {} - quotas['default'] = 'fundation' + quotas['default'] = 'fundation' quotas['quotainfo'] = [] quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'}) quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB , e.g. 
4000'}) @@ -170,7 +170,6 @@ class userManager: quotafile.write(json.dumps(quotas)) quotafile.close() - def auth_local(self, username, password): password = hashlib.sha512(password.encode('utf-8')).hexdigest() @@ -403,7 +402,7 @@ class userManager: "tel" : user.tel, "register_date" : "%s"%(user.register_date), "group" : user.user_group, - "groupinfo": group, + "groupinfo": group, }, } return result @@ -415,8 +414,8 @@ class userManager: Modify informantion for oneself ''' form = kwargs['newValue'] - name = form.getvalue('name', None) - value = form.getvalue('value', None) + name = form.get('name', None) + value = form.get('value', None) if (name == None or value == None): result = {'success': 'false'} return result @@ -543,13 +542,13 @@ class userManager: groups = json.loads(groupfile.read()) groupfile.close() for group in groups: - if group['name'] == kwargs['newValue'].getvalue('groupname',None): + if group['name'] == kwargs['newValue'].get('groupname',None): form = kwargs['newValue'] for key in form.keys(): if key == "groupname" or key == "token": pass else: - group['quotas'][key] = form.getvalue(key) + group['quotas'][key] = form.get(key) groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() @@ -564,28 +563,28 @@ class userManager: will send an e-mail when status is changed from 'applying' to 'normal' Usage: modify(newValue = dict_from_form, cur_user = token_from_auth) ''' - user_modify = User.query.filter_by(username = kwargs['newValue'].getvalue('username', None)).first() + user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first() if (user_modify == None): return {"success":'false', "reason":"User does not exist"} #try: form = kwargs['newValue'] - user_modify.truename = form.getvalue('truename', '') - user_modify.e_mail = form.getvalue('e_mail', '') - user_modify.department = form.getvalue('department', '') - user_modify.student_number = form.getvalue('student_number', '') - 
user_modify.tel = form.getvalue('tel', '') - user_modify.user_group = form.getvalue('group', '') - user_modify.auth_method = form.getvalue('auth_method', '') - if (user_modify.status == 'applying' and form.getvalue('status', '') == 'normal'): + user_modify.truename = form.get('truename', '') + user_modify.e_mail = form.get('e_mail', '') + user_modify.department = form.get('department', '') + user_modify.student_number = form.get('student_number', '') + user_modify.tel = form.get('tel', '') + user_modify.user_group = form.get('group', '') + user_modify.auth_method = form.get('auth_method', '') + if (user_modify.status == 'applying' and form.get('status', '') == 'normal'): send_activated_email(user_modify.e_mail, user_modify.username) - user_modify.status = form.getvalue('status', '') - if (form.getvalue('Chpassword', '') == 'Yes'): - new_password = form.getvalue('password','no_password') + user_modify.status = form.get('status', '') + if (form.get('Chpassword', '') == 'Yes'): + new_password = form.get('password','no_password') new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest() user_modify.password = new_password - #self.chpassword(cur_user = user_modify, password = form.getvalue('password','no_password')) + #self.chpassword(cur_user = user_modify, password = form.get('password','no_password')) db.session.commit() return {"success":'true'} @@ -610,7 +609,7 @@ class userManager: quotafile = open(fspath+"/global/sys/quotainfo",'r') quotas = json.loads(quotafile.read()) quotafile.close() - user_new.user_group = quotas['default'] + user_new.user_group = quotas['default'] user_new.avatar = 'default.png' return user_new @@ -643,9 +642,9 @@ class userManager: @administration_required def quotaadd(*args, **kwargs): form = kwargs.get('form') - quotaname = form.getvalue("quotaname") - default_value = form.getvalue("default_value") - hint = form.getvalue("hint") + quotaname = form.get("quotaname") + default_value = form.get("default_value") + hint = 
form.get("hint") if (quotaname == None): return { "success":'false', "reason": "Empty quota name"} if (default_value == None): @@ -670,7 +669,7 @@ class userManager: @administration_required def groupadd(*args, **kwargs): form = kwargs.get('form') - groupname = form.getvalue("groupname") + groupname = form.get("groupname") if (groupname == None): return {"success":'false', "reason": "Empty group name"} groupfile = open(fspath+"/global/sys/quota",'r') @@ -684,13 +683,13 @@ class userManager: if key == "groupname" or key == "token": pass else: - group['quotas'][key] = form.getvalue(key) + group['quotas'][key] = form.get(key) groups.append(group) groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() return {"success":'true'} - + @administration_required def groupdel(*args, **kwargs): name = kwargs.get('name', None) diff --git a/web/webViews/admin.py b/web/webViews/admin.py index 3fcc1f7..2b067b5 100755 --- a/web/webViews/admin.py +++ b/web/webViews/admin.py @@ -18,13 +18,13 @@ class adminView(normalView): class groupaddView(normalView): @classmethod def post(self): - dockletRequest.post('/user/groupadd', request.form) + dockletRequest.post('/user/groupadd/', request.form) return redirect('/admin/') class quotaaddView(normalView): @classmethod def post(self): - dockletRequest.post('/user/quotaadd', request.form) + dockletRequest.post('/user/quotaadd/', request.form) return redirect('/admin/') class chdefaultView(normalView): @@ -39,9 +39,9 @@ class groupdelView(normalView): data = { "name" : self.groupname, } - dockletRequest.post('/user/groupdel', data) + dockletRequest.post('/user/groupdel/', data) return redirect('/admin/') - + @classmethod def get(self): return self.post() diff --git a/web/webViews/authenticate/login.py b/web/webViews/authenticate/login.py index 898a710..66bc314 100755 --- a/web/webViews/authenticate/login.py +++ b/web/webViews/authenticate/login.py @@ -72,7 +72,7 @@ class loginView(normalView): 
else: return redirect('/login/') else: - self.error() + return redirect('/login/') class logoutView(normalView): diff --git a/web/webViews/monitor.py b/web/webViews/monitor.py index 6a48939..e4a655c 100755 --- a/web/webViews/monitor.py +++ b/web/webViews/monitor.py @@ -40,7 +40,7 @@ class statusRealtimeView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/vnodes/%s/basic_info'%(self.node_name), data) + result = dockletRequest.post('/monitor/vnodes/%s/basic_info/'%(self.node_name), data) basic_info = result.get('monitor').get('basic_info') return self.render(self.template_path, node_name = self.node_name, user = session['username'], container = basic_info) @@ -53,11 +53,11 @@ class hostsRealtimeView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/hosts/%s/cpuconfig'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/cpuconfig/'%(self.com_ip), data) proc = result.get('monitor').get('cpuconfig') - result = dockletRequest.post('/monitor/hosts/%s/osinfo'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/osinfo/'%(self.com_ip), data) osinfo = result.get('monitor').get('osinfo') - result = dockletRequest.post('/monitor/hosts/%s/diskinfo'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/diskinfo/'%(self.com_ip), data) diskinfos = result.get('monitor').get('diskinfo') return self.render(self.template_path, com_ip = self.com_ip, user = session['username'],processors = proc, OSinfo = osinfo, diskinfos = diskinfos) @@ -71,13 +71,13 @@ class hostsConAllView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/hosts/%s/containerslist'%(self.com_ip), data) + result = dockletRequest.post('/monitor/hosts/%s/containerslist/'%(self.com_ip), data) containers = result.get('monitor').get('containerslist') containerslist = [] for container in containers: - result = 
dockletRequest.post('/monitor/vnodes/%s/basic_info'%(container), data) + result = dockletRequest.post('/monitor/vnodes/%s/basic_info/'%(container), data) basic_info = result.get('monitor').get('basic_info') - result = dockletRequest.post('/monitor/vnodes/%s/owner'%(container), data) + result = dockletRequest.post('/monitor/vnodes/%s/owner/'%(container), data) owner = result.get('monitor') basic_info['owner'] = owner containerslist.append(basic_info) @@ -91,14 +91,14 @@ class hostsView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/listphynodes', data) + result = dockletRequest.post('/monitor/listphynodes/', data) iplist = result.get('monitor').get('allnodes') machines = [] for ip in iplist: containers = {} - result = dockletRequest.post('/monitor/hosts/%s/containers'%(ip), data) + result = dockletRequest.post('/monitor/hosts/%s/containers/'%(ip), data) containers = result.get('monitor').get('containers') - result = dockletRequest.post('/monitor/hosts/%s/status'%(ip), data) + result = dockletRequest.post('/monitor/hosts/%s/status/'%(ip), data) status = result.get('monitor').get('status') machines.append({'ip':ip,'containers':containers, 'status':status}) #print(machines) @@ -112,9 +112,9 @@ class monitorUserAllView(normalView): data = { "user": session['username'], } - result = dockletRequest.post('/monitor/listphynodes', data) + result = dockletRequest.post('/monitor/listphynodes/', data) userslist = [{'name':'root'},{'name':'libao'}] for user in userslist: - result = dockletRequest.post('/monitor/user/%s/clustercnt'%(user['name']), data) + result = dockletRequest.post('/monitor/user/%s/clustercnt/'%(user['name']), data) user['clustercnt'] = result.get('monitor').get('clustercnt') return self.render(self.template_path, userslist = userslist, user = session['username']) diff --git a/web/webViews/user/userActivate.py b/web/webViews/user/userActivate.py index c8fe2b1..d047634 100644 --- a/web/webViews/user/userActivate.py 
+++ b/web/webViews/user/userActivate.py @@ -16,5 +16,5 @@ class userActivateView(normalView): @classmethod def post(self): - dockletRequest.post('/register', request.form) + dockletRequest.post('/register/', request.form) return redirect('/logout/') diff --git a/web/webViews/user/userlist.py b/web/webViews/user/userlist.py index b2a8a9e..1f9b93a 100644 --- a/web/webViews/user/userlist.py +++ b/web/webViews/user/userlist.py @@ -19,34 +19,32 @@ class userlistView(normalView): class useraddView(normalView): @classmethod def post(self): - dockletRequest.post('/user/add', request.form) + dockletRequest.post('/user/add/', request.form) return redirect('/user/list/') class userdataView(normalView): @classmethod def get(self): - return json.dumps(dockletRequest.post('/user/data', request.form)) + return json.dumps(dockletRequest.post('/user/data/', request.form)) @classmethod def post(self): - return json.dumps(dockletRequest.post('/user/data', request.form)) + return json.dumps(dockletRequest.post('/user/data/', request.form)) class userqueryView(normalView): @classmethod def get(self): - return json.dumps(dockletRequest.post('/user/query', request.form)) + return json.dumps(dockletRequest.post('/user/query/', request.form)) @classmethod def post(self): - return json.dumps(dockletRequest.post('/user/query', request.form)) + return json.dumps(dockletRequest.post('/user/query/', request.form)) class usermodifyView(normalView): @classmethod def post(self): try: - dockletRequest.post('/user/modify', request.form) + dockletRequest.post('/user/modify/', request.form) except: return self.render('user/mailservererror.html') return redirect('/user/list/') - - From d4cbf43ed15cffb85554654ae04b139f6f4c959b Mon Sep 17 00:00:00 2001 From: zhuyj17 Date: Sat, 30 Apr 2016 16:45:48 +0800 Subject: [PATCH 12/19] Using flask to display monitor info. 
--- src/monitor.py | 2 +- web/static/js/plot_monitor.js | 6 +++--- web/static/js/plot_monitorReal.js | 6 +++--- web/templates/monitor/hosts.html | 10 +++++----- web/templates/monitor/hostsConAll.html | 6 +++--- web/templates/monitor/status.html | 8 ++++---- web/web.py | 4 ++-- 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/monitor.py b/src/monitor.py index f8917a8..23f84a3 100755 --- a/src/monitor.py +++ b/src/monitor.py @@ -68,7 +68,7 @@ class Container_Collector(threading.Thread): else: self.cpu_quota[container_name] = tmp/100000.0 quota = {'cpu':self.cpu_quota[container_name],'memory':self.mem_quota[container_name]} - logger.info(quota) + #logger.info(quota) self.etcdser.setkey('/vnodes/%s/quota'%(container_name),quota) else: logger.error("Cant't find config file %s"%(confpath)) diff --git a/web/static/js/plot_monitor.js b/web/static/js/plot_monitor.js index 7391d64..53fcc3b 100755 --- a/web/static/js/plot_monitor.js +++ b/web/static/js/plot_monitor.js @@ -158,12 +158,12 @@ var host = window.location.host; var node_name = $("#node_name").html(); var url = "http://" + host + "/monitor/vnodes/" + node_name; -plot_graph($("#mem-chart"),url + "/mem_use",processMemData,getMemY); -plot_graph($("#cpu-chart"),url + "/cpu_use",processCpuData,getCpuY); +plot_graph($("#mem-chart"),url + "/mem_use/",processMemData,getMemY); +plot_graph($("#cpu-chart"),url + "/cpu_use/",processCpuData,getCpuY); function processDiskData() { - $.post(url+"/disk_use",{},function(data){ + $.post(url+"/disk_use/",{},function(data){ var diskuse = data.monitor.disk_use; var usedp = diskuse.percent; var total = diskuse.total/1024.0/1024.0; diff --git a/web/static/js/plot_monitorReal.js b/web/static/js/plot_monitorReal.js index 6237fd2..b85cc6a 100755 --- a/web/static/js/plot_monitorReal.js +++ b/web/static/js/plot_monitorReal.js @@ -190,8 +190,8 @@ var host = window.location.host; var com_ip = $("#com_ip").html(); var url = "http://" + host + "/monitor/hosts/"+com_ip; 
-plot_graph($("#mem-chart"), url + "/meminfo",processMemData,getMemY); -plot_graph($("#cpu-chart"), url + "/cpuinfo",processCpuData,getCpuY); +plot_graph($("#mem-chart"), url + "/meminfo/",processMemData,getMemY); +plot_graph($("#cpu-chart"), url + "/cpuinfo/",processCpuData,getCpuY); //plot_graph($("#disk-chart"), url + "/diskinfo",processDiskData,getDiskY); -$.post(url+"/diskinfo",{user:"root",key:"unias"},processDiskData,"json"); +$.post(url+"/diskinfo/",{user:"root",key:"unias"},processDiskData,"json"); diff --git a/web/templates/monitor/hosts.html b/web/templates/monitor/hosts.html index afbe6af..c080940 100644 --- a/web/templates/monitor/hosts.html +++ b/web/templates/monitor/hosts.html @@ -78,7 +78,7 @@ { var MB = 1024; - $.post(url+"/status",{},function(data){ + $.post(url+"/status/",{},function(data){ var status = data.monitor.status; if(status == 'RUNNING') { @@ -95,7 +95,7 @@ tmp.html("Stopped"); } - $.post(url+"/containers",{},function(data){ + $.post(url+"/containers/",{},function(data){ var containers = data.monitor.containers; $("#"+index+"_contotal").html(containers.total); $("#"+index+"_conrunning").html(containers.running); @@ -109,20 +109,20 @@ return; } - $.post(url+"/cpuinfo",{},function(data){ + $.post(url+"/cpuinfo/",{},function(data){ var idle = data.monitor.cpuinfo.idle; var usedp = (100 - idle).toFixed(2); $("#"+index+"_cpu").html(String(usedp)+"%"); },"json"); - $.post(url+"/meminfo",{},function(data){ + $.post(url+"/meminfo/",{},function(data){ var used = data.monitor.meminfo.used; var total = data.monitor.meminfo.total; var usedp = String(((used/total)*100).toFixed(2))+"%"; $("#"+index+"_mem").html(usedp); },"json"); - $.post(url+"/diskinfo",{},function(data){ + $.post(url+"/diskinfo/",{},function(data){ var val = data.monitor.diskinfo; var usedp = val[0].percent; $("#"+index+"_disk").html(String(usedp)+"%"); diff --git a/web/templates/monitor/hostsConAll.html b/web/templates/monitor/hostsConAll.html index c2302f5..8c72631 100644 --- 
a/web/templates/monitor/hostsConAll.html +++ b/web/templates/monitor/hostsConAll.html @@ -85,7 +85,7 @@ function update(url,index) { - $.post(url+"/basic_info",{},function(data){ + $.post(url+"/basic_info/",{},function(data){ var state = data.monitor.basic_info.State; if(state == 'RUNNING') { @@ -109,13 +109,13 @@ return; } - $.post(url+"/cpu_use",{},function(data){ + $.post(url+"/cpu_use/",{},function(data){ var val = data.monitor.cpu_use.val; var unit = data.monitor.cpu_use.unit; $("#"+index+"_cpu").html(val +" "+ unit); },"json"); - $.post(url+"/mem_use",{},function(data){ + $.post(url+"/mem_use/",{},function(data){ var val = data.monitor.mem_use.val; var unit = data.monitor.mem_use.unit $("#"+index+"_mem").html(val+" "+unit); diff --git a/web/templates/monitor/status.html b/web/templates/monitor/status.html index 133a390..e8c1dac 100644 --- a/web/templates/monitor/status.html +++ b/web/templates/monitor/status.html @@ -122,7 +122,7 @@ function update(url,index) { - $.post(url+"/basic_info",{},function(data){ + $.post(url+"/basic_info/",{},function(data){ var state = data.monitor.basic_info.State; if(state == 'RUNNING') { @@ -146,7 +146,7 @@ return; } - $.post(url+"/cpu_use",{},function(data){ + $.post(url+"/cpu_use/",{},function(data){ var usedp = data.monitor.cpu_use.usedp; var quota = data.monitor.cpu_use.quota.cpu; var quotaout = "("+quota; @@ -157,7 +157,7 @@ $("#"+index+"_cpu").html((usedp/0.01).toFixed(2)+"%
"+quotaout); },"json"); - $.post(url+"/mem_use",{},function(data){ + $.post(url+"/mem_use/",{},function(data){ var usedp = data.monitor.mem_use.usedp; var unit = data.monitor.mem_use.unit; var quota = data.monitor.mem_use.quota.memory/1024.0; @@ -166,7 +166,7 @@ $("#"+index+"_mem").html((usedp/0.01).toFixed(2)+"%
"+out); },"json"); - $.post(url+"/disk_use",{},function(data){ + $.post(url+"/disk_use/",{},function(data){ var diskuse = data.monitor.disk_use; var usedp = diskuse.percent; var total = diskuse.total/1024.0/1024.0; diff --git a/web/web.py b/web/web.py index 40cb709..22e7efe 100755 --- a/web/web.py +++ b/web/web.py @@ -265,8 +265,8 @@ def statusRealtime(vcluster_name,node_name): statusRealtimeView.node_name = node_name return statusRealtimeView.as_view() -@app.route("/monitor/hosts//", methods=['POST']) -@app.route("/monitor/vnodes//", methods=['POST']) +@app.route("/monitor/hosts///", methods=['POST']) +@app.route("/monitor/vnodes///", methods=['POST']) @login_required def monitor_request(comid,infotype): data = { From 6a4dae1d1ce849e61c08025a652cb4a3807c7e29 Mon Sep 17 00:00:00 2001 From: leebaok Date: Tue, 3 May 2016 17:06:30 +0800 Subject: [PATCH 13/19] [merge] merge flask-rewrite with reuse-vlanid --- src/httprest.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/httprest.py b/src/httprest.py index a2c7bb6..d826f4c 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -214,11 +214,14 @@ def stop_cluster(cur_user, user, form): @login_required def delete_cluster(cur_user, user, form): global G_vclustermgr + global G_usermgr clustername = form.get('clustername', None) if (clustername == None): return json.dumps({'success':'false', 'message':'clustername is null'}) logger.info ("handle request : delete cluster %s" % clustername) - [status, result] = G_vclustermgr.delete_cluster(clustername, user) + user_info = G_usermgr.selfQuery(cur_user=cur_user) + user_info = json.dumps(user_info) + [status, result] = G_vclustermgr.delete_cluster(clustername, user, user_info) if status: return json.dumps({'success':'true', 'action':'delete cluster', 'message':result}) else: From 44cd289ceaed5a9af1ac74c16ee02b51625691f0 Mon Sep 17 00:00:00 2001 From: leebaok Date: Tue, 3 May 2016 19:04:58 +0800 Subject: [PATCH 14/19] Master : disable debug mode of flask 
in httprest.py debug mode of flask will restart the process this will make some work before flask lost --- src/httprest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/httprest.py b/src/httprest.py index d826f4c..dff6649 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -705,4 +705,4 @@ if __name__ == '__main__': # server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) logger.info("starting master server") - app.run(host = masterip, port = masterport, debug = True, threaded=True) + app.run(host = masterip, port = masterport, threaded=True) From 628c4781984be2febc1da2d62507647ef32aff41 Mon Sep 17 00:00:00 2001 From: Peidong Liu Date: Tue, 3 May 2016 22:29:32 +0800 Subject: [PATCH 15/19] Fix a bug that will occur when user token expires. Closed the debug mode in httprest.py and web.py Now 500 error in web.py will lead to a 500 page 500 error in httprest.py will lead to logout page --- src/httprest.py | 10 ++++++++-- web/web.py | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/httprest.py b/src/httprest.py index dff6649..cb33641 100755 --- a/src/httprest.py +++ b/src/httprest.py @@ -41,10 +41,10 @@ def login_required(func): logger.info ("get request, path: %s" % request.path) token = request.form.get("token", None) if (token == None): - return {'success':'false', 'message':'user or key is null'} + return json.dumps({'success':'false', 'message':'user or key is null'}) cur_user = G_usermgr.auth_token(token) if (cur_user == None): - return {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'} + return json.dumps({'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) return func(cur_user, cur_user.username, request.form, *args, **kwargs) return wrapper @@ -573,6 +573,12 @@ def selfModify_user(cur_user, user, form): return json.dumps(result) +@app.errorhandler(500) +def internal_server_error(error): + logger.debug("An internel server error 
occured") + return json.dumps({'success':'false', 'message':'500 Internal Server Error', 'Unauthorized': 'True'}) + + if __name__ == '__main__': logger.info('Start Flask...:') try: diff --git a/web/web.py b/web/web.py index 22e7efe..03d99ae 100755 --- a/web/web.py +++ b/web/web.py @@ -475,4 +475,4 @@ if __name__ == '__main__': elif opt in ("-p", "--port"): webport = int(arg) - app.run(host = webip, port = webport, debug = True, threaded=True) + app.run(host = webip, port = webport, threaded=True) From 8a1056a0bfc595c0bc768b27ef3b1bb28d8545ac Mon Sep 17 00:00:00 2001 From: zhuyj17 Date: Wed, 4 May 2016 11:54:11 +0800 Subject: [PATCH 16/19] Fix a bug in monitor.py, which may stop the thread. --- src/monitor.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/monitor.py b/src/monitor.py index 23f84a3..e7e782d 100755 --- a/src/monitor.py +++ b/src/monitor.py @@ -114,7 +114,7 @@ class Container_Collector(threading.Thread): if(self.collect_containerinfo(container)): countR += 1 except Exception as err: - #pass + logger.warning(traceback.format_exc()) logger.warning(err) containers_num = len(containers)-1 concnt = {} @@ -195,16 +195,20 @@ class Collector(threading.Thread): diskval = {} diskval['device'] = part.device diskval['mountpoint'] = part.mountpoint - usage = psutil.disk_usage(part.mountpoint) - diskval['total'] = usage.total - diskval['used'] = usage.used - diskval['free'] = usage.free - diskval['percent'] = usage.percent - if(part.mountpoint.startswith('/opt/docklet/local/volume')): - names = re.split('/',part.mountpoint) - container = names[len(names)-1] - self.vetcdser.setkey('/%s/disk_use'%(container), diskval) - setval.append(diskval) + try: + usage = psutil.disk_usage(part.mountpoint) + diskval['total'] = usage.total + diskval['used'] = usage.used + diskval['free'] = usage.free + diskval['percent'] = usage.percent + if(part.mountpoint.startswith('/opt/docklet/local/volume')): + names = 
re.split('/',part.mountpoint) + container = names[len(names)-1] + self.vetcdser.setkey('/%s/disk_use'%(container), diskval) + setval.append(diskval) + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) self.etcdser.setkey('/diskinfo', setval) #print(output) #print(diskparts) From ff7c5b04df479fac2b3b7eb5a2d5d044a47f642d Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 4 May 2016 20:08:50 +0800 Subject: [PATCH 17/19] fix a bug --- src/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/worker.py b/src/worker.py index 514f74b..4b1a3af 100755 --- a/src/worker.py +++ b/src/worker.py @@ -57,7 +57,7 @@ class Worker(object): self.etcd.setkey("machines/runnodes/"+self.addr, "waiting") [status, key] = self.etcd.getkey("machines/runnodes/"+self.addr) if status: - self.key = generatekey("machines/runnodes/"+self.addr) + self.key = generatekey("machines/allnodes/"+self.addr) else: logger.error("get key failed. %s" % node) sys.exit(1) @@ -73,7 +73,7 @@ class Worker(object): # worker search all run nodes to judge how to init value = 'init-new' - [status, runlist] = self.etcd.listdir("machines/runnodes") + [status, runlist] = self.etcd.listdir("machines/allnodes") for node in runlist: if node['key'] == self.key: value = 'init-recovery' From f4c5126b067ebba42d8e82c423ac723782761ecd Mon Sep 17 00:00:00 2001 From: ooooo Date: Wed, 4 May 2016 20:15:19 +0800 Subject: [PATCH 18/19] rename variable --- src/worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/worker.py b/src/worker.py index 4b1a3af..4a43007 100755 --- a/src/worker.py +++ b/src/worker.py @@ -73,8 +73,8 @@ class Worker(object): # worker search all run nodes to judge how to init value = 'init-new' - [status, runlist] = self.etcd.listdir("machines/allnodes") - for node in runlist: + [status, alllist] = self.etcd.listdir("machines/allnodes") + for node in alllist: if node['key'] == self.key: value = 'init-recovery' break From 
4396710c6f5176527e5724562b445791c33689c6 Mon Sep 17 00:00:00 2001 From: Donggang Cao Date: Thu, 5 May 2016 00:25:25 -0400 Subject: [PATCH 19/19] fix a bug in worker.py of var not found error --- src/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/worker.py b/src/worker.py index 4a43007..ec1b802 100755 --- a/src/worker.py +++ b/src/worker.py @@ -170,7 +170,7 @@ class Worker(object): if value=='ok': self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 2) else: - logger.error("get key failed. %s" % node) + logger.error("get key %s failed, master crashed or initialized. restart worker please." % self.addr) sys.exit(1) if __name__ == '__main__':