diff --git a/src/vclustermgr.py b/src/vclustermgr.py
index 99b3978..a8d540b 100755
--- a/src/vclustermgr.py
+++ b/src/vclustermgr.py
@@ -52,7 +52,7 @@ class VclusterMgr(object):
                 logger.info ("recovering cluster:%s for user:%s ..." % (cluster, user))
                 self.recover_cluster(cluster, user)
         logger.info("recovered all vclusters for all users")
-
+
     def mount_allclusters(self):
         logger.info("mounting all vclusters for all users...")
         usersdir = self.fspath+"/global/users/"
@@ -70,7 +70,7 @@ class VclusterMgr(object):
                 logger.info ("stopping cluster:%s for user:%s ..." % (cluster, user))
                 self.stop_cluster(cluster, user)
         logger.info("stopped all vclusters for all users")
-
+
     def detach_allclusters(self):
         logger.info("detaching all vclusters for all users...")
         usersdir = self.fspath+"/global/users/"
@@ -79,7 +79,7 @@ class VclusterMgr(object):
                 logger.info ("detaching cluster:%s for user:%s ..." % (cluster, user))
                 self.detach_cluster(cluster, user)
         logger.info("detached all vclusters for all users")
-
+
     def create_cluster(self, clustername, username, image, user_info, setting):
         if self.is_cluster(clustername, username):
             return [False, "cluster:%s already exists" % clustername]
@@ -111,6 +111,9 @@ class VclusterMgr(object):
         clusterpath = self.fspath+"/global/users/"+username+"/clusters/"+clustername
         hostpath = self.fspath+"/global/users/"+username+"/hosts/"+str(clusterid)+".hosts"
         hosts = "127.0.0.1\tlocalhost\n"
+        proxy_url = ""
+        proxy_server_ip = ""
+        proxy_server_port = ""
         containers = []
         for i in range(0, clustersize):
             onework = workers[random.randint(0, len(workers)-1)]
@@ -118,6 +121,10 @@ class VclusterMgr(object):
             [success,message] = self.networkmgr.setup_usrgw(username, self.nodemgr, onework)
             if not success:
                 return [False, message]
+            if i == 0:
+                proxy_url = "http://" + self.nodemgr.rpc_to_ip(onework) + ":" + str(env.getenv("PROXY_PORT")) + "/_web/" + username + "/" + clustername
+                proxy_server_ip = self.nodemgr.rpc_to_ip(onework)
+                proxy_server_port = env.getenv("PROXY_PORT")
             lxc_name = username + "-" + str(clusterid) + "-" + str(i)
             hostname = "host-"+str(i)
             logger.info ("create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, image-%s" % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, image_json))
@@ -132,8 +139,8 @@ class VclusterMgr(object):
         hostfile.write(hosts)
         hostfile.close()
         clusterfile = open(clusterpath, 'w')
-        proxy_url = env.getenv("PORTAL_URL") + "/_web/" + username + "/" + clustername
-        info = {'clusterid':clusterid, 'status':'stopped', 'size':clustersize, 'containers':containers, 'nextcid': clustersize, 'create_time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'start_time':"------" , 'proxy_url':proxy_url}
+        info = {'clusterid':clusterid, 'status':'stopped', 'size':clustersize, 'containers':containers, 'nextcid': clustersize, 'create_time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'start_time':"------" , 'proxy_url':proxy_url, 'proxy_server_ip':proxy_server_ip}
+        info['proxy_server_port'] = proxy_server_port
         clusterfile.write(json.dumps(info))
         clusterfile.close()
         return [True, info]
@@ -185,24 +192,26 @@ class VclusterMgr(object):
         if 'proxy_ip' in clusterinfo:
             return [False, "proxy already exists"]
         target = "http://" + ip + ":" + port
-        clusterinfo['proxy_ip'] = ip + ":" + port
+        clusterinfo['proxy_ip'] = ip + ":" + port
+        worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
         clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
         clusterfile.write(json.dumps(clusterinfo))
         clusterfile.close()
-        proxytool.set_route("/_web/" + username + "/" + clustername, target)
-        return [True, clusterinfo]
+        worker.set_route("/_web/" + username + "/" + clustername, target)
+        return [True, clusterinfo]

     def deleteproxy(self, username, clustername):
         [status, clusterinfo] = self.get_clusterinfo(clustername, username)
         if 'proxy_ip' not in clusterinfo:
             return [True, clusterinfo]
         clusterinfo.pop('proxy_ip')
+        worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
         clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
         clusterfile.write(json.dumps(clusterinfo))
         clusterfile.close()
-        proxytool.delete_route("/_web/" + username + "/" + clustername)
+        worker.delete_route("/_web/" + username + "/" + clustername)
         return [True, clusterinfo]
-
+
     def flush_cluster(self,username,clustername,containername):
         begintime = datetime.datetime.now()
         [status, info] = self.get_clusterinfo(clustername, username)
@@ -242,7 +251,7 @@ class VclusterMgr(object):

     def image_check(self,username,imagename):
-        imagepath = self.fspath + "/global/images/private/" + username + "/"
+        imagepath = self.fspath + "/global/images/private/" + username + "/"
         if os.path.exists(imagepath + imagename):
             return [False, "image already exists"]
         else:
@@ -286,14 +295,14 @@ class VclusterMgr(object):
         self.networkmgr.printpools()
         os.remove(self.fspath+"/global/users/"+username+"/clusters/"+clustername)
         os.remove(self.fspath+"/global/users/"+username+"/hosts/"+str(info['clusterid'])+".hosts")
-
+
         groupname = json.loads(user_info)["data"]["group"]
         [status, clusters] = self.list_clusters(username)
         if len(clusters) == 0:
             self.networkmgr.del_user(username, isshared = True if str(groupname) == "fundation" else False)
             self.networkmgr.del_usrgw(username, self.nodemgr)
             logger.info("vlanid release triggered")
-
+
         return [True, "cluster delete"]

     def scale_in_cluster(self, clustername, username, containername):
@@ -345,10 +354,11 @@ class VclusterMgr(object):
         # after reboot, user gateway goes down and lose its configuration
         # so, check is necessary
         self.networkmgr.check_usergw(username, self.nodemgr)
-        # set proxy
+        # set proxy
         try:
-            target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
-            proxytool.set_route('/go/'+username+'/'+clustername, target)
+            target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
+            worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
+            worker.set_route('/go/'+username+'/'+clustername, target)
         except:
             return [False, "start cluster failed with setting proxy failed"]
         for container in info['containers']:
@@ -381,8 +391,9 @@ class VclusterMgr(object):
         self.networkmgr.check_usergw(username, self.nodemgr)
         # recover proxy of cluster
         try:
-            target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
-            proxytool.set_route('/go/'+username+'/'+clustername, target)
+            target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
+            worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
+            worker.set_route('/go/'+username+'/'+clustername, target)
         except:
             return [False, "start cluster failed with setting proxy failed"]
         # recover containers of this cluster
@@ -408,7 +419,7 @@ class VclusterMgr(object):
         infofile.write(json.dumps(info))
         infofile.close()
         return [True, "stop cluster"]
-
+
     def detach_cluster(self, clustername, username):
         [status, info] = self.get_clusterinfo(clustername, username)
         if not status:
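The vclustermgr.py changes above record which worker hosts a cluster's proxy and then talk to that worker through two nodemgr helpers, rpc_to_ip and ip_to_rpc, which are not part of this diff. A minimal sketch of the assumed mapping, using the stdlib xmlrpc.client API that worker.py's register_function usage suggests (the class name, port, and caching strategy below are illustrative, not docklet's real NodeMgr):

import xmlrpc.client

class NodeMgrSketch:
    """Illustrative stand-in for the nodemgr helpers used above."""

    def __init__(self, worker_port=9001):              # port number is an assumption
        self.worker_port = worker_port
        self.rpcs = {}                                  # worker ip -> ServerProxy handle

    def ip_to_rpc(self, ip):
        # Return (and cache) an XML-RPC handle for the worker at this IP, so the
        # master can call worker.set_route(...) / worker.delete_route(...).
        if ip not in self.rpcs:
            self.rpcs[ip] = xmlrpc.client.ServerProxy(
                "http://%s:%s" % (ip, self.worker_port), allow_none=True)
        return self.rpcs[ip]

    def rpc_to_ip(self, rpc):
        # Reverse lookup, as used by create_cluster to record proxy_server_ip.
        for ip, handle in self.rpcs.items():
            if handle is rpc:
                return ip
        return None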
diff --git a/src/worker.py b/src/worker.py
index 0f472bb..f1d77d4 100755
--- a/src/worker.py
+++ b/src/worker.py
@@ -15,7 +15,7 @@ from socketserver import ThreadingMixIn
 import threading
 import etcdlib, network, container
 from nettools import netcontrol
-import monitor
+import monitor, proxytool
 from lvmtool import new_group, recover_group
 
 ##################################################################
@@ -27,7 +27,7 @@ from lvmtool import new_group, recover_group
 #      register rpc functions
 #      initialize network
 #      initialize lvm group
-#  Start() : 
+#  Start() :
 #      register in etcd
 #      setup GRE tunnel
 #      start rpc service
@@ -128,6 +128,8 @@ class Worker(object):
         self.rpcserver.register_function(netcontrol.setup_gw)
         self.rpcserver.register_function(netcontrol.del_gw)
         self.rpcserver.register_function(netcontrol.check_gw)
+        self.rpcserver.register_function(proxytool.set_route)
+        self.rpcserver.register_function(proxytool.delete_route)
         # register functions or instances to server for rpc
         #self.rpcserver.register_function(function_name)
 
@@ -142,7 +144,7 @@ class Worker(object):
             logger.info ("master also on this node. reuse master's network")
         else:
             logger.info ("initialize network")
-            # 'docklet-br' of worker do not need IP Addr. 
+            # 'docklet-br' of worker do not need IP Addr.
             #[status, result] = self.etcd.getkey("network/workbridge")
             #if not status:
             #    logger.error ("get bridge IP failed, please check whether master set bridge IP for worker")
@@ -189,7 +191,7 @@ class Worker(object):
         else:
             logger.error("get key %s failed, master crashed or initialized. restart worker please." % self.addr)
             sys.exit(1)
-
+
 
 if __name__ == '__main__':
     etcdaddr = env.getenv("ETCD")
diff --git a/web/templates/dashboard.html b/web/templates/dashboard.html
index 2bc848e..5ec7dbc 100755
--- a/web/templates/dashboard.html
+++ b/web/templates/dashboard.html
@@ -53,7 +53,7 @@
-
+
     {% else %}
         Stopped
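Registering proxytool.set_route and proxytool.delete_route on the worker's RPC server is what turns the master-side calls worker.set_route(...) into proxy updates on the node that actually hosts the cluster. A self-contained sketch of that wiring, assuming the stdlib XML-RPC server and client (the port, route payloads, and in-memory route table are illustrative; the real proxytool is not shown in this diff):

from xmlrpc.server import SimpleXMLRPCServer
import xmlrpc.client
import threading

routes = {}                                    # illustrative stand-in for proxytool's route state

def set_route(prefix, target):                 # e.g. "/_web/alice/c1" -> "http://10.0.0.5:10000"
    routes[prefix] = target
    return True

def delete_route(prefix):
    routes.pop(prefix, None)
    return True

# Worker side: register the two functions on the RPC server, as worker.py now does with proxytool.
server = SimpleXMLRPCServer(("127.0.0.1", 9001), allow_none=True, logRequests=False)
server.register_function(set_route)
server.register_function(delete_route)
threading.Thread(target=server.serve_forever, daemon=True).start()

# Master side: the handle that ip_to_rpc would return is used exactly like this.
worker = xmlrpc.client.ServerProxy("http://127.0.0.1:9001", allow_none=True)
worker.set_route("/_web/alice/c1", "http://10.0.0.5:10000")
worker.delete_route("/_web/alice/c1")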
diff --git a/web/webViews/dashboard.py b/web/webViews/dashboard.py
index 006fa07..126dbfa 100644
--- a/web/webViews/dashboard.py
+++ b/web/webViews/dashboard.py
@@ -24,6 +24,7 @@ class dashboardView(normalView):
                 message = message.get("message")
                 single_cluster['status'] = message['status']
                 single_cluster['id'] = message['clusterid']
+                single_cluster['proxy_server_url'] = "http://" + message['proxy_server_ip'] + ":" + str(message['proxy_server_port'])
                 full_clusters.append(single_cluster)
             else:
                 self.error()
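The dashboard view now builds proxy_server_url from the new proxy_server_ip and proxy_server_port fields. Cluster records written before this change will not contain those keys, so the added line would raise KeyError for them; a more defensive variant (purely an illustrative suggestion, not part of this diff) could be:

# Hypothetical hardening of the added line for clusters created before this change.
proxy_ip = message.get('proxy_server_ip')
proxy_port = message.get('proxy_server_port')
if proxy_ip and proxy_port:
    single_cluster['proxy_server_url'] = "http://" + proxy_ip + ":" + str(proxy_port)
else:
    single_cluster['proxy_server_url'] = ""    # old record: no per-worker proxy stored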