Add migrate cluster

This commit is contained in:
Firmlyzhu 2019-04-21 19:53:41 +08:00
parent 7736f219db
commit e9dc2b0fdc
3 changed files with 78 additions and 4 deletions

View File

@ -389,6 +389,44 @@ def save_cluster(user, beans, form):
finally: finally:
G_ulockmgr.release(user) G_ulockmgr.release(user)
@app.route("/admin/migrate_cluster/", methods=['POST'])
@auth_key_required
def migrate_cluster():
    """Migrate every container of a user's cluster onto a new set of hosts.

    POST form fields:
        username    -- owner of the cluster (required)
        clustername -- cluster to migrate (required)
        new_hosts   -- comma-separated list of target host IPs (required)

    Returns a JSON string with 'success' ('true'/'false') and, on failure,
    a 'message' describing the error.
    """
    global G_vclustermgr
    global G_ulockmgr
    user = request.form.get('username', None)
    if user is None:
        return json.dumps({'success':'false', 'message':'User is required!'})
    # Bug fix: the original read from an undefined name `form`; Flask
    # exposes POST data via request.form.
    clustername = request.form.get('clustername', None)
    if clustername is None:
        return json.dumps({'success':'false', 'message':'clustername is null'})
    new_hosts = request.form.get('new_hosts', None)
    if new_hosts is None:
        return json.dumps({'success':'false', 'message':'new_hosts is null'})
    new_host_list = new_hosts.split(',')
    G_ulockmgr.acquire(user)
    auth_key = env.getenv('AUTH_KEY')
    try:
        logger.info("handle request : migrate cluster to %s. user:%s clustername:%s" % (str(new_hosts), user, clustername))
        res = post_to_user("/master/user/groupinfo/", {'auth_key': auth_key})
        groups = json.loads(res['groups'])
        # Bug fix: the original left `quotas` empty, so the lookup below
        # always raised KeyError.  Build the groupname -> quota mapping
        # from the group info (assumes each group dict carries 'name' and
        # 'quotas' keys, as elsewhere in this project -- TODO confirm).
        quotas = {}
        for group in groups:
            quotas[group['name']] = group['quotas']
        rc_info = post_to_user("/master/user/recoverinfo/", {'username': user, 'auth_key': auth_key})
        # Bug fix: `re_info` was a typo for `rc_info`.
        groupname = rc_info['groupname']
        user_info = {"data": {"id": rc_info['uid'], "groupinfo": quotas[groupname]}}
        # Bug fix: `username` was undefined here; the form value is bound
        # to `user`.  (The unused `containername` form read was dropped.)
        [status, msg] = G_vclustermgr.migrate_cluster(clustername, user, new_host_list, user_info)
        if not status:
            logger.error(msg)
            return json.dumps({'success':'false', 'message': msg})
        return json.dumps({'success':'true', 'action':'migrate_container'})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        # Always release the per-user lock, even on error paths.
        G_ulockmgr.release(user)
@app.route("/image/list/", methods=['POST']) @app.route("/image/list/", methods=['POST'])
@login_required @login_required

View File

@ -411,6 +411,7 @@ class userManager:
"success":'true', "success":'true',
"data":{ "data":{
"username" : user.username, "username" : user.username,
"id": user.id,
"password" : user.password, "password" : user.password,
"avatar" : user.avatar, "avatar" : user.avatar,
"nickname" : user.nickname, "nickname" : user.nickname,
@ -440,6 +441,7 @@ class userManager:
"success": 'true', "success": 'true',
"data":{ "data":{
"username" : user.username, "username" : user.username,
"id": user.id,
"password" : user.password, "password" : user.password,
"avatar" : user.avatar, "avatar" : user.avatar,
"nickname" : user.nickname, "nickname" : user.nickname,

View File

@ -728,7 +728,7 @@ class VclusterMgr(object):
full_clusters.append(single_cluster)''' full_clusters.append(single_cluster)'''
return [True, clusters] return [True, clusters]
def migrate_container(self, clustername, username, containername, new_host, proxy_public_ip, user_info): def migrate_container(self, clustername, username, containername, new_host, user_info):
[status, info] = self.get_clusterinfo(clustername, username) [status, info] = self.get_clusterinfo(clustername, username)
if not status: if not status:
return [False, "cluster not found"] return [False, "cluster not found"]
@ -746,19 +746,21 @@ class VclusterMgr(object):
return [False, "Old host worker can't be found or has been stopped."] return [False, "Old host worker can't be found or has been stopped."]
oldworker.stop_container(containername) oldworker.stop_container(containername)
imagename = "migrate-" + containername + "-" + datetime.datetime.now().strftime("%Y-%m-%d") imagename = "migrate-" + containername + "-" + datetime.datetime.now().strftime("%Y-%m-%d")
logger.info("Save Image for container:%s imagename:%s host:%s"%(containername, imagename, con_db.host))
status,msg = oldworker.create_image(username,imagename,containername,"",10000) status,msg = oldworker.create_image(username,imagename,containername,"",10000)
if not status: if not status:
return [False, msg] return [False, msg]
#con_db.lastsave = datetime.datetime.now() #con_db.lastsave = datetime.datetime.now()
#con_db.image = imagename #con_db.image = imagename
'''self.networkmgr.load_usrgw(username) self.networkmgr.load_usrgw(username)
proxy_server_ip = self.networkmgr.usrgws[username] proxy_server_ip = self.networkmgr.usrgws[username]
[status, proxy_public_ip] = self.etcd.getkey("machines/publicIP/"+proxy_server_ip) [status, proxy_public_ip] = self.etcd.getkey("machines/publicIP/"+proxy_server_ip)
if not status: if not status:
self.imagemgr.removeImage(username,imagename)
logger.error("Fail to get proxy_public_ip %s."%(proxy_server_ip)) logger.error("Fail to get proxy_public_ip %s."%(proxy_server_ip))
return [False, "Fail to get proxy server public IP."]''' return [False, "Fail to get proxy server public IP."]
uid = json.loads(user_info)["data"]["id"] uid = user_info['data']['id']
setting = { setting = {
'cpu': con_db.setting_cpu, 'cpu': con_db.setting_cpu,
'memory': con_db.setting_mem, 'memory': con_db.setting_mem,
@ -768,6 +770,9 @@ class VclusterMgr(object):
hostname = "host-"+str(cid) hostname = "host-"+str(cid)
gateway = self.networkmgr.get_usergw(username) gateway = self.networkmgr.get_usergw(username)
image = {'name':imagename,'type':'private','owner':username } image = {'name':imagename,'type':'private','owner':username }
logger.info("Migrate: proxy_ip:%s uid:%s setting:%s clusterid:%s cid:%s hostname:%s gateway:%s image:%s"
%(proxy_public_ip, str(uid), str(setting), clusterid, cid, hostname, gateway, str(image)))
logger.info("Migrate: create container(%s) on new host %s"%(containername, new_host))
worker = self.nodemgr.ip_to_rpc(new_host) worker = self.nodemgr.ip_to_rpc(new_host)
if worker is None: if worker is None:
@ -776,11 +781,40 @@ class VclusterMgr(object):
status,msg = worker.create_container(containername, proxy_public_ip, username, uid, json.dumps(setting), status,msg = worker.create_container(containername, proxy_public_ip, username, uid, json.dumps(setting),
clustername, str(clusterid), str(cid), hostname, con_db.ip, gateway, json.dumps(image)) clustername, str(clusterid), str(cid), hostname, con_db.ip, gateway, json.dumps(image))
if not status: if not status:
self.imagemgr.removeImage(username,imagename)
return [False, msg] return [False, msg]
con_db.host = new_host
db.session.commit()
oldworker.delete_container(containername) oldworker.delete_container(containername)
self.imagemgr.removeImage(username,imagename) self.imagemgr.removeImage(username,imagename)
return [True,""] return [True,""]
def migrate_cluster(self, clustername, username, new_host_list, user_info):
    """Migrate all containers of a cluster onto hosts in new_host_list.

    The cluster is stopped first; each container not already on one of
    the target hosts is migrated to a randomly chosen target, trying the
    candidates in shuffled order until one succeeds.  If the cluster was
    running beforehand, it is restarted afterwards.

    Returns [True, ""] on success, [False, message] on any failure.
    """
    [status, info] = self.get_clusterinfo(clustername, username)
    if not status:
        return [False, "cluster not found"]
    # Bug fix: with an empty new_host_list the original reached
    # `return [False, msg]` with `msg` unbound, raising NameError.
    if not new_host_list:
        return [False, "no target hosts provided"]
    prestatus = info['status']
    self.stop_cluster(clustername, username)
    for container in info['containers']:
        if container['host'] in new_host_list:
            # Already on an acceptable host; nothing to do.
            continue
        random.shuffle(new_host_list)
        for new_host in new_host_list:
            status, msg = self.migrate_container(clustername, username, container['containername'], new_host, user_info)
            if status:
                break
            else:
                logger.error(msg)
        else:
            # Every candidate host failed for this container: restore
            # the cluster's previous state before reporting the error.
            if prestatus == 'running':
                self.start_cluster(clustername, username, user_info)
            return [False, msg]
    if prestatus == 'running':
        status, msg = self.start_cluster(clustername, username, user_info)
        if not status:
            return [False, msg]
    return [True, ""]
def is_cluster(self, clustername, username): def is_cluster(self, clustername, username):
[status, clusters] = self.list_clusters(username) [status, clusters] = self.list_clusters(username)
if clustername in clusters: if clustername in clusters: