debug migration

zhuyj17 2019-04-22 00:48:08 +08:00
parent e9dc2b0fdc
commit aedee43caf
2 changed files with 23 additions and 17 deletions

Changed file 1:

@@ -397,11 +397,10 @@ def migrate_cluster():
     user = request.form.get('username',None)
     if user is None:
         return json.dumps({'success':'false', 'message':'User is required!'})
-    clustername = form.get('clustername', None)
+    clustername = request.form.get('clustername', None)
     if (clustername == None):
         return json.dumps({'success':'false', 'message':'clustername is null'})
-    containername = form.get('containername', None)
-    new_hosts = form.get('new_hosts', None)
+    new_hosts = request.form.get('new_hosts', None)
     if (new_hosts == None):
         return json.dumps({'success':'false', 'message':'new_hosts is null'})
     new_host_list = new_hosts.split(',')
@@ -412,17 +411,23 @@ def migrate_cluster():
         res = post_to_user("/master/user/groupinfo/", {'auth_key':auth_key})
         groups = json.loads(res['groups'])
         quotas = {}
+        for group in groups:
+            #logger.info(group)
+            quotas[group['name']] = group['quotas']
         rc_info = post_to_user("/master/user/recoverinfo/", {'username':user,'auth_key':auth_key})
-        groupname = re_info['groupname']
+        groupname = rc_info['groupname']
         user_info = {"data":{"id":rc_info['uid'],"groupinfo":quotas[groupname]}}
-        [status,msg] = G_vclustermgr.migrate_cluster(clustername, username, new_host_list, user_info)
+        logger.info("Migrate cluster for user(%s) cluster(%s) to new_hosts(%s). user_info(%s)"
+                    %(clustername, user, str(new_host_list), user_info))
+        [status,msg] = G_vclustermgr.migrate_cluster(clustername, user, new_host_list, user_info)
         if not status:
             logger.error(msg)
             return json.dumps({'success':'false', 'message': msg})
         return json.dumps({'success':'true', 'action':'migrate_container'})
     except Exception as ex:
-        logger.error(str(ex))
+        logger.error(traceback.format_exc())
         return json.dumps({'success':'false', 'message': str(ex)})
     finally:
         G_ulockmgr.release(user)
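Note: the patched handler reads username, clustername, and new_hosts from the POST form and replies with a JSON status object. A minimal client sketch is shown below; the master base URL and route path are assumptions for illustration, only the form fields come from this diff:

# Hypothetical client call against the migrate_cluster endpoint patched above.
# The base URL and route path are assumed; only the form fields appear in the diff.
import requests

MASTER = "http://docklet-master:9000"      # assumed master REST address
form = {
    "username": "alice",                   # owner of the cluster ('username' in request.form)
    "clustername": "alice-cluster",        # 'clustername' in request.form
    "new_hosts": "10.0.0.11,10.0.0.12",    # comma-separated list, split(',') server-side
}
resp = requests.post(MASTER + "/cluster/migrate/", data=form)
print(resp.json())                         # e.g. {'success': 'true', 'action': 'migrate_container'}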

Changed file 2:

@@ -738,7 +738,7 @@ class VclusterMgr(object):
         con_db = Container.query.get(containername)
         if con_db is None:
             return [False, 'Container not found']
-        if host == new_host:
+        if con_db.host == new_host:
             return [False, 'Container has been on the new host']
         oldworker = self.nodemgr.ip_to_rpc(con_db.host)
@@ -757,7 +757,7 @@ class VclusterMgr(object):
         proxy_server_ip = self.networkmgr.usrgws[username]
         [status, proxy_public_ip] = self.etcd.getkey("machines/publicIP/"+proxy_server_ip)
         if not status:
-            self.imagemgr.removeImage(username,imagename)
+            self.imgmgr.removeImage(username,imagename)
             logger.error("Fail to get proxy_public_ip %s."%(proxy_server_ip))
             return [False, "Fail to get proxy server public IP."]
         uid = user_info['data']['id']
@@ -771,22 +771,22 @@ class VclusterMgr(object):
         gateway = self.networkmgr.get_usergw(username)
         image = {'name':imagename,'type':'private','owner':username }
         logger.info("Migrate: proxy_ip:%s uid:%s setting:%s clusterid:%s cid:%s hostname:%s gateway:%s image:%s"
-                    %(proxy_public_ip, str(uid), str(setting), clusterid, cid, hostname, gateway, str(image))
+                    %(proxy_public_ip, str(uid), str(setting), clusterid, cid, hostname, gateway, str(image)))
         logger.info("Migrate: create container(%s) on new host %s"%(containername, new_host))
         worker = self.nodemgr.ip_to_rpc(new_host)
         if worker is None:
-            self.imagemgr.removeImage(username,imagename)
+            self.imgmgr.removeImage(username,imagename)
             return [False, "New host worker can't be found or has been stopped."]
         status,msg = worker.create_container(containername, proxy_public_ip, username, uid, json.dumps(setting),
                 clustername, str(clusterid), str(cid), hostname, con_db.ip, gateway, json.dumps(image))
         if not status:
-            self.imagemgr.removeImage(username,imagename)
+            self.imgmgr.removeImage(username,imagename)
             return [False, msg]
         con_db.host = new_host
         db.session.commit()
         oldworker.delete_container(containername)
-        self.imagemgr.removeImage(username,imagename)
+        self.imgmgr.removeImage(username,imagename)
         return [True,""]

     def migrate_cluster(self, clustername, username, new_host_list, user_info):
@@ -809,11 +809,12 @@ class VclusterMgr(object):
                 if prestatus == 'running':
                     self.start_cluster(clustername, username, user_info)
                 return [False, msg]
+        logger.info("[Migrate] prestatus:%s for cluster(%s) user(%s)"%(prestatus, clustername, username))
         if prestatus == 'running':
             status, msg = self.start_cluster(clustername, username, user_info)
             if not status:
                 return [False, msg]
         return [True, ""]

     def is_cluster(self, clustername, username):
         [status, clusters] = self.list_clusters(username)
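Note: the final hunk only adds a log line, but it sits at the pivot of migrate_cluster's control flow: each container is migrated in turn, any failure restarts the cluster if it was previously running and aborts, and a fully successful pass restarts the cluster at the end. The sketch below is a self-contained paraphrase of that pattern with stub callables, not docklet's actual API; the initial stop_cluster call is assumed from context and does not appear in this hunk.

# Self-contained sketch of the stop / migrate-each / conditionally-restart pattern
# used by migrate_cluster. Every function here is a stub for illustration only.
import logging

logger = logging.getLogger("migrate-sketch")

def migrate_cluster_sketch(containers, prestatus, migrate_one, stop_cluster, start_cluster):
    stop_cluster()                          # assumed: cluster is stopped before moving containers
    for container in containers:
        status, msg = migrate_one(container)
        if not status:
            # roll back: bring the cluster up again if it was running before
            if prestatus == 'running':
                start_cluster()
            return [False, msg]
    logger.info("[Migrate] prestatus:%s", prestatus)
    if prestatus == 'running':              # restart only clusters that were running before
        status, msg = start_cluster()
        if not status:
            return [False, msg]
    return [True, ""]

# Example run with trivial stubs:
print(migrate_cluster_sketch(
    containers=["c-0", "c-1"],
    prestatus="running",
    migrate_one=lambda c: (True, ""),
    stop_cluster=lambda: (True, ""),
    start_cluster=lambda: (True, ""),
))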