Add recovery mode to port mapping; stopping a cluster now deletes its port mappings.
parent 05317336de
commit 3597ef7b08
@@ -334,21 +334,26 @@ class portcontrol(object):
         return [True,""]

     @staticmethod
-    def acquire_port_mapping(container_name, container_ip, container_port):
+    def acquire_port_mapping(container_name, container_ip, container_port, host_port=None):
         global free_ports
         global allocated_ports
         if container_name in allocated_ports.keys():
             return [False, "This container already has a port mapping."]
         if container_name == "" or container_ip == "" or container_port == "":
             return [False, "Node Name or Node IP or Node Port can't be null."]
-        #print(free_ports[10000])
+        #print("acquire_port_mapping1")
         free_port = 1
-        while free_port <= 65535:
-            if free_ports[free_port]:
-                break
-            free_port += 1
-        if free_port == 65536:
-            return [False, "No free ports."]
+        if host_port is not None:
+            # recover from host_port
+            free_port = int(host_port)
+        else:
+            # acquire new free port
+            while free_port <= 65535:
+                if free_ports[free_port]:
+                    break
+                free_port += 1
+            if free_port == 65536:
+                return [False, "No free ports."]
         free_ports[free_port] = False
         allocated_ports[container_name] = free_port
         public_ip = env.getenv("PUBLIC_IP")
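The new host_port parameter switches acquire_port_mapping between two modes: with host_port given, the mapping recorded before a restart is reclaimed directly; without it, the first free host port is found by a linear scan, as before. A minimal standalone sketch of the two modes, assuming a 65536-entry free-port table like the module-level free_ports; the helper name acquire and the sample ports are illustrative only:

free_ports = [True] * 65536        # index = host port, True = available
allocated_ports = {}               # container name -> allocated host port

def acquire(container_name, host_port=None):
    if host_port is not None:
        # recovery mode: reclaim the exact port saved before the restart
        port = int(host_port)
    else:
        # normal mode: linear scan for the first available port
        port = 1
        while port <= 65535:
            if free_ports[port]:
                break
            port += 1
        if port == 65536:
            return [False, "No free ports."]
    free_ports[port] = False
    allocated_ports[container_name] = port
    return [True, port]

print(acquire("u1-node-0"))          # [True, 1]     -> fresh allocation
print(acquire("u1-node-1", 10022))   # [True, 10022] -> recovered mapping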
@@ -264,6 +264,18 @@ class VclusterMgr(object):
         clusterfile.close()
         return [True, clusterinfo]

+    def recover_port_mapping(self,username,clustername):
+        [status, clusterinfo] = self.get_clusterinfo(clustername, username)
+        for rec in clusterinfo['port_mapping']:
+            if self.distributedgw == 'True':
+                worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
+                [success, host_port] = worker.acquire_port_mapping(rec['node_name'], rec['node_ip'], rec['node_port'], rec['host_port'])
+            else:
+                [success, host_port] = portcontrol.acquire_port_mapping(rec['node_name'], rec['node_ip'], rec['node_port'], rec['host_port'])
+            if not success:
+                return [False, host_port]
+        return [True, clusterinfo]
+
     def delete_port_mapping(self, username, clustername, node_name):
         [status, clusterinfo] = self.get_clusterinfo(clustername, username)
         idx = 0
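recover_port_mapping replays every record saved in clusterinfo['port_mapping'], passing the stored host_port so that acquire_port_mapping runs in recovery mode, and routes the call through the proxy worker when the gateway is distributed. A hypothetical shape of one such record, inferred from the fields read above; the concrete values are made up:

rec = {
    "node_name": "u1-node-0",   # container the mapping belongs to
    "node_ip": "192.168.1.2",   # container IP inside the cluster network
    "node_port": "22",          # service port inside the container
    "host_port": "10022",       # host port saved before the restart
}

# Replaying the record is just a recovery-mode acquire:
#   portcontrol.acquire_port_mapping(rec['node_name'], rec['node_ip'],
#                                    rec['node_port'], rec['host_port'])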
@@ -591,6 +603,10 @@ class VclusterMgr(object):
                 namesplit = container['containername'].split('-')
                 portname = namesplit[1] + '-' + namesplit[2]
                 worker.recover_usernet(portname, uid, info['proxy_server_ip'], container['host']==info['proxy_server_ip'])
+        # recover ports mapping
+        [success, msg] = self.recover_port_mapping(username,clustername)
+        if not success:
+            return [False, msg]
         return [True, "start cluster"]

     # maybe here should use cluster id
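Port-mapping recovery is wired in after the containers and user networks are restored, and a failure now aborts the start and surfaces the reason through the usual [success, message] pair. A control-flow sketch with stubbed helpers; the failure message is invented:

def recover_port_mapping(username, clustername):
    # stub: pretend one saved host port is taken again after the restart
    return [False, "host port 10022 is no longer free"]

def start_cluster(username, clustername):
    # ... containers and usernet are recovered first ...
    [success, msg] = recover_port_mapping(username, clustername)
    if not success:
        return [False, msg]          # abort the start, report why
    return [True, "start cluster"]

print(start_cluster("u1", "c1"))     # [False, 'host port 10022 is no longer free']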
@@ -606,10 +622,12 @@ class VclusterMgr(object):
         else:
             proxytool.delete_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername)
         for container in info['containers']:
+            self.delete_port_mapping(username,clustername,container['containername'])
             worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
             if worker is None:
                 return [False, "The worker can't be found or has been stopped."]
             worker.stop_container(container['containername'])
+        [status, info] = self.get_clusterinfo(clustername, username)
         info['status']='stopped'
         info['start_time']="------"
         infofile = open(self.fspath+"/global/users/"+username+"/clusters/"+clustername, 'w')
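Stopping a cluster now drops each container's port mapping before the container itself is stopped, and clusterinfo is re-read afterwards so the dict written back to disk reflects those deletions rather than the stale snapshot taken at the top of the method. A minimal sketch of that stale-snapshot hazard, assuming delete_port_mapping persists its changes to the per-cluster file (the file name and JSON layout here are illustrative):

import json, os, tempfile

path = os.path.join(tempfile.mkdtemp(), "cluster")

def save(d):
    with open(path, "w") as f:
        json.dump(d, f)

def load():
    with open(path) as f:
        return json.load(f)

save({"status": "running", "port_mapping": [{"host_port": "10022"}]})

info = load()                                      # snapshot taken early
save({"status": "running", "port_mapping": []})    # delete_port_mapping persists

info = load()                                      # the added re-read
info["status"] = "stopped"
save(info)
print(load())   # {'status': 'stopped', 'port_mapping': []} -> deletions kept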