add multiple port mapping and quota
parent 1ccf86ade9
commit 4ab4e5b4ca
@@ -409,7 +409,8 @@ def add_port_mapping(user, beans, form):
     clustername = form.get("clustername", None)
     if node_name is None or node_ip is None or node_port is None or clustername is None:
         return json.dumps({'success':'false', 'message': 'Illegal form.'})
-    [status, message] = G_vclustermgr.add_port_mapping(user,clustername,node_name,node_ip,node_port)
+    user_info = post_to_user("/user/selfQuery/", data = {"token": form.get("token")})
+    [status, message] = G_vclustermgr.add_port_mapping(user,clustername,node_name,node_ip,node_port,user_info['data']['groupinfo'])
     if status is True:
         return json.dumps({'success':'true', 'action':'addproxy'})
     else:
@@ -422,9 +423,10 @@ def delete_port_mapping(user, beans, form):
     logger.info ("handle request : delete port mapping")
     node_name = form.get("node_name",None)
     clustername = form.get("clustername", None)
+    node_port = form.get("node_port", None)
     if node_name is None or clustername is None:
         return json.dumps({'success':'false', 'message': 'Illegal form.'})
-    [status, message] = G_vclustermgr.delete_port_mapping(user,clustername,node_name)
+    [status, message] = G_vclustermgr.delete_port_mapping(user,clustername,node_name,node_port)
     if status is True:
         return json.dumps({'success':'true', 'action':'addproxy'})
     else:
@@ -179,8 +179,10 @@ class Container_Collector(threading.Thread):
                 disk_quota = workercinfo[vnode_name]['disk_use']['total']
             else:
                 disk_quota = 0
-            # billing value = cpu used/a + memory used/b + disk quota/c
-            billingval = math.ceil(cpu_increment/a_cpu + avemem/b_mem + float(disk_quota)/1024.0/1024.0/c_disk)
+            # get ports
+            ports_count = count_port_mapping(vnode_name)
+            # billing value = cpu used/a + memory used/b + disk quota/c + ports
+            billingval = math.ceil(cpu_increment/a_cpu + avemem/b_mem + float(disk_quota)/1024.0/1024.0/c_disk + ports_count)
             if billingval > 100:
                 # report outsize billing value
                 logger.info("Huge Billingval for "+vnode_name+". cpu_increment:"+str(cpu_increment)+" avemem:"+str(avemem)+" disk:"+str(disk_quota)+"\n")
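Note: the billing change above adds one billing unit per mapped port before rounding up. A minimal standalone sketch of the arithmetic, with invented numbers (in the real collector, a_cpu, b_mem and c_disk are billing coefficients taken from configuration):

import math

a_cpu, b_mem, c_disk = 1000, 2000, 4000   # assumed billing coefficients
cpu_increment = 1500.0                    # assumed CPU usage this cycle
avemem = 512.0                            # assumed average memory (MB)
disk_quota = 2 * 1024 * 1024 * 1024       # assumed disk quota in bytes
ports_count = 3                           # each port mapping now costs one unit

billingval = math.ceil(cpu_increment/a_cpu + avemem/b_mem
                       + float(disk_quota)/1024.0/1024.0/c_disk + ports_count)
print(billingval)  # 6: ceil(1.5 + 0.256 + 0.512 + 3)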
@@ -522,6 +524,22 @@ def get_owner(container_name):
     names = container_name.split('-')
     return names[0]
 
+def count_port_mapping(vnode_name):
+    user = get_owner(vnode_name)
+    fspath = env.getenv("FS_PREFIX")
+    if not os.path.exists(fspath+"/global/users/"+user+"/clusters"):
+        return 0
+    clusters = os.listdir(fspath+"/global/users/"+user+"/clusters")
+    ports_count = 0
+    for cluster in clusters:
+        clusterpath = fspath + "/global/users/" + get_owner(vnode_name) + "/clusters/" + cluster
+        if not os.path.isfile(clusterpath):
+            return 0
+        infofile = open(clusterpath, 'r')
+        info = json.loads(infofile.read())
+        ports_count += len([mapping for mapping in info['port_mapping'] if mapping['node_name'] == vnode_name])
+    return ports_count
+
 # the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes
 class Master_Collector(threading.Thread):
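Note: count_port_mapping() walks every cluster file under FS_PREFIX for the vnode's owner and counts the entries whose node_name matches. A hedged sketch of the per-cluster JSON it assumes; the field names mirror the code above, the values are invented:

cluster_info = {
    "port_mapping": [
        {"node_name": "u-1-0", "node_ip": "192.168.0.2", "node_port": "80",   "host_port": "10001"},
        {"node_name": "u-1-0", "node_ip": "192.168.0.2", "node_port": "8080", "host_port": "10002"},
        {"node_name": "u-1-1", "node_ip": "192.168.0.3", "node_port": "22",   "host_port": "10003"},
    ]
}

# the same comprehension as in count_port_mapping(): mappings owned by one vnode
vnode_name = "u-1-0"
print(len([m for m in cluster_info["port_mapping"] if m["node_name"] == vnode_name]))  # 2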
@@ -337,7 +337,11 @@ class portcontrol(object):
     def acquire_port_mapping(container_name, container_ip, container_port, host_port=None):
         global free_ports
         global allocated_ports
-        if container_name in allocated_ports.keys():
+        # if container_name in allocated_ports.keys():
+        #     return [False, "This container already has a port mapping."]
+        if container_name not in allocated_ports.keys():
+            allocated_ports[container_name] = {}
+        elif container_port in allocated_ports[container_name].keys():
             return [False, "This container already has a port mapping."]
         if container_name == "" or container_ip == "" or container_port == "":
             return [False, "Node Name or Node IP or Node Port can't be null."]
@@ -355,7 +359,7 @@ class portcontrol(object):
         if free_port == 65536:
             return [False, "No free ports."]
         free_ports[free_port] = False
-        allocated_ports[container_name] = free_port
+        allocated_ports[container_name][container_port] = free_port
         public_ip = env.getenv("PUBLIC_IP")
         try:
             subprocess.run(['iptables','-t','nat','-A','PREROUTING','-p','tcp','--dport',str(free_port),"-j","DNAT",'--to-destination','%s:%s'%(container_ip,container_port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
@@ -369,12 +373,12 @@ class portcontrol(object):
         global allocated_ports
         if container_name not in allocated_ports.keys():
             return [False, "This container does not have a port mapping."]
-        free_port = allocated_ports[container_name]
+        free_port = allocated_ports[container_name][container_port]
         public_ip = env.getenv("PUBLIC_IP")
         try:
             subprocess.run(['iptables','-t','nat','-D','PREROUTING','-p','tcp','--dport',str(free_port),"-j","DNAT",'--to-destination','%s:%s'%(container_ip,container_port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
         except subprocess.CalledProcessError as suberror:
             return [False, "release port mapping failed : %s" % suberror.stdout.decode('utf-8')]
         free_ports[free_port] = True
-        allocated_ports.pop(container_name)
+        allocated_ports[container_name].pop(container_port)
         return [True, ""]
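Note: the three portcontrol hunks above turn allocated_ports from a flat container -> host-port dict into a dict of dicts keyed by container port, which is what permits several mappings per container. A self-contained sketch of the new bookkeeping (names and ports invented, no iptables involved):

allocated_ports = {}

def record_mapping(container_name, container_port, host_port):
    # mirrors the new acquire logic: create the inner dict on first use,
    # reject only a duplicate (container, container_port) pair
    if container_name not in allocated_ports:
        allocated_ports[container_name] = {}
    elif container_port in allocated_ports[container_name]:
        return False
    allocated_ports[container_name][container_port] = host_port
    return True

print(record_mapping("u-1-0", "80", 10001))    # True
print(record_mapping("u-1-0", "8080", 10002))  # True: second port on the same container
print(record_mapping("u-1-0", "80", 10003))    # False: this container port is taken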
@@ -167,10 +167,10 @@ class userManager:
         if not os.path.exists(fspath+"/global/sys/quota"):
             groupfile = open(fspath+"/global/sys/quota",'w')
             groups = []
-            groups.append({'name':'root', 'quotas':{ 'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8' }})
-            groups.append({'name':'admin', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8'}})
-            groups.append({'name':'primary', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8'}})
-            groups.append({'name':'foundation', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8'}})
+            groups.append({'name':'root', 'quotas':{ 'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8'}})
+            groups.append({'name':'admin', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8'}})
+            groups.append({'name':'primary', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8'}})
+            groups.append({'name':'foundation', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8'}})
             groupfile.write(json.dumps(groups))
             groupfile.close()
         if not os.path.exists(fspath+"/global/sys/quotainfo"):
@@ -185,6 +185,7 @@ class userManager:
             quotas['quotainfo'].append({'name':'image', 'hint':'how many images the user can save, e.g. 10'})
             quotas['quotainfo'].append({'name':'idletime', 'hint':'will stop cluster after idletime, number of hours, e.g. 24'})
             quotas['quotainfo'].append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'})
+            quotas['quotainfo'].append({'name':'portmapping', 'hint':'how many ports the user can map, e.g. 8'})
             quotafile.write(json.dumps(quotas))
             quotafile.close()
         if not os.path.exists(fspath+"/global/sys/lxc.default"):
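Note: a sketch of one group record as the code above now writes it to fspath+"/global/sys/quota". Every quota value is stored as a string, so consumers parse it with int() before comparing:

import json

group = {'name': 'primary',
         'quotas': {'cpu': '4', 'disk': '2000', 'data': '100', 'memory': '2000',
                    'image': '10', 'idletime': '24', 'vnode': '8', 'portmapping': '8'}}
print(json.dumps(group))
print(int(group['quotas']['portmapping']))  # 8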
@@ -249,7 +249,15 @@ class VclusterMgr(object):
         clusterfile.close()
         return [True, clusterinfo]
 
-    def add_port_mapping(self,username,clustername,node_name,node_ip, port):
+    def count_port_mapping(self, username):
+        return sum([len(self.get_clusterinfo(cluster, username)[1]['port_mapping']) for cluster in self.list_clusters(username)[1]])
+
+    def add_port_mapping(self,username,clustername,node_name,node_ip,port,quota):
+        port_mapping_count = self.count_port_mapping(username)
+
+        if port_mapping_count >= int(quota['portmapping']):
+            return [False, 'Port mapping quota exceed.']
+
         [status, clusterinfo] = self.get_clusterinfo(clustername, username)
         host_port = 0
         if self.distributedgw == 'True':
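Note: the quota gate added to add_port_mapping() compares the user's current mapping count against the group quota before anything is allocated. A minimal standalone sketch with get_clusterinfo()/list_clusters() replaced by an in-memory stub (all data invented):

clusters = {
    "c1": {"port_mapping": [{"node_name": "u-1-0", "node_port": "80"}]},
    "c2": {"port_mapping": []},
}

def count_port_mapping():
    # same idea as VclusterMgr.count_port_mapping(): sum over every cluster
    return sum(len(info["port_mapping"]) for info in clusters.values())

quota = {"portmapping": "8"}  # upstream this comes from user_info['data']['groupinfo']
if count_port_mapping() >= int(quota["portmapping"]):
    print("Port mapping quota exceed.")
else:
    print("ok to add: %d of %s used" % (count_port_mapping(), quota["portmapping"]))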
@@ -279,11 +287,41 @@ class VclusterMgr(object):
             return [False, host_port]
         return [True, clusterinfo]
 
-    def delete_port_mapping(self, username, clustername, node_name):
+    def delete_all_port_mapping(self, username, clustername, node_name):
         [status, clusterinfo] = self.get_clusterinfo(clustername, username)
+        error_msg = None
+        delete_list = []
+        for item in clusterinfo['port_mapping']:
+            if item['node_name'] == node_name:
+                node_ip = item['node_ip']
+                node_port = item['node_port']
+                if self.distributedgw == 'True':
+                    worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
+                    [success,msg] = worker.release_port_mapping(node_name, node_ip, node_port)
+                else:
+                    [success,msg] = portcontrol.release_port_mapping(node_name, node_ip, node_port)
+                if not success:
+                    error_msg = msg
+                else:
+                    delete_list.append(item)
+        if len(delete_list) > 0:
+            for item in delete_list:
+                clusterinfo['port_mapping'].remove(item)
+            clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
+            clusterfile.write(json.dumps(clusterinfo))
+            clusterfile.close()
+        else:
+            return [False,"No port mapping."]
+        if error_msg is not None:
+            return [False,error_msg]
+        else:
+            return [True,"Success"]
+
+    def delete_port_mapping(self, username, clustername, node_name, node_port):
+        [status, clusterinfo] = self.get_clusterinfo(clustername, username)
         idx = 0
         for item in clusterinfo['port_mapping']:
-            if item['node_name'] == node_name:
+            if item['node_name'] == node_name and item['node_port'] == node_port:
                 break
             idx += 1
         if idx == len(clusterinfo['port_mapping']):
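Note: the rename makes the split in semantics explicit: delete_all_port_mapping() drops every mapping a vnode owns (used below when a container or cluster is torn down), while delete_port_mapping() now matches on the (node_name, node_port) pair and removes exactly one entry. A sketch of the new matching scan, with invented data:

port_mapping = [
    {"node_name": "u-1-0", "node_port": "80"},
    {"node_name": "u-1-0", "node_port": "8080"},
]

idx = 0
for item in port_mapping:
    if item["node_name"] == "u-1-0" and item["node_port"] == "8080":
        break
    idx += 1
if idx == len(port_mapping):
    print("no such mapping")
else:
    print("removed", port_mapping.pop(idx))  # the port-80 mapping survives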
@@ -439,7 +477,7 @@ class VclusterMgr(object):
             new_hostinfo.append(host)
         hostfile.writelines(new_hostinfo)
         hostfile.close()
-        [success, msg] = self.delete_port_mapping(username, clustername, containername)
+        [success, msg] = self.delete_all_port_mapping(username, clustername, containername)
         if not success:
             return [False, msg]
         [status, info] = self.get_clusterinfo(clustername, username)
@@ -629,7 +667,7 @@ class VclusterMgr(object):
         else:
             proxytool.delete_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername)
         for container in info['containers']:
-            self.delete_port_mapping(username,clustername,container['containername'])
+            self.delete_all_port_mapping(username,clustername,container['containername'])
             worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
             if worker is None:
                 return [False, "The worker can't be found or has been stopped."]
@@ -293,7 +293,7 @@
                 <td>{{ record['node_port'] }}</td>
                 <td>{{ clusterinfo['proxy_public_ip'] }}</td>
                 <td>{{ record['host_port'] }}</td>
-                <td><a class="btn btn-xs btn-danger" href="/port_mapping/delete/{{master.split("@")[0]}}/{{ clustername }}/{{ record['node_name'] }}/">Delete</a></td>
+                <td><a class="btn btn-xs btn-danger" href="/port_mapping/delete/{{master.split("@")[0]}}/{{ clustername }}/{{ record['node_name'] }}/{{ record['node_port'] }}/">Delete</a></td>
             {% endfor %}
             </tbody>
         </table>
@@ -38,6 +38,7 @@
                 <th>Vnode</th>
                 <th>Image</th>
                 <th>Idletime</th>
+                <th>Ports</th>
             </tr>
             </thead>
             <tbody>
@@ -52,6 +53,7 @@
                 <th>{{ quotainfo['vnode'] }}</th>
                 <th>{{ quotainfo['image'] }}</th>
                 <th>{{ quotainfo['idletime'] }} hours</th>
+                <th>{{ quotainfo['portmapping'] }}</th>
             </tr>
             </tbody>
         </table>
@@ -234,12 +234,13 @@ def addPortMapping(masterip):
     addPortMappingView.masterip = masterip
     return addPortMappingView.as_view()
 
-@app.route("/port_mapping/delete/<masterip>/<clustername>/<node_name>/", methods=['GET'])
+@app.route("/port_mapping/delete/<masterip>/<clustername>/<node_name>/<node_port>/", methods=['GET'])
 @login_required
-def delPortMapping(masterip,clustername,node_name):
+def delPortMapping(masterip,clustername,node_name,node_port):
     delPortMappingView.masterip = masterip
     delPortMappingView.clustername = clustername
     delPortMappingView.node_name = node_name
+    delPortMappingView.node_port = node_port
     return delPortMappingView.as_view()
 
 @app.route("/getmasterdesc/<mastername>/", methods=['POST'])
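Note: the template link and this Flask rule must agree on the URL shape, which now ends with the node port, e.g. /port_mapping/delete/10.0.0.1/mycluster/u-1-0/8080/ (segment values invented). A runnable sketch of how Flask hands each path segment to the view:

from flask import Flask

app = Flask(__name__)

@app.route("/port_mapping/delete/<masterip>/<clustername>/<node_name>/<node_port>/", methods=['GET'])
def delPortMapping(masterip, clustername, node_name, node_port):
    # in docklet these values are stashed on delPortMappingView before dispatch
    return "would delete %s:%s in cluster %s via master %s" % (node_name, node_port, clustername, masterip)

print(app.test_client().get("/port_mapping/delete/10.0.0.1/mycluster/u-1-0/8080/").data)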
@@ -426,7 +426,7 @@ class delPortMappingView(normalView):
 
     @classmethod
     def post(self):
-        data = {"clustername":self.clustername,"node_name":self.node_name}
+        data = {"clustername":self.clustername,"node_name":self.node_name,"node_port":self.node_port}
         result = dockletRequest.post('/port_mapping/delete/',data, self.masterip)
         success = result.get("success")
         if success == "true":