Fix minus quota bug

Read each container's cpu/memory quota from its LXC config file instead of
etcd, and treat a cpu.cfs_quota_us of -1 (unlimited) as the host's full core
count rather than letting the negative value through as a quota.
parent 21d10f1650
commit 5bdf6f1404
@@ -161,13 +161,6 @@ class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
         logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name']))
         [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info)
         if status:
-            user_info = G_usermgr.selfQuery(cur_user = cur_user)
-            quota = {}
-            quota['cpu'] = float(user_info['data']['groupinfo']['cpu'])/100000.0
-            quota['memory'] = float(user_info['data']['groupinfo']['memory'])*1000000/1024
-            etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (G_clustername))
-            for con in result['containers']:
-                etcdser.setkey('/vnodes/%s/quota'%(con['containername']), quota)
             self.response(200, {'success':'true', 'action':'create cluster', 'message':result})
         else:
             self.response(200, {'success':'false', 'action':'create cluster', 'message':result})
@@ -184,13 +177,6 @@ class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
         user_info = json.dumps(user_info)
         [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info)
         if status:
-            user_info = G_usermgr.selfQuery(cur_user = cur_user)
-            quota = {}
-            quota['cpu'] = float(user_info['data']['groupinfo']['cpu'])/100000.0
-            quota['memory'] = float(user_info['data']['groupinfo']['memory'])*1000000/1024
-            etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (G_clustername))
-            for con in result['containers']:
-                etcdser.setkey('/vnodes/%s/quota'%(con['containername']), quota)
             self.response(200, {'success':'true', 'action':'scale out', 'message':result})
         else:
             self.response(200, {'success':'false', 'action':'scale out', 'message':result})
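Both removed blocks did the same thing: derive a quota dict from the user's group settings and publish it to etcd for the monitor. The unit conversions are easy to misread, so here is a minimal sketch of the same arithmetic, assuming an example group grant of 200000 cfs_quota_us for cpu and 2048 MB for memory (illustrative values, not from this commit):

    # Unit conversions from the removed handler code; the group values below
    # are assumptions for illustration (cpu in cfs_quota_us, memory in MB).
    group = {'cpu': '200000', 'memory': '2048'}

    quota = {}
    # 200000 us of cpu per 100000 us scheduling period -> 2.0 cores
    quota['cpu'] = float(group['cpu']) / 100000.0
    # 2048 MB * 1000000 / 1024 -> 2000000.0, the project's KiB-style memory unit
    quota['memory'] = float(group['memory']) * 1000000 / 1024
    print(quota)  # {'cpu': 2.0, 'memory': 2000000.0}

With the collector now reading quotas straight from each container's LXC config (second file below), this publish step became redundant and was dropped.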
@@ -1,6 +1,6 @@
 #!/usr/bin/python3
 
-import subprocess,re,sys,etcdlib,psutil
+import subprocess,re,os,etcdlib,psutil
 import time,threading,json,traceback,platform
 
 from log import logger
@@ -12,13 +12,12 @@ class Container_Collector(threading.Thread):
         self.thread_stop = False
         self.host = host
         self.etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (cluster_name))
-        #self.cpu_quota = float(cpu_quota)/100000.0
-        #self.mem_quota = float(mem_quota)*1000000/1024
         self.interval = 2
         self.test = test
         self.cpu_last = {}
         self.cpu_quota = {}
         self.mem_quota = {}
+        self.cores_num = int(subprocess.getoutput("grep processor /proc/cpuinfo | wc -l"))
         return
 
     def list_container(self):
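The new cores_num field shells out to count processor lines in /proc/cpuinfo and is used below as the cpu quota for unlimited (-1) containers. A pure-Python equivalent, offered only as a sketch of an alternative (the fallback to 1 is an assumption for environments where the count is unavailable):

    import os

    # Same value as: int(subprocess.getoutput("grep processor /proc/cpuinfo | wc -l"))
    # but without spawning a shell; os.cpu_count() may return None, hence the fallback.
    cores_num = os.cpu_count() or 1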
@@ -52,16 +51,25 @@ class Container_Collector(threading.Thread):
         cpu_val = cpu_parts[0].strip()
         cpu_unit = cpu_parts[1].strip()
         if not container_name in self.cpu_last.keys():
-            [ret, ans] = self.etcdser.getkey('/vnodes/%s/quota'%(container_name))
-            if ret == True :
-                res = dict(eval(ans))
-                self.cpu_quota[container_name] = res['cpu']
-                self.mem_quota[container_name] = res['memory']
-                self.cpu_last[container_name] = 0
-            else:
-                logger.warning(ans)
-                self.cpu_quota[container_name] = 1
-                self.mem_quota[container_name] = 2000*1000000/1024
+            confpath = "/var/lib/lxc/%s/config"%(container_name)
+            if os.path.exists(confpath):
+                confile = open(confpath,'r')
+                res = confile.read()
+                lines = re.split('\n',res)
+                for line in lines:
+                    words = re.split('=',line)
+                    key = words[0].strip()
+                    if key == "lxc.cgroup.memory.limit_in_bytes":
+                        self.mem_quota[container_name] = float(words[1].strip().strip("M"))*1000000/1024
+                    elif key == "lxc.cgroup.cpu.cfs_quota_us":
+                        tmp = int(words[1].strip())
+                        if tmp == -1:
+                            self.cpu_quota[container_name] = self.cores_num
+                        else:
+                            self.cpu_quota[container_name] = tmp/100000.0
+            else:
+                logger.error("Can't find config file %s"%(confpath))
+                return False
             self.cpu_last[container_name] = 0
         cpu_use = {}
         cpu_use['val'] = cpu_val
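Unrolled from the hunk above, the collector's new lookup amounts to the following standalone function. This is a sketch under the commit's own assumptions (LXC configs live at /var/lib/lxc/<name>/config, memory limits carry an M suffix, the cpu period is 100000 us); the read_lxc_quota name and the returned dict are illustrative, not Docklet API:

    import os
    import re

    def read_lxc_quota(container_name, cores_num):
        # Sketch of the collector's new lookup: parse the container's LXC
        # config instead of reading a quota key from etcd.
        confpath = "/var/lib/lxc/%s/config" % container_name
        if not os.path.exists(confpath):
            return None  # the committed code logs an error and returns False here
        cpu_quota = cores_num   # default when no cfs_quota_us line is present
        mem_quota = None
        with open(confpath) as confile:
            for line in confile:
                words = re.split('=', line)
                if len(words) < 2:
                    continue  # guard the committed code omits; skips lines without '='
                key = words[0].strip()
                if key == "lxc.cgroup.memory.limit_in_bytes":
                    # e.g. "2048M" -> 2048.0 -> the project's KiB-style unit
                    mem_quota = float(words[1].strip().strip("M")) * 1000000 / 1024
                elif key == "lxc.cgroup.cpu.cfs_quota_us":
                    tmp = int(words[1].strip())
                    # cgroups uses -1 for "unlimited"; mapping it to the host's
                    # core count is the actual fix for the minus quota bug
                    cpu_quota = cores_num if tmp == -1 else tmp / 100000.0
        return {'cpu': cpu_quota, 'memory': mem_quota}

The len(words) guard is an addition the committed code omits; without it a malformed line whose key matches but carries no value would raise IndexError. The key fix either way is the -1 branch: cgroups writes cfs_quota_us = -1 for "unlimited", and before this commit that negative value flowed straight into the quota, which is the "minus quota bug" of the title.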