Fix quota bug. Now quota will be read from groupinfo.

commit 8e091a5dfc
parent 990721be17
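What the change does: when a cluster is created or scaled out, the per-user quota is now computed from the user's groupinfo and written to etcd under each container's quota key; the monitor on the worker then reads that key back per container instead of using one node-wide cpu_quota/mem_quota. A minimal sketch of the arithmetic, assuming groupinfo stores cpu as a cfs_quota_us-style value (100000 = one full core) and memory in MB (both are assumptions, not stated in the commit); the numbers are illustrative only:

    # illustrative values, not from the commit
    groupinfo = {'cpu': '200000', 'memory': '2000'}
    quota = {}
    quota['cpu'] = float(groupinfo['cpu'])/100000.0              # 200000/100000 -> 2.0 cores
    quota['memory'] = float(groupinfo['memory'])*1000000/1024    # 2000 MB -> ~1953125 KiB
    # written by the handlers below to /<clustername>/monitor/vnodes/<containername>/quota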
@@ -161,6 +161,13 @@ class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
                 logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name']))
                 [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info)
                 if status:
+                    user_info = G_usermgr.selfQuery(cur_user = cur_user)
+                    quota = {}
+                    quota['cpu'] = float(user_info['data']['groupinfo']['cpu'])/100000.0
+                    quota['memory'] = float(user_info['data']['groupinfo']['memory'])*1000000/1024
+                    etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (G_clustername))
+                    for con in result['containers']:
+                        etcdser.setkey('/vnodes/%s/quota'%(con['containername']), quota)
                     self.response(200, {'success':'true', 'action':'create cluster', 'message':result})
                 else:
                     self.response(200, {'success':'false', 'action':'create cluster', 'message':result})
@@ -177,6 +184,13 @@ class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
                 user_info = json.dumps(user_info)
                 [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info)
                 if status:
+                    user_info = G_usermgr.selfQuery(cur_user = cur_user)
+                    quota = {}
+                    quota['cpu'] = float(user_info['data']['groupinfo']['cpu'])/100000.0
+                    quota['memory'] = float(user_info['data']['groupinfo']['memory'])*1000000/1024
+                    etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (G_clustername))
+                    for con in result['containers']:
+                        etcdser.setkey('/vnodes/%s/quota'%(con['containername']), quota)
                     self.response(200, {'success':'true', 'action':'scale out', 'message':result})
                 else:
                     self.response(200, {'success':'false', 'action':'scale out', 'message':result})
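The same seven added lines appear in both the create-cluster and the scale-out handler above. If one wanted to avoid the duplication, a small helper along these lines would do; write_quota is hypothetical and not part of the commit, and it assumes the same module-level etcdaddr and G_clustername that the handlers already use:

    def write_quota(user_info, containers):
        # hypothetical helper mirroring the duplicated block above
        quota = {}
        quota['cpu'] = float(user_info['data']['groupinfo']['cpu'])/100000.0
        quota['memory'] = float(user_info['data']['groupinfo']['memory'])*1000000/1024
        etcdser = etcdlib.Client(etcdaddr, "/%s/monitor" % (G_clustername))
        for con in containers:
            etcdser.setkey('/vnodes/%s/quota' % (con['containername']), quota)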
@@ -7,17 +7,18 @@ from log import logger

 class Container_Collector(threading.Thread):

-    def __init__(self,etcdaddr,cluster_name,host,cpu_quota,mem_quota,test=False):
+    def __init__(self,etcdaddr,cluster_name,host,test=False):
         threading.Thread.__init__(self)
         self.thread_stop = False
         self.host = host
         self.etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (cluster_name))
-        self.etcdser.setkey('/vnodes/cpu_quota', cpu_quota)
-        self.etcdser.setkey('/vnodes/mem_quota', mem_quota)
-        self.cpu_quota = float(cpu_quota)/100000.0
-        self.mem_quota = float(mem_quota)*1000000/1024
+        #self.cpu_quota = float(cpu_quota)/100000.0
+        #self.mem_quota = float(mem_quota)*1000000/1024
         self.interval = 2
         self.test = test
+        self.cpu_last = {}
+        self.cpu_quota = {}
+        self.mem_quota = {}
         return

     def list_container(self):
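In the collector, cpu_quota, mem_quota and cpu_last are no longer scalars taken from the constructor but dicts keyed by container name, filled lazily the first time a container is seen (next hunk). Illustrative state after two containers have been sampled (names and values made up for the example):

    self.cpu_quota = {'vc1-0': 2.0, 'vc1-1': 1.0}                  # cores
    self.mem_quota = {'vc1-0': 1953125.0, 'vc1-1': 976562.5}       # KiB
    self.cpu_last  = {'vc1-0': 153.42, 'vc1-1': 87.10}             # last sampled CPU-use value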
@@ -46,22 +47,32 @@ class Container_Collector(threading.Thread):
         basic_info['PID'] = info['PID']
         basic_info['IP'] = info['IP']
         self.etcdser.setkey('/vnodes/%s/basic_info'%(container_name), basic_info)

         cpu_parts = re.split(' +',info['CPU use'])
         cpu_val = cpu_parts[0].strip()
         cpu_unit = cpu_parts[1].strip()
-        res = self.etcdser.getkey('/vnodes/%s/cpu_use'%(container_name))
-        cpu_last = 0
-        if res[0] == True:
-            last_use = dict(eval(res[1]))
-            cpu_last = float(last_use['val'])
+        if not container_name in self.cpu_last.keys():
+            [ret, ans] = self.etcdser.getkey('/vnodes/%s/quota'%(container_name))
+            if ret == True :
+                res = dict(eval(ans))
+                self.cpu_quota[container_name] = res['cpu']
+                self.mem_quota[container_name] = res['memory']
+                self.cpu_last[container_name] = 0
+            else:
+                logger.warning(ans)
+                self.cpu_quota[container_name] = 1
+                self.mem_quota[container_name] = 2000*1000000/1024
+                self.cpu_last[container_name] = 0
         cpu_use = {}
         cpu_use['val'] = cpu_val
         cpu_use['unit'] = cpu_unit
-        cpu_usedp = (float(cpu_val)-float(cpu_last))/(self.cpu_quota*self.interval*1.3)
-        if(cpu_usedp > 1):
+        cpu_usedp = (float(cpu_val)-float(self.cpu_last[container_name]))/(self.cpu_quota[container_name]*self.interval*1.3)
+        if(cpu_usedp > 1 or cpu_usedp < 0):
             cpu_usedp = 1
         cpu_use['usedp'] = cpu_usedp
+        self.cpu_last[container_name] = cpu_val;
         self.etcdser.setkey('vnodes/%s/cpu_use'%(container_name), cpu_use)

         mem_parts = re.split(' +',info['Memory use'])
         mem_val = mem_parts[0].strip()
         mem_unit = mem_parts[1].strip()
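cpu_usedp is the growth of the sampled CPU-use counter over one interval, divided by what the container could have consumed at its quota; the 1.3 reads as a headroom/fudge factor (not explained in the commit), and the result is clamped into [0, 1], which also covers counter resets and the oversized first delta when cpu_last starts at 0. A worked example with illustrative numbers:

    # illustrative numbers only
    cpu_val, cpu_last = 103.9, 100.0    # sampled CPU-use counter
    quota, interval   = 2.0, 2          # 2 cores, 2-second sampling interval
    cpu_usedp = (cpu_val - cpu_last) / (quota * interval * 1.3)   # 3.9 / 5.2 = 0.75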
@@ -70,7 +81,9 @@
         mem_use['unit'] = mem_unit
         if(mem_unit == "MiB"):
             mem_val = float(mem_val) * 1024
-        mem_usedp = float(mem_val) / self.mem_quota
+        elif (mem_unit == "GiB"):
+            mem_val = float(mem_val) * 1024 * 1024
+        mem_usedp = float(mem_val) / self.mem_quota[container_name]
         mem_use['usedp'] = mem_usedp
         self.etcdser.setkey('/vnodes/%s/mem_use'%(container_name), mem_use)
         #print(output)
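Memory use is normalized to KiB before being divided by the per-container quota, which is also kept in KiB (see the conversion at the top), and GiB readings are now handled too. For example (illustrative numbers): 1.5 GiB -> 1.5*1024*1024 = 1572864 KiB; against a quota of 1953125 KiB that gives usedp ≈ 0.81.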
@@ -220,7 +233,6 @@ class Container_Fetcher:
         [ret, ans] = self.etcdser.getkey('/%s/cpu_use'%(container_name))
         if ret == True :
             res = dict(eval(ans))
-            res['quota'] = self.etcdser.getkey('/cpu_quota')[1]
             return res
         else:
             logger.warning(ans)
@@ -231,7 +243,6 @@
         [ret, ans] = self.etcdser.getkey('/%s/mem_use'%(container_name))
         if ret == True :
             res = dict(eval(ans))
-            res['quota'] = self.etcdser.getkey('/mem_quota')[1]
             return res
         else:
             logger.warning(ans)
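Because the node-wide /cpu_quota and /mem_quota keys are no longer written, the two fetchers above stop attaching a 'quota' field to the usage records. A caller that still needs the limit would have to read the per-container key written by the HTTP handlers; roughly (a sketch, assuming the fetcher's etcd client is rooted at the same prefix as its cpu_use/mem_use reads):

    [ret, ans] = self.etcdser.getkey('/%s/quota' % (container_name))   # sketch only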
@@ -192,7 +192,7 @@ if __name__ == '__main__':
     logger.info ("using WORKER_PORT %s" % worker_port )

     con_collector = monitor.Container_Collector(etcdaddr, clustername,
-            ipaddr, cpu_quota, mem_quota)
+            ipaddr)
     con_collector.start()
     logger.info("CPU and Memory usage monitor started")
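Matching call-site change in the worker: the collector is now constructed without quota arguments, in line with the new __init__ signature, so the usage is simply:

    con_collector = monitor.Container_Collector(etcdaddr, clustername, ipaddr)
    con_collector.start()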