Merge remote-tracking branch 'upstream/master'
commit 99a8bcb07f
@@ -161,13 +161,6 @@ class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name']))
[status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info)
if status:
user_info = G_usermgr.selfQuery(cur_user = cur_user)
quota = {}
quota['cpu'] = float(user_info['data']['groupinfo']['cpu'])/100000.0
quota['memory'] = float(user_info['data']['groupinfo']['memory'])*1000000/1024
etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (G_clustername))
for con in result['containers']:
etcdser.setkey('/vnodes/%s/quota'%(con['containername']), quota)
self.response(200, {'success':'true', 'action':'create cluster', 'message':result})
else:
self.response(200, {'success':'false', 'action':'create cluster', 'message':result})
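For reference, a minimal sketch of the unit conversions behind the per-container quota seen in this hunk, assuming the group's cpu field is a cgroup cfs_quota_us value in microseconds and its memory field is in MB; the helper name below is illustrative, not part of the code above:

```python
# Illustrative helper, not part of docklet itself.
# Mirrors the conversions shown above: cpu is cfs_quota_us / 100000us period -> cores,
# memory is MB -> KiB (the unit the monitor side works with).
def build_quota(groupinfo):
    return {
        'cpu': float(groupinfo['cpu']) / 100000.0,
        'memory': float(groupinfo['memory']) * 1000000 / 1024,
    }

# e.g. build_quota({'cpu': '200000', 'memory': '2000'})
# -> {'cpu': 2.0, 'memory': 1953125.0}
```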
@@ -184,13 +177,6 @@ class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
user_info = json.dumps(user_info)
[status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info)
if status:
user_info = G_usermgr.selfQuery(cur_user = cur_user)
quota = {}
quota['cpu'] = float(user_info['data']['groupinfo']['cpu'])/100000.0
quota['memory'] = float(user_info['data']['groupinfo']['memory'])*1000000/1024
etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (G_clustername))
for con in result['containers']:
etcdser.setkey('/vnodes/%s/quota'%(con['containername']), quota)
self.response(200, {'success':'true', 'action':'scale out', 'message':result})
else:
self.response(200, {'success':'false', 'action':'scale out', 'message':result})
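The quota in both hunks lives under the cluster's monitor prefix in etcd, and the collector in monitor.py reads the same key back. A rough sketch of that round trip with etcdlib as it is used in this diff (container name and values are invented):

```python
# Illustrative sketch of the key layout used above; 'vc-1-0' and the numbers are made up.
etcdser = etcdlib.Client(etcdaddr, "/%s/monitor" % (G_clustername))

# writer side (httprest): seed the quota for a container
etcdser.setkey('/vnodes/%s/quota' % ('vc-1-0'), {'cpu': 2.0, 'memory': 1953125.0})

# reader side (monitor): fetch it back when the container is first seen
[ret, ans] = etcdser.getkey('/vnodes/%s/quota' % ('vc-1-0'))
if ret:
    quota = dict(eval(ans))   # the stored value comes back as a string
```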
@@ -356,8 +342,7 @@ class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
res['mem_use'] = fetcher.get_mem_use(cmds[2])
elif cmds[3] == 'basic_info':
res['basic_info'] = fetcher.get_basic_info(cmds[2])
user_info = G_usermgr.selfQuery(cur_user = cur_user)
self.response(200, {'success':'true', 'monitor':res, 'groupinfo':user_info['data']['groupinfo']})
self.response(200, {'success':'true', 'monitor':res})
elif cmds[1] == 'user':
if not user == 'root':
self.response(400, {'success':'false', 'message':'Root Required'})
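With groupinfo dropped from the reply, quotas travel inside the monitor payload itself. Based on the handler and fetcher code in this diff, a vnodes cpu_use query would return roughly this shape (the numbers and the unit string are invented for illustration):

```python
# Hypothetical reply for a .../monitor/vnodes/<container>/cpu_use request after this change.
reply = {
    'success': 'true',
    'monitor': {
        'cpu_use': {
            'val': '1.23',          # value recorded by the collector
            'unit': 'seconds',      # unit string as recorded (assumed)
            'usedp': 0.05,          # fraction of the cpu quota in use
            'quota': {'cpu': 2.0, 'memory': 1953125.0},  # merged in by Container_Fetcher
        }
    }
}
```

The frontend then reads data.monitor.cpu_use.quota.cpu, as the JS hunks further down show.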
@@ -1,6 +1,6 @@
#!/usr/bin/python3

import subprocess,re,sys,etcdlib,psutil
import subprocess,re,os,etcdlib,psutil
import time,threading,json,traceback,platform

from log import logger
@@ -12,13 +12,12 @@ class Container_Collector(threading.Thread):
self.thread_stop = False
self.host = host
self.etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (cluster_name))
#self.cpu_quota = float(cpu_quota)/100000.0
#self.mem_quota = float(mem_quota)*1000000/1024
self.interval = 2
self.test = test
self.cpu_last = {}
self.cpu_quota = {}
self.mem_quota = {}
self.cores_num = int(subprocess.getoutput("grep processor /proc/cpuinfo | wc -l"))
return

def list_container(self):
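The core count here is taken by shelling out to grep over /proc/cpuinfo. A dependency-free equivalent on Linux, shown only as an alternative sketch and not what the diff does, would be:

```python
import os

# Logical CPU count, equivalent on Linux to:
#   int(subprocess.getoutput("grep processor /proc/cpuinfo | wc -l"))
# Not part of the diff above.
cores_num = os.cpu_count()
```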
@@ -52,17 +51,28 @@ class Container_Collector(threading.Thread):
cpu_val = cpu_parts[0].strip()
cpu_unit = cpu_parts[1].strip()
if not container_name in self.cpu_last.keys():
[ret, ans] = self.etcdser.getkey('/vnodes/%s/quota'%(container_name))
if ret == True :
res = dict(eval(ans))
self.cpu_quota[container_name] = res['cpu']
self.mem_quota[container_name] = res['memory']
self.cpu_last[container_name] = 0
confpath = "/var/lib/lxc/%s/config"%(container_name)
if os.path.exists(confpath):
confile = open(confpath,'r')
res = confile.read()
lines = re.split('\n',res)
for line in lines:
words = re.split('=',line)
key = words[0].strip()
if key == "lxc.cgroup.memory.limit_in_bytes":
self.mem_quota[container_name] = float(words[1].strip().strip("M"))*1000000/1024
elif key == "lxc.cgroup.cpu.cfs_quota_us":
tmp = int(words[1].strip())
if tmp == -1:
self.cpu_quota[container_name] = self.cores_num
else:
self.cpu_quota[container_name] = tmp/100000.0
quota = {'cpu':self.cpu_quota[container_name],'memory':self.mem_quota[container_name]}
self.etcdser.setkey('/vnodes/%s/quota'%(container_name),quota)
else:
logger.warning(ans)
self.cpu_quota[container_name] = 1
self.mem_quota[container_name] = 2000*1000000/1024
self.cpu_last[container_name] = 0
logger.error("Can't find config file %s"%(confpath))
return False
self.cpu_last[container_name] = 0
cpu_use = {}
cpu_use['val'] = cpu_val
cpu_use['unit'] = cpu_unit
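The new branch in this hunk derives the quota from the container's LXC config instead of relying on a value pre-seeded in etcd. A condensed, standalone sketch of that parsing step; the function name and the fallback behaviour are illustrative, but the keys and conversions mirror the code above:

```python
import os, re

def read_lxc_quota(container_name, cores_num):
    """Parse /var/lib/lxc/<name>/config for cpu/memory limits.

    Illustrative helper mirroring the collector code above:
    memory is converted from MB to KiB, cpu from cfs_quota_us to cores.
    Returns None if the config file is missing.
    """
    confpath = "/var/lib/lxc/%s/config" % (container_name)
    if not os.path.exists(confpath):
        return None
    cpu_quota, mem_quota = cores_num, None
    with open(confpath) as confile:
        for line in re.split('\n', confile.read()):
            words = re.split('=', line)
            key = words[0].strip()
            if key == "lxc.cgroup.memory.limit_in_bytes":
                # e.g. "2000M" -> 2000 MB -> KiB
                mem_quota = float(words[1].strip().strip("M")) * 1000000 / 1024
            elif key == "lxc.cgroup.cpu.cfs_quota_us":
                tmp = int(words[1].strip())
                # -1 means "no limit": fall back to the host's core count
                cpu_quota = cores_num if tmp == -1 else tmp / 100000.0
    return {'cpu': cpu_quota, 'memory': mem_quota}
```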
@@ -233,6 +243,11 @@ class Container_Fetcher:
[ret, ans] = self.etcdser.getkey('/%s/cpu_use'%(container_name))
if ret == True :
res = dict(eval(ans))
[ret,quota] = self.etcdser.getkey('/%s/quota'%(container_name))
if ret == False:
res['quota'] = {'cpu':0}
logger.warning(quota)
res['quota'] = dict(eval(quota))
return res
else:
logger.warning(ans)
@@ -243,6 +258,11 @@ class Container_Fetcher:
[ret, ans] = self.etcdser.getkey('/%s/mem_use'%(container_name))
if ret == True :
res = dict(eval(ans))
[ret,quota] = self.etcdser.getkey('/%s/quota'%(container_name))
if ret == False:
res['quota'] = {'memory':0}
logger.warning(quota)
res['quota'] = dict(eval(quota))
return res
else:
logger.warning(ans)
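Values come back from etcd as strings and are rehydrated with dict(eval(...)) in both fetchers. For data that is always a plain dict literal, ast.literal_eval is a safer drop-in; this is only a sketch of the alternative, not what the code above uses:

```python
import ast

# Safer alternative to dict(eval(ans)) for values that are plain Python dict
# literals, e.g. "{'cpu': 2.0, 'memory': 1953125.0}"; it refuses to evaluate
# arbitrary expressions. Not part of the diff above.
res = ast.literal_eval("{'cpu': 2.0, 'memory': 1953125.0}")
```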
@@ -7,9 +7,9 @@ function processMemData(data)
mem_usedp = data.monitor.mem_use.usedp;
var usedp = data.monitor.mem_use.usedp;
var unit = data.monitor.mem_use.unit;
var quota = data.groupinfo.memory;
var quota = data.monitor.mem_use.quota.memory/1024.0;
var val = data.monitor.mem_use.val;
var out = "("+val+unit+"/"+quota+"MB)";
var out = "("+val+unit+"/"+quota.toFixed(2)+"MiB)";
$("#con_mem").html((usedp/0.01).toFixed(2)+"%<br/>"+out);
}
function getMemY()
@@ -21,10 +21,9 @@ function processCpuData(data)
cpu_usedp = data.monitor.cpu_use.usedp;
var val = data.monitor.cpu_use.val;
var unit = data.monitor.cpu_use.unit;
var quota = data.groupinfo.cpu;
quota = quota/1000.0;
var quota = data.monitor.cpu_use.quota.cpu;
$("#con_cpu").html(val +" "+ unit);
$("#con_cpuquota").html(quota.toFixed(2)+"% Cores");
$("#con_cpuquota").html(quota + " Cores");
}
function getCpuY()
{
@@ -106,18 +106,17 @@

$.post(url+"/cpu_use",{},function(data){
var usedp = data.monitor.cpu_use.usedp;
var quota = data.groupinfo.cpu;
quota = quota/100000.0;
var quota = data.monitor.cpu_use.quota.cpu;
$("#"+index+"_cpu").html((usedp/0.01).toFixed(2)+"%");
$("#"+index+"_cpuquota").html((quota*100).toFixed(2)+"% Cores");
$("#"+index+"_cpuquota").html(quota+" Cores");
},"json");

$.post(url+"/mem_use",{},function(data){
var usedp = data.monitor.mem_use.usedp;
var unit = data.monitor.mem_use.unit;
var quota = data.groupinfo.memory;
var quota = data.monitor.mem_use.quota.memory/1024.0;
var val = data.monitor.mem_use.val;
var out = "("+val+unit+"/"+quota+"MB)";
var out = "("+val+unit+"/"+quota.toFixed(2)+"MiB)";
$("#"+index+"_mem").html((usedp/0.01).toFixed(2)+"%<br/>"+out);
},"json");