Merge remote-tracking branch 'upstream/master'

zhongyehong committed on 2018-05-20 16:08:00 +08:00
commit 37c9b13e28
4 changed files with 137 additions and 57 deletions
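Summary: per-node billing history and port-mapping bookkeeping move out of the per-user JSON files under FS_PREFIX/global/users/<user>/clusters/ and into the system database. The diff adds a BillingHistory model with a matching relationship on VCluster, turns the worker helpers count_port_mapping / save_billing_history / get_billing_history into plain database queries, removes a cluster's billing rows when the cluster itself is deleted, and adds a one-off migration script, tools/upgrade_file2db.py, that imports the existing JSON state (vclusters, containers, port mappings, billing history, images) into the database.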


@@ -310,12 +310,32 @@ class PortMapping(db.Model):
     def __init__(self, node_name, node_ip, node_port, host_port):
         self.node_name = node_name
         self.node_ip = node_ip
-        self.node_port = node_port
-        self.host_port = host_port
+        self.node_port = int(node_port)
+        self.host_port = int(host_port)

     def __repr__(self):
         return "{\"id\":\"%d\", \"node_name\":\"%s\", \"node_ip\": \"%s\", \"node_port\":\"%s\", \"host_port\":\"%s\"}" % (self.id, self.node_name, self.node_ip, self.node_port, self.host_port)

+class BillingHistory(db.Model):
+    __bind_key__ = 'system'
+    node_name = db.Column(db.String(100), primary_key=True)
+    vclusterid = db.Column(db.Integer, db.ForeignKey('v_cluster.clusterid'))
+    cpu = db.Column(db.Float)
+    mem = db.Column(db.Float)
+    disk = db.Column(db.Float)
+    port = db.Column(db.Float)
+
+    def __init__(self, node_name, cpu, mem, disk, port):
+        self.node_name = node_name
+        self.cpu = cpu
+        self.mem = mem
+        self.disk = disk
+        self.port = port
+
+    def __repr__(self):
+        return "{\"node_name\":\"%s\", \"cpu\": %f, \"mem\": %f, \"disk\": %f, \"port\": %f}" % (self.node_name, self.cpu, self.mem, self.disk, self.port)
+
 class VCluster(db.Model):
     __bind_key__ = 'system'
     clusterid = db.Column(db.BigInteger, primary_key=True, autoincrement=False)
@@ -330,6 +350,7 @@ class VCluster(db.Model):
     proxy_server_ip = db.Column(db.String(20))
     proxy_public_ip = db.Column(db.String(20))
     port_mapping = db.relationship('PortMapping', backref='v_cluster', lazy='dynamic')
+    billing_history = db.relationship('BillingHistory', backref='v_cluster', lazy='dynamic')

     def __init__(self, clusterid, clustername, ownername, status, size, nextcid, proxy_server_ip, proxy_public_ip):
         self.clusterid = clusterid
@@ -342,6 +363,7 @@ class VCluster(db.Model):
         self.proxy_public_ip = proxy_public_ip
         self.containers = []
         self.port_mapping = []
+        self.billing_history = []
         self.create_time = datetime.now()
         self.start_time = "------"
@@ -359,6 +381,7 @@ class VCluster(db.Model):
         info["start_time"] = self.start_time
         info["containers"] = [dict(eval(str(con))) for con in self.containers]
         info["port_mapping"] = [dict(eval(str(pm))) for pm in self.port_mapping]
+        info["billing_history"] = [dict(eval(str(bh))) for bh in self.billing_history]
         #return "{\"clusterid\":\"%d\", \"clustername\":\"%s\", \"ownername\": \"%s\", \"status\":\"%s\", \"size\":\"%d\", \"proxy_server_ip\":\"%s\", \"create_time\":\"%s\"}" % (self.clusterid, self.clustername, self.ownername, self.status, self.size, self.proxy_server_ip, self.create_time.strftime("%Y-%m-%d %H:%M:%S"))
         return json.dumps(info)
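These models serialize themselves through __repr__: each emits a JSON-style dict literal, which the VCluster serialization method above turns back into a dict via dict(eval(str(obj))). A standalone sketch of that round-trip, using an illustrative stand-in class and made-up values (not part of the commit):

import json

class DemoBillingHistory:
    # Mirrors the fields and the __repr__ format of BillingHistory above.
    def __init__(self, node_name, cpu, mem, disk, port):
        self.node_name = node_name
        self.cpu = cpu
        self.mem = mem
        self.disk = disk
        self.port = port

    def __repr__(self):
        return "{\"node_name\":\"%s\", \"cpu\": %f, \"mem\": %f, \"disk\": %f, \"port\": %f}" % (self.node_name, self.cpu, self.mem, self.disk, self.port)

bh = DemoBillingHistory("demo-node-0", 1.5, 512.0, 10.0, 1.0)
info = dict(eval(str(bh)))   # parse the dict literal produced by __repr__
print(json.dumps(info))      # the per-node shape embedded in the cluster info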


@@ -24,7 +24,7 @@ import lxc
 import xmlrpc.client
 from datetime import datetime
-from model import db,VNode,History
+from model import db,VNode,History,BillingHistory,VCluster,PortMapping
 from log import logger
 from httplib2 import Http
 from urllib.parse import urlencode
@@ -594,59 +594,33 @@ def get_cluster(container_name):
     return names[1]

 def count_port_mapping(vnode_name):
-    clusters_dir = env.getenv("FS_PREFIX")+"/global/users/"+get_owner(vnode_name)+"/clusters/"
-    if not os.path.exists(clusters_dir):
-        return 0
-    clusters = os.listdir(clusters_dir)
-    ports_count = 0
-    for cluster in clusters:
-        clusterpath = clusters_dir + cluster
-        if not os.path.isfile(clusterpath):
-            continue
-        infofile = open(clusterpath, 'r')
-        info = json.loads(infofile.read())
-        infofile.close()
-        ports_count += len([mapping for mapping in info['port_mapping'] if mapping['node_name'] == vnode_name])
-    return ports_count
+    pms = PortMapping.query.filter_by(node_name=vnode_name).all()
+    return len(pms)

 def save_billing_history(vnode_name, billing_history):
-    clusters_dir = env.getenv("FS_PREFIX")+"/global/users/"+get_owner(vnode_name)+"/clusters/"
-    if not os.path.exists(clusters_dir):
-        return
-    clusters = os.listdir(clusters_dir)
     vnode_cluster_id = get_cluster(vnode_name)
-    for cluster in clusters:
-        clusterpath = clusters_dir + cluster
-        if not os.path.isfile(clusterpath):
-            continue
-        infofile = open(clusterpath, 'r')
-        info = json.loads(infofile.read())
-        infofile.close()
-        if vnode_cluster_id != str(info['clusterid']):
-            continue
-        if 'billing_history' not in info:
-            info['billing_history'] = {}
-        info['billing_history'][vnode_name] = billing_history
-        infofile = open(clusterpath, 'w')
-        infofile.write(json.dumps(info))
-        infofile.close()
-        break
+    try:
+        vcluster = VCluster.query.get(int(vnode_cluster_id))
+        billinghistory = BillingHistory.query.get(vnode_name)
+        if billinghistory is not None:
+            billinghistory.cpu = billing_history["cpu"]
+            billinghistory.mem = billing_history["mem"]
+            billinghistory.disk = billing_history["disk"]
+            billinghistory.port = billing_history["port"]
+        else:
+            billinghistory = BillingHistory(vnode_name,billing_history["cpu"],billing_history["mem"],billing_history["disk"],billing_history["port"])
+            vcluster.billing_history.append(billinghistory)
+        db.session.add(vcluster)
+        db.session.commit()
+    except Exception as err:
+        logger.error(traceback.format_exc())
+    return

 def get_billing_history(vnode_name):
-    clusters_dir = env.getenv("FS_PREFIX")+"/global/users/"+get_owner(vnode_name)+"/clusters/"
-    if os.path.exists(clusters_dir):
-        clusters = os.listdir(clusters_dir)
-        for cluster in clusters:
-            clusterpath = clusters_dir + cluster
-            if not os.path.isfile(clusterpath):
-                continue
-            infofile = open(clusterpath, 'r')
-            info = json.loads(infofile.read())
-            infofile.close()
-            if 'billing_history' not in info or vnode_name not in info['billing_history']:
-                continue
-            return info['billing_history'][vnode_name]
+    billinghistory = BillingHistory.query.get(vnode_name)
+    if billinghistory is not None:
+        return dict(eval(str(billinghistory)))
+    else:
+        default = {}
+        default['cpu'] = 0
+        default['mem'] = 0
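With the helpers now DB-backed, a billing update is a straight upsert and a read is a primary-key lookup. A hypothetical call, with a made-up node name and values (field names as in the diff):

usage = {"cpu": 2.25, "mem": 1024.0, "disk": 16.0, "port": 1.0}
save_billing_history("demo-node-0", usage)   # updates the existing row, or creates one linked to the vcluster
print(get_billing_history("demo-node-0"))    # -> {'node_name': 'demo-node-0', 'cpu': 2.25, 'mem': 1024.0, ...}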


@@ -296,6 +296,8 @@ class VclusterMgr(object):
             return [False, 'Port mapping quota exceed.']
         [status, clusterinfo] = self.get_clusterinfo(clustername, username)
+        if clusterinfo['status'] == 'stopped':
+            return [False, 'Please start the clusters first.']
         host_port = 0
         if self.distributedgw == 'True':
             worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
@@ -370,7 +372,6 @@ class VclusterMgr(object):
                if not success:
                    return [False,msg]
                db.session.delete(item)
-               print("HHH")
                break
        else:
            return [False,"No port mapping."]
@@ -461,6 +462,8 @@ class VclusterMgr(object):
         self.networkmgr.release_userips(username, ips)
         self.networkmgr.printpools()
         #os.remove(self.fspath+"/global/users/"+username+"/clusters/"+clustername)
+        for bh in vcluster.billing_history:
+            db.session.delete(bh)
         db.session.delete(vcluster)
         db.session.commit()
         os.remove(self.fspath+"/global/users/"+username+"/hosts/"+str(vcluster.clusterid)+".hosts")
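The explicit loop over vcluster.billing_history is needed because the relationship declares no delete cascade, so deleting only the VCluster row would strand its BillingHistory rows. A sketch of the alternative (not what this commit does): declare the cascade on the model and drop the loop.

billing_history = db.relationship('BillingHistory', backref='v_cluster',
                                  lazy='dynamic', cascade='all, delete-orphan')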

tools/upgrade_file2db.py (new file, 80 lines)

@@ -0,0 +1,80 @@
import sys
sys.path.append("../src/")
import os,json
from datetime import datetime
from model import db, VCluster, Container, PortMapping, Image, BillingHistory

timeFormat = "%Y-%m-%d %H:%M:%S"
dockletPath = "/opt/docklet/global"
usersdir = dockletPath + "/users/"

try:
    VCluster.query.all()
except Exception as err:
    print("Create database...")
    db.create_all()

print("Update vcluster...")
for user in os.listdir(usersdir):
    tmppath = usersdir+user+"/clusters/"
    if not os.path.exists(tmppath):
        continue
    print("Update User: "+str(user))
    clusterfiles = os.listdir(tmppath)
    for cluname in clusterfiles:
        cluFile = open(tmppath+cluname,"r")
        cluinfo = json.loads(cluFile.read())
        vcluster = VCluster(cluinfo['clusterid'],cluname,user,cluinfo['status'],cluinfo['size'],cluinfo['nextcid'],cluinfo['proxy_server_ip'],cluinfo['proxy_public_ip'])
        vcluster.create_time = datetime.strptime(cluinfo['create_time'],timeFormat)
        vcluster.start_time = cluinfo['start_time']
        for coninfo in cluinfo['containers']:
            lastsavet = datetime.strptime(coninfo['lastsave'],timeFormat)
            con = Container(coninfo['containername'], coninfo['hostname'], coninfo['ip'], coninfo['host'], coninfo['image'], lastsavet, coninfo['setting'])
            vcluster.containers.append(con)
        for pminfo in cluinfo['port_mapping']:
            pm = PortMapping(pminfo['node_name'], pminfo['node_ip'], int(pminfo['node_port']), int(pminfo['host_port']))
            vcluster.port_mapping.append(pm)
        if "billing_history" in cluinfo.keys():
            for nodename in cluinfo['billing_history'].keys():
                bhinfo = cluinfo['billing_history'][nodename]
                bh = BillingHistory(nodename,bhinfo['cpu'],bhinfo['mem'],bhinfo['disk'],bhinfo['port'])
                vcluster.billing_history.append(bh)
        try:
            db.session.add(vcluster)
            db.session.commit()
        except Exception as err:
            print(err)
        cluFile.close()

print("Update Images...")
for shareStr in ['private/','public/']:
    print("Update "+shareStr+" Images...")
    for user in os.listdir(dockletPath+"/images/"+shareStr):
        print("Update User: "+user)
        tmppath = dockletPath+"/images/"+shareStr+user+"/"
        files = os.listdir(tmppath)
        images = []
        for file in files:
            if file[0] == "." or file[-3] != ".":
                continue
            images.append(file[:-3])
        for img in images:
            infofile = open(tmppath+"."+img+".info","r")
            imginfo = infofile.read().split('\n')
            infofile.close()
            desfile = open(tmppath+"."+img+".description","r")
            desinfo = desfile.read()
            dbimage = Image.query.filter_by(imagename=img,ownername=user).first()
            if dbimage is None:
                dbimage = Image(img,False,False,user,desinfo)
            dbimage.create_time = datetime.strptime(imginfo[0],timeFormat)
            if shareStr == 'public/':
                dbimage.hasPublic = True
            else:
                dbimage.hasPrivate = True
            try:
                db.session.add(dbimage)
                db.session.commit()
            except Exception as err:
                print(err)

print("Finished!")