Change the proxy URL so that users can switch the distributed gateway to a non-distributed one.
This commit is contained in:
parent
fe9fb6dee9
commit
38854367ed
|
@ -9,7 +9,10 @@ server
|
|||
access_log off;
|
||||
}
|
||||
|
||||
location ~ ^/(\d+\.\d+\.\d+\.\d+)/ {
|
||||
location ~ ^/(\d+\.\d+\.\d+\.\d+)/(.+)$ {
|
||||
set $proxy_server $1;
|
||||
set $other $2;
|
||||
rewrite ^(.*)$ /$other break;
|
||||
proxy_pass http://$1:%PROXY_PORT;
|
||||
proxy_set_header Host $host:$server_port;
|
||||
proxy_http_version 1.1;
|
||||
|
|
|
@ -23,31 +23,31 @@ class Container(object):
|
|||
self.imgmgr = imagemgr.ImageMgr()
|
||||
self.historymgr = History_Manager()
|
||||
|
||||
def create_container(self, lxc_name, proxy_server_ip, username, setting, clustername, clusterid, containerid, hostname, ip, gateway, vlanid, image):
|
||||
def create_container(self, lxc_name, username, setting, clustername, clusterid, containerid, hostname, ip, gateway, vlanid, image):
|
||||
logger.info("create container %s of %s for %s" %(lxc_name, clustername, username))
|
||||
try:
|
||||
setting = json.loads(setting)
|
||||
cpu = int(setting['cpu']) * 100000
|
||||
memory = setting["memory"]
|
||||
disk = setting["disk"]
|
||||
image = json.loads(image)
|
||||
image = json.loads(image)
|
||||
status = self.imgmgr.prepareFS(username,image,lxc_name,disk)
|
||||
if not status:
|
||||
return [False, "Create container failed when preparing filesystem, possibly insufficient space"]
|
||||
|
||||
|
||||
#Ret = subprocess.run([self.libpath+"/lxc_control.sh",
|
||||
# "create", lxc_name, username, str(clusterid), hostname,
|
||||
# ip, gateway, str(vlanid), str(cpu), str(memory)], stdout=subprocess.PIPE,
|
||||
# stderr=subprocess.STDOUT,shell=False, check=True)
|
||||
|
||||
|
||||
rootfs = "/var/lib/lxc/%s/rootfs" % lxc_name
|
||||
|
||||
|
||||
if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)):
|
||||
logger.error("user %s directory not found" % username)
|
||||
return [False, "user directory not found"]
|
||||
sys_run("mkdir -p /var/lib/lxc/%s" % lxc_name)
|
||||
logger.info("generate config file for %s" % lxc_name)
|
||||
|
||||
|
||||
def config_prepare(content):
|
||||
content = content.replace("%ROOTFS%",rootfs)
|
||||
content = content.replace("%HOSTNAME%",hostname)
|
||||
|
@ -97,7 +97,7 @@ class Container(object):
|
|||
else:
|
||||
logger.error ("get AUTH COOKIE URL failed for jupyter")
|
||||
authurl = "error"
|
||||
|
||||
|
||||
cookiename='docklet-jupyter-cookie'
|
||||
|
||||
rundir = self.lxcpath+'/'+lxc_name+'/rootfs' + self.rundir
|
||||
|
@ -120,7 +120,7 @@ BASE_URL=%s
|
|||
HUB_PREFIX=%s
|
||||
HUB_API_URL=%s
|
||||
IP=%s
|
||||
""" % (username, 10000, cookiename, '/'+proxy_server_ip+'/go/'+username+'/'+clustername, '/jupyter',
|
||||
""" % (username, 10000, cookiename, '/go/'+username+'/'+clustername, '/jupyter',
|
||||
authurl, ip.split('/')[0])
|
||||
config.write(jconfigs)
|
||||
config.close()
|
||||
|
@ -177,7 +177,7 @@ IP=%s
|
|||
return [False, "start container failed"]
|
||||
|
||||
# start container services
|
||||
# for the master node, jupyter must be started,
|
||||
# for the master node, jupyter must be started,
|
||||
# for other node, ssh must be started.
|
||||
# container must be RUNNING before calling this service
|
||||
def start_services(self, lxc_name, services=[]):
|
||||
|
@ -189,7 +189,7 @@ IP=%s
|
|||
#shell=True, check=False)
|
||||
#logger.debug ("prepare nfs for %s: %s" % (lxc_name,
|
||||
#Ret.stdout.decode('utf-8')))
|
||||
# not sure whether should execute this
|
||||
# not sure whether should execute this
|
||||
Ret = subprocess.run(["lxc-attach -n %s -- service ssh start" % lxc_name],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||
shell=True, check=False)
|
||||
|
@ -356,7 +356,7 @@ IP=%s
|
|||
|
||||
def create_image(self,username,imagename,containername,description="not thing",imagenum=10):
|
||||
return self.imgmgr.createImage(username,imagename,containername,description,imagenum)
|
||||
|
||||
|
||||
def update_basefs(self,imagename):
|
||||
return self.imgmgr.update_basefs(imagename)
|
||||
|
||||
|
|
|
@ -523,11 +523,16 @@ class NetworkMgr(object):
|
|||
del self.users[username]
|
||||
return [True, 'delete user success']
|
||||
|
||||
def check_usergw(self, username, nodemgr):
|
||||
self.load_usrgw(username)
|
||||
if username not in self.usrgws.keys():
|
||||
return [False, 'user does not exist.']
|
||||
ip = self.usrgws[username]
|
||||
def check_usergw(self, username, nodemgr, distributedgw=False):
|
||||
if not distributedgw:
|
||||
self.usrgws[username] = self.masterip
|
||||
self.dump_usrgw(username)
|
||||
ip = self.masterip
|
||||
else:
|
||||
self.load_usrgw(username)
|
||||
if username not in self.usrgws.keys():
|
||||
return [False, 'user does not exist.']
|
||||
ip = self.usrgws[username]
|
||||
self.load_user(username)
|
||||
if ip == self.masterip:
|
||||
netcontrol.check_gw('docklet-br', username, self.users[username].get_gateway_cidr(), str(self.users[username].vlanid))
|
||||
|
|
|
@ -138,7 +138,6 @@ class VclusterMgr(object):
|
|||
clusterfile = open(clusterpath, 'w')
|
||||
proxy_url = env.getenv("PORTAL_URL") +"/"+ proxy_server_ip +"/_web/" + username + "/" + clustername
|
||||
info = {'clusterid':clusterid, 'status':'stopped', 'size':clustersize, 'containers':containers, 'nextcid': clustersize, 'create_time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'start_time':"------" , 'proxy_url':proxy_url, 'proxy_server_ip':proxy_server_ip}
|
||||
info['proxy_server_port'] = env.getenv("PROXY_PORT")
|
||||
clusterfile.write(json.dumps(info))
|
||||
clusterfile.close()
|
||||
return [True, info]
|
||||
|
@ -194,9 +193,9 @@ class VclusterMgr(object):
|
|||
clusterinfo['proxy_ip'] = ip + ":" + port
|
||||
if not clusterinfo['proxy_server_ip'] == self.addr:
|
||||
worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
|
||||
worker.set_route("/"+ clusterinfo['proxy_server_ip'] +"/_web/" + username + "/" + clustername, target)
|
||||
worker.set_route("/_web/" + username + "/" + clustername, target)
|
||||
else:
|
||||
proxytool.set_route("/"+ clusterinfo['proxy_server_ip'] + "/_web/" + username + "/" + clustername, target)
|
||||
proxytool.set_route("/_web/" + username + "/" + clustername, target)
|
||||
clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
|
||||
clusterfile.write(json.dumps(clusterinfo))
|
||||
clusterfile.close()
|
||||
|
@ -209,9 +208,9 @@ class VclusterMgr(object):
|
|||
clusterinfo.pop('proxy_ip')
|
||||
if not clusterinfo['proxy_server_ip'] == self.addr:
|
||||
worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
|
||||
worker.delete_route("/"+ clusterinfo['proxy_server_ip'] + "/_web/" + username + "/" + clustername)
|
||||
worker.delete_route("/_web/" + username + "/" + clustername)
|
||||
else:
|
||||
proxytool.delete_route("/"+ clusterinfo['proxy_server_ip'] + "/_web/" + username + "/" + clustername)
|
||||
proxytool.delete_route("/_web/" + username + "/" + clustername)
|
||||
clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
|
||||
clusterfile.write(json.dumps(clusterinfo))
|
||||
clusterfile.close()
|
||||
|
@ -364,15 +363,18 @@ class VclusterMgr(object):
|
|||
# check gateway for user
|
||||
# after reboot, user gateway goes down and lose its configuration
|
||||
# so, check is necessary
|
||||
self.networkmgr.check_usergw(username, self.nodemgr)
|
||||
self.networkmgr.check_usergw(username, self.nodemgr,self.distributedgw=='True')
|
||||
# set proxy
|
||||
if (not info['proxy_server_ip'] == self.addr) and (self.distributedgw == 'False'):
|
||||
info['proxy_server_ip'] = self.addr
|
||||
info['proxy_url'] = env.getenv("PORTAL_URL") +"/"+ self.addr +"/_web/" + username + "/" + clustername
|
||||
try:
|
||||
target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
|
||||
if not info['proxy_server_ip'] == self.addr:
|
||||
worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
|
||||
worker.set_route('/'+info['proxy_server_ip']+'/go/'+username+'/'+clustername, target)
|
||||
worker.set_route('/go/'+username+'/'+clustername, target)
|
||||
else:
|
||||
proxytool.set_route('/'+info['proxy_server_ip']+'/go/'+username+'/'+clustername, target)
|
||||
proxytool.set_route('/go/'+username+'/'+clustername, target)
|
||||
except:
|
||||
return [False, "start cluster failed with setting proxy failed"]
|
||||
for container in info['containers']:
|
||||
|
@ -383,9 +385,7 @@ class VclusterMgr(object):
|
|||
worker.start_services(container['containername'])
|
||||
info['status']='running'
|
||||
info['start_time']=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
infofile = open(self.fspath+"/global/users/"+username+"/clusters/"+clustername, 'w')
|
||||
infofile.write(json.dumps(info))
|
||||
infofile.close()
|
||||
self.write_clusterinfo(info,clustername,username)
|
||||
return [True, "start cluster"]
|
||||
|
||||
def mount_cluster(self, clustername, username):
|
||||
|
@ -406,15 +406,19 @@ class VclusterMgr(object):
|
|||
if info['status'] == 'stopped':
|
||||
return [True, "cluster no need to start"]
|
||||
# need to check and recover gateway of this user
|
||||
self.networkmgr.check_usergw(username, self.nodemgr)
|
||||
self.networkmgr.check_usergw(username, self.nodemgr,self.distributedgw=='True')
|
||||
# recover proxy of cluster
|
||||
if (not info['proxy_server_ip'] == self.addr) and (self.distributedgw == 'False'):
|
||||
info['proxy_server_ip'] = self.addr
|
||||
info['proxy_url'] = env.getenv("PORTAL_URL") +"/"+ self.addr +"/_web/" + username + "/" + clustername
|
||||
self.write_clusterinfo(info,clustername,username)
|
||||
try:
|
||||
target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
|
||||
if not info['proxy_server_ip'] == self.addr:
|
||||
worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
|
||||
worker.set_route('/'+info['proxy_server_ip']+'/go/'+username+'/'+clustername, target)
|
||||
worker.set_route('/go/'+username+'/'+clustername, target)
|
||||
else:
|
||||
proxytool.set_route('/'+info['proxy_server_ip']+'/go/'+username+'/'+clustername, target)
|
||||
proxytool.set_route('/go/'+username+'/'+clustername, target)
|
||||
except:
|
||||
return [False, "start cluster failed with setting proxy failed"]
|
||||
# recover containers of this cluster
|
||||
|
@ -434,9 +438,9 @@ class VclusterMgr(object):
|
|||
return [False, 'cluster is already stopped']
|
||||
if not info['proxy_server_ip'] == self.addr:
|
||||
worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
|
||||
worker.delete_route('/'+info['proxy_server_ip']+'/go/'+username+'/'+clustername)
|
||||
worker.delete_route('/go/'+username+'/'+clustername)
|
||||
else:
|
||||
proxytool.delete_route('/'+info['proxy_server_ip']+'/go/'+username+'/'+clustername)
|
||||
proxytool.delete_route('/go/'+username+'/'+clustername)
|
||||
for container in info['containers']:
|
||||
worker = self.nodemgr.ip_to_rpc(container['host'])
|
||||
if worker is None:
|
||||
|
@ -503,6 +507,15 @@ class VclusterMgr(object):
|
|||
info = json.loads(infofile.read())
|
||||
return [True, info]
|
||||
|
||||
def write_clusterinfo(self, info, clustername, username):
|
||||
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
|
||||
if not os.path.isfile(clusterpath):
|
||||
return [False, "cluster not found"]
|
||||
infofile = open(clusterpath, 'w')
|
||||
infofile.write(json.dumps(info))
|
||||
infofile.close()
|
||||
return [True, info]
|
||||
|
||||
# acquire cluster id from etcd
|
||||
def _acquire_id(self):
|
||||
clusterid = self.etcd.getkey("vcluster/nextid")[1]
|
||||
|
|
|
@ -40,6 +40,7 @@ from webViews.authenticate.register import registerView
|
|||
from webViews.authenticate.login import loginView, logoutView
|
||||
import webViews.dockletrequest
|
||||
from webViews import cookie_tool
|
||||
import traceback
|
||||
|
||||
|
||||
|
||||
|
@ -570,6 +571,7 @@ def not_authorized(error):
|
|||
@app.errorhandler(500)
|
||||
def internal_server_error(error):
|
||||
logger.error(error)
|
||||
logger.error(traceback.format_exc())
|
||||
if "username" in session:
|
||||
if "500" in session and "500_title" in session:
|
||||
reason = session['500']
|
||||
|
|
Loading…
Reference in New Issue