Merge pull request #301 from FirmlyReality/tidycode

Tidycode
This commit is contained in:
amber 2018-06-12 10:04:13 +08:00 committed by GitHub
commit 4c3b11cc25
38 changed files with 396 additions and 386 deletions

.gitignore

@@ -6,4 +6,4 @@ __temp
.DS_Store
docklet.conf
home.html
src/migrations/
src/utils/migrations/


@@ -46,7 +46,7 @@ export FS_PREFIX
DAEMON_USER=root
# settings for docklet master
DAEMON_MASTER=$DOCKLET_LIB/httprest.py
DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py
DAEMON_NAME_MASTER=docklet-master
DAEMON_OPTS_MASTER=
# The process ID of the script when it runs is stored here:


@@ -45,7 +45,7 @@ export FS_PREFIX
DAEMON_USER=root
# settings for docklet master
DAEMON_MASTER=$DOCKLET_LIB/httprest.py
DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py
DAEMON_NAME_MASTER=docklet-master
DAEMON_OPTS_MASTER=
# The process ID of the script when it runs is stored here:
@@ -158,7 +158,7 @@ do_start_web () {
}
do_start_user () {
log_daemon_msg "Starting $DAEMON_NAME_USER in $FS_PREFIX"
DAEMON_OPTS_USER="-p $USER_PORT"


@@ -37,7 +37,7 @@ export FS_PREFIX
DAEMON_USER=root
# settings for docklet worker
DAEMON=$DOCKLET_LIB/worker.py
DAEMON=$DOCKLET_LIB/worker/worker.py
DAEMON_NAME=docklet-worker
DAEMON_OPTS=
# The process ID of the script when it runs is stored here:
@@ -87,7 +87,7 @@ pre_start () {
if [ ! -d $FS_PREFIX/local/basefs ]; then
log_daemon_msg "basefs does not exist, run prepare.sh first" && exit 1
fi
if [ ! -d $FS_PREFIX/local/packagefs ]; then
mkdir -p $FS_PREFIX/local/packagefs
fi
@@ -96,6 +96,7 @@ pre_start () {
do_start() {
pre_start
log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
#python3 $DAEMON
start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
log_end_msg $?
}


@@ -11,14 +11,14 @@ This module consists of three parts:
'''
import threading,datetime,random,time
from model import db,User,ApplyMsg
from userManager import administration_required
import env
from utils.model import db,User,ApplyMsg
from master.userManager import administration_required
from utils import env
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from settings import settings
from master.settings import settings
# send email to remind users of their beans


@@ -1,12 +1,12 @@
#!/usr/bin/python3
from io import StringIO
import os,sys,subprocess,time,re,datetime,threading,random,shutil
from model import db, Image
from deploy import *
from utils.model import db, Image
from master.deploy import *
import json
from log import logger
import env
from utils.log import logger
from utils import env
import requests
fspath = env.getenv('FS_PREFIX')
@@ -42,12 +42,12 @@ class AliyunMgr():
except Exception as e:
logger.error(e)
return False
def createInstance(self):
request = self.Request.CreateInstanceRequest.CreateInstanceRequest()
request.set_accept_format('json')
request.add_query_param('RegionId', self.setting['RegionId'])
if 'ZoneId' in self.setting and not self.setting['ZoneId'] == "":
request.add_query_param('ZoneId', self.setting['ZoneId'])
if 'VSwitchId' in self.setting and not self.setting['VSwitchId'] == "":
request.add_query_param('VSwitchId', self.setting['VSwitchId'])
@@ -60,25 +60,25 @@ class AliyunMgr():
request.add_query_param('Password', self.setting['Password'])
response = self.clt.do_action_with_exception(request)
logger.info(response)
instanceid=json.loads(bytes.decode(response))['InstanceId']
return instanceid
def startInstance(self, instanceid):
request = self.Request.StartInstanceRequest.StartInstanceRequest()
request.set_accept_format('json')
request.add_query_param('InstanceId', instanceid)
response = self.clt.do_action_with_exception(request)
logger.info(response)
def createEIP(self):
request = self.Request.AllocateEipAddressRequest.AllocateEipAddressRequest()
request.set_accept_format('json')
request.add_query_param('RegionId', self.setting['RegionId'])
response = self.clt.do_action_with_exception(request)
logger.info(response)
response=json.loads(bytes.decode(response))
eipid=response['AllocationId']
eipaddr=response['EipAddress']
@@ -94,7 +94,7 @@ class AliyunMgr():
response = self.clt.do_action_with_exception(request)
logger.info(response)
def getInnerIP(self, instanceid):
request = self.Request.DescribeInstancesRequest.DescribeInstancesRequest()
request.set_accept_format('json')
@@ -168,7 +168,7 @@ class EmptyMgr():
return False
class CloudMgr():
def getSettingFile(self):
if not os.path.exists(fspath+"/global/sys/cloudsetting.json"):
currentfilepath = os.path.dirname(os.path.abspath(__file__))


@@ -1,8 +1,8 @@
#!/usr/bin/python3
import paramiko, time
from log import logger
import env,os
import paramiko, time, os
from utils.log import logger
from utils import env
def myexec(ssh,command):
stdin,stdout,stderr = ssh.exec_command(command)
@@ -12,7 +12,7 @@ def myexec(ssh,command):
if time.time() > endtime:
stdout.channel.close()
logger.error(command + ": fail")
return
# for line in stdout.readlines():
# if line is None:
# time.sleep(5)
@@ -35,7 +35,7 @@ def deploy(ipaddr,masterip,account,password,volumename):
sftp.put(deployscriptpath,'/root/docklet-deploy.sh')
sftp.put('/etc/hosts', '/etc/hosts')
transport.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
while True:


@@ -4,10 +4,13 @@
# because some modules need these variables at import time
# for example, userManager/model.py
import sys
if sys.path[0].endswith("master"):
sys.path[0] = sys.path[0][:-6]
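# strip the trailing "master" (6 chars) so sys.path[0] points at src/,
# which lets utils/, master/ and worker/ be imported as packages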
from flask import Flask, request
# must first init loadenv
import tools, env
from utils import tools, env
# default CONFIG=/opt/docklet/local/docklet-running.conf
config = env.getenv("CONFIG")
@@ -15,22 +18,22 @@ tools.loadenv(config)
# second init logging
# must import logger after initlogging, ugly
from log import initlogging
from utils.log import initlogging
initlogging("docklet-master")
from log import logger
from utils.log import logger
import os
import http.server, cgi, json, sys, shutil
import http.server, cgi, json, sys, shutil, traceback
import xmlrpc.client
from socketserver import ThreadingMixIn
import nodemgr, vclustermgr, etcdlib, network, imagemgr, notificationmgr, lockmgr, cloudmgr
from logs import logs
import userManager,beansapplicationmgr
import monitor,traceback
from utils import etcdlib, imagemgr
from master import nodemgr, vclustermgr, notificationmgr, lockmgr, cloudmgr
from utils.logs import logs
from master import userManager, beansapplicationmgr, monitor, sysmgr, network
from worker.monitor import History_Manager
import threading
import sysmgr
import requests
from nettools import portcontrol
from utils.nettools import portcontrol
#default EXTERNAL_LOGIN=False
external_login = env.getenv('EXTERNAL_LOGIN')
@@ -895,7 +898,7 @@ if __name__ == '__main__':
masterport = env.getenv('MASTER_PORT')
logger.info("using MASTER_PORT %d", int(masterport))
G_historymgr = monitor.History_Manager()
G_historymgr = History_Manager()
master_collector = monitor.Master_Collector(G_nodemgr,ipaddr+":"+str(masterport))
master_collector.start()
logger.info("master_collector started")

src/master/monitor.py (new file)

@@ -0,0 +1,264 @@
import threading, time, traceback
from utils import env
from utils.log import logger
from httplib2 import Http
from urllib.parse import urlencode
# major dict to store the monitoring data
# only use on Master
# monitor_hosts: use workers' ip addresses as first key.
# second key: cpuinfo,diskinfo,meminfo,osinfo,cpuconfig,running,containers,containerslist
# 1.cpuinfo stores the cpu usages data, and it has keys: user,system,idle,iowait
# 2.diskinfo stores the disks usages data, and it has keys: device,mountpoint,total,used,free,percent
# 3.meminfo stores the memory usages data, and it has keys: total,used,free,buffers,cached,percent
# 4.osinfo stores the information of operating system,
# and it has keys: platform,system,node,release,version,machine,processor
# 5.cpuconfig stores the information of processors, and it is a list, each element of list is a dict
# which stores the information of a processor, each element has key: processor,model name,
# core id, cpu MHz, cache size, physical id.
# 6.running indicates the status of the worker, and it has two values: True, False.
# 7.containers stores the number of containers on the worker.
# 8.containerslist stores a list of the names of the containers on the worker.
monitor_hosts = {}
# monitor_vnodes: use the owners' names of vnodes(containers) as first key.
# use the names of vnodes(containers) as second key.
# third key: cpu_use,mem_use,disk_use,basic_info,quota
# 1.cpu_use has keys: val,unit,hostpercent
# 2.mem_use has keys: val,unit,usedp
# 3.disk_use has keys: device,mountpoint,total,used,free,percent
# 4.basic_info has keys: Name,State,PID,IP,RunningTime,billing,billing_this_hour
# 5.quota has keys: cpu,memory
monitor_vnodes = {}
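# Illustrative shape of the two dicts above (worker addresses, vnode names
# and numbers are hypothetical, not from a real deployment):
# monitor_hosts = {"192.168.0.2": {"cpuinfo": {"user": 1.2, "system": 0.3,
#     "idle": 98.0, "iowait": 0.5}, "running": True, "containers": 2,
#     "containerslist": ["bob-0-101", "alice-0-102"], ...}}
# monitor_vnodes = {"bob": {"bob-0-101": {"cpu_use": {...}, "mem_use": {...},
#     "disk_use": {...}, "basic_info": {...}, "quota": {...}},
#     "net_stats": {...}}}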
# get owner name of a container
def get_owner(container_name):
names = container_name.split('-')
return names[0]
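# e.g. get_owner("bob-0-101") == "bob"; container names are assumed to begin
# with "<owner>-", so the text before the first "-" is the owner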
# the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes
class Master_Collector(threading.Thread):
def __init__(self,nodemgr,master_ip):
threading.Thread.__init__(self)
self.thread_stop = False
self.nodemgr = nodemgr
self.master_ip = master_ip
self.net_lastbillings = {}
self.bytes_per_beans = 1000000000
return
def net_billings(self, username, now_bytes_total):
global monitor_vnodes
if not username in self.net_lastbillings.keys():
self.net_lastbillings[username] = 0
elif int(now_bytes_total/self.bytes_per_beans) < self.net_lastbillings[username]:
self.net_lastbillings[username] = 0
diff = int(now_bytes_total/self.bytes_per_beans) - self.net_lastbillings[username]
if diff > 0:
auth_key = env.getenv('AUTH_KEY')
data = {"owner_name":username,"billing":diff, "auth_key":auth_key}
header = {'Content-Type':'application/x-www-form-urlencoded'}
http = Http()
[resp,content] = http.request("http://"+self.master_ip+"/billing/beans/","POST",urlencode(data),headers = header)
logger.info("response from master:"+content.decode('utf-8'))
self.net_lastbillings[username] += diff
monitor_vnodes[username]['net_stats']['net_billings'] = self.net_lastbillings[username]
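# Worked example (hypothetical numbers): with bytes_per_beans = 10**9 and
# now_bytes_total = 3.2e9, int(3.2e9 / 1e9) = 3; if net_lastbillings[user]
# was 1, diff = 2 beans are charged via the POST to /billing/beans/ above
# and the stored counter becomes 3.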
def run(self):
global monitor_hosts
global monitor_vnodes
while not self.thread_stop:
for worker in monitor_hosts.keys():
monitor_hosts[worker]['running'] = False
workers = self.nodemgr.get_nodeips()
for worker in workers:
try:
ip = worker
workerrpc = self.nodemgr.ip_to_rpc(worker)
# fetch data
info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
#logger.info(info[0])
# store data in monitor_hosts and monitor_vnodes
monitor_hosts[ip] = info[0]
for container in info[1].keys():
owner = get_owner(container)
if not owner in monitor_vnodes.keys():
monitor_vnodes[owner] = {}
monitor_vnodes[owner][container] = info[1][container]
for user in info[2].keys():
if not user in monitor_vnodes.keys():
continue
else:
monitor_vnodes[user]['net_stats'] = info[2][user]
self.net_billings(user, info[2][user]['bytes_total'])
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
time.sleep(2)
#logger.info(History.query.all())
#logger.info(VNode.query.all())
return
def stop(self):
self.thread_stop = True
return
# master use this class to fetch specific data of containers(vnodes)
class Container_Fetcher:
def __init__(self,container_name):
self.owner = get_owner(container_name)
self.con_id = container_name
return
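# e.g. Container_Fetcher("bob-0-101").get_cpu_use() returns the cached
# cpu_use dict with the vnode's quota merged in, or {} when the vnode is
# not monitored yet ("bob-0-101" is an illustrative name)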
def get_cpu_use(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['cpu_use']
res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_mem_use(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['mem_use']
res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_disk_use(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['disk_use']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_net_stats(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['net_stats']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_basic_info(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['basic_info']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
# Master use this class to fetch specific data of physical machines(hosts)
class Fetcher:
def __init__(self,host):
global monitor_hosts
self.info = monitor_hosts[host]
return
#def get_clcnt(self):
# return DockletMonitor.clcnt
#def get_nodecnt(self):
# return DockletMonitor.nodecnt
#def get_meminfo(self):
# return self.get_meminfo_('172.31.0.1')
def get_meminfo(self):
try:
res = self.info['meminfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_cpuinfo(self):
try:
res = self.info['cpuinfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_cpuconfig(self):
try:
res = self.info['cpuconfig']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_diskinfo(self):
try:
res = self.info['diskinfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_osinfo(self):
try:
res = self.info['osinfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_concpuinfo(self):
try:
res = self.info['concpupercent']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_containers(self):
try:
res = self.info['containers']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_status(self):
try:
isexist = self.info['running']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
isexist = False
if(isexist):
return 'RUNNING'
else:
return 'STOPPED'
def get_containerslist(self):
try:
res = self.info['containerslist']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res


@@ -1,9 +1,9 @@
#!/usr/bin/python3
import json, sys, netifaces, threading
from nettools import netcontrol,ovscontrol
from utils.nettools import netcontrol,ovscontrol
from log import logger
from utils.log import logger
# getip : get ip from network interface
# ifname : name of network interface


@@ -2,9 +2,9 @@
import threading, random, time, xmlrpc.client, sys
#import network
from nettools import netcontrol,ovscontrol
from log import logger
import env
from utils.nettools import netcontrol,ovscontrol
from utils.log import logger
from utils import env
##########################################
# NodeMgr
@@ -149,7 +149,7 @@ class NodeMgr(object):
taskargs = task['args']
logger.info("recover task:%s in worker:%s" % (taskname, ip))
eval('worker.'+taskname)(*taskargs)
# get all running nodes' IP addresses
def get_nodeips(self):
return self.runnodes


@@ -1,15 +1,15 @@
import json
from log import logger
from model import db, Notification, NotificationGroups, User, UserNotificationPair
from userManager import administration_required, token_required
from utils.log import logger
from utils.model import db, Notification, NotificationGroups, User, UserNotificationPair
from master.userManager import administration_required, token_required
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from datetime import datetime
import env
from settings import settings
from utils import env
from master.settings import settings
class NotificationMgr:
def __init__(self):


@@ -1,9 +1,9 @@
#!/usr/bin/python3
import env
from utils import env
import json, os
from functools import wraps
from log import logger
from utils.log import logger
class settingsClass:


@@ -7,22 +7,22 @@ Warning: in some early versions, "token" stand for the instance of class model.U
Original author: Liu Peidong
'''
from model import db, User, UserGroup, Notification, UserUsage
from utils.model import db, User, UserGroup, Notification, UserUsage
from functools import wraps
import os, subprocess, math
import hashlib
import pam
from base64 import b64encode
import env
from settings import settings
from utils import env
from master.settings import settings
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from datetime import datetime
import json
from log import logger
from lvmtool import *
from utils.log import logger
from utils.lvmtool import *
PAM = pam.pam()
fspath = env.getenv('FS_PREFIX')
@@ -162,7 +162,7 @@ class userManager:
sys_admin.auth_method = 'local'
db.session.add(sys_admin)
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", username])
subprocess.call([path+"/master/userinit.sh", username])
db.session.commit()
if not os.path.exists(fspath+"/global/sys/quota"):
groupfile = open(fspath+"/global/sys/quota",'w')
@@ -870,7 +870,7 @@ class userManager:
# now initialize for all kind of users
#if newuser.status == 'normal':
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", newuser.username])
subprocess.call([path+"/master/userinit.sh", newuser.username])
res = self.groupQuery(name=newuser.user_group)
if res['success']:
self.set_nfs_quota(newuser.username,res['data']['data'])


@@ -1,15 +1,13 @@
#!/usr/bin/python3
import os, random, json, sys, imagemgr
import os, random, json, sys
import datetime, math
from log import logger
import env
import proxytool
import requests, threading
import traceback
from nettools import portcontrol
from model import db, Container, PortMapping, VCluster
from utils.log import logger
from utils import env, imagemgr, proxytool
import requests, threading, traceback
from utils.nettools import portcontrol
from utils.model import db, Container, PortMapping, VCluster
userpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT'))
def post_to_user(url = '/', data={}):


@@ -20,12 +20,11 @@ from configparser import ConfigParser
from io import StringIO
import os,sys,subprocess,time,re,datetime,threading,random
import xmlrpc.client
from model import db, Image
from utils.model import db, Image
from log import logger
import env
from lvmtool import *
import updatebase
from utils.log import logger
from utils import env, updatebase
from utils.lvmtool import *
import requests
master_port = str(env.getenv('MASTER_PORT'))


@@ -6,7 +6,7 @@ import argparse
import sys
import time # this is only being used as part of the example
import os
import env
from utils import env
# logger should only be imported after initlogging has been called
logger = None
@@ -17,7 +17,7 @@ def initlogging(name='docklet'):
homepath = env.getenv('FS_PREFIX')
LOG_FILENAME = homepath + '/local/log/' + name + '.log'
LOG_LIFE = env.getenv('LOG_LIFE')
LOG_LEVEL = env.getenv('LOG_LEVEL')
if LOG_LEVEL == "DEBUG":
@@ -32,7 +32,6 @@ def initlogging(name='docklet'):
LOG_LEVEL = logging.CRITICAL
else:
LOG_LEVEL = logging.DEBUG
logger = logging.getLogger(name)
# Configure logging to log to a file, making a new file at midnight and keeping the last 3 day's data
# Give the logger a unique name (good practice)
@@ -47,7 +46,6 @@ def initlogging(name='docklet'):
handler.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(handler)
# Replace stdout with logging to file at INFO level
sys.stdout = RedirectLogger(logger, logging.INFO)
# Replace stderr with logging to file at ERROR level


@@ -1,8 +1,8 @@
#!/usr/bin/python3
import env
from utils import env
import json, os
from log import logger
from utils.log import logger
from werkzeug.utils import secure_filename
logsPath = env.getenv('FS_PREFIX') + '/local/log/'


@@ -1,7 +1,8 @@
#!/usr/bin/python3
import env,subprocess,os,time
from log import logger
import subprocess,os,time
from utils.log import logger
from utils import env
def sys_run(command,check=False):
Ret = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True, check=check)
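# e.g. Ret = sys_run("vgdisplay docklet-group") (illustrative VG name);
# Ret.returncode holds the exit status and Ret.stdout the combined
# stdout+stderr bytes; with check=True a non-zero exit raises CalledProcessError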
@@ -45,12 +46,12 @@ def new_group(group_name, size = "5000", file_path = "/opt/docklet/local/docklet
logger.error(e)
logger.info("initialize lvm group:%s with size %sM success" % (group_name,size))
return True
elif storage == "disk":
disk = env.getenv("DISK")
if disk is None:
logger.error("use disk for story without a physical disk")
return False
return False
#check vg
Ret = sys_run("vgdisplay " + group_name)
if Ret.returncode == 0:
@@ -63,8 +64,8 @@ def new_group(group_name, size = "5000", file_path = "/opt/docklet/local/docklet
except Exception as e:
logger.error(e)
logger.info("initialize lvm group:%s with size %sM success" % (group_name,size))
return True
else:
logger.info("unknown storage type:" + storage)
return False
@@ -85,7 +86,7 @@ def recover_group(group_name,file_path="/opt/docklet/local/docklet-storage"):
time.sleep(1)
#recover vg
Ret = sys_run("vgdisplay " + group_name)
if Ret.returncode != 0:
Ret = sys_run("vgcreate %s /dev/loop0" % group_name)
if Ret.returncode != 0:
logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
@@ -96,10 +97,10 @@ def recover_group(group_name,file_path="/opt/docklet/local/docklet-storage"):
disk = env.getenv("DISK")
if disk is None:
logger.error("use disk for story without a physical disk")
return False
return False
#recover vg
Ret = sys_run("vgdisplay " + group_name)
if Ret.returncode != 0:
Ret = sys_run("vgcreate %s %s" % (group_name,disk))
if Ret.returncode != 0:
logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
@@ -161,5 +162,3 @@ def delete_volume(group_name, volume_name):
return False
else:
logger.info("lv %s in vg %s does not exists" % (volume_name,group_name))


@@ -34,7 +34,7 @@ import os, json
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired, BadSignature
import env
from utils import env
fsdir = env.getenv('FS_PREFIX')


@@ -1,7 +1,8 @@
#!/usr/bin/python3
import subprocess, env, threading
from log import logger
import subprocess, threading
from utils.log import logger
from utils import env
class ipcontrol(object):
@staticmethod


@@ -1,7 +1,7 @@
#!/usr/bin/python3
import requests, json
import env
from utils import env
proxy_api_port = env.getenv("PROXY_API_PORT")
proxy_control="http://localhost:"+ str(proxy_api_port) +"/api/routes"
@@ -22,7 +22,7 @@ def set_route(path, target):
except:
return [False, 'Connect Failed']
return [True, 'set ok']
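# e.g. set_route("go/bob/workspace", "http://10.0.0.2:10000") asks the local
# proxy's /api/routes REST API to map the path to the target and returns
# [True, 'set ok'] on success (path and target here are made-up values)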
def delete_route(path):
path='/'+path.strip('/')
try:


@@ -1,7 +1,7 @@
#!/usr/bin/python3
import os, shutil
from log import logger
from utils.log import logger
def aufs_remove(basefs):
try:
@@ -13,7 +13,7 @@ def aufs_remove(basefs):
logger.error(e)
def aufs_clean(basefs):
# clean the aufs mark
allfiles = os.listdir(basefs)
for onefile in allfiles:
if onefile[:4] == ".wh.":


@@ -1,12 +1,10 @@
#!/usr/bin/python3
import subprocess, os, json
import imagemgr
import network
from log import logger
import env
from lvmtool import sys_run, check_volume
from monitor import Container_Collector, History_Manager
from utils.log import logger
from utils import env, imagemgr
from utils.lvmtool import sys_run, check_volume
from worker.monitor import Container_Collector, History_Manager
import lxc
class Container(object):
@@ -46,7 +44,7 @@ class Container(object):
if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)):
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", username])
subprocess.call([path+"/master/userinit.sh", username])
logger.info("user %s directory not found, create it" % username)
sys_run("mkdir -p /var/lib/lxc/%s" % lxc_name)
logger.info("generate config file for %s" % lxc_name)


@@ -17,18 +17,17 @@ Design:Monitor mainly consists of three parts: Collectors, Master_Collector and
'''
import subprocess,re,os,etcdlib,psutil,math,sys
import subprocess,re,os,psutil,math,sys
import time,threading,json,traceback,platform
import env
from utils import env, etcdlib
import lxc
import xmlrpc.client
from datetime import datetime
from model import db,VNode,History,BillingHistory,VCluster,PortMapping
from log import logger
from utils.model import db,VNode,History,BillingHistory,VCluster,PortMapping
from utils.log import logger
from httplib2 import Http
from urllib.parse import urlencode
from httprest import post_to_user
# billing parameters
a_cpu = 500 # seconds
@@ -36,33 +35,6 @@ b_mem = 2000000 # MB
c_disk = 4000 # MB
d_port = 1
# major dict to store the monitoring data
# only use on Master
# monitor_hosts: use workers' ip addresses as first key.
# second key: cpuinfo,diskinfo,meminfo,osinfo,cpuconfig,running,containers,containerslist
# 1.cpuinfo stores the cpu usages data, and it has keys: user,system,idle,iowait
# 2.diskinfo stores the disks usages data, and it has keys: device,mountpoint,total,used,free,percent
# 3.meminfo stores the memory usages data, and it has keys: total,used,free,buffers,cached,percent
# 4.osinfo stores the information of operating system,
# and it has keys: platform,system,node,release,version,machine,processor
# 5.cpuconfig stores the information of processors, and it is a list, each element of list is a dict
# which stores the information of a processor, each element has key: processor,model name,
# core id, cpu MHz, cache size, physical id.
# 6.running indicates the status of the worker, and it has two values: True, False.
# 7.containers stores the number of containers on the worker.
# 8.containerslist stores a list of the names of the containers on the worker.
monitor_hosts = {}
# monitor_vnodes: use the owners' names of vnodes(containers) as first key.
# use the names of vnodes(containers) as second key.
# third key: cpu_use,mem_use,disk_use,basic_info,quota
# 1.cpu_use has keys: val,unit,hostpercent
# 2.mem_use has keys: val,unit,usedp
# 3.disk_use has keys: device,mountpoint,total,used,free,percent
# 4.basic_info has keys: Name,State,PID,IP,RunningTime,billing,billing_this_hour
# 5.quota has keys: cpu,memory
monitor_vnodes = {}
# major dict to store the monitoring data on Worker
# only use on Worker
# workerinfo: only store the data collected on current Worker,
@@ -628,234 +600,6 @@ def get_billing_history(vnode_name):
default['port'] = 0
return default
# the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes
class Master_Collector(threading.Thread):
def __init__(self,nodemgr,master_ip):
threading.Thread.__init__(self)
self.thread_stop = False
self.nodemgr = nodemgr
self.master_ip = master_ip
self.net_lastbillings = {}
self.bytes_per_beans = 1000000000
return
def net_billings(self, username, now_bytes_total):
global monitor_vnodes
if not username in self.net_lastbillings.keys():
self.net_lastbillings[username] = 0
elif int(now_bytes_total/self.bytes_per_beans) < self.net_lastbillings[username]:
self.net_lastbillings[username] = 0
diff = int(now_bytes_total/self.bytes_per_beans) - self.net_lastbillings[username]
if diff > 0:
auth_key = env.getenv('AUTH_KEY')
data = {"owner_name":username,"billing":diff, "auth_key":auth_key}
header = {'Content-Type':'application/x-www-form-urlencoded'}
http = Http()
[resp,content] = http.request("http://"+self.master_ip+"/billing/beans/","POST",urlencode(data),headers = header)
logger.info("response from master:"+content.decode('utf-8'))
self.net_lastbillings[username] += diff
monitor_vnodes[username]['net_stats']['net_billings'] = self.net_lastbillings[username]
def run(self):
global monitor_hosts
global monitor_vnodes
while not self.thread_stop:
for worker in monitor_hosts.keys():
monitor_hosts[worker]['running'] = False
workers = self.nodemgr.get_nodeips()
for worker in workers:
try:
ip = worker
workerrpc = self.nodemgr.ip_to_rpc(worker)
# fetch data
info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
#logger.info(info[0])
# store data in monitor_hosts and monitor_vnodes
monitor_hosts[ip] = info[0]
for container in info[1].keys():
owner = get_owner(container)
if not owner in monitor_vnodes.keys():
monitor_vnodes[owner] = {}
monitor_vnodes[owner][container] = info[1][container]
for user in info[2].keys():
if not user in monitor_vnodes.keys():
continue
else:
monitor_vnodes[user]['net_stats'] = info[2][user]
self.net_billings(user, info[2][user]['bytes_total'])
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
time.sleep(2)
#logger.info(History.query.all())
#logger.info(VNode.query.all())
return
def stop(self):
self.thread_stop = True
return
# master use this class to fetch specific data of containers(vnodes)
class Container_Fetcher:
def __init__(self,container_name):
self.owner = get_owner(container_name)
self.con_id = container_name
return
def get_cpu_use(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['cpu_use']
res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_mem_use(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['mem_use']
res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_disk_use(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['disk_use']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_net_stats(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['net_stats']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_basic_info(self):
global monitor_vnodes
try:
res = monitor_vnodes[self.owner][self.con_id]['basic_info']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
# Master use this class to fetch specific data of physical machines(hosts)
class Fetcher:
def __init__(self,host):
global monitor_hosts
self.info = monitor_hosts[host]
return
#def get_clcnt(self):
# return DockletMonitor.clcnt
#def get_nodecnt(self):
# return DockletMonitor.nodecnt
#def get_meminfo(self):
# return self.get_meminfo_('172.31.0.1')
def get_meminfo(self):
try:
res = self.info['meminfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_cpuinfo(self):
try:
res = self.info['cpuinfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_cpuconfig(self):
try:
res = self.info['cpuconfig']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_diskinfo(self):
try:
res = self.info['diskinfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_osinfo(self):
try:
res = self.info['osinfo']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_concpuinfo(self):
try:
res = self.info['concpupercent']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_containers(self):
try:
res = self.info['containers']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
def get_status(self):
try:
isexist = self.info['running']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
isexist = False
if(isexist):
return 'RUNNING'
else:
return 'STOPPED'
def get_containerslist(self):
try:
res = self.info['containerslist']
except Exception as err:
logger.warning(traceback.format_exc())
logger.warning(err)
res = {}
return res
# To record data when the status of containers change
class History_Manager:


@@ -1,22 +1,27 @@
#!/usr/bin/python3
# first init env
import env, tools
import sys
if sys.path[0].endswith("worker"):
sys.path[0] = sys.path[0][:-6]
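# same trick as in master/httprest.py: strip the trailing "worker" so that
# sys.path[0] points at src/ and utils/, master/, worker/ resolve as packages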
from utils import env, tools
config = env.getenv("CONFIG")
#config = "/opt/docklet/local/docklet-running.conf"
tools.loadenv(config)
# must import logger after initlogging, ugly
from log import initlogging
from utils.log import initlogging
initlogging("docklet-worker")
from log import logger
from utils.log import logger
import xmlrpc.server, sys, time
from socketserver import ThreadingMixIn
import threading
import etcdlib, network, container
from nettools import netcontrol,ovscontrol,portcontrol
import monitor, proxytool
from lvmtool import new_group, recover_group
from utils import etcdlib, proxytool
from worker import container, monitor
from utils.nettools import netcontrol,ovscontrol,portcontrol
from utils.lvmtool import new_group, recover_group
from master import network
##################################################################
# Worker
@@ -174,7 +179,7 @@ class Worker(object):
netcontrol.new_bridge('docklet-br')
else:
if not netcontrol.bridge_exists('docklet-br'):
logger.error("docklet-br not found")
utils logger.error("docklet-br not found")
sys.exit(1)
logger.info ("setup GRE tunnel to master %s" % self.master)
#network.netsetup("gre", self.master)


@@ -13,12 +13,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"..", "sr
if src_folder not in sys.path:
sys.path.insert(0, src_folder)
# must first init loadenv
from log import initlogging
initlogging("docklet-user")
from log import logger
import tools, env
from utils import tools, env
config = env.getenv("CONFIG")
tools.loadenv(config)
masterips = env.getenv("MASTER_IPS").split(",")
@@ -26,14 +21,19 @@ G_masterips = []
for masterip in masterips:
G_masterips.append(masterip.split("@")[0] + ":" + str(env.getenv("MASTER_PORT")))
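# e.g. MASTER_IPS="192.168.0.2@master1,192.168.0.3@master2" with
# MASTER_PORT=9000 yields G_masterips == ["192.168.0.2:9000", "192.168.0.3:9000"]
# (the "<ip>@<name>" entry format and the addresses are illustrative assumptions)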
# must first init loadenv
from utils.log import initlogging
initlogging("docklet-user")
from utils.log import logger
from flask import Flask, request, session, render_template, redirect, send_from_directory, make_response, url_for, abort
from functools import wraps
import userManager,beansapplicationmgr, notificationmgr, lockmgr
from master import userManager,beansapplicationmgr, notificationmgr, lockmgr
import threading,traceback
from model import User,db
from utils.model import User,db
from httplib2 import Http
from urllib.parse import urlencode
from settings import settings
from master.settings import settings
external_login = env.getenv('EXTERNAL_LOGIN')
if(external_login == 'TRUE'):


@@ -12,7 +12,7 @@ if src_folder not in sys.path:
sys.path.insert(0, src_folder)
# must first init loadenv
import tools, env
from utils import tools, env
config = env.getenv("CONFIG")
tools.loadenv(config)


@@ -13,7 +13,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../..
if src_folder not in sys.path:
sys.path.insert(0, src_folder)
import env
from utils import env
if (env.getenv('EXTERNAL_LOGIN') == 'True'):
sys.path.insert(0, os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../../src", "plugin"))))
@@ -140,7 +140,7 @@ class external_login_callbackView(normalView):
class external_loginView(normalView):
if (env.getenv('EXTERNAL_LOGIN') == 'True'):
template_path = external_generate.html_path
@classmethod
def post(self):
return render_template(self.template_path)


@@ -9,7 +9,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../..",
if src_folder not in sys.path:
sys.path.insert(0, src_folder)
import env
from utils import env
masterips=env.getenv('MASTER_IPS').split(",")
user_endpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT'))
@@ -60,7 +60,7 @@ class dockletRequest():
return result
#except:
#abort(500)
@classmethod
def getdesc(self,mastername):
return env.getenv(mastername+"_desc")[1:-1]


@@ -12,7 +12,7 @@ this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(ins
src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../..", "src")))
if src_folder not in sys.path:
sys.path.insert(0, src_folder)
import env
from utils import env
# logger should only be imported after initlogging has been called
logger = None