commit 4c3b11cc25

@@ -6,4 +6,4 @@ __temp
 .DS_Store
 docklet.conf
 home.html
-src/migrations/
+src/utils/migrations/

@@ -46,7 +46,7 @@ export FS_PREFIX
 DAEMON_USER=root

 # settings for docklet master
-DAEMON_MASTER=$DOCKLET_LIB/httprest.py
+DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py
 DAEMON_NAME_MASTER=docklet-master
 DAEMON_OPTS_MASTER=
 # The process ID of the script when it runs is stored here:

@@ -45,7 +45,7 @@ export FS_PREFIX
 DAEMON_USER=root

 # settings for docklet master
-DAEMON_MASTER=$DOCKLET_LIB/httprest.py
+DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py
 DAEMON_NAME_MASTER=docklet-master
 DAEMON_OPTS_MASTER=
 # The process ID of the script when it runs is stored here:

@@ -37,7 +37,7 @@ export FS_PREFIX
 DAEMON_USER=root

 # settings for docklet worker
-DAEMON=$DOCKLET_LIB/worker.py
+DAEMON=$DOCKLET_LIB/worker/worker.py
 DAEMON_NAME=docklet-worker
 DAEMON_OPTS=
 # The process ID of the script when it runs is stored here:

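Note: the three hunks above only repoint the init scripts at the relocated entry points ($DOCKLET_LIB/master/httprest.py and $DOCKLET_LIB/worker/worker.py). A minimal sanity check along these lines can confirm a deployment picked up the new layout; the paths and the default value are assumptions taken from this diff, not part of the repository:

    #!/usr/bin/python3
    # Hypothetical helper, not part of docklet: verify the daemon entry points
    # referenced by the init scripts exist after the master/worker/utils split.
    import os

    docklet_lib = os.environ.get("DOCKLET_LIB", "/opt/docklet/src")  # assumed default
    for rel in ("master/httprest.py", "worker/worker.py"):
        path = os.path.join(docklet_lib, rel)
        print(path, "OK" if os.path.isfile(path) else "MISSING")
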
@@ -96,6 +96,7 @@ pre_start () {
 do_start() {
     pre_start
     log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
+    #python3 $DAEMON
     start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
     log_end_msg $?
 }

@@ -11,14 +11,14 @@ This module consists of three parts:
 '''

 import threading,datetime,random,time
-from model import db,User,ApplyMsg
-from userManager import administration_required
-import env
+from utils.model import db,User,ApplyMsg
+from master.userManager import administration_required
+from utils import env
 import smtplib
 from email.mime.text import MIMEText
 from email.mime.multipart import MIMEMultipart
 from email.header import Header
-from settings import settings
+from master.settings import settings


 # send email to remind users of their beans

@@ -1,12 +1,12 @@
 #!/usr/bin/python3
 from io import StringIO
 import os,sys,subprocess,time,re,datetime,threading,random,shutil
-from model import db, Image
-from deploy import *
+from utils.model import db, Image
+from master.deploy import *
 import json

-from log import logger
-import env
+from utils.log import logger
+from utils import env
 import requests

 fspath = env.getenv('FS_PREFIX')

@@ -1,8 +1,8 @@
 #!/usr/bin/python3

-import paramiko, time
-from log import logger
-import env,os
+import paramiko, time, os
+from utils.log import logger
+from utils import env

 def myexec(ssh,command):
     stdin,stdout,stderr = ssh.exec_command(command)

@@ -4,10 +4,13 @@
 # because some modules need variables when import
 # for example, userManager/model.py

+import sys
+if sys.path[0].endswith("master"):
+    sys.path[0] = sys.path[0][:-6]
 from flask import Flask, request

 # must first init loadenv
-import tools, env
+from utils import tools, env
 # default CONFIG=/opt/docklet/local/docklet-running.conf

 config = env.getenv("CONFIG")

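Note: the three added lines make httprest.py importable as part of the src/ tree. When the script is launched from src/master/, sys.path[0] points at src/master, and stripping the trailing "master" (6 characters) turns it into src/, so "from utils import ..." and "from master import ..." resolve. A standalone sketch of the same idea, with the directory names assumed rather than taken from the repository:

    import os
    import sys

    # Assumed layout: <root>/src/{master,worker,utils}; the script lives in src/master/.
    if sys.path[0].endswith("master"):
        # Drop the trailing package directory so src/ becomes the import root.
        sys.path[0] = os.path.dirname(sys.path[0].rstrip(os.sep))

    print(sys.path[0])  # now points at src/, so "from utils import env" can work
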
@@ -15,22 +18,22 @@ tools.loadenv(config)

 # second init logging
 # must import logger after initlogging, ugly
-from log import initlogging
+from utils.log import initlogging
 initlogging("docklet-master")
-from log import logger
+from utils.log import logger

 import os
-import http.server, cgi, json, sys, shutil
+import http.server, cgi, json, sys, shutil, traceback
 import xmlrpc.client
 from socketserver import ThreadingMixIn
-import nodemgr, vclustermgr, etcdlib, network, imagemgr, notificationmgr, lockmgr, cloudmgr
-from logs import logs
-import userManager,beansapplicationmgr
-import monitor,traceback
+from utils import etcdlib, imagemgr
+from master import nodemgr, vclustermgr, notificationmgr, lockmgr, cloudmgr
+from utils.logs import logs
+from master import userManager, beansapplicationmgr, monitor, sysmgr, network
+from worker.monitor import History_Manager
 import threading
-import sysmgr
 import requests
-from nettools import portcontrol
+from utils.nettools import portcontrol

 #default EXTERNAL_LOGIN=False
 external_login = env.getenv('EXTERNAL_LOGIN')

@@ -895,7 +898,7 @@ if __name__ == '__main__':
     masterport = env.getenv('MASTER_PORT')
     logger.info("using MASTER_PORT %d", int(masterport))

-    G_historymgr = monitor.History_Manager()
+    G_historymgr = History_Manager()
     master_collector = monitor.Master_Collector(G_nodemgr,ipaddr+":"+str(masterport))
     master_collector.start()
     logger.info("master_collector started")

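Note: the file added below is the new master-side monitor (the Master_Collector thread plus the fetcher classes). Its net_billings() converts a user's cumulative traffic into beans at bytes_per_beans = 1000000000, billing only the whole units gained since the last reading. Restated on its own, as a simplified re-statement of the logic in the diff without the HTTP call to the billing endpoint:

    BYTES_PER_BEAN = 1000000000  # same constant as self.bytes_per_beans below

    def beans_to_bill(now_bytes_total, last_billed_units):
        """Return (units to bill now, new cumulative billed units)."""
        units = int(now_bytes_total / BYTES_PER_BEAN)
        if units < last_billed_units:        # counter reset on the worker
            last_billed_units = 0
        diff = max(units - last_billed_units, 0)
        return diff, last_billed_units + diff

    # e.g. 2.5 GB of cumulative traffic -> 2 beans billed in total
    print(beans_to_bill(2_500_000_000, 0))   # (2, 2)
    print(beans_to_bill(2_500_000_000, 2))   # (0, 2): nothing new to bill
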
@@ -0,0 +1,264 @@
+import threading, time, traceback
+from utils import env
+from utils.log import logger
+from httplib2 import Http
+
+# major dict to store the monitoring data
+# only use on Master
+# monitor_hosts: use workers' ip addresses as first key.
+# second key: cpuinfo,diskinfo,meminfo,osinfo,cpuconfig,running,containers,containerslist
+# 1.cpuinfo stores the cpu usages data, and it has keys: user,system,idle,iowait
+# 2.diskinfo stores the disks usages data, and it has keys: device,mountpoint,total,used,free,percent
+# 3.meminfo stores the memory usages data, and it has keys: total,used,free,buffers,cached,percent
+# 4.osinfo stores the information of operating system,
+# and it has keys: platform,system,node,release,version,machine,processor
+# 5.cpuconfig stores the information of processors, and it is a list, each element of list is a dict
+# which stores the information of a processor, each element has key: processor,model name,
+# core id, cpu MHz, cache size, physical id.
+# 6.running indicates the status of worker,and it has two values: True, False.
+# 7.containers store the amount of containers on the worker.
+# 8.containers store a list which consists of the names of containers on the worker.
+monitor_hosts = {}
+
+# monitor_vnodes: use the owners' names of vnodes(containers) as first key.
+# use the names of vnodes(containers) as second key.
+# third key: cpu_use,mem_use,disk_use,basic_info,quota
+# 1.cpu_use has keys: val,unit,hostpercent
+# 2.mem_use has keys: val,unit,usedp
+# 3.disk_use has keys: device,mountpoint,total,used,free,percent
+# 4.basic_info has keys: Name,State,PID,IP,RunningTime,billing,billing_this_hour
+# 5.quota has keys: cpu,memeory
+monitor_vnodes = {}
+
+# get owner name of a container
+def get_owner(container_name):
+    names = container_name.split('-')
+    return names[0]
+
+# the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes
+class Master_Collector(threading.Thread):
+
+    def __init__(self,nodemgr,master_ip):
+        threading.Thread.__init__(self)
+        self.thread_stop = False
+        self.nodemgr = nodemgr
+        self.master_ip = master_ip
+        self.net_lastbillings = {}
+        self.bytes_per_beans = 1000000000
+        return
+
+    def net_billings(self, username, now_bytes_total):
+        global monitor_vnodes
+        if not username in self.net_lastbillings.keys():
+            self.net_lastbillings[username] = 0
+        elif int(now_bytes_total/self.bytes_per_beans) < self.net_lastbillings[username]:
+            self.net_lastbillings[username] = 0
+        diff = int(now_bytes_total/self.bytes_per_beans) - self.net_lastbillings[username]
+        if diff > 0:
+            auth_key = env.getenv('AUTH_KEY')
+            data = {"owner_name":username,"billing":diff, "auth_key":auth_key}
+            header = {'Content-Type':'application/x-www-form-urlencoded'}
+            http = Http()
+            [resp,content] = http.request("http://"+self.master_ip+"/billing/beans/","POST",urlencode(data),headers = header)
+            logger.info("response from master:"+content.decode('utf-8'))
+        self.net_lastbillings[username] += diff
+        monitor_vnodes[username]['net_stats']['net_billings'] = self.net_lastbillings[username]
+
+    def run(self):
+        global monitor_hosts
+        global monitor_vnodes
+        while not self.thread_stop:
+            for worker in monitor_hosts.keys():
+                monitor_hosts[worker]['running'] = False
+            workers = self.nodemgr.get_nodeips()
+            for worker in workers:
+                try:
+                    ip = worker
+                    workerrpc = self.nodemgr.ip_to_rpc(worker)
+                    # fetch data
+                    info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
+                    #logger.info(info[0])
+                    # store data in monitor_hosts and monitor_vnodes
+                    monitor_hosts[ip] = info[0]
+                    for container in info[1].keys():
+                        owner = get_owner(container)
+                        if not owner in monitor_vnodes.keys():
+                            monitor_vnodes[owner] = {}
+                        monitor_vnodes[owner][container] = info[1][container]
+                    for user in info[2].keys():
+                        if not user in monitor_vnodes.keys():
+                            continue
+                        else:
+                            monitor_vnodes[user]['net_stats'] = info[2][user]
+                            self.net_billings(user, info[2][user]['bytes_total'])
+                except Exception as err:
+                    logger.warning(traceback.format_exc())
+                    logger.warning(err)
+            time.sleep(2)
+            #logger.info(History.query.all())
+            #logger.info(VNode.query.all())
+        return
+
+    def stop(self):
+        self.thread_stop = True
+        return
+
+# master use this class to fetch specific data of containers(vnodes)
+class Container_Fetcher:
+    def __init__(self,container_name):
+        self.owner = get_owner(container_name)
+        self.con_id = container_name
+        return
+
+    def get_cpu_use(self):
+        global monitor_vnodes
+        try:
+            res = monitor_vnodes[self.owner][self.con_id]['cpu_use']
+            res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_mem_use(self):
+        global monitor_vnodes
+        try:
+            res = monitor_vnodes[self.owner][self.con_id]['mem_use']
+            res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_disk_use(self):
+        global monitor_vnodes
+        try:
+            res = monitor_vnodes[self.owner][self.con_id]['disk_use']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_net_stats(self):
+        global monitor_vnodes
+        try:
+            res = monitor_vnodes[self.owner][self.con_id]['net_stats']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_basic_info(self):
+        global monitor_vnodes
+        try:
+            res = monitor_vnodes[self.owner][self.con_id]['basic_info']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+# Master use this class to fetch specific data of physical machines(hosts)
+class Fetcher:
+
+    def __init__(self,host):
+        global monitor_hosts
+        self.info = monitor_hosts[host]
+        return
+
+    #def get_clcnt(self):
+    # return DockletMonitor.clcnt
+
+    #def get_nodecnt(self):
+    # return DockletMonitor.nodecnt
+
+    #def get_meminfo(self):
+    # return self.get_meminfo_('172.31.0.1')
+
+    def get_meminfo(self):
+        try:
+            res = self.info['meminfo']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_cpuinfo(self):
+        try:
+            res = self.info['cpuinfo']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_cpuconfig(self):
+        try:
+            res = self.info['cpuconfig']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_diskinfo(self):
+        try:
+            res = self.info['diskinfo']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_osinfo(self):
+        try:
+            res = self.info['osinfo']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_concpuinfo(self):
+        try:
+            res = self.info['concpupercent']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_containers(self):
+        try:
+            res = self.info['containers']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_status(self):
+        try:
+            isexist = self.info['running']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            isexist = False
+        if(isexist):
+            return 'RUNNING'
+        else:
+            return 'STOPPED'
+
+    def get_containerslist(self):
+        try:
+            res = self.info['containerslist']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res

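Note: per the comments at the top of the new file above, the two module-level dicts have a fixed shape, and Container_Fetcher/Fetcher simply index into them inside try/except so a missing worker or vnode degrades to an empty result. An illustrative snapshot (values invented; key names taken from the comments in the diff) would look like:

    # Illustrative only -- values made up, keys from the monitor.py comments above.
    monitor_hosts_example = {
        "192.168.1.11": {
            "cpuinfo": {"user": 3.2, "system": 1.1, "idle": 95.0, "iowait": 0.7},
            "meminfo": {"total": 16384, "used": 4096, "free": 12288,
                        "buffers": 512, "cached": 2048, "percent": 25.0},
            "running": True,
            "containers": 2,
            "containerslist": ["alice-1-0", "bob-2-0"],
        },
    }
    monitor_vnodes_example = {
        "alice": {                       # first key: owner, i.e. get_owner("alice-1-0")
            "alice-1-0": {
                "cpu_use": {"val": 12.5, "unit": "%", "hostpercent": 1.5},
                "mem_use": {"val": 256, "unit": "MB", "usedp": 12.5},
                "basic_info": {"Name": "alice-1-0", "State": "RUNNING", "PID": 4242},
                "quota": {"cpu": 2, "memory": 2048},
            },
        },
    }
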
@@ -1,9 +1,9 @@
 #!/usr/bin/python3

 import json, sys, netifaces, threading
-from nettools import netcontrol,ovscontrol
+from utils.nettools import netcontrol,ovscontrol

-from log import logger
+from utils.log import logger

 # getip : get ip from network interface
 # ifname : name of network interface

@@ -2,9 +2,9 @@

 import threading, random, time, xmlrpc.client, sys
 #import network
-from nettools import netcontrol,ovscontrol
-from log import logger
-import env
+from utils.nettools import netcontrol,ovscontrol
+from utils.log import logger
+from utils import env

 ##########################################
 # NodeMgr

@@ -1,15 +1,15 @@
 import json

-from log import logger
-from model import db, Notification, NotificationGroups, User, UserNotificationPair
-from userManager import administration_required, token_required
+from utils.log import logger
+from utils.model import db, Notification, NotificationGroups, User, UserNotificationPair
+from master.userManager import administration_required, token_required
 import smtplib
 from email.mime.text import MIMEText
 from email.mime.multipart import MIMEMultipart
 from email.header import Header
 from datetime import datetime
-import env
-from settings import settings
+from utils import env
+from master.settings import settings

 class NotificationMgr:
     def __init__(self):

@@ -1,9 +1,9 @@
 #!/usr/bin/python3

-import env
+from utils import env
 import json, os
 from functools import wraps
-from log import logger
+from utils.log import logger


 class settingsClass:

@@ -7,22 +7,22 @@ Warning: in some early versions, "token" stand for the instance of class model.U
 Original author: Liu Peidong
 '''

-from model import db, User, UserGroup, Notification, UserUsage
+from utils.model import db, User, UserGroup, Notification, UserUsage
 from functools import wraps
 import os, subprocess, math
 import hashlib
 import pam
 from base64 import b64encode
-import env
-from settings import settings
+from utils import env
+from master.settings import settings
 import smtplib
 from email.mime.text import MIMEText
 from email.mime.multipart import MIMEMultipart
 from email.header import Header
 from datetime import datetime
 import json
-from log import logger
-from lvmtool import *
+from utils.log import logger
+from utils.lvmtool import *

 PAM = pam.pam()
 fspath = env.getenv('FS_PREFIX')

@@ -162,7 +162,7 @@ class userManager:
            sys_admin.auth_method = 'local'
            db.session.add(sys_admin)
            path = env.getenv('DOCKLET_LIB')
-           subprocess.call([path+"/userinit.sh", username])
+           subprocess.call([path+"/master/userinit.sh", username])
            db.session.commit()
        if not os.path.exists(fspath+"/global/sys/quota"):
            groupfile = open(fspath+"/global/sys/quota",'w')

@@ -870,7 +870,7 @@ class userManager:
        # now initialize for all kind of users
        #if newuser.status == 'normal':
        path = env.getenv('DOCKLET_LIB')
-       subprocess.call([path+"/userinit.sh", newuser.username])
+       subprocess.call([path+"/master/userinit.sh", newuser.username])
        res = self.groupQuery(name=newuser.user_group)
        if res['success']:
            self.set_nfs_quota(newuser.username,res['data']['data'])

@@ -1,15 +1,13 @@
 #!/usr/bin/python3

-import os, random, json, sys, imagemgr
+import os, random, json, sys
 import datetime, math

-from log import logger
-import env
-import proxytool
-import requests, threading
-import traceback
-from nettools import portcontrol
-from model import db, Container, PortMapping, VCluster
+from utils.log import logger
+from utils import env, imagemgr, proxytool
+import requests, threading, traceback
+from utils.nettools import portcontrol
+from utils.model import db, Container, PortMapping, VCluster

 userpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT'))
 def post_to_user(url = '/', data={}):

@@ -20,12 +20,11 @@ from configparser import ConfigParser
 from io import StringIO
 import os,sys,subprocess,time,re,datetime,threading,random
 import xmlrpc.client
-from model import db, Image
+from utils.model import db, Image

-from log import logger
-import env
-from lvmtool import *
-import updatebase
+from utils.log import logger
+from utils import env, updatebase
+from utils.lvmtool import *
 import requests

 master_port = str(env.getenv('MASTER_PORT'))

@@ -6,7 +6,7 @@ import argparse
 import sys
 import time # this is only being used as part of the example
 import os
-import env
+from utils import env

 # logger should only be imported after initlogging has been called
 logger = None

@@ -32,7 +32,6 @@ def initlogging(name='docklet'):
        LOG_LEVEL = logging.CRITIAL
    else:
        LOG_LEVEL = logging.DEBUG

    logger = logging.getLogger(name)
    # Configure logging to log to a file, making a new file at midnight and keeping the last 3 day's data
    # Give the logger a unique name (good practice)

@@ -47,7 +46,6 @@ def initlogging(name='docklet'):
    handler.setFormatter(formatter)
    # Attach the handler to the logger
    logger.addHandler(handler)

    # Replace stdout with logging to file at INFO level
    sys.stdout = RedirectLogger(logger, logging.INFO)
    # Replace stderr with logging to file at ERROR level

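Note: for context on the two hunks above, initlogging() swaps sys.stdout and sys.stderr for RedirectLogger instances so stray prints land in the rotating log file. A generic stand-in for that pattern (not docklet's actual RedirectLogger class, just the usual shape of such a redirector) is:

    import logging

    class StreamToLogger:
        """Minimal file-like object that forwards writes to a logger."""
        def __init__(self, logger, level):
            self.logger = logger
            self.level = level

        def write(self, message):
            message = message.rstrip()
            if message:                  # skip the bare newlines print() emits
                self.logger.log(self.level, message)

        def flush(self):                 # callers expecting a real file need this
            pass
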
@@ -1,8 +1,8 @@
 #!/usr/bin/python3

-import env
+from utils import env
 import json, os
-from log import logger
+from utils.log import logger
 from werkzeug.utils import secure_filename

 logsPath = env.getenv('FS_PREFIX') + '/local/log/'

@@ -1,7 +1,8 @@
 #!/usr/bin/python3

-import env,subprocess,os,time
-from log import logger
+import subprocess,os,time
+from utils.log import logger
+from utils import env

 def sys_run(command,check=False):
     Ret = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True, check=check)

@@ -161,5 +162,3 @@ def delete_volume(group_name, volume_name):
        return False
    else:
        logger.info("lv %s in vg %s does not exists" % (volume_name,group_name))
-
-

@@ -34,7 +34,7 @@ import os, json
 from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
 from itsdangerous import SignatureExpired, BadSignature

-import env
+from utils import env

 fsdir = env.getenv('FS_PREFIX')

@@ -1,7 +1,8 @@
 #!/usr/bin/python3

-import subprocess, env, threading
-from log import logger
+import subprocess, threading
+from utils.log import logger
+from utils import env

 class ipcontrol(object):
     @staticmethod

@@ -1,7 +1,7 @@
 #!/usr/bin/python3

 import requests, json
-import env
+from utils import env

 proxy_api_port = env.getenv("PROXY_API_PORT")
 proxy_control="http://localhost:"+ str(proxy_api_port) +"/api/routes"

@@ -1,7 +1,7 @@
 #!/usr/bin/python3

 import os, shutil
-from log import logger
+from utils.log import logger

 def aufs_remove(basefs):
     try:

@@ -1,12 +1,10 @@
 #!/usr/bin/python3

 import subprocess, os, json
-import imagemgr
-import network
-from log import logger
-import env
-from lvmtool import sys_run, check_volume
-from monitor import Container_Collector, History_Manager
+from utils.log import logger
+from utils import env, imagemgr
+from utils.lvmtool import sys_run, check_volume
+from worker.monitor import Container_Collector, History_Manager
 import lxc

 class Container(object):

@@ -46,7 +44,7 @@ class Container(object):

        if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)):
            path = env.getenv('DOCKLET_LIB')
-           subprocess.call([path+"/userinit.sh", username])
+           subprocess.call([path+"/master/userinit.sh", username])
            logger.info("user %s directory not found, create it" % username)
        sys_run("mkdir -p /var/lib/lxc/%s" % lxc_name)
        logger.info("generate config file for %s" % lxc_name)

@@ -17,18 +17,17 @@ Design:Monitor mainly consists of three parts: Collectors, Master_Collector and
 '''


-import subprocess,re,os,etcdlib,psutil,math,sys
+import subprocess,re,os,psutil,math,sys
 import time,threading,json,traceback,platform
-import env
+from utils import env
 import lxc
 import xmlrpc.client
 from datetime import datetime

-from model import db,VNode,History,BillingHistory,VCluster,PortMapping
-from log import logger
+from utils.model import db,VNode,History,BillingHistory,VCluster,PortMapping
+from utils.log import logger
 from httplib2 import Http
 from urllib.parse import urlencode
-from httprest import post_to_user

 # billing parameters
 a_cpu = 500 # seconds

@@ -36,33 +35,6 @@ b_mem = 2000000 # MB
 c_disk = 4000 # MB
 d_port = 1

-# major dict to store the monitoring data
-# only use on Master
-# monitor_hosts: use workers' ip addresses as first key.
-# second key: cpuinfo,diskinfo,meminfo,osinfo,cpuconfig,running,containers,containerslist
-# 1.cpuinfo stores the cpu usages data, and it has keys: user,system,idle,iowait
-# 2.diskinfo stores the disks usages data, and it has keys: device,mountpoint,total,used,free,percent
-# 3.meminfo stores the memory usages data, and it has keys: total,used,free,buffers,cached,percent
-# 4.osinfo stores the information of operating system,
-# and it has keys: platform,system,node,release,version,machine,processor
-# 5.cpuconfig stores the information of processors, and it is a list, each element of list is a dict
-# which stores the information of a processor, each element has key: processor,model name,
-# core id, cpu MHz, cache size, physical id.
-# 6.running indicates the status of worker,and it has two values: True, False.
-# 7.containers store the amount of containers on the worker.
-# 8.containers store a list which consists of the names of containers on the worker.
-monitor_hosts = {}
-
-# monitor_vnodes: use the owners' names of vnodes(containers) as first key.
-# use the names of vnodes(containers) as second key.
-# third key: cpu_use,mem_use,disk_use,basic_info,quota
-# 1.cpu_use has keys: val,unit,hostpercent
-# 2.mem_use has keys: val,unit,usedp
-# 3.disk_use has keys: device,mountpoint,total,used,free,percent
-# 4.basic_info has keys: Name,State,PID,IP,RunningTime,billing,billing_this_hour
-# 5.quota has keys: cpu,memeory
-monitor_vnodes = {}
-
 # major dict to store the monitoring data on Worker
 # only use on Worker
 # workerinfo: only store the data collected on current Worker,

@@ -628,234 +600,6 @@ def get_billing_history(vnode_name):
        default['port'] = 0
        return default

-# the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes
-class Master_Collector(threading.Thread):
-
-    def __init__(self,nodemgr,master_ip):
-        threading.Thread.__init__(self)
-        self.thread_stop = False
-        self.nodemgr = nodemgr
-        self.master_ip = master_ip
-        self.net_lastbillings = {}
-        self.bytes_per_beans = 1000000000
-        return
-
-    def net_billings(self, username, now_bytes_total):
-        global monitor_vnodes
-        if not username in self.net_lastbillings.keys():
-            self.net_lastbillings[username] = 0
-        elif int(now_bytes_total/self.bytes_per_beans) < self.net_lastbillings[username]:
-            self.net_lastbillings[username] = 0
-        diff = int(now_bytes_total/self.bytes_per_beans) - self.net_lastbillings[username]
-        if diff > 0:
-            auth_key = env.getenv('AUTH_KEY')
-            data = {"owner_name":username,"billing":diff, "auth_key":auth_key}
-            header = {'Content-Type':'application/x-www-form-urlencoded'}
-            http = Http()
-            [resp,content] = http.request("http://"+self.master_ip+"/billing/beans/","POST",urlencode(data),headers = header)
-            logger.info("response from master:"+content.decode('utf-8'))
-        self.net_lastbillings[username] += diff
-        monitor_vnodes[username]['net_stats']['net_billings'] = self.net_lastbillings[username]
-
-    def run(self):
-        global monitor_hosts
-        global monitor_vnodes
-        while not self.thread_stop:
-            for worker in monitor_hosts.keys():
-                monitor_hosts[worker]['running'] = False
-            workers = self.nodemgr.get_nodeips()
-            for worker in workers:
-                try:
-                    ip = worker
-                    workerrpc = self.nodemgr.ip_to_rpc(worker)
-                    # fetch data
-                    info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
-                    #logger.info(info[0])
-                    # store data in monitor_hosts and monitor_vnodes
-                    monitor_hosts[ip] = info[0]
-                    for container in info[1].keys():
-                        owner = get_owner(container)
-                        if not owner in monitor_vnodes.keys():
-                            monitor_vnodes[owner] = {}
-                        monitor_vnodes[owner][container] = info[1][container]
-                    for user in info[2].keys():
-                        if not user in monitor_vnodes.keys():
-                            continue
-                        else:
-                            monitor_vnodes[user]['net_stats'] = info[2][user]
-                            self.net_billings(user, info[2][user]['bytes_total'])
-                except Exception as err:
-                    logger.warning(traceback.format_exc())
-                    logger.warning(err)
-            time.sleep(2)
-            #logger.info(History.query.all())
-            #logger.info(VNode.query.all())
-        return
-
-    def stop(self):
-        self.thread_stop = True
-        return
-
-# master use this class to fetch specific data of containers(vnodes)
-class Container_Fetcher:
-    def __init__(self,container_name):
-        self.owner = get_owner(container_name)
-        self.con_id = container_name
-        return
-
-    def get_cpu_use(self):
-        global monitor_vnodes
-        try:
-            res = monitor_vnodes[self.owner][self.con_id]['cpu_use']
-            res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_mem_use(self):
-        global monitor_vnodes
-        try:
-            res = monitor_vnodes[self.owner][self.con_id]['mem_use']
-            res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_disk_use(self):
-        global monitor_vnodes
-        try:
-            res = monitor_vnodes[self.owner][self.con_id]['disk_use']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_net_stats(self):
-        global monitor_vnodes
-        try:
-            res = monitor_vnodes[self.owner][self.con_id]['net_stats']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_basic_info(self):
-        global monitor_vnodes
-        try:
-            res = monitor_vnodes[self.owner][self.con_id]['basic_info']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-# Master use this class to fetch specific data of physical machines(hosts)
-class Fetcher:
-
-    def __init__(self,host):
-        global monitor_hosts
-        self.info = monitor_hosts[host]
-        return
-
-    #def get_clcnt(self):
-    # return DockletMonitor.clcnt
-
-    #def get_nodecnt(self):
-    # return DockletMonitor.nodecnt
-
-    #def get_meminfo(self):
-    # return self.get_meminfo_('172.31.0.1')
-
-    def get_meminfo(self):
-        try:
-            res = self.info['meminfo']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_cpuinfo(self):
-        try:
-            res = self.info['cpuinfo']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_cpuconfig(self):
-        try:
-            res = self.info['cpuconfig']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_diskinfo(self):
-        try:
-            res = self.info['diskinfo']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_osinfo(self):
-        try:
-            res = self.info['osinfo']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_concpuinfo(self):
-        try:
-            res = self.info['concpupercent']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_containers(self):
-        try:
-            res = self.info['containers']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
-    def get_status(self):
-        try:
-            isexist = self.info['running']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            isexist = False
-        if(isexist):
-            return 'RUNNING'
-        else:
-            return 'STOPPED'
-
-    def get_containerslist(self):
-        try:
-            res = self.info['containerslist']
-        except Exception as err:
-            logger.warning(traceback.format_exc())
-            logger.warning(err)
-            res = {}
-        return res
-
 # To record data when the status of containers change
 class History_Manager:

@@ -1,22 +1,27 @@
 #!/usr/bin/python3

 # first init env
-import env, tools
+import sys
+if sys.path[0].endswith("worker"):
+    sys.path[0] = sys.path[0][:-6]
+from utils import env, tools
 config = env.getenv("CONFIG")
+#config = "/opt/docklet/local/docklet-running.conf"
 tools.loadenv(config)

 # must import logger after initlogging, ugly
-from log import initlogging
+from utils.log import initlogging
 initlogging("docklet-worker")
-from log import logger
+from utils.log import logger

 import xmlrpc.server, sys, time
 from socketserver import ThreadingMixIn
 import threading
-import etcdlib, network, container
-from nettools import netcontrol,ovscontrol,portcontrol
-import monitor, proxytool
-from lvmtool import new_group, recover_group
+from utils import etcdlib, proxytool
+from worker import container, monitor
+from utils.nettools import netcontrol,ovscontrol,portcontrol
+from utils.lvmtool import new_group, recover_group
+from master import network

 ##################################################################
 # Worker

@@ -174,7 +179,7 @@ class Worker(object):
            netcontrol.new_bridge('docklet-br')
        else:
            if not netcontrol.bridge_exists('docklet-br'):
                logger.error("docklet-br not found")
                sys.exit(1)
        logger.info ("setup GRE tunnel to master %s" % self.master)
        #network.netsetup("gre", self.master)

user/user.py

@@ -13,12 +13,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"..", "sr
 if src_folder not in sys.path:
     sys.path.insert(0, src_folder)

-# must first init loadenv
-from log import initlogging
-initlogging("docklet-user")
-from log import logger
-
-import tools, env
+from utils import tools, env
 config = env.getenv("CONFIG")
 tools.loadenv(config)
 masterips = env.getenv("MASTER_IPS").split(",")

@@ -26,14 +21,19 @@ G_masterips = []
 for masterip in masterips:
     G_masterips.append(masterip.split("@")[0] + ":" + str(env.getenv("MASTER_PORT")))

+# must first init loadenv
+from utils.log import initlogging
+initlogging("docklet-user")
+from utils.log import logger
+
 from flask import Flask, request, session, render_template, redirect, send_from_directory, make_response, url_for, abort
 from functools import wraps
-import userManager,beansapplicationmgr, notificationmgr, lockmgr
+from master import userManager,beansapplicationmgr, notificationmgr, lockmgr
 import threading,traceback
-from model import User,db
+from utils.model import User,db
 from httplib2 import Http
 from urllib.parse import urlencode
-from settings import settings
+from master.settings import settings

 external_login = env.getenv('EXTERNAL_LOGIN')
 if(external_login == 'TRUE'):

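Note: the two user/user.py hunks above keep the required startup ordering while switching to package imports: tools.loadenv(config) still runs first, then initlogging("docklet-user"), and only then is the module-level logger imported (the in-tree comment "must import logger after initlogging, ugly" is the reason). A condensed sketch of that startup order, assuming the utils package introduced by this commit:

    from utils import tools, env

    config = env.getenv("CONFIG")
    tools.loadenv(config)                  # 1. load configuration into the environment

    from utils.log import initlogging
    initlogging("docklet-user")            # 2. configure handlers for this daemon name

    from utils.log import logger           # 3. only now is the shared logger usable
    logger.info("docklet-user logging ready")
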
@@ -12,7 +12,7 @@ if src_folder not in sys.path:
     sys.path.insert(0, src_folder)

 # must first init loadenv
-import tools, env
+from utils import tools, env
 config = env.getenv("CONFIG")
 tools.loadenv(config)

@@ -13,7 +13,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../..
 if src_folder not in sys.path:
     sys.path.insert(0, src_folder)

-import env
+from utils import env

 if (env.getenv('EXTERNAL_LOGIN') == 'True'):
     sys.path.insert(0, os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../../src", "plugin"))))

@@ -9,7 +9,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../..",
 if src_folder not in sys.path:
     sys.path.insert(0, src_folder)

-import env
+from utils import env

 masterips=env.getenv('MASTER_IPS').split(",")
 user_endpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT'))

@@ -12,7 +12,7 @@ this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(ins
 src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../..", "src")))
 if src_folder not in sys.path:
     sys.path.insert(0, src_folder)
-import env
+from utils import env

 # logger should only be imported after initlogging has been called
 logger = None