From c486930d8fa4c859a0ba8d0600beed5c2281ba5d Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Sun, 27 May 2018 15:52:45 +0800
Subject: [PATCH 1/8] Tidy worker codes

---
 src/{ => com}/env.py          |  0
 src/{ => com}/etcdlib.py      |  0
 src/{ => com}/imagemgr.py     |  9 ++++-----
 src/{ => com}/log.py          |  4 ++--
 src/{ => com}/nettools.py     |  5 +++--
 src/{ => com}/proxytool.py    |  4 ++--
 src/{ => com}/tools.py        |  0
 src/{ => com}/updatebase.py   |  2 +-
 src/{ => master}/network.py   |  0
 src/{ => worker}/container.py | 10 ++++------
 src/{ => worker}/lvmtool.py   | 21 ++++++++++-----------
 src/{ => worker}/worker.py    | 20 +++++++++++---------
 12 files changed, 37 insertions(+), 38 deletions(-)
 rename src/{ => com}/env.py (100%)
 rename src/{ => com}/etcdlib.py (100%)
 rename src/{ => com}/imagemgr.py (99%)
 rename src/{ => com}/log.py (99%)
 rename src/{ => com}/nettools.py (99%)
 rename src/{ => com}/proxytool.py (97%)
 rename src/{ => com}/tools.py (100%)
 rename src/{ => com}/updatebase.py (99%)
 rename src/{ => master}/network.py (100%)
 rename src/{ => worker}/container.py (98%)
 rename src/{ => worker}/lvmtool.py (96%)
 rename src/{ => worker}/worker.py (96%)

diff --git a/src/env.py b/src/com/env.py
similarity index 100%
rename from src/env.py
rename to src/com/env.py
diff --git a/src/etcdlib.py b/src/com/etcdlib.py
similarity index 100%
rename from src/etcdlib.py
rename to src/com/etcdlib.py
diff --git a/src/imagemgr.py b/src/com/imagemgr.py
similarity index 99%
rename from src/imagemgr.py
rename to src/com/imagemgr.py
index 0e97f6d..86f66fb 100755
--- a/src/imagemgr.py
+++ b/src/com/imagemgr.py
@@ -20,12 +20,11 @@ from configparser import ConfigParser
 from io import StringIO
 import os,sys,subprocess,time,re,datetime,threading,random
 import xmlrpc.client
-from model import db, Image
+from com.model import db, Image
 
-from log import logger
-import env
-from lvmtool import *
-import updatebase
+from com.log import logger
+from com import env, updatebase
+from worker.lvmtool import *
 import requests
 
 master_port = str(env.getenv('MASTER_PORT'))
diff --git a/src/log.py b/src/com/log.py
similarity index 99%
rename from src/log.py
rename to src/com/log.py
index ccf2bb7..ddb2ee1 100755
--- a/src/log.py
+++ b/src/com/log.py
@@ -6,7 +6,7 @@ import argparse
 import sys
 import time # this is only being used as part of the example
 import os
-import env
+from com import env
 
 # logger should only be imported after initlogging has been called
 logger = None
@@ -17,7 +17,7 @@ def initlogging(name='docklet'):
     homepath = env.getenv('FS_PREFIX')
     LOG_FILENAME = homepath + '/local/log/' + name + '.log'
-    
+
     LOG_LIFE = env.getenv('LOG_LIFE')
     LOG_LEVEL = env.getenv('LOG_LEVEL')
     if LOG_LEVEL == "DEBUG":
diff --git a/src/nettools.py b/src/com/nettools.py
similarity index 99%
rename from src/nettools.py
rename to src/com/nettools.py
index ee5f8ba..98621b3 100755
--- a/src/nettools.py
+++ b/src/com/nettools.py
@@ -1,7 +1,8 @@
 #!/usr/bin/python3
 
-import subprocess, env, threading
-from log import logger
+import subprocess, threading
+from com.log import logger
+from com import env
 
 class ipcontrol(object):
     @staticmethod
diff --git a/src/proxytool.py b/src/com/proxytool.py
similarity index 97%
rename from src/proxytool.py
rename to src/com/proxytool.py
index b1d4f0e..69b071c 100755
--- a/src/proxytool.py
+++ b/src/com/proxytool.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python3
 
 import requests, json
-import env
+from com import env
 
 proxy_api_port = env.getenv("PROXY_API_PORT")
 proxy_control="http://localhost:"+ str(proxy_api_port) +"/api/routes"
@@ -22,7 +22,7 @@ def set_route(path, target):
     except:
         return [False, 'Connect Failed']
     return [True, 'set ok']
-    
+
 def delete_route(path):
     path='/'+path.strip('/')
     try:
diff --git a/src/tools.py b/src/com/tools.py
similarity index 100%
rename from src/tools.py
rename to src/com/tools.py
diff --git a/src/updatebase.py b/src/com/updatebase.py
similarity index 99%
rename from src/updatebase.py
rename to src/com/updatebase.py
index a811c8b..7c54e4e 100755
--- a/src/updatebase.py
+++ b/src/com/updatebase.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python3
 
 import os, shutil
-from log import logger
+from com.log import logger
 
 def aufs_remove(basefs):
     try:
diff --git a/src/network.py b/src/master/network.py
similarity index 100%
rename from src/network.py
rename to src/master/network.py
diff --git a/src/container.py b/src/worker/container.py
similarity index 98%
rename from src/container.py
rename to src/worker/container.py
index 9f78861..203e150 100755
--- a/src/container.py
+++ b/src/worker/container.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python3
 
 import subprocess, os, json
-import imagemgr
-import network
-from log import logger
-import env
-from lvmtool import sys_run, check_volume
-from monitor import Container_Collector, History_Manager
+from com.log import logger
+from com import env, imagemgr
+from worker.lvmtool import sys_run, check_volume
+from worker.monitor import Container_Collector, History_Manager
 import lxc
 
 class Container(object):
diff --git a/src/lvmtool.py b/src/worker/lvmtool.py
similarity index 96%
rename from src/lvmtool.py
rename to src/worker/lvmtool.py
index c030a2d..7a83933 100755
--- a/src/lvmtool.py
+++ b/src/worker/lvmtool.py
@@ -1,7 +1,8 @@
 #!/usr/bin/python3
 
-import env,subprocess,os,time
-from log import logger
+import subprocess,os,time
+from com.log import logger
+from com import env
 
 def sys_run(command,check=False):
     Ret = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True, check=check)
@@ -45,12 +46,12 @@ def new_group(group_name, size = "5000", file_path = "/opt/docklet/local/docklet
             logger.error(e)
         logger.info("initialize lvm group:%s with size %sM success" % (group_name,size))
         return True
-    
+
     elif storage == "disk":
         disk = env.getenv("DISK")
         if disk is None:
             logger.error("use disk for story without a physical disk")
-            return False
+            return False
         #check vg
         Ret = sys_run("vgdisplay " + group_name)
         if Ret.returncode == 0:
@@ -63,8 +64,8 @@ def new_group(group_name, size = "5000", file_path = "/opt/docklet/local/docklet
             except Exception as e:
                 logger.error(e)
         logger.info("initialize lvm group:%s with size %sM success" % (group_name,size))
-        return True
-    
+        return True
+
     else:
         logger.info("unknown storage type:" + storage)
         return False
@@ -85,7 +86,7 @@ def recover_group(group_name,file_path="/opt/docklet/local/docklet-storage"):
             time.sleep(1)
         #recover vg
         Ret = sys_run("vgdisplay " + group_name)
-        if Ret.returncode != 0: 
+        if Ret.returncode != 0:
             Ret = sys_run("vgcreate %s /dev/loop0" % group_name)
             if Ret.returncode != 0:
                 logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
@@ -96,10 +97,10 @@ def recover_group(group_name,file_path="/opt/docklet/local/docklet-storage"):
         disk = env.getenv("DISK")
         if disk is None:
             logger.error("use disk for story without a physical disk")
-            return False
+            return False
         #recover vg
         Ret = sys_run("vgdisplay " + group_name)
-        if Ret.returncode != 0: 
+        if Ret.returncode != 0:
             Ret = sys_run("vgcreate %s %s" % (group_name,disk))
             if Ret.returncode != 0:
                logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
@@ -161,5 +162,3 @@ def delete_volume(group_name, volume_name):
         return False
     else:
         logger.info("lv %s in vg %s does not exists" % (volume_name,group_name))
-
-
diff --git a/src/worker.py b/src/worker/worker.py
similarity index 96%
rename from src/worker.py
rename to src/worker/worker.py
index a629cbc..0e9828f 100755
--- a/src/worker.py
+++ b/src/worker/worker.py
@@ -1,22 +1,24 @@
 #!/usr/bin/python3
 
 # first init env
-import env, tools
+import sys
+sys.path.append("../")
+from com import env, tools
 config = env.getenv("CONFIG")
-tools.loadenv(config)
+#tools.loadenv(config)
 
 # must import logger after initlogging, ugly
-from log import initlogging
-initlogging("docklet-worker")
-from log import logger
+from com.log import initlogging
+#initlogging("docklet-worker")
+from com.log import logger
 
 import xmlrpc.server, sys, time
 from socketserver import ThreadingMixIn
 import threading
-import etcdlib, network, container
-from nettools import netcontrol,ovscontrol,portcontrol
-import monitor, proxytool
-from lvmtool import new_group, recover_group
+from com import etcdlib, proxytool
+from worker import container, monitor
+from com.nettools import netcontrol,ovscontrol,portcontrol
+from worker.lvmtool import new_group, recover_group
 
 ##################################################################
 #                       Worker
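A note on the worker.py hunk above: sys.path.append("../") is resolved against the process's current working directory, not against the location of worker.py itself, so the com package only imports cleanly when the daemon happens to be started from inside src/worker. PATCH 3 below replaces this with a guard based on sys.path[0]. A minimal, CWD-independent alternative (an illustrative sketch, not part of the patch):

##################################################################
# [sketch] anchor sys.path to this file's parent directory (src/),
# regardless of the directory the process was launched from
##################################################################
import os
import sys

SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if SRC_DIR not in sys.path:
    sys.path.insert(0, SRC_DIR)

from com import env, tools   # now resolves to src/com/ from any CWD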
From 9fb63fb4752ca56c0d9855bd3deddba2a4d79100 Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Sun, 27 May 2018 17:08:44 +0800
Subject: [PATCH 2/8] Tidy worker codes

---
 bin/docklet-worker | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/bin/docklet-worker b/bin/docklet-worker
index 5c2b887..02f1a01 100755
--- a/bin/docklet-worker
+++ b/bin/docklet-worker
@@ -37,7 +37,7 @@ export FS_PREFIX
 DAEMON_USER=root
 
 # settings for docklet worker
-DAEMON=$DOCKLET_LIB/worker.py
+DAEMON=$DOCKLET_LIB/worker/worker.py
 DAEMON_NAME=docklet-worker
 DAEMON_OPTS=
 # The process ID of the script when it runs is stored here:
@@ -87,7 +87,7 @@ pre_start () {
     if [ ! -d $FS_PREFIX/local/basefs ]; then
        log_daemon_msg "basefs does not exist, run prepare.sh first" && exit 1
     fi
-    
+
     if [ ! -d $FS_PREFIX/local/packagefs ]; then
        mkdir -p $FS_PREFIX/local/packagefs
     fi
@@ -96,6 +96,7 @@ pre_start () {
 do_start() {
     pre_start
     log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
+    #python3 $DAEMON
     start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
     log_end_msg $?
 }

From 9b8a9a7135284019e214916d3e4626104db66cd0 Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Sun, 27 May 2018 17:09:09 +0800
Subject: [PATCH 3/8] Tidy worker codes

---
 src/com/log.py              | 2 --
 src/{ => com}/model.py      | 2 +-
 src/master/network.py       | 4 ++--
 src/{ => worker}/monitor.py | 9 ++++-----
 src/worker/worker.py        | 9 ++++++---
 5 files changed, 13 insertions(+), 13 deletions(-)
 rename src/{ => com}/model.py (99%)
 rename src/{ => worker}/monitor.py (99%)

diff --git a/src/com/log.py b/src/com/log.py
index ddb2ee1..46568e1 100755
--- a/src/com/log.py
+++ b/src/com/log.py
@@ -32,7 +32,6 @@ def initlogging(name='docklet'):
         LOG_LEVEL = logging.CRITIAL
     else:
         LOG_LEVEL = logging.DEBUG
-
     logger = logging.getLogger(name)
     # Configure logging to log to a file, making a new file at midnight and keeping the last 3 day's data
     # Give the logger a unique name (good practice)
@@ -47,7 +46,6 @@ def initlogging(name='docklet'):
     handler.setFormatter(formatter)
     # Attach the handler to the logger
     logger.addHandler(handler)
-
     # Replace stdout with logging to file at INFO level
     sys.stdout = RedirectLogger(logger, logging.INFO)
     # Replace stderr with logging to file at ERROR level
diff --git a/src/model.py b/src/com/model.py
similarity index 99%
rename from src/model.py
rename to src/com/model.py
index 6dafa35..fcc1ac9 100755
--- a/src/model.py
+++ b/src/com/model.py
@@ -34,7 +34,7 @@ import os, json
 from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
 from itsdangerous import SignatureExpired, BadSignature
-import env
+from com import env
 
 fsdir = env.getenv('FS_PREFIX')
 
diff --git a/src/master/network.py b/src/master/network.py
index 5e9e7c2..ef7924d 100755
--- a/src/master/network.py
+++ b/src/master/network.py
@@ -1,9 +1,9 @@
 #!/usr/bin/python3
 
 import json, sys, netifaces, threading
-from nettools import netcontrol,ovscontrol
+from com.nettools import netcontrol,ovscontrol
 
-from log import logger
+from com.log import logger
 
 # getip : get ip from network interface
 #   ifname : name of network interface
diff --git a/src/monitor.py b/src/worker/monitor.py
similarity index 99%
rename from src/monitor.py
rename to src/worker/monitor.py
index 65aa4f5..c0793b1 100755
--- a/src/monitor.py
+++ b/src/worker/monitor.py
@@ -17,18 +17,17 @@ Design:Monitor mainly consists of three parts: Collectors, Master_Collector and
 
 '''
 
-import subprocess,re,os,etcdlib,psutil,math,sys
+import subprocess,re,os,psutil,math,sys
 import time,threading,json,traceback,platform
-import env
+from com import env, etcdlib
 import lxc
 import xmlrpc.client
 from datetime import datetime
-from model import db,VNode,History,BillingHistory,VCluster,PortMapping
-from log import logger
+from com.model import db,VNode,History,BillingHistory,VCluster,PortMapping
+from com.log import logger
 from httplib2 import Http
 from urllib.parse import urlencode
-from httprest import post_to_user
 
 # billing parameters
 a_cpu = 500 # seconds
diff --git a/src/worker/worker.py b/src/worker/worker.py
index 0e9828f..a6305b3 100755
--- a/src/worker/worker.py
+++ b/src/worker/worker.py
@@ -2,14 +2,16 @@
 
 # first init env
 import sys
-sys.path.append("../")
+if sys.path[0].endswith("worker"):
+    sys.path[0] = sys.path[0][:-6]
 from com import env, tools
 config = env.getenv("CONFIG")
-#tools.loadenv(config)
+#config = "/opt/docklet/local/docklet-running.conf"
+tools.loadenv(config)
 
 # must import logger after initlogging, ugly
 from com.log import initlogging
-#initlogging("docklet-worker")
+initlogging("docklet-worker")
 from com.log import logger
 
 import xmlrpc.server, sys, time
@@ -19,6 +21,7 @@ from com import etcdlib, proxytool
 from worker import container, monitor
 from com.nettools import netcontrol,ovscontrol,portcontrol
 from worker.lvmtool import new_group, recover_group
+from master import network
 
 ##################################################################
 #                       Worker
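The new guard in worker.py relies on sys.path[0], which Python initializes to the directory containing the started script. Since len("worker") == 6, slicing off the last six characters turns a path like .../src/worker into .../src/, so the com, worker and master packages all become importable. A tiny demonstration of the slice (the path is hypothetical, just to show the arithmetic):

##################################################################
# [sketch] what the sys.path[0] guard does, with a made-up path
##################################################################
path = "/opt/docklet/src/worker"      # hypothetical script directory
if path.endswith("worker"):
    path = path[:-6]                  # len("worker") == 6
print(path)                           # -> "/opt/docklet/src/"

The trailing separator left behind by the slice is harmless: sys.path entries may end with a slash.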
From 868be0c8c02f509aa1479d2e0a7fcf7ef821e828 Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Sun, 27 May 2018 17:17:08 +0800
Subject: [PATCH 4/8] Tidy master codes

---
 src/{ => com}/logs.py                       |   0
 src/{ => com}/manage.py                     |   0
 src/com/migrations/README                   |   1 +
 src/com/migrations/alembic.ini              |  45 ++++++++
 src/com/migrations/env.py                   |  87 +++++++++++++++
 src/com/migrations/script.py.mako           |  22 ++++
 src/com/migrations/versions/37dcfdb2604_.py | 116 ++++++++++++++++++++
 src/{ => master}/beansapplicationmgr.py     |   0
 src/{ => master}/cloudmgr.py                |   0
 src/{ => master}/deploy.py                  |   0
 src/{ => master}/httprest.py                |   0
 src/{ => master}/lockmgr.py                 |   0
 src/{ => master}/nodemgr.py                 |   0
 src/{ => master}/notificationmgr.py         |   0
 src/{ => master}/settings.py                |   0
 src/{ => master}/sysmgr.py                  |   0
 src/{ => master}/userManager.py             |   0
 src/{ => master}/userinit.sh                |   0
 src/{ => master}/vclustermgr.py             |   0
 19 files changed, 271 insertions(+)
 rename src/{ => com}/logs.py (100%)
 rename src/{ => com}/manage.py (100%)
 create mode 100644 src/com/migrations/README
 create mode 100644 src/com/migrations/alembic.ini
 create mode 100644 src/com/migrations/env.py
 create mode 100644 src/com/migrations/script.py.mako
 create mode 100644 src/com/migrations/versions/37dcfdb2604_.py
 rename src/{ => master}/beansapplicationmgr.py (100%)
 rename src/{ => master}/cloudmgr.py (100%)
 rename src/{ => master}/deploy.py (100%)
 rename src/{ => master}/httprest.py (100%)
 rename src/{ => master}/lockmgr.py (100%)
 rename src/{ => master}/nodemgr.py (100%)
 rename src/{ => master}/notificationmgr.py (100%)
 rename src/{ => master}/settings.py (100%)
 rename src/{ => master}/sysmgr.py (100%)
 rename src/{ => master}/userManager.py (100%)
 rename src/{ => master}/userinit.sh (100%)
 rename src/{ => master}/vclustermgr.py (100%)

diff --git a/src/logs.py b/src/com/logs.py
similarity index 100%
rename from src/logs.py
rename to src/com/logs.py
diff --git a/src/manage.py b/src/com/manage.py
similarity index 100%
rename from src/manage.py
rename to src/com/manage.py
diff --git a/src/com/migrations/README b/src/com/migrations/README
new file mode 100644
index 0000000..98e4f9c
--- /dev/null
+++ b/src/com/migrations/README
@@ -0,0 +1 @@
+Generic single-database configuration.
\ No newline at end of file
diff --git a/src/com/migrations/alembic.ini b/src/com/migrations/alembic.ini
new file mode 100644
index 0000000..f8ed480
--- /dev/null
+++ b/src/com/migrations/alembic.ini
@@ -0,0 +1,45 @@
+# A generic, single database configuration.
+
+[alembic]
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/src/com/migrations/env.py b/src/com/migrations/env.py
new file mode 100644
index 0000000..4593816
--- /dev/null
+++ b/src/com/migrations/env.py
@@ -0,0 +1,87 @@
+from __future__ import with_statement
+from alembic import context
+from sqlalchemy import engine_from_config, pool
+from logging.config import fileConfig
+import logging
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+fileConfig(config.config_file_name)
+logger = logging.getLogger('alembic.env')
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+from flask import current_app
+config.set_main_option('sqlalchemy.url',
+                       current_app.config.get('SQLALCHEMY_DATABASE_URI'))
+target_metadata = current_app.extensions['migrate'].db.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(url=url)
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online():
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # this callback is used to prevent an auto-migration from being generated
+    # when there are no changes to the schema
+    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
+    def process_revision_directives(context, revision, directives):
+        if getattr(config.cmd_opts, 'autogenerate', False):
+            script = directives[0]
+            if script.upgrade_ops.is_empty():
+                directives[:] = []
+                logger.info('No changes in schema detected.')
+
+    engine = engine_from_config(config.get_section(config.config_ini_section),
+                                prefix='sqlalchemy.',
+                                poolclass=pool.NullPool)
+
+    connection = engine.connect()
+    context.configure(connection=connection,
+                      target_metadata=target_metadata,
+                      process_revision_directives=process_revision_directives,
+                      **current_app.extensions['migrate'].configure_args)
+
+    try:
+        with context.begin_transaction():
+            context.run_migrations()
+    finally:
+        connection.close()
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/src/com/migrations/script.py.mako b/src/com/migrations/script.py.mako
new file mode 100644
index 0000000..9570201
--- /dev/null
+++ b/src/com/migrations/script.py.mako
@@ -0,0 +1,22 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision}
+Create Date: ${create_date}
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+def upgrade():
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade():
+    ${downgrades if downgrades else "pass"}
diff --git a/src/com/migrations/versions/37dcfdb2604_.py b/src/com/migrations/versions/37dcfdb2604_.py
new file mode 100644
index 0000000..c746d1c
--- /dev/null
+++ b/src/com/migrations/versions/37dcfdb2604_.py
@@ -0,0 +1,116 @@
+"""empty message
+
+Revision ID: 37dcfdb2604
+Revises: None
+Create Date: 2018-04-22 11:58:48.307690
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '37dcfdb2604'
+down_revision = None
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('apply_msg',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('username', sa.String(length=10), nullable=True),
+    sa.Column('number', sa.Integer(), nullable=True),
+    sa.Column('reason', sa.String(length=600), nullable=True),
+    sa.Column('status', sa.String(length=10), nullable=True),
+    sa.Column('time', sa.DateTime(timezone=10), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('image',
+    sa.Column('imagename', sa.String(length=50), nullable=True),
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('hasPrivate', sa.Boolean(), nullable=True),
+    sa.Column('hasPublic', sa.Boolean(), nullable=True),
+    sa.Column('ownername', sa.String(length=20), nullable=True),
+    sa.Column('create_time', sa.DateTime(), nullable=True),
+    sa.Column('description', sa.Text(), nullable=True),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('v_cluster',
+    sa.Column('clusterid', sa.BigInteger(), autoincrement=False, nullable=False),
+    sa.Column('clustername', sa.String(length=50), nullable=True),
+    sa.Column('ownername', sa.String(length=20), nullable=True),
+    sa.Column('status', sa.String(length=10), nullable=True),
+    sa.Column('size', sa.Integer(), nullable=True),
+    sa.Column('nextcid', sa.Integer(), nullable=True),
+    sa.Column('create_time', sa.DateTime(), nullable=True),
+    sa.Column('start_time', sa.String(length=20), nullable=True),
+    sa.Column('proxy_server_ip', sa.String(length=20), nullable=True),
+    sa.Column('proxy_public_ip', sa.String(length=20), nullable=True),
+    sa.PrimaryKeyConstraint('clusterid')
+    )
+    op.create_table('v_node',
+    sa.Column('name', sa.String(length=100), nullable=False),
+    sa.Column('laststopcpuval', sa.Float(), nullable=True),
+    sa.Column('laststopruntime', sa.Integer(), nullable=True),
+    sa.Column('billing', sa.Integer(), nullable=True),
+    sa.PrimaryKeyConstraint('name')
+    )
+    op.create_table('billing_history',
+    sa.Column('node_name', sa.String(length=100), nullable=False),
+    sa.Column('vclusterid', sa.Integer(), nullable=True),
+    sa.Column('cpu', sa.Float(), nullable=True),
+    sa.Column('mem', sa.Float(), nullable=True),
+    sa.Column('disk', sa.Float(), nullable=True),
+    sa.Column('port', sa.Float(), nullable=True),
+    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
+    sa.PrimaryKeyConstraint('node_name')
+    )
+    op.create_table('container',
+    sa.Column('containername', sa.String(length=100), nullable=False),
+    sa.Column('hostname', sa.String(length=30), nullable=True),
+    sa.Column('ip', sa.String(length=20), nullable=True),
+    sa.Column('host', sa.String(length=20), nullable=True),
+    sa.Column('image', sa.String(length=50), nullable=True),
+    sa.Column('lastsave', sa.DateTime(), nullable=True),
+    sa.Column('setting_cpu', sa.Integer(), nullable=True),
+    sa.Column('setting_mem', sa.Integer(), nullable=True),
+    sa.Column('setting_disk', sa.Integer(), nullable=True),
+    sa.Column('vclusterid', sa.Integer(), nullable=True),
+    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
+    sa.PrimaryKeyConstraint('containername')
+    )
+    op.create_table('history',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('vnode', sa.String(length=100), nullable=True),
+    sa.Column('action', sa.String(length=30), nullable=True),
+    sa.Column('runningtime', sa.Integer(), nullable=True),
+    sa.Column('cputime', sa.Float(), nullable=True),
+    sa.Column('billing', sa.Integer(), nullable=True),
+    sa.Column('actionTime', sa.DateTime(), nullable=True),
+    sa.ForeignKeyConstraint(['vnode'], ['v_node.name'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table('port_mapping',
+    sa.Column('id', sa.Integer(), nullable=False),
+    sa.Column('node_name', sa.String(length=100), nullable=True),
+    sa.Column('node_ip', sa.String(length=20), nullable=True),
+    sa.Column('node_port', sa.Integer(), nullable=True),
+    sa.Column('host_port', sa.Integer(), nullable=True),
+    sa.Column('vclusterid', sa.Integer(), nullable=True),
+    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
+    sa.PrimaryKeyConstraint('id')
+    )
+    ### end Alembic commands ###
+
+
+def downgrade():
+    ### commands auto generated by Alembic - please adjust! ###
+    op.drop_table('port_mapping')
+    op.drop_table('history')
+    op.drop_table('container')
+    op.drop_table('billing_history')
+    op.drop_table('v_node')
+    op.drop_table('v_cluster')
+    op.drop_table('image')
+    op.drop_table('apply_msg')
+    ### end Alembic commands ###
diff --git a/src/beansapplicationmgr.py b/src/master/beansapplicationmgr.py
similarity index 100%
rename from src/beansapplicationmgr.py
rename to src/master/beansapplicationmgr.py
diff --git a/src/cloudmgr.py b/src/master/cloudmgr.py
similarity index 100%
rename from src/cloudmgr.py
rename to src/master/cloudmgr.py
diff --git a/src/deploy.py b/src/master/deploy.py
similarity index 100%
rename from src/deploy.py
rename to src/master/deploy.py
diff --git a/src/httprest.py b/src/master/httprest.py
similarity index 100%
rename from src/httprest.py
rename to src/master/httprest.py
diff --git a/src/lockmgr.py b/src/master/lockmgr.py
similarity index 100%
rename from src/lockmgr.py
rename to src/master/lockmgr.py
diff --git a/src/nodemgr.py b/src/master/nodemgr.py
similarity index 100%
rename from src/nodemgr.py
rename to src/master/nodemgr.py
diff --git a/src/notificationmgr.py b/src/master/notificationmgr.py
similarity index 100%
rename from src/notificationmgr.py
rename to src/master/notificationmgr.py
diff --git a/src/settings.py b/src/master/settings.py
similarity index 100%
rename from src/settings.py
rename to src/master/settings.py
diff --git a/src/sysmgr.py b/src/master/sysmgr.py
similarity index 100%
rename from src/sysmgr.py
rename to src/master/sysmgr.py
diff --git a/src/userManager.py b/src/master/userManager.py
similarity index 100%
rename from src/userManager.py
rename to src/master/userManager.py
diff --git a/src/userinit.sh b/src/master/userinit.sh
similarity index 100%
rename from src/userinit.sh
rename to src/master/userinit.sh
diff --git a/src/vclustermgr.py b/src/master/vclustermgr.py
similarity index 100%
rename from src/vclustermgr.py
rename to src/master/vclustermgr.py
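The new migration above is the Alembic side of the SQLAlchemy models defined in model.py (moved to com/model.py earlier in this series). As a reading aid, here is a sketch of what the model behind the v_node table plausibly looks like: the column names and types are taken from the migration itself, but the class body is a reconstruction, not the repo's actual code.

##################################################################
# [sketch] Flask-SQLAlchemy model inferred from the v_node table
# in migration 37dcfdb2604; illustrative only
##################################################################
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class VNode(db.Model):
    __tablename__ = 'v_node'
    name = db.Column(db.String(100), primary_key=True)
    laststopcpuval = db.Column(db.Float)
    laststopruntime = db.Column(db.Integer)
    billing = db.Column(db.Integer)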
From 18d9ea08fd18b87691f7b02304603ef2b9229399 Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Mon, 4 Jun 2018 01:16:32 +0800
Subject: [PATCH 5/8] Rename com to utils

---
 .gitignore                                  |   2 +-
 src/com/migrations/README                   |   1 -
 src/com/migrations/alembic.ini              |  45 --------
 src/com/migrations/env.py                   |  87 ---------------
 src/com/migrations/script.py.mako           |  22 ----
 src/com/migrations/versions/37dcfdb2604_.py | 116 --------------------
 src/{com => utils}/env.py                   |   0
 src/{com => utils}/etcdlib.py               |   0
 src/{com => utils}/imagemgr.py              |   6 +-
 src/{com => utils}/log.py                   |   2 +-
 src/{com => utils}/logs.py                  |   0
 src/{com => utils}/manage.py                |   0
 src/{com => utils}/model.py                 |   2 +-
 src/{com => utils}/nettools.py              |   4 +-
 src/{com => utils}/proxytool.py             |   2 +-
 src/{com => utils}/tools.py                 |   0
 src/{com => utils}/updatebase.py            |   4 +-
 src/worker/container.py                     |   4 +-
 src/worker/lvmtool.py                       |   4 +-
 src/worker/monitor.py                       |   6 +-
 src/worker/worker.py                        |  12 +-
 21 files changed, 24 insertions(+), 295 deletions(-)
 delete mode 100644 src/com/migrations/README
 delete mode 100644 src/com/migrations/alembic.ini
 delete mode 100644 src/com/migrations/env.py
 delete mode 100644 src/com/migrations/script.py.mako
 delete mode 100644 src/com/migrations/versions/37dcfdb2604_.py
 rename src/{com => utils}/env.py (100%)
 rename src/{com => utils}/etcdlib.py (100%)
 rename src/{com => utils}/imagemgr.py (99%)
 rename src/{com => utils}/log.py (99%)
 rename src/{com => utils}/logs.py (100%)
 rename src/{com => utils}/manage.py (100%)
 rename src/{com => utils}/model.py (99%)
 rename src/{com => utils}/nettools.py (99%)
 rename src/{com => utils}/proxytool.py (97%)
 rename src/{com => utils}/tools.py (100%)
 rename src/{com => utils}/updatebase.py (98%)

diff --git a/.gitignore b/.gitignore
index feb7369..5a1df5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,4 +6,4 @@ __temp
 .DS_Store
 docklet.conf
 home.html
-src/migrations/
+src/utils/migrations/
diff --git a/src/com/migrations/README b/src/com/migrations/README
deleted file mode 100644
index 98e4f9c..0000000
--- a/src/com/migrations/README
+++ /dev/null
@@ -1 +0,0 @@
-Generic single-database configuration.
\ No newline at end of file
diff --git a/src/com/migrations/alembic.ini b/src/com/migrations/alembic.ini
deleted file mode 100644
index f8ed480..0000000
--- a/src/com/migrations/alembic.ini
+++ /dev/null
@@ -1,45 +0,0 @@
-# A generic, single database configuration.
-
-[alembic]
-# template used to generate migration files
-# file_template = %%(rev)s_%%(slug)s
-
-# set to 'true' to run the environment during
-# the 'revision' command, regardless of autogenerate
-# revision_environment = false
-
-
-# Logging configuration
-[loggers]
-keys = root,sqlalchemy,alembic
-
-[handlers]
-keys = console
-
-[formatters]
-keys = generic
-
-[logger_root]
-level = WARN
-handlers = console
-qualname =
-
-[logger_sqlalchemy]
-level = WARN
-handlers =
-qualname = sqlalchemy.engine
-
-[logger_alembic]
-level = INFO
-handlers =
-qualname = alembic
-
-[handler_console]
-class = StreamHandler
-args = (sys.stderr,)
-level = NOTSET
-formatter = generic
-
-[formatter_generic]
-format = %(levelname)-5.5s [%(name)s] %(message)s
-datefmt = %H:%M:%S
diff --git a/src/com/migrations/env.py b/src/com/migrations/env.py
deleted file mode 100644
index 4593816..0000000
--- a/src/com/migrations/env.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from __future__ import with_statement
-from alembic import context
-from sqlalchemy import engine_from_config, pool
-from logging.config import fileConfig
-import logging
-
-# this is the Alembic Config object, which provides
-# access to the values within the .ini file in use.
-config = context.config
-
-# Interpret the config file for Python logging.
-# This line sets up loggers basically.
-fileConfig(config.config_file_name)
-logger = logging.getLogger('alembic.env')
-
-# add your model's MetaData object here
-# for 'autogenerate' support
-# from myapp import mymodel
-# target_metadata = mymodel.Base.metadata
-from flask import current_app
-config.set_main_option('sqlalchemy.url',
-                       current_app.config.get('SQLALCHEMY_DATABASE_URI'))
-target_metadata = current_app.extensions['migrate'].db.metadata
-
-# other values from the config, defined by the needs of env.py,
-# can be acquired:
-# my_important_option = config.get_main_option("my_important_option")
-# ... etc.
-
-
-def run_migrations_offline():
-    """Run migrations in 'offline' mode.
-
-    This configures the context with just a URL
-    and not an Engine, though an Engine is acceptable
-    here as well.  By skipping the Engine creation
-    we don't even need a DBAPI to be available.
-
-    Calls to context.execute() here emit the given string to the
-    script output.
-
-    """
-    url = config.get_main_option("sqlalchemy.url")
-    context.configure(url=url)
-
-    with context.begin_transaction():
-        context.run_migrations()
-
-
-def run_migrations_online():
-    """Run migrations in 'online' mode.
-
-    In this scenario we need to create an Engine
-    and associate a connection with the context.
-
-    """
-
-    # this callback is used to prevent an auto-migration from being generated
-    # when there are no changes to the schema
-    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
-    def process_revision_directives(context, revision, directives):
-        if getattr(config.cmd_opts, 'autogenerate', False):
-            script = directives[0]
-            if script.upgrade_ops.is_empty():
-                directives[:] = []
-                logger.info('No changes in schema detected.')
-
-    engine = engine_from_config(config.get_section(config.config_ini_section),
-                                prefix='sqlalchemy.',
-                                poolclass=pool.NullPool)
-
-    connection = engine.connect()
-    context.configure(connection=connection,
-                      target_metadata=target_metadata,
-                      process_revision_directives=process_revision_directives,
-                      **current_app.extensions['migrate'].configure_args)
-
-    try:
-        with context.begin_transaction():
-            context.run_migrations()
-    finally:
-        connection.close()
-
-if context.is_offline_mode():
-    run_migrations_offline()
-else:
-    run_migrations_online()
diff --git a/src/com/migrations/script.py.mako b/src/com/migrations/script.py.mako
deleted file mode 100644
index 9570201..0000000
--- a/src/com/migrations/script.py.mako
+++ /dev/null
@@ -1,22 +0,0 @@
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision}
-Create Date: ${create_date}
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-def upgrade():
-    ${upgrades if upgrades else "pass"}
-
-
-def downgrade():
-    ${downgrades if downgrades else "pass"}
diff --git a/src/com/migrations/versions/37dcfdb2604_.py b/src/com/migrations/versions/37dcfdb2604_.py
deleted file mode 100644
index c746d1c..0000000
--- a/src/com/migrations/versions/37dcfdb2604_.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""empty message
-
-Revision ID: 37dcfdb2604
-Revises: None
-Create Date: 2018-04-22 11:58:48.307690
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = '37dcfdb2604'
-down_revision = None
-
-from alembic import op
-import sqlalchemy as sa
-
-
-def upgrade():
-    ### commands auto generated by Alembic - please adjust! ###
-    op.create_table('apply_msg',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('username', sa.String(length=10), nullable=True),
-    sa.Column('number', sa.Integer(), nullable=True),
-    sa.Column('reason', sa.String(length=600), nullable=True),
-    sa.Column('status', sa.String(length=10), nullable=True),
-    sa.Column('time', sa.DateTime(timezone=10), nullable=True),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_table('image',
-    sa.Column('imagename', sa.String(length=50), nullable=True),
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('hasPrivate', sa.Boolean(), nullable=True),
-    sa.Column('hasPublic', sa.Boolean(), nullable=True),
-    sa.Column('ownername', sa.String(length=20), nullable=True),
-    sa.Column('create_time', sa.DateTime(), nullable=True),
-    sa.Column('description', sa.Text(), nullable=True),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_table('v_cluster',
-    sa.Column('clusterid', sa.BigInteger(), autoincrement=False, nullable=False),
-    sa.Column('clustername', sa.String(length=50), nullable=True),
-    sa.Column('ownername', sa.String(length=20), nullable=True),
-    sa.Column('status', sa.String(length=10), nullable=True),
-    sa.Column('size', sa.Integer(), nullable=True),
-    sa.Column('nextcid', sa.Integer(), nullable=True),
-    sa.Column('create_time', sa.DateTime(), nullable=True),
-    sa.Column('start_time', sa.String(length=20), nullable=True),
-    sa.Column('proxy_server_ip', sa.String(length=20), nullable=True),
-    sa.Column('proxy_public_ip', sa.String(length=20), nullable=True),
-    sa.PrimaryKeyConstraint('clusterid')
-    )
-    op.create_table('v_node',
-    sa.Column('name', sa.String(length=100), nullable=False),
-    sa.Column('laststopcpuval', sa.Float(), nullable=True),
-    sa.Column('laststopruntime', sa.Integer(), nullable=True),
-    sa.Column('billing', sa.Integer(), nullable=True),
-    sa.PrimaryKeyConstraint('name')
-    )
-    op.create_table('billing_history',
-    sa.Column('node_name', sa.String(length=100), nullable=False),
-    sa.Column('vclusterid', sa.Integer(), nullable=True),
-    sa.Column('cpu', sa.Float(), nullable=True),
-    sa.Column('mem', sa.Float(), nullable=True),
-    sa.Column('disk', sa.Float(), nullable=True),
-    sa.Column('port', sa.Float(), nullable=True),
-    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
-    sa.PrimaryKeyConstraint('node_name')
-    )
-    op.create_table('container',
-    sa.Column('containername', sa.String(length=100), nullable=False),
-    sa.Column('hostname', sa.String(length=30), nullable=True),
-    sa.Column('ip', sa.String(length=20), nullable=True),
-    sa.Column('host', sa.String(length=20), nullable=True),
-    sa.Column('image', sa.String(length=50), nullable=True),
-    sa.Column('lastsave', sa.DateTime(), nullable=True),
-    sa.Column('setting_cpu', sa.Integer(), nullable=True),
-    sa.Column('setting_mem', sa.Integer(), nullable=True),
-    sa.Column('setting_disk', sa.Integer(), nullable=True),
-    sa.Column('vclusterid', sa.Integer(), nullable=True),
-    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
-    sa.PrimaryKeyConstraint('containername')
-    )
-    op.create_table('history',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('vnode', sa.String(length=100), nullable=True),
-    sa.Column('action', sa.String(length=30), nullable=True),
-    sa.Column('runningtime', sa.Integer(), nullable=True),
-    sa.Column('cputime', sa.Float(), nullable=True),
-    sa.Column('billing', sa.Integer(), nullable=True),
-    sa.Column('actionTime', sa.DateTime(), nullable=True),
-    sa.ForeignKeyConstraint(['vnode'], ['v_node.name'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_table('port_mapping',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('node_name', sa.String(length=100), nullable=True),
-    sa.Column('node_ip', sa.String(length=20), nullable=True),
-    sa.Column('node_port', sa.Integer(), nullable=True),
-    sa.Column('host_port', sa.Integer(), nullable=True),
-    sa.Column('vclusterid', sa.Integer(), nullable=True),
-    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    ### end Alembic commands ###
-
-
-def downgrade():
-    ### commands auto generated by Alembic - please adjust! ###
-    op.drop_table('port_mapping')
-    op.drop_table('history')
-    op.drop_table('container')
-    op.drop_table('billing_history')
-    op.drop_table('v_node')
-    op.drop_table('v_cluster')
-    op.drop_table('image')
-    op.drop_table('apply_msg')
-    ### end Alembic commands ###
diff --git a/src/com/env.py b/src/utils/env.py
similarity index 100%
rename from src/com/env.py
rename to src/utils/env.py
diff --git a/src/com/etcdlib.py b/src/utils/etcdlib.py
similarity index 100%
rename from src/com/etcdlib.py
rename to src/utils/etcdlib.py
diff --git a/src/com/imagemgr.py b/src/utils/imagemgr.py
similarity index 99%
rename from src/com/imagemgr.py
rename to src/utils/imagemgr.py
index 86f66fb..fc6a2bb 100755
--- a/src/com/imagemgr.py
+++ b/src/utils/imagemgr.py
@@ -20,10 +20,10 @@ from configparser import ConfigParser
 from io import StringIO
 import os,sys,subprocess,time,re,datetime,threading,random
 import xmlrpc.client
-from com.model import db, Image
+from utils.model import db, Image
 
-from com.log import logger
-from com import env, updatebase
+from utils.log import logger
+from utils import env, updatebase
 from worker.lvmtool import *
 
 import requests
diff --git a/src/com/log.py b/src/utils/log.py
similarity index 99%
rename from src/com/log.py
rename to src/utils/log.py
index 46568e1..721776e 100755
--- a/src/com/log.py
+++ b/src/utils/log.py
@@ -6,7 +6,7 @@ import argparse
 import sys
 import time # this is only being used as part of the example
 import os
-from com import env
+from utils import env
 
 # logger should only be imported after initlogging has been called
 logger = None
diff --git a/src/com/logs.py b/src/utils/logs.py
similarity index 100%
rename from src/com/logs.py
rename to src/utils/logs.py
diff --git a/src/com/manage.py b/src/utils/manage.py
similarity index 100%
rename from src/com/manage.py
rename to src/utils/manage.py
diff --git a/src/com/model.py b/src/utils/model.py
similarity index 99%
rename from src/com/model.py
rename to src/utils/model.py
index fcc1ac9..250ad10 100755
--- a/src/com/model.py
+++ b/src/utils/model.py
@@ -34,7 +34,7 @@ import os, json
 from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
 from itsdangerous import SignatureExpired, BadSignature
-from com import env
+from utils import env
 
 fsdir = env.getenv('FS_PREFIX')
 
diff --git a/src/com/nettools.py b/src/utils/nettools.py
similarity index 99%
rename from src/com/nettools.py
rename to src/utils/nettools.py
index 98621b3..4f9e7f8 100755
--- a/src/com/nettools.py
+++ b/src/utils/nettools.py
@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 
 import subprocess, threading
-from com.log import logger
-from com import env
+from utils.log import logger
+from utils import env
 
 class ipcontrol(object):
     @staticmethod
diff --git a/src/com/proxytool.py b/src/utils/proxytool.py
similarity index 97%
rename from src/com/proxytool.py
rename to src/utils/proxytool.py
index 69b071c..c3c2032 100755
--- a/src/com/proxytool.py
+++ b/src/utils/proxytool.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python3
 
 import requests, json
-from com import env
+from utils import env
 
 proxy_api_port = env.getenv("PROXY_API_PORT")
 proxy_control="http://localhost:"+ str(proxy_api_port) +"/api/routes"
diff --git a/src/com/tools.py b/src/utils/tools.py
similarity index 100%
rename from src/com/tools.py
rename to src/utils/tools.py
diff --git a/src/com/updatebase.py b/src/utils/updatebase.py
similarity index 98%
rename from src/com/updatebase.py
rename to src/utils/updatebase.py
index 7c54e4e..692db59 100755
--- a/src/com/updatebase.py
+++ b/src/utils/updatebase.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python3
 
 import os, shutil
-from com.log import logger
+from utils.log import logger
 
 def aufs_remove(basefs):
     try:
@@ -13,7 +13,7 @@ def aufs_remove(basefs):
         logger.error(e)
 
 def aufs_clean(basefs):
-    # clean the aufs mark 
+    # clean the aufs mark
     allfiles = os.listdir(basefs)
     for onefile in allfiles:
         if onefile[:4] == ".wh.":
diff --git a/src/worker/container.py b/src/worker/container.py
index 203e150..7f2d977 100755
--- a/src/worker/container.py
+++ b/src/worker/container.py
@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 
 import subprocess, os, json
-from com.log import logger
-from com import env, imagemgr
+from utils.log import logger
+from utils import env, imagemgr
 from worker.lvmtool import sys_run, check_volume
 from worker.monitor import Container_Collector, History_Manager
 import lxc
diff --git a/src/worker/lvmtool.py b/src/worker/lvmtool.py
index 7a83933..ce4626c 100755
--- a/src/worker/lvmtool.py
+++ b/src/worker/lvmtool.py
@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 
 import subprocess,os,time
-from com.log import logger
-from com import env
+from utils.log import logger
+from utils import env
 
 def sys_run(command,check=False):
     Ret = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True, check=check)
diff --git a/src/worker/monitor.py b/src/worker/monitor.py
index c0793b1..0291b1a 100755
--- a/src/worker/monitor.py
+++ b/src/worker/monitor.py
@@ -19,13 +19,13 @@ Design:Monitor mainly consists of three parts: Collectors, Master_Collector and
 
 import subprocess,re,os,psutil,math,sys
 import time,threading,json,traceback,platform
-from com import env, etcdlib
+from utils import env, etcdlib
 import lxc
 import xmlrpc.client
 from datetime import datetime
-from com.model import db,VNode,History,BillingHistory,VCluster,PortMapping
-from com.log import logger
+from utils.model import db,VNode,History,BillingHistory,VCluster,PortMapping
+from utils.log import logger
 from httplib2 import Http
 from urllib.parse import urlencode
diff --git a/src/worker/worker.py b/src/worker/worker.py
index a6305b3..7121090 100755
--- a/src/worker/worker.py
+++ b/src/worker/worker.py
@@ -4,22 +4,22 @@
 import sys
 if sys.path[0].endswith("worker"):
     sys.path[0] = sys.path[0][:-6]
-from com import env, tools
+from utils import env, tools
 config = env.getenv("CONFIG")
 #config = "/opt/docklet/local/docklet-running.conf"
 tools.loadenv(config)
 
 # must import logger after initlogging, ugly
-from com.log import initlogging
+from utils.log import initlogging
 initlogging("docklet-worker")
-from com.log import logger
+from utils.log import logger
 
 import xmlrpc.server, sys, time
 from socketserver import ThreadingMixIn
 import threading
-from com import etcdlib, proxytool
+from utils import etcdlib, proxytool
 from worker import container, monitor
-from com.nettools import netcontrol,ovscontrol,portcontrol
+from utils.nettools import netcontrol,ovscontrol,portcontrol
 from worker.lvmtool import new_group, recover_group
 from master import network
@@ -179,7 +179,7 @@ class Worker(object):
                 netcontrol.new_bridge('docklet-br')
         else:
             if not netcontrol.bridge_exists('docklet-br'):
-                logger.error("docklet-br not found")
+                logger.error("docklet-br not found")
                 sys.exit(1)
             logger.info ("setup GRE tunnel to master %s" % self.master)
             #network.netsetup("gre", self.master)

From 183ccb3d21f1c4587522a8d0c2d8862292099b7e Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Mon, 4 Jun 2018 01:22:50 +0800
Subject: [PATCH 6/8] Rename com to utils

---
 src/master/network.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/master/network.py b/src/master/network.py
index ef7924d..3cca93a 100755
--- a/src/master/network.py
+++ b/src/master/network.py
@@ -1,9 +1,9 @@
 #!/usr/bin/python3
 
 import json, sys, netifaces, threading
-from com.nettools import netcontrol,ovscontrol
+from utils.nettools import netcontrol,ovscontrol
 
-from com.log import logger
+from utils.log import logger
 
 # getip : get ip from network interface
 #   ifname : name of network interface
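After PATCH 5 and PATCH 6 the tree settles into src/utils (shared helpers), src/master and src/worker. One assumption these imports rest on, which the series never shows explicitly: each of the three directories must be a package (i.e. contain an __init__.py), and src/ itself must be on sys.path, which is exactly what the sys.path[0] guard in worker.py arranges. A quick, self-contained way to check that the assumed layout resolves (paths are illustrative):

##################################################################
# [sketch] verify the assumed package layout:
#   src/utils/   __init__.py  env.py  log.py  nettools.py ...
#   src/master/  __init__.py  httprest.py  network.py ...
#   src/worker/  __init__.py  worker.py  container.py ...
##################################################################
import importlib.util
import sys

sys.path.insert(0, "/opt/docklet/src")    # assumed install location

for mod in ("utils.env", "master.network", "worker.container"):
    found = importlib.util.find_spec(mod) is not None
    print(mod, "ok" if found else "MISSING")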
From 1315ca0c3a08da9655b120c180b07b208e784c97 Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Sun, 10 Jun 2018 23:53:21 +0800
Subject: [PATCH 7/8] Tidy master code

---
 bin/docklet-master                |   2 +-
 bin/docklet-supermaster           |   4 +-
 src/master/beansapplicationmgr.py |   8 +-
 src/master/cloudmgr.py            |  26 +--
 src/master/deploy.py              |  10 +-
 src/master/httprest.py            |  25 +--
 src/master/monitor.py             | 264 ++++++++++++++++++++++++++++++
 src/master/nodemgr.py             |   8 +-
 src/master/notificationmgr.py     |  10 +-
 src/master/settings.py            |   4 +-
 src/master/userManager.py         |  14 +-
 src/master/vclustermgr.py         |  14 +-
 src/utils/imagemgr.py             |   2 +-
 src/utils/logs.py                 |   4 +-
 src/{worker => utils}/lvmtool.py  |   0
 src/worker/container.py           |   4 +-
 src/worker/monitor.py             | 255 -----------------------------
 src/worker/worker.py              |   2 +-
 18 files changed, 333 insertions(+), 323 deletions(-)
 create mode 100644 src/master/monitor.py
 rename src/{worker => utils}/lvmtool.py (100%)

diff --git a/bin/docklet-master b/bin/docklet-master
index 89c223e..807ba83 100755
--- a/bin/docklet-master
+++ b/bin/docklet-master
@@ -46,7 +46,7 @@ export FS_PREFIX
 DAEMON_USER=root
 
 # settings for docklet master
-DAEMON_MASTER=$DOCKLET_LIB/httprest.py
+DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py
 DAEMON_NAME_MASTER=docklet-master
 DAEMON_OPTS_MASTER=
 # The process ID of the script when it runs is stored here:
diff --git a/bin/docklet-supermaster b/bin/docklet-supermaster
index 629239e..ed93ef1 100755
--- a/bin/docklet-supermaster
+++ b/bin/docklet-supermaster
@@ -45,7 +45,7 @@ export FS_PREFIX
 DAEMON_USER=root
 
 # settings for docklet master
-DAEMON_MASTER=$DOCKLET_LIB/httprest.py
+DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py
 DAEMON_NAME_MASTER=docklet-master
 DAEMON_OPTS_MASTER=
 # The process ID of the script when it runs is stored here:
@@ -158,7 +158,7 @@ do_start_web () {
 }
 
 do_start_user () {
-    
+
     log_daemon_msg "Starting $DAEMON_NAME_USER in $FS_PREFIX"
 
     DAEMON_OPTS_USER="-p $USER_PORT"
diff --git a/src/master/beansapplicationmgr.py b/src/master/beansapplicationmgr.py
index b64df00..2ec5f3e 100755
--- a/src/master/beansapplicationmgr.py
+++ b/src/master/beansapplicationmgr.py
@@ -11,14 +11,14 @@ This module consists of three parts:
 '''
 
 import threading,datetime,random,time
-from model import db,User,ApplyMsg
-from userManager import administration_required
-import env
+from utils.model import db,User,ApplyMsg
+from master.userManager import administration_required
+from utils import env
 import smtplib
 from email.mime.text import MIMEText
 from email.mime.multipart import MIMEMultipart
 from email.header import Header
-from settings import settings
+from master.settings import settings
 
 # send email to remind users of their beans
 
diff --git a/src/master/cloudmgr.py b/src/master/cloudmgr.py
index 2b8957a..a62c8b0 100755
--- a/src/master/cloudmgr.py
+++ b/src/master/cloudmgr.py
@@ -1,12 +1,12 @@
 #!/usr/bin/python3
 from io import StringIO
 import os,sys,subprocess,time,re,datetime,threading,random,shutil
-from model import db, Image
-from deploy import *
+from utils.model import db, Image
+from master.deploy import *
 import json
 
-from log import logger
-import env
+from utils.log import logger
+from utils import env
 import requests
 
 fspath = env.getenv('FS_PREFIX')
@@ -42,12 +42,12 @@ class AliyunMgr():
         except Exception as e:
             logger.error(e)
             return False
-    
+
     def createInstance(self):
         request = self.Request.CreateInstanceRequest.CreateInstanceRequest()
         request.set_accept_format('json')
         request.add_query_param('RegionId', self.setting['RegionId'])
-        if 'ZoneId' in self.setting and not self.setting['ZoneId'] == "": 
+        if 'ZoneId' in self.setting and not self.setting['ZoneId'] == "":
             request.add_query_param('ZoneId', self.setting['ZoneId'])
         if 'VSwitchId' in self.setting and not self.setting['VSwitchId'] == "":
             request.add_query_param('VSwitchId', self.setting['VSwitchId'])
@@ -60,25 +60,25 @@ class AliyunMgr():
             request.add_query_param('Password', self.setting['Password'])
         response = self.clt.do_action_with_exception(request)
         logger.info(response)
-        
+
         instanceid=json.loads(bytes.decode(response))['InstanceId']
         return instanceid
-    
+
     def startInstance(self, instanceid):
         request = self.Request.StartInstanceRequest.StartInstanceRequest()
         request.set_accept_format('json')
         request.add_query_param('InstanceId', instanceid)
         response = self.clt.do_action_with_exception(request)
         logger.info(response)
-    
-    
+
+
     def createEIP(self):
         request = self.Request.AllocateEipAddressRequest.AllocateEipAddressRequest()
         request.set_accept_format('json')
         request.add_query_param('RegionId', self.setting['RegionId'])
         response = self.clt.do_action_with_exception(request)
         logger.info(response)
-        
+
         response=json.loads(bytes.decode(response))
         eipid=response['AllocationId']
         eipaddr=response['EipAddress']
@@ -94,7 +94,7 @@ class AliyunMgr():
         response = self.clt.do_action_with_exception(request)
         logger.info(response)
 
-    
+
     def getInnerIP(self, instanceid):
         request = self.Request.DescribeInstancesRequest.DescribeInstancesRequest()
         request.set_accept_format('json')
@@ -168,7 +168,7 @@ class EmptyMgr():
         return False
 
 class CloudMgr():
-    
+
     def getSettingFile(self):
         if not os.path.exists(fspath+"/global/sys/cloudsetting.json"):
             currentfilepath = os.path.dirname(os.path.abspath(__file__))
diff --git a/src/master/deploy.py b/src/master/deploy.py
index 0be22a4..5b5a585 100755
--- a/src/master/deploy.py
+++ b/src/master/deploy.py
@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 
-import paramiko, time
-from log import logger
-import env,os
+import paramiko, time, os
+from utils.log import logger
+from utils import env
 
 def myexec(ssh,command):
     stdin,stdout,stderr = ssh.exec_command(command)
@@ -12,7 +12,7 @@ def myexec(ssh,command):
         if time.time() > endtime:
             stdout.channel.close()
             logger.error(command + ": fail")
-            return 
+            return
 #    for line in stdout.readlines():
 #        if line is None:
 #            time.sleep(5)
@@ -35,7 +35,7 @@ def deploy(ipaddr,masterip,account,password,volumename):
     sftp.put(deployscriptpath,'/root/docklet-deploy.sh')
     sftp.put('/etc/hosts', '/etc/hosts')
     transport.close()
-    
+
     ssh = paramiko.SSHClient()
     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     while True:
diff --git a/src/master/httprest.py b/src/master/httprest.py
index 1bc5d28..f01b972 100755
--- a/src/master/httprest.py
+++ b/src/master/httprest.py
@@ -4,10 +4,13 @@
 # because some modules need variables when import
 # for example, userManager/model.py
 
+import sys
+if sys.path[0].endswith("master"):
+    sys.path[0] = sys.path[0][:-6]
 from flask import Flask, request
 
 # must first init loadenv
-import tools, env
+from utils import tools, env
 # default CONFIG=/opt/docklet/local/docklet-running.conf
 config = env.getenv("CONFIG")
 
@@ -15,22 +18,22 @@ tools.loadenv(config)
 
 # second init logging
 # must import logger after initlogging, ugly
-from log import initlogging
+from utils.log import initlogging
 initlogging("docklet-master")
-from log import logger
+from utils.log import logger
 
 import os
-import http.server, cgi, json, sys, shutil
+import http.server, cgi, json, sys, shutil, traceback
 import xmlrpc.client
 from socketserver import ThreadingMixIn
-import nodemgr, vclustermgr, etcdlib, network, imagemgr, notificationmgr, lockmgr, cloudmgr
-from logs import logs
-import userManager,beansapplicationmgr
-import monitor,traceback
+from utils import etcdlib, imagemgr
+from master import nodemgr, vclustermgr, notificationmgr, lockmgr, cloudmgr
+from utils.logs import logs
+from master import userManager, beansapplicationmgr, monitor, sysmgr, network
+from worker.monitor import History_Manager
 import threading
-import sysmgr
 import requests
-from nettools import portcontrol
+from utils.nettools import portcontrol
 
 #default EXTERNAL_LOGIN=False
 external_login = env.getenv('EXTERNAL_LOGIN')
@@ -895,7 +898,7 @@ if __name__ == '__main__':
     masterport = env.getenv('MASTER_PORT')
     logger.info("using MASTER_PORT %d", int(masterport))
 
-    G_historymgr = monitor.History_Manager()
+    G_historymgr = History_Manager()
     master_collector = monitor.Master_Collector(G_nodemgr,ipaddr+":"+str(masterport))
     master_collector.start()
     logger.info("master_collector started")
diff --git a/src/master/monitor.py b/src/master/monitor.py
new file mode 100644
index 0000000..2aaaa60
--- /dev/null
+++ b/src/master/monitor.py
@@ -0,0 +1,264 @@
+import threading, time, traceback
+from utils import env
+from utils.log import logger
+from httplib2 import Http
+
+# major dict to store the monitoring data
+# only use on Master
+# monitor_hosts: use workers' ip addresses as first key.
+# second key: cpuinfo,diskinfo,meminfo,osinfo,cpuconfig,running,containers,containerslist
+# 1.cpuinfo stores the cpu usages data, and it has keys: user,system,idle,iowait
+# 2.diskinfo stores the disks usages data, and it has keys: device,mountpoint,total,used,free,percent
+# 3.meminfo stores the memory usages data, and it has keys: total,used,free,buffers,cached,percent
+# 4.osinfo stores the information of operating system,
+# and it has keys: platform,system,node,release,version,machine,processor
+# 5.cpuconfig stores the information of processors, and it is a list, each element of list is a dict
+# which stores the information of a processor, each element has key: processor,model name,
+# core id, cpu MHz, cache size, physical id.
+# 6.running indicates the status of worker,and it has two values: True, False.
+# 7.containers store the amount of containers on the worker.
+# 8.containers store a list which consists of the names of containers on the worker.
+monitor_hosts = {}
+
+# monitor_vnodes: use the owners' names of vnodes(containers) as first key.
+# use the names of vnodes(containers) as second key.
+# third key: cpu_use,mem_use,disk_use,basic_info,quota
+# 1.cpu_use has keys: val,unit,hostpercent
+# 2.mem_use has keys: val,unit,usedp
+# 3.disk_use has keys: device,mountpoint,total,used,free,percent
+# 4.basic_info has keys: Name,State,PID,IP,RunningTime,billing,billing_this_hour
+# 5.quota has keys: cpu,memeory
+monitor_vnodes = {}
+
+# get owner name of a container
+def get_owner(container_name):
+    names = container_name.split('-')
+    return names[0]
+
+# the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes
+class Master_Collector(threading.Thread):
+
+    def __init__(self,nodemgr,master_ip):
+        threading.Thread.__init__(self)
+        self.thread_stop = False
+        self.nodemgr = nodemgr
+        self.master_ip = master_ip
+        self.net_lastbillings = {}
+        self.bytes_per_beans = 1000000000
+        return
+
+    def net_billings(self, username, now_bytes_total):
+        global monitor_vnodes
+        if not username in self.net_lastbillings.keys():
+            self.net_lastbillings[username] = 0
+        elif int(now_bytes_total/self.bytes_per_beans) < self.net_lastbillings[username]:
+            self.net_lastbillings[username] = 0
+        diff = int(now_bytes_total/self.bytes_per_beans) - self.net_lastbillings[username]
+        if diff > 0:
+            auth_key = env.getenv('AUTH_KEY')
+            data = {"owner_name":username,"billing":diff, "auth_key":auth_key}
+            header = {'Content-Type':'application/x-www-form-urlencoded'}
+            http = Http()
+            [resp,content] = http.request("http://"+self.master_ip+"/billing/beans/","POST",urlencode(data),headers = header)
+            logger.info("response from master:"+content.decode('utf-8'))
+        self.net_lastbillings[username] += diff
+        monitor_vnodes[username]['net_stats']['net_billings'] = self.net_lastbillings[username]
+
+    def run(self):
+        global monitor_hosts
+        global monitor_vnodes
+        while not self.thread_stop:
+            for worker in monitor_hosts.keys():
+                monitor_hosts[worker]['running'] = False
+            workers = self.nodemgr.get_nodeips()
+            for worker in workers:
+                try:
+                    ip = worker
+                    workerrpc = self.nodemgr.ip_to_rpc(worker)
+                    # fetch data
+                    info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
+                    #logger.info(info[0])
+                    # store data in monitor_hosts and monitor_vnodes
+                    monitor_hosts[ip] = info[0]
+                    for container in info[1].keys():
+                        owner = get_owner(container)
+                        if not owner in monitor_vnodes.keys():
+                            monitor_vnodes[owner] = {}
+                        monitor_vnodes[owner][container] = info[1][container]
+                    for user in info[2].keys():
+                        if not user in monitor_vnodes.keys():
+                            continue
+                        else:
+                            monitor_vnodes[user]['net_stats'] = info[2][user]
+                            self.net_billings(user, info[2][user]['bytes_total'])
+                except Exception as err:
+                    logger.warning(traceback.format_exc())
+                    logger.warning(err)
+            time.sleep(2)
+        #logger.info(History.query.all())
+        #logger.info(VNode.query.all())
+        return
+
+    def stop(self):
+        self.thread_stop = True
+        return
+
+# master use this class to fetch specific data of containers(vnodes)
+class Container_Fetcher:
+    def __init__(self,container_name):
+        self.owner = get_owner(container_name)
+        self.con_id = container_name
+        return
+
+    def get_cpu_use(self):
+        global monitor_vnodes
+        try:
+            res = monitor_vnodes[self.owner][self.con_id]['cpu_use']
+            res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
+        except Exception as err:
+            logger.warning(traceback.format_exc())
+            logger.warning(err)
+            res = {}
+        return res
+
+    def get_mem_use(self):
monitor_vnodes + try: + res = monitor_vnodes[self.owner][self.con_id]['mem_use'] + res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_disk_use(self): + global monitor_vnodes + try: + res = monitor_vnodes[self.owner][self.con_id]['disk_use'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_net_stats(self): + global monitor_vnodes + try: + res = monitor_vnodes[self.owner][self.con_id]['net_stats'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_basic_info(self): + global monitor_vnodes + try: + res = monitor_vnodes[self.owner][self.con_id]['basic_info'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + +# Master use this class to fetch specific data of physical machines(hosts) +class Fetcher: + + def __init__(self,host): + global monitor_hosts + self.info = monitor_hosts[host] + return + + #def get_clcnt(self): + # return DockletMonitor.clcnt + + #def get_nodecnt(self): + # return DockletMonitor.nodecnt + + #def get_meminfo(self): + # return self.get_meminfo_('172.31.0.1') + + def get_meminfo(self): + try: + res = self.info['meminfo'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_cpuinfo(self): + try: + res = self.info['cpuinfo'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_cpuconfig(self): + try: + res = self.info['cpuconfig'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_diskinfo(self): + try: + res = self.info['diskinfo'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_osinfo(self): + try: + res = self.info['osinfo'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_concpuinfo(self): + try: + res = self.info['concpupercent'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_containers(self): + try: + res = self.info['containers'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res + + def get_status(self): + try: + isexist = self.info['running'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + isexist = False + if(isexist): + return 'RUNNING' + else: + return 'STOPPED' + + def get_containerslist(self): + try: + res = self.info['containerslist'] + except Exception as err: + logger.warning(traceback.format_exc()) + logger.warning(err) + res = {} + return res diff --git a/src/master/nodemgr.py b/src/master/nodemgr.py index 9ca819b..d3396a6 100755 --- a/src/master/nodemgr.py +++ b/src/master/nodemgr.py @@ -2,9 +2,9 @@ import threading, random, time, xmlrpc.client, sys #import network -from nettools import netcontrol,ovscontrol -from log import logger -import env +from utils.nettools import netcontrol,ovscontrol +from utils.log import logger +from utils import env ########################################## # NodeMgr @@ -149,7 +149,7 @@ class NodeMgr(object): taskargs = 
task['args'] logger.info("recover task:%s in worker:%s" % (taskname, ip)) eval('worker.'+taskname)(*taskargs) - + # get all run nodes' IP addr def get_nodeips(self): return self.runnodes diff --git a/src/master/notificationmgr.py b/src/master/notificationmgr.py index 426099d..84e6350 100644 --- a/src/master/notificationmgr.py +++ b/src/master/notificationmgr.py @@ -1,15 +1,15 @@ import json -from log import logger -from model import db, Notification, NotificationGroups, User, UserNotificationPair -from userManager import administration_required, token_required +from utils.log import logger +from utils.model import db, Notification, NotificationGroups, User, UserNotificationPair +from master.userManager import administration_required, token_required import smtplib from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.header import Header from datetime import datetime -import env -from settings import settings +from utils import env +from master.settings import settings class NotificationMgr: def __init__(self): diff --git a/src/master/settings.py b/src/master/settings.py index 67d8111..af9e9df 100644 --- a/src/master/settings.py +++ b/src/master/settings.py @@ -1,9 +1,9 @@ #!/usr/bin/python3 -import env +from utils import env import json, os from functools import wraps -from log import logger +from utils.log import logger class settingsClass: diff --git a/src/master/userManager.py b/src/master/userManager.py index 1dad29b..6d89835 100755 --- a/src/master/userManager.py +++ b/src/master/userManager.py @@ -7,22 +7,22 @@ Warning: in some early versions, "token" stand for the instance of class model.U Original author: Liu Peidong ''' -from model import db, User, UserGroup, Notification, UserUsage +from utils.model import db, User, UserGroup, Notification, UserUsage from functools import wraps import os, subprocess, math import hashlib import pam from base64 import b64encode -import env -from settings import settings +from utils import env +from master.settings import settings import smtplib from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.header import Header from datetime import datetime import json -from log import logger -from lvmtool import * +from utils.log import logger +from utils.lvmtool import * PAM = pam.pam() fspath = env.getenv('FS_PREFIX') @@ -162,7 +162,7 @@ class userManager: sys_admin.auth_method = 'local' db.session.add(sys_admin) path = env.getenv('DOCKLET_LIB') - subprocess.call([path+"/userinit.sh", username]) + subprocess.call([path+"/master/userinit.sh", username]) db.session.commit() if not os.path.exists(fspath+"/global/sys/quota"): groupfile = open(fspath+"/global/sys/quota",'w') @@ -870,7 +870,7 @@ class userManager: # now initialize for all kind of users #if newuser.status == 'normal': path = env.getenv('DOCKLET_LIB') - subprocess.call([path+"/userinit.sh", newuser.username]) + subprocess.call([path+"/master/userinit.sh", newuser.username]) res = self.groupQuery(name=newuser.user_group) if res['success']: self.set_nfs_quota(newuser.username,res['data']['data']) diff --git a/src/master/vclustermgr.py b/src/master/vclustermgr.py index c781feb..b7ef839 100755 --- a/src/master/vclustermgr.py +++ b/src/master/vclustermgr.py @@ -1,15 +1,13 @@ #!/usr/bin/python3 -import os, random, json, sys, imagemgr +import os, random, json, sys import datetime, math -from log import logger -import env -import proxytool -import requests, threading -import traceback -from nettools import portcontrol -from 
model import db, Container, PortMapping, VCluster +from utils.log import logger +from utils import env, imagemgr, proxytool +import requests, threading, traceback +from utils.nettools import portcontrol +from utils.model import db, Container, PortMapping, VCluster userpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT')) def post_to_user(url = '/', data={}): diff --git a/src/utils/imagemgr.py b/src/utils/imagemgr.py index fc6a2bb..fcb8873 100755 --- a/src/utils/imagemgr.py +++ b/src/utils/imagemgr.py @@ -24,7 +24,7 @@ from utils.model import db, Image from utils.log import logger from utils import env, updatebase -from worker.lvmtool import * +from utils.lvmtool import * import requests master_port = str(env.getenv('MASTER_PORT')) diff --git a/src/utils/logs.py b/src/utils/logs.py index 51be483..ac79727 100644 --- a/src/utils/logs.py +++ b/src/utils/logs.py @@ -1,8 +1,8 @@ #!/usr/bin/python3 -import env +from utils import env import json, os -from log import logger +from utils.log import logger from werkzeug.utils import secure_filename logsPath = env.getenv('FS_PREFIX') + '/local/log/' diff --git a/src/worker/lvmtool.py b/src/utils/lvmtool.py similarity index 100% rename from src/worker/lvmtool.py rename to src/utils/lvmtool.py diff --git a/src/worker/container.py b/src/worker/container.py index 7f2d977..9a530fa 100755 --- a/src/worker/container.py +++ b/src/worker/container.py @@ -3,7 +3,7 @@ import subprocess, os, json from utils.log import logger from utils import env, imagemgr -from worker.lvmtool import sys_run, check_volume +from utils.lvmtool import sys_run, check_volume from worker.monitor import Container_Collector, History_Manager import lxc @@ -44,7 +44,7 @@ class Container(object): if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)): path = env.getenv('DOCKLET_LIB') - subprocess.call([path+"/userinit.sh", username]) + subprocess.call([path+"/master/userinit.sh", username]) logger.info("user %s directory not found, create it" % username) sys_run("mkdir -p /var/lib/lxc/%s" % lxc_name) logger.info("generate config file for %s" % lxc_name) diff --git a/src/worker/monitor.py b/src/worker/monitor.py index 0291b1a..de31870 100755 --- a/src/worker/monitor.py +++ b/src/worker/monitor.py @@ -35,33 +35,6 @@ b_mem = 2000000 # MB c_disk = 4000 # MB d_port = 1 -# major dict to store the monitoring data -# only use on Master -# monitor_hosts: use workers' ip addresses as first key. -# second key: cpuinfo,diskinfo,meminfo,osinfo,cpuconfig,running,containers,containerslist -# 1.cpuinfo stores the cpu usages data, and it has keys: user,system,idle,iowait -# 2.diskinfo stores the disks usages data, and it has keys: device,mountpoint,total,used,free,percent -# 3.meminfo stores the memory usages data, and it has keys: total,used,free,buffers,cached,percent -# 4.osinfo stores the information of operating system, -# and it has keys: platform,system,node,release,version,machine,processor -# 5.cpuconfig stores the information of processors, and it is a list, each element of list is a dict -# which stores the information of a processor, each element has key: processor,model name, -# core id, cpu MHz, cache size, physical id. -# 6.running indicates the status of worker,and it has two values: True, False. -# 7.containers store the amount of containers on the worker. -# 8.containers store a list which consists of the names of containers on the worker. -monitor_hosts = {} - -# monitor_vnodes: use the owners' names of vnodes(containers) as first key. 
-# use the names of vnodes(containers) as second key. -# third key: cpu_use,mem_use,disk_use,basic_info,quota -# 1.cpu_use has keys: val,unit,hostpercent -# 2.mem_use has keys: val,unit,usedp -# 3.disk_use has keys: device,mountpoint,total,used,free,percent -# 4.basic_info has keys: Name,State,PID,IP,RunningTime,billing,billing_this_hour -# 5.quota has keys: cpu,memeory -monitor_vnodes = {} - # major dict to store the monitoring data on Worker # only use on Worker # workerinfo: only store the data collected on current Worker, @@ -627,234 +600,6 @@ def get_billing_history(vnode_name): default['port'] = 0 return default -# the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes -class Master_Collector(threading.Thread): - - def __init__(self,nodemgr,master_ip): - threading.Thread.__init__(self) - self.thread_stop = False - self.nodemgr = nodemgr - self.master_ip = master_ip - self.net_lastbillings = {} - self.bytes_per_beans = 1000000000 - return - - def net_billings(self, username, now_bytes_total): - global monitor_vnodes - if not username in self.net_lastbillings.keys(): - self.net_lastbillings[username] = 0 - elif int(now_bytes_total/self.bytes_per_beans) < self.net_lastbillings[username]: - self.net_lastbillings[username] = 0 - diff = int(now_bytes_total/self.bytes_per_beans) - self.net_lastbillings[username] - if diff > 0: - auth_key = env.getenv('AUTH_KEY') - data = {"owner_name":username,"billing":diff, "auth_key":auth_key} - header = {'Content-Type':'application/x-www-form-urlencoded'} - http = Http() - [resp,content] = http.request("http://"+self.master_ip+"/billing/beans/","POST",urlencode(data),headers = header) - logger.info("response from master:"+content.decode('utf-8')) - self.net_lastbillings[username] += diff - monitor_vnodes[username]['net_stats']['net_billings'] = self.net_lastbillings[username] - - def run(self): - global monitor_hosts - global monitor_vnodes - while not self.thread_stop: - for worker in monitor_hosts.keys(): - monitor_hosts[worker]['running'] = False - workers = self.nodemgr.get_nodeips() - for worker in workers: - try: - ip = worker - workerrpc = self.nodemgr.ip_to_rpc(worker) - # fetch data - info = list(eval(workerrpc.workerFetchInfo(self.master_ip))) - #logger.info(info[0]) - # store data in monitor_hosts and monitor_vnodes - monitor_hosts[ip] = info[0] - for container in info[1].keys(): - owner = get_owner(container) - if not owner in monitor_vnodes.keys(): - monitor_vnodes[owner] = {} - monitor_vnodes[owner][container] = info[1][container] - for user in info[2].keys(): - if not user in monitor_vnodes.keys(): - continue - else: - monitor_vnodes[user]['net_stats'] = info[2][user] - self.net_billings(user, info[2][user]['bytes_total']) - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - time.sleep(2) - #logger.info(History.query.all()) - #logger.info(VNode.query.all()) - return - - def stop(self): - self.thread_stop = True - return - -# master use this class to fetch specific data of containers(vnodes) -class Container_Fetcher: - def __init__(self,container_name): - self.owner = get_owner(container_name) - self.con_id = container_name - return - - def get_cpu_use(self): - global monitor_vnodes - try: - res = monitor_vnodes[self.owner][self.con_id]['cpu_use'] - res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_mem_use(self): - global 
monitor_vnodes - try: - res = monitor_vnodes[self.owner][self.con_id]['mem_use'] - res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_disk_use(self): - global monitor_vnodes - try: - res = monitor_vnodes[self.owner][self.con_id]['disk_use'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_net_stats(self): - global monitor_vnodes - try: - res = monitor_vnodes[self.owner][self.con_id]['net_stats'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_basic_info(self): - global monitor_vnodes - try: - res = monitor_vnodes[self.owner][self.con_id]['basic_info'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - -# Master use this class to fetch specific data of physical machines(hosts) -class Fetcher: - - def __init__(self,host): - global monitor_hosts - self.info = monitor_hosts[host] - return - - #def get_clcnt(self): - # return DockletMonitor.clcnt - - #def get_nodecnt(self): - # return DockletMonitor.nodecnt - - #def get_meminfo(self): - # return self.get_meminfo_('172.31.0.1') - - def get_meminfo(self): - try: - res = self.info['meminfo'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_cpuinfo(self): - try: - res = self.info['cpuinfo'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_cpuconfig(self): - try: - res = self.info['cpuconfig'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_diskinfo(self): - try: - res = self.info['diskinfo'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_osinfo(self): - try: - res = self.info['osinfo'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_concpuinfo(self): - try: - res = self.info['concpupercent'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_containers(self): - try: - res = self.info['containers'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - - def get_status(self): - try: - isexist = self.info['running'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - isexist = False - if(isexist): - return 'RUNNING' - else: - return 'STOPPED' - - def get_containerslist(self): - try: - res = self.info['containerslist'] - except Exception as err: - logger.warning(traceback.format_exc()) - logger.warning(err) - res = {} - return res - # To record data when the status of containers change class History_Manager: diff --git a/src/worker/worker.py b/src/worker/worker.py index 7121090..88839c7 100755 --- a/src/worker/worker.py +++ b/src/worker/worker.py @@ -20,7 +20,7 @@ import threading from utils import etcdlib, proxytool from worker import container, monitor from utils.nettools import netcontrol,ovscontrol,portcontrol -from worker.lvmtool import new_group, recover_group +from utils.lvmtool import new_group, recover_group from master import network 
##################################################################

From 95ab1104f46b9358212962dc1cfd982927b2a1da Mon Sep 17 00:00:00 2001
From: zhuyj17
Date: Mon, 11 Jun 2018 00:14:41 +0800
Subject: [PATCH 8/8] Tidy user and web code

---
 user/user.py                       | 18 +++++++++---------
 web/web.py                         |  2 +-
 web/webViews/authenticate/login.py |  4 ++--
 web/webViews/dockletrequest.py     |  4 ++--
 web/webViews/log.py                |  2 +-
 5 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/user/user.py b/user/user.py
index 79ade1d..38b9de7 100755
--- a/user/user.py
+++ b/user/user.py
@@ -13,12 +13,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"..", "sr
 if src_folder not in sys.path:
     sys.path.insert(0, src_folder)
 
-# must first init loadenv
-from log import initlogging
-initlogging("docklet-user")
-from log import logger
-
-import tools, env
+from utils import tools, env
 config = env.getenv("CONFIG")
 tools.loadenv(config)
 masterips = env.getenv("MASTER_IPS").split(",")
@@ -26,14 +21,19 @@ G_masterips = []
 for masterip in masterips:
     G_masterips.append(masterip.split("@")[0] + ":" + str(env.getenv("MASTER_PORT")))
 
+# init logging only after loadenv has been called
+from utils.log import initlogging
+initlogging("docklet-user")
+from utils.log import logger
+
 from flask import Flask, request, session, render_template, redirect, send_from_directory, make_response, url_for, abort
 from functools import wraps
-import userManager,beansapplicationmgr, notificationmgr, lockmgr
+from master import userManager,beansapplicationmgr, notificationmgr, lockmgr
 import threading,traceback
-from model import User,db
+from utils.model import User,db
 from httplib2 import Http
 from urllib.parse import urlencode
-from settings import settings
+from master.settings import settings
 
 external_login = env.getenv('EXTERNAL_LOGIN')
 if(external_login == 'TRUE'):
diff --git a/web/web.py b/web/web.py
index 6138438..0d195fe 100755
--- a/web/web.py
+++ b/web/web.py
@@ -12,7 +12,7 @@ if src_folder not in sys.path:
     sys.path.insert(0, src_folder)
 
 # must first init loadenv
-import tools, env
+from utils import tools, env
 config = env.getenv("CONFIG")
 tools.loadenv(config)
 
diff --git a/web/webViews/authenticate/login.py b/web/webViews/authenticate/login.py
index e0801a9..91cef45 100755
--- a/web/webViews/authenticate/login.py
+++ b/web/webViews/authenticate/login.py
@@ -13,7 +13,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../..
if src_folder not in sys.path: sys.path.insert(0, src_folder) -import env +from utils import env if (env.getenv('EXTERNAL_LOGIN') == 'True'): sys.path.insert(0, os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../../src", "plugin")))) @@ -140,7 +140,7 @@ class external_login_callbackView(normalView): class external_loginView(normalView): if (env.getenv('EXTERNAL_LOGIN') == 'True'): template_path = external_generate.html_path - + @classmethod def post(self): return render_template(self.template_path) diff --git a/web/webViews/dockletrequest.py b/web/webViews/dockletrequest.py index 88e15b5..8a48790 100644 --- a/web/webViews/dockletrequest.py +++ b/web/webViews/dockletrequest.py @@ -9,7 +9,7 @@ src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../..", if src_folder not in sys.path: sys.path.insert(0, src_folder) -import env +from utils import env masterips=env.getenv('MASTER_IPS').split(",") user_endpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT')) @@ -60,7 +60,7 @@ class dockletRequest(): return result #except: #abort(500) - + @classmethod def getdesc(self,mastername): return env.getenv(mastername+"_desc")[1:-1] diff --git a/web/webViews/log.py b/web/webViews/log.py index 726fdce..b606c7d 100644 --- a/web/webViews/log.py +++ b/web/webViews/log.py @@ -12,7 +12,7 @@ this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(ins src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../..", "src"))) if src_folder not in sys.path: sys.path.insert(0, src_folder) -import env +from utils import env # logger should only be imported after initlogging has been called logger = None