Rename com to utils

zhuyj17 2018-06-04 01:16:32 +08:00
parent 868be0c8c0
commit 18d9ea08fd
21 changed files with 24 additions and 295 deletions
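
For context: a rename like this one can be applied mechanically. Below is a sketch (not part of the commit) that performs the same com -> utils move, assuming the src/ layout visible in the diffs and git on PATH.

```python
# Illustrative only -- not from this commit.
import pathlib, re, subprocess

# Move the package itself, preserving history.
subprocess.run(["git", "mv", "src/com", "src/utils"], check=True)

# Rewrite every import of the old package name.
for path in pathlib.Path("src").rglob("*.py"):
    text = path.read_text()
    new = re.sub(r"\bfrom com\b", "from utils", text)
    new = re.sub(r"\bimport com\b", "import utils", new)
    if new != text:
        path.write_text(new)
```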

.gitignore vendored
View File

@@ -6,4 +6,4 @@ __temp
 .DS_Store
 docklet.conf
 home.html
-src/migrations/
+src/utils/migrations/

View File

@@ -1 +0,0 @@
-Generic single-database configuration.

View File

@@ -1,45 +0,0 @@
# A generic, single database configuration.
[alembic]
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
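
The sections above are a standard Python logging configuration; the env.py below consumes it through logging.config.fileConfig. A minimal sketch of that mechanism (the file path is an assumption):

```python
import logging
from logging.config import fileConfig

# Reads the [loggers]/[handlers]/[formatters] sections shown above.
fileConfig("alembic.ini")
logging.getLogger("alembic").info("emitted: the alembic logger allows INFO")
logging.getLogger("sqlalchemy.engine").info("suppressed: this logger is at WARN")
```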

View File

@@ -1,87 +0,0 @@
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
                       current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)
    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
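
This env.py pulls its database URL and metadata from the running Flask app via current_app.extensions['migrate']. A minimal sketch of the application side that Flask-Migrate expects (app name and URI are assumptions, not docklet's actual setup):

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:////tmp/example.db"  # assumed URI
db = SQLAlchemy(app)
# Registers app.extensions['migrate'], whose .db.metadata and .configure_args
# are exactly what the env.py above reads.
migrate = Migrate(app, db)
```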

View File

@@ -1,22 +0,0 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}

"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
    ${upgrades if upgrades else "pass"}
def downgrade():
    ${downgrades if downgrades else "pass"}
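
For illustration, this is how Mako fills those ${...} placeholders; Alembic supplies the real values internally, so the hand-built arguments here are assumptions. The next file in this commit is a revision generated from exactly this template.

```python
from mako.template import Template

tmpl = Template(filename="script.py.mako")  # assumed path
print(tmpl.render(message="create tables",
                  up_revision="37dcfdb2604",
                  down_revision=None,
                  create_date="2018-04-22 11:58:48.307690",
                  imports="", upgrades="pass", downgrades="pass"))
```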

View File

@@ -1,116 +0,0 @@
"""empty message

Revision ID: 37dcfdb2604
Revises: None
Create Date: 2018-04-22 11:58:48.307690

"""
# revision identifiers, used by Alembic.
revision = '37dcfdb2604'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('apply_msg',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=10), nullable=True),
    sa.Column('number', sa.Integer(), nullable=True),
    sa.Column('reason', sa.String(length=600), nullable=True),
    sa.Column('status', sa.String(length=10), nullable=True),
    sa.Column('time', sa.DateTime(timezone=10), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('image',
    sa.Column('imagename', sa.String(length=50), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('hasPrivate', sa.Boolean(), nullable=True),
    sa.Column('hasPublic', sa.Boolean(), nullable=True),
    sa.Column('ownername', sa.String(length=20), nullable=True),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('v_cluster',
    sa.Column('clusterid', sa.BigInteger(), autoincrement=False, nullable=False),
    sa.Column('clustername', sa.String(length=50), nullable=True),
    sa.Column('ownername', sa.String(length=20), nullable=True),
    sa.Column('status', sa.String(length=10), nullable=True),
    sa.Column('size', sa.Integer(), nullable=True),
    sa.Column('nextcid', sa.Integer(), nullable=True),
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('start_time', sa.String(length=20), nullable=True),
    sa.Column('proxy_server_ip', sa.String(length=20), nullable=True),
    sa.Column('proxy_public_ip', sa.String(length=20), nullable=True),
    sa.PrimaryKeyConstraint('clusterid')
    )
    op.create_table('v_node',
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('laststopcpuval', sa.Float(), nullable=True),
    sa.Column('laststopruntime', sa.Integer(), nullable=True),
    sa.Column('billing', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('name')
    )
    op.create_table('billing_history',
    sa.Column('node_name', sa.String(length=100), nullable=False),
    sa.Column('vclusterid', sa.Integer(), nullable=True),
    sa.Column('cpu', sa.Float(), nullable=True),
    sa.Column('mem', sa.Float(), nullable=True),
    sa.Column('disk', sa.Float(), nullable=True),
    sa.Column('port', sa.Float(), nullable=True),
    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
    sa.PrimaryKeyConstraint('node_name')
    )
    op.create_table('container',
    sa.Column('containername', sa.String(length=100), nullable=False),
    sa.Column('hostname', sa.String(length=30), nullable=True),
    sa.Column('ip', sa.String(length=20), nullable=True),
    sa.Column('host', sa.String(length=20), nullable=True),
    sa.Column('image', sa.String(length=50), nullable=True),
    sa.Column('lastsave', sa.DateTime(), nullable=True),
    sa.Column('setting_cpu', sa.Integer(), nullable=True),
    sa.Column('setting_mem', sa.Integer(), nullable=True),
    sa.Column('setting_disk', sa.Integer(), nullable=True),
    sa.Column('vclusterid', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
    sa.PrimaryKeyConstraint('containername')
    )
    op.create_table('history',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('vnode', sa.String(length=100), nullable=True),
    sa.Column('action', sa.String(length=30), nullable=True),
    sa.Column('runningtime', sa.Integer(), nullable=True),
    sa.Column('cputime', sa.Float(), nullable=True),
    sa.Column('billing', sa.Integer(), nullable=True),
    sa.Column('actionTime', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['vnode'], ['v_node.name'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('port_mapping',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('node_name', sa.String(length=100), nullable=True),
    sa.Column('node_ip', sa.String(length=20), nullable=True),
    sa.Column('node_port', sa.Integer(), nullable=True),
    sa.Column('host_port', sa.Integer(), nullable=True),
    sa.Column('vclusterid', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['vclusterid'], ['v_cluster.clusterid'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('port_mapping')
    op.drop_table('history')
    op.drop_table('container')
    op.drop_table('billing_history')
    op.drop_table('v_node')
    op.drop_table('v_cluster')
    op.drop_table('image')
    op.drop_table('apply_msg')
    ### end Alembic commands ###
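
Each op.create_table above mirrors a declarative model. A hypothetical sketch matching the 'image' table (docklet's real models live in the model module renamed by this commit and are not shown here):

```python
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class Image(db.Model):
    __tablename__ = "image"
    id = db.Column(db.Integer, primary_key=True)
    imagename = db.Column(db.String(50))
    hasPrivate = db.Column(db.Boolean)
    hasPublic = db.Column(db.Boolean)
    ownername = db.Column(db.String(20))
    create_time = db.Column(db.DateTime)
    description = db.Column(db.Text)
```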

View File

@@ -20,10 +20,10 @@ from configparser import ConfigParser
 from io import StringIO
 import os,sys,subprocess,time,re,datetime,threading,random
 import xmlrpc.client
-from com.model import db, Image
+from utils.model import db, Image
-from com.log import logger
-from com import env, updatebase
+from utils.log import logger
+from utils import env, updatebase
 from worker.lvmtool import *
 import requests

View File

@@ -6,7 +6,7 @@ import argparse
 import sys
 import time # this is only being used as part of the example
 import os
-from com import env
+from utils import env
 # logger should only be imported after initlogging has been called
 logger = None
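
The comment above encodes an initialization-order constraint. A sketch of the required order, mirroring what worker.py does later in this commit (the service name here is made up):

```python
from utils import env, tools

tools.loadenv(env.getenv("CONFIG"))  # load settings before logging starts
from utils.log import initlogging
initlogging("docklet-example")       # hypothetical service name
from utils.log import logger         # only safe after initlogging() has run
```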

View File

@@ -34,7 +34,7 @@ import os, json
 from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
 from itsdangerous import SignatureExpired, BadSignature
-from com import env
+from utils import env
 fsdir = env.getenv('FS_PREFIX')
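
These imports are the usual itsdangerous token workflow. A hedged sketch of issuing and checking a token (secret and payload are invented; docklet's actual token helpers sit outside this hunk):

```python
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired, BadSignature

s = Serializer("secret-key", expires_in=3600)  # assumed 1-hour lifetime
token = s.dumps({"username": "alice"})
try:
    data = s.loads(token)   # -> {'username': 'alice'}
except SignatureExpired:
    data = None             # token older than expires_in
except BadSignature:
    data = None             # token malformed or tampered with
```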

View File

@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 import subprocess, threading
-from com.log import logger
-from com import env
+from utils.log import logger
+from utils import env
 class ipcontrol(object):
     @staticmethod

View File

@@ -1,7 +1,7 @@
 #!/usr/bin/python3
 import requests, json
-from com import env
+from utils import env
 proxy_api_port = env.getenv("PROXY_API_PORT")
 proxy_control="http://localhost:"+ str(proxy_api_port) +"/api/routes"
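
The URL built here matches the route-management REST API of configurable-http-proxy. Assuming that API, registering a route would look roughly like this (path and target are invented):

```python
import json
import requests

def set_route(path, target):
    # proxy_control is defined above: http://localhost:<PROXY_API_PORT>/api/routes
    resp = requests.post(proxy_control + path,
                         data=json.dumps({"target": target}))
    resp.raise_for_status()

# Example: set_route("/go/alice/cluster1", "http://192.168.1.10:10000")
```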

View File

@@ -1,7 +1,7 @@
 #!/usr/bin/python3
 import os, shutil
-from com.log import logger
+from utils.log import logger
 def aufs_remove(basefs):
     try:
@@ -13,7 +13,7 @@ def aufs_remove(basefs):
         logger.error(e)
 def aufs_clean(basefs):
-    # clean the aufs mark
+    # clean the aufs mark
     allfiles = os.listdir(basefs)
     for onefile in allfiles:
         if onefile[:4] == ".wh.":

View File

@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 import subprocess, os, json
-from com.log import logger
-from com import env, imagemgr
+from utils.log import logger
+from utils import env, imagemgr
 from worker.lvmtool import sys_run, check_volume
 from worker.monitor import Container_Collector, History_Manager
 import lxc

View File

@@ -1,8 +1,8 @@
 #!/usr/bin/python3
 import subprocess,os,time
-from com.log import logger
-from com import env
+from utils.log import logger
+from utils import env
 def sys_run(command,check=False):
     Ret = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True, check=check)
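
sys_run merges stderr into stdout and returns subprocess's CompletedProcess. A usage sketch of the wrapper defined above (the command is just an example):

```python
ret = sys_run("ls /tmp")     # any shell command string
print(ret.returncode)        # 0 on success
print(ret.stdout.decode())   # stdout and stderr, merged
```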

View File

@@ -19,13 +19,13 @@ Design:Monitor mainly consists of three parts: Collectors, Master_Collector and
 import subprocess,re,os,psutil,math,sys
 import time,threading,json,traceback,platform
-from com import env, etcdlib
+from utils import env, etcdlib
 import lxc
 import xmlrpc.client
 from datetime import datetime
-from com.model import db,VNode,History,BillingHistory,VCluster,PortMapping
-from com.log import logger
+from utils.model import db,VNode,History,BillingHistory,VCluster,PortMapping
+from utils.log import logger
 from httplib2 import Http
 from urllib.parse import urlencode
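
The docstring context above describes the monitor as Collectors plus a Master_Collector and a History_Manager. A minimal sketch of the collector pattern that description implies (class name and interval are invented, not docklet's implementation):

```python
import threading
import time

class Collector(threading.Thread):
    """Periodically samples one resource; subclasses implement collect()."""

    def __init__(self, interval=2.0):
        super().__init__(daemon=True)
        self.interval = interval

    def collect(self):
        raise NotImplementedError

    def run(self):
        while True:
            self.collect()
            time.sleep(self.interval)
```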

View File

@@ -4,22 +4,22 @@
 import sys
 if sys.path[0].endswith("worker"):
     sys.path[0] = sys.path[0][:-6]
-from com import env, tools
+from utils import env, tools
 config = env.getenv("CONFIG")
 #config = "/opt/docklet/local/docklet-running.conf"
 tools.loadenv(config)
 # must import logger after initlogging, ugly
-from com.log import initlogging
+from utils.log import initlogging
 initlogging("docklet-worker")
-from com.log import logger
+from utils.log import logger
 import xmlrpc.server, sys, time
 from socketserver import ThreadingMixIn
 import threading
-from com import etcdlib, proxytool
+from utils import etcdlib, proxytool
 from worker import container, monitor
-from com.nettools import netcontrol,ovscontrol,portcontrol
+from utils.nettools import netcontrol,ovscontrol,portcontrol
 from worker.lvmtool import new_group, recover_group
 from master import network
@@ -179,7 +179,7 @@ class Worker(object):
             netcontrol.new_bridge('docklet-br')
         else:
             if not netcontrol.bridge_exists('docklet-br'):
-                logger.error("docklet-br not found")
+                logger.error("docklet-br not found")
                 sys.exit(1)
         logger.info ("setup GRE tunnel to master %s" % self.master)
         #network.netsetup("gre", self.master)
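
The GRE setup itself is delegated to the commented-out network.netsetup call. For reference, a GRE port on an Open vSwitch bridge is usually created with ovs-vsctl; an illustrative sketch assuming ovs-vsctl is installed and the bridge name used above:

```python
import subprocess

def setup_gre(bridge, remote_ip):
    # Standard ovs-vsctl syntax for a GRE tunnel port; --may-exist makes it idempotent.
    port = "gre-" + remote_ip.replace(".", "-")
    subprocess.run(
        "ovs-vsctl --may-exist add-port %s %s "
        "-- set interface %s type=gre options:remote_ip=%s"
        % (bridge, port, port, remote_ip),
        shell=True, check=True)

# Example: setup_gre("docklet-br", "10.0.0.1")  # master address is invented
```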