[Init] init repository with Docklet 0.2.6
|
@ -0,0 +1,7 @@
|
|||
__pycache__
|
||||
*.pyc
|
||||
*.swp
|
||||
__temp
|
||||
*~
|
||||
.DS_Store
|
||||
docklet.conf
|
|
@ -0,0 +1,26 @@
|
|||
Copyright (c) 2016, Peking University (PKU).
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
3. Neither the name of the PKU nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGE.
|
|
@ -0,0 +1,199 @@
|
|||
# Docklet
|
||||
|
||||
## intro
|
||||
|
||||
Docklet is an operating system for a mini-datacenter. Its goal is to help
|
||||
multiple users share cluster resources effectively. Unlike the "application
|
||||
framework oriented" cluster managers such as Mesos and YARN, Docklet is
|
||||
**user oriented**. In Docklet, every user has their own private
|
||||
**virtual cluster (vcluster)**, which consists of a number of virtual
|
||||
Linux container nodes distributed over the physical cluster. Every
|
||||
vcluster is separated from others and can be operated like a real
|
||||
physical cluster. Therefore, most applications, especially those
|
||||
requiring a cluster environment, can run in vcluster seamlessly.
|
||||
|
||||
Docklet provides a base image for creating virtual nodes. This image comes
|
||||
with many mainstream development tools and frameworks pre-installed,
|
||||
including gcc/g++, openjdk, python3, R, MPI, scala, ruby, php, node.js,
|
||||
texlive, mpich2, spark,
|
||||
scipy/numpy/matplotlib/pandas/sympy/scikit-learn, jupyter notebook, etc.
|
||||
Users can get a ready vcluster with just one click within 1 second.
|
||||
|
||||
Users are free to install their own software in their vclusters.
|
||||
Docklet can be operated through a **web terminal**. Users can do their
|
||||
work as an administrator would on a console. The base image system is
|
||||
Ubuntu. The recommended way of installing new software is by
|
||||
**apt-get**.
|
||||
|
||||
Users manage and use their vclusters entirely through the web. The only client
|
||||
tool needed is a modern web browser, such as Safari, Firefox, or Chrome. The
|
||||
integrated *jupyter notebook* provides a web workspace. By visiting the
|
||||
workspace, users can do coding, debugging and testing of their programs
|
||||
online. The **python scipy** family of tools can even render plots
|
||||
directly in the browser. Therefore, it is ideal for data analysis and
|
||||
processing.
|
||||
|
||||
Docklet needs only **one** public IP address. The vclusters are
|
||||
configured to use a private IP address range, e.g., 172.16.0.0/16,
|
||||
192.168.0.0/16, or 10.0.0.0/8. A proxy is set up to help
|
||||
users visit their vclusters behind the firewall/gateway.
|
||||
|
||||
The Docklet system runtime consists of four components:
|
||||
|
||||
- distributed file system server
|
||||
- etcd server
|
||||
- docklet master
|
||||
- docklet worker
|
||||
|
||||
## install
|
||||
|
||||
Currently the docklet runtime is recommended to run on Ubuntu 15.10+.
|
||||
|
||||
Ensure that python3.5 is the default python3 version.
|
||||
|
||||
Unpack the docklet tarball to a directory (/root/docklet as an
|
||||
example); you will get:
|
||||
|
||||
```
|
||||
readme.md
|
||||
prepare.sh
|
||||
conf/
|
||||
container.conf
|
||||
docklet.conf.template
|
||||
lxc-script/
|
||||
bin/
|
||||
docklet-master
|
||||
docklet-worker
|
||||
src/
|
||||
httprest.py
|
||||
worker.py
|
||||
...
|
||||
web/
|
||||
web.py
|
||||
dep/
|
||||
etcd-multi-nodes.sh
|
||||
etcd-one-node.sh
|
||||
doc/
|
||||
tools/
|
||||
update-basefs.sh
|
||||
start_jupyter.sh
|
||||
```
|
||||
|
||||
For a first-time install, users should run **prepare.sh** to
|
||||
install the necessary packages automatically. Note that you may need to run this
|
||||
script several times to successfully install all the needed packages.
|
||||
|
||||
A *root* user will be created for managing the system. The password is
|
||||
recorded in `FS_PREFIX/local/generated_password.txt` .
|
||||
|
||||
## config ##
|
||||
|
||||
The main configuration file of docklet is conf/docklet.conf. Most
|
||||
default settings work for a single-host environment.
|
||||
|
||||
First, copy docklet.conf.template to docklet.conf.
|
||||
|
||||
The following settings should be taken care of (a minimal example follows the list):
|
||||
|
||||
- NETWORK_DEVICE : the network device to use.
|
||||
- ETCD : the etcd server address. For a distributed multi-host
|
||||
  environment, it should be one of the public etcd server addresses.
|
||||
  For a single-host environment, the default value should be OK.
|
||||
- STORAGE : whether to use a disk or a file to store persistent data; for a
|
||||
single host, file is convenient.
|
||||
- FS_PREFIX : the working directory of the docklet runtime. The default is
|
||||
/opt/docklet.
|
||||
- CLUSTER_NET : the vcluster network IP address range; the default is
|
||||
172.16.0.1/16. This network range should all be allocated to and
|
||||
managed by docklet.
|
||||
- PROXY_PORT : the public port of docklet. Users use
|
||||
this port to visit the docklet system.
|
||||
- PORTAL_URL : the portal of the system. Users access the system
|
||||
by visiting this address. If the system is behind a firewall, then
|
||||
a reverse proxy should be set up.
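
For a single-host test deployment, a minimal docklet.conf might look like the
sketch below; the values are only illustrative and every key has a commented
default in docklet.conf.template.

```
NETWORK_DEVICE=eth0
ETCD=localhost:2379
STORAGE=file
FS_PREFIX=/opt/docklet
CLUSTER_NET=172.16.0.1/16
PROXY_PORT=8000
PORTAL_URL=http://localhost:8000
```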
|
||||
|
||||
## start ##
|
||||
|
||||
### distributed file system ###
|
||||
|
||||
For a multi-host distributed environment, a distributed file system is
|
||||
needed to store global data. Currently, glusterfs has been tested.
|
||||
Let's presume the file server exports its filesystem via NFS as
|
||||
**fileserver:/pub** :
|
||||
|
||||
On each physical host that runs docklet, mount **fileserver:/pub** to
|
||||
**FS_PREFIX/global** .
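
For example, assuming the default FS_PREFIX of /opt/docklet and the NFS export
presumed above, the mount on each host would be roughly:

```
mount -t nfs fileserver:/pub /opt/docklet/global
```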
|
||||
|
||||
For a single-host environment, there is no need to configure a distributed
|
||||
file system.
|
||||
|
||||
### etcd ###
|
||||
|
||||
For a single-host environment, start **dep/etcd-one-node.sh** . Some recent
|
||||
Ubuntu releases include **etcd** in their repositories; just `apt-get
|
||||
install etcd`, and there is no need to start etcd manually.
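
For example, a rough single-host sequence (the check assumes etcd listens on
the default localhost:2379):

```
dep/etcd-one-node.sh
curl http://localhost:2379/version    # quick check that etcd is answering
```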
|
||||
|
||||
For a multi-host distributed environment, start
|
||||
**dep/etcd-multi-nodes.sh** on each etcd server host. This script
|
||||
requires the etcd server addresses as parameters.
|
||||
|
||||
### master ###
|
||||
|
||||
First, select a server with two network interface cards: one with a
|
||||
public IP address/URL, e.g., docklet.info; the other with a private IP
|
||||
address, e.g., 172.16.0.1. This server will be the master.
|
||||
|
||||
If it is the first time you start docklet, run `bin/docklet-master init`
|
||||
to initialize and start the docklet master. Otherwise, run `bin/docklet-master start`,
|
||||
which will start the master in recovery mode in the background using
|
||||
conf/docklet.conf, meaning docklet will recover the existing workspaces.
|
||||
|
||||
This script actually starts three daemons: the docklet master
|
||||
(httprest.py), the configurable-http-proxy, and the docklet web frontend (web.py).
|
||||
|
||||
You can check the daemon status by running `bin/docklet-master status`
|
||||
|
||||
If the master fails to start, you can try `bin/docklet-master init`
|
||||
to initialize the whole system.
|
||||
|
||||
More usage information can be found by typing `bin/docklet-master`
|
||||
|
||||
The master logs are in **FS_PREFIX/local/log/docklet-master.log** and
|
||||
**docklet-web.log**.
|
||||
|
||||
### worker ###
|
||||
|
||||
The worker needs a basefs image to boot containers.
|
||||
|
||||
You can create such an image with `lxc-create -n test -t download`,
|
||||
then copy the rootfs to **FS_PREFIX/local**, and rename `rootfs`
|
||||
to `basefs`.
|
||||
|
||||
Note that the `jupyterhub` package must be installed in this image, and the
|
||||
start script `tools/start_jupyter.sh` should be placed at
|
||||
`basefs/home/jupyter`.
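
A rough sketch of these steps, assuming the default lxc path /var/lib/lxc and
FS_PREFIX=/opt/docklet:

```
lxc-create -n test -t download          # choose an Ubuntu template when prompted
cp -a /var/lib/lxc/test/rootfs /opt/docklet/local/
mv /opt/docklet/local/rootfs /opt/docklet/local/basefs
# jupyterhub must be installed inside basefs, and the start script placed under /home/jupyter
mkdir -p /opt/docklet/local/basefs/home/jupyter
cp tools/start_jupyter.sh /opt/docklet/local/basefs/home/jupyter/
```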
|
||||
|
||||
You can check and run `tools/update-basefs.sh` to update basefs.
|
||||
|
||||
Run `bin/docklet-worker start` to start the worker in the background.
|
||||
|
||||
You can check the daemon status by running `bin/docklet-worker status`
|
||||
|
||||
More usage information can be found by typing `bin/docklet-worker`
|
||||
|
||||
The log is in **FS_PREFIX/local/log/docklet-worker.log**
|
||||
|
||||
Currently, the worker must be run after the master has been started.
|
||||
|
||||
## usage ##
|
||||
|
||||
Open a browser and visit the address specified by PORTAL_URL,
|
||||
e.g., ` http://docklet.info/ `
|
||||
|
||||
If the system is deployed on a single host just for testing purposes,
|
||||
then the PORTAL_URL defaults to `http://MASTER_IP:PROXY_PORT`,
|
||||
e.g., `http://localhost:8000`.
|
||||
|
||||
That is it.
|
||||
|
||||
## system admin ##
|
|
@ -0,0 +1,230 @@
|
|||
#!/bin/sh
|
||||
|
||||
[ $(id -u) != '0' ] && echo "root is needed" && exit 1
|
||||
|
||||
# get some path of docklet
|
||||
|
||||
bindir=${0%/*}
|
||||
# $bindir may look like /opt/docklet/src/../sbin
|
||||
# the command below normalizes $bindir to an absolute path
|
||||
DOCKLET_BIN=$(cd $bindir; pwd)
|
||||
DOCKLET_HOME=${DOCKLET_BIN%/*}
|
||||
DOCKLET_CONF=$DOCKLET_HOME/conf
|
||||
LXC_SCRIPT=$DOCKLET_CONF/lxc-script
|
||||
DOCKLET_SRC=$DOCKLET_HOME/src
|
||||
DOCKLET_LIB=$DOCKLET_SRC
|
||||
DOCKLET_WEB=$DOCKLET_HOME/web
|
||||
|
||||
# default working directory, default to /opt/docklet
|
||||
FS_PREFIX=/opt/docklet
|
||||
|
||||
RUN_DIR=$FS_PREFIX/local/run
|
||||
LOG_DIR=$FS_PREFIX/local/log
|
||||
|
||||
#configurable-http-proxy public port, default is 8000
|
||||
PROXY_PORT=8000
|
||||
#configurable-http-proxy api port, default is 8001
|
||||
PROXY_API_PORT=8001
|
||||
#network interface , default is eth0
|
||||
NETWORK_DEVICE=eth0
|
||||
#etcd server address, default is localhost:2379
|
||||
ETCD=localhost:2379
|
||||
#unique cluster_name, default is docklet-vc
|
||||
CLUSTER_NAME=docklet-vc
|
||||
#web port, default is 8888
|
||||
WEB_PORT=8888
|
||||
#cluster net, default is 172.16.0.1/16
|
||||
CLUSTER_NET="172.16.0.1/16"
|
||||
|
||||
. $DOCKLET_CONF/docklet.conf
|
||||
|
||||
export FS_PREFIX
|
||||
|
||||
# This next line determines what user the script runs as.
|
||||
DAEMON_USER=root
|
||||
|
||||
# settings for docklet master
|
||||
DAEMON_MASTER=$DOCKLET_LIB/httprest.py
|
||||
DAEMON_NAME_MASTER=docklet-master
|
||||
DAEMON_OPTS_MASTER=
|
||||
# The process ID of the script when it runs is stored here:
|
||||
PIDFILE_MASTER=$RUN_DIR/$DAEMON_NAME_MASTER.pid
|
||||
|
||||
# settings for docklet proxy, which is required for web access
|
||||
DAEMON_PROXY=`which configurable-http-proxy`
|
||||
DAEMON_NAME_PROXY=docklet-proxy
|
||||
PIDFILE_PROXY=$RUN_DIR/proxy.pid
|
||||
DAEMON_OPTS_PROXY=
|
||||
|
||||
# settings for docklet web
|
||||
DAEMON_WEB=$DOCKLET_WEB/web.py
|
||||
DAEMON_NAME_WEB=docklet-web
|
||||
PIDFILE_WEB=$RUN_DIR/docklet-web.pid
|
||||
DAEMON_OPTS_WEB=
|
||||
|
||||
RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf
|
||||
export CONFIG=$RUNNING_CONFIG
|
||||
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
###########
|
||||
|
||||
pre_start_master () {
|
||||
log_daemon_msg "Starting $DAEMON_NAME_MASTER in $FS_PREFIX"
|
||||
|
||||
[ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global
|
||||
[ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local
|
||||
[ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users
|
||||
[ ! -d $FS_PREFIX/local/volume ] && mkdir -p $FS_PREFIX/local/volume
|
||||
[ ! -d $FS_PREFIX/local/temp ] && mkdir -p $FS_PREFIX/local/temp
|
||||
[ ! -d $FS_PREFIX/local/run ] && mkdir -p $FS_PREFIX/local/run
|
||||
[ ! -d $FS_PREFIX/local/log ] && mkdir -p $FS_PREFIX/local/log
|
||||
|
||||
grep -P "^[\s]*[a-zA-Z]" $DOCKLET_CONF/docklet.conf > $RUNNING_CONFIG
|
||||
|
||||
echo "DOCKLET_HOME=$DOCKLET_HOME" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_BIN=$DOCKLET_BIN" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_CONF=$DOCKLET_CONF" >> $RUNNING_CONFIG
|
||||
echo "LXC_SCRIPT=$LXC_SCRIPT" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_SRC=$DOCKLET_SRC" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_LIB=$DOCKLET_LIB" >> $RUNNING_CONFIG
|
||||
|
||||
|
||||
# iptables for NAT network for containers to access web
|
||||
iptables -t nat -F
|
||||
iptables -t nat -A POSTROUTING -s $CLUSTER_NET -j MASQUERADE
|
||||
|
||||
}
|
||||
|
||||
do_start_master () {
|
||||
pre_start_master
|
||||
|
||||
DAEMON_OPTS_MASTER=$1
|
||||
|
||||
# MODE : start mode
|
||||
# new : clean old data in etcd, global directory and start a new cluster
|
||||
# recovery : start cluster and recover status from etcd and global directory
|
||||
# Default is "recovery"
|
||||
|
||||
start-stop-daemon --start --oknodo --background --pidfile $PIDFILE_MASTER --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_MASTER -- $DAEMON_OPTS_MASTER
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
|
||||
do_start_proxy () {
|
||||
log_daemon_msg "Starting $DAEMON_NAME_PROXY daemon in $FS_PREFIX"
|
||||
DAEMON_OPTS_PROXY="--port $PROXY_PORT --api-port $PROXY_API_PORT --default-target=http://localhost:8888"
|
||||
start-stop-daemon --start --background --pidfile $PIDFILE_PROXY --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_PROXY -- $DAEMON_OPTS_PROXY
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
pre_start_web () {
|
||||
log_daemon_msg "Starting $DAEMON_NAME_WEB in $FS_PREFIX"
|
||||
|
||||
webip=$(ip addr show $NETWORK_DEVICE | grep -oE "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/[0-9]+")
|
||||
|
||||
[ $? != "0" ] && echo "wrong NETWORK_DEVICE $NETWORK_DEVICE" && exit 1
|
||||
|
||||
webip=${webip%/*}
|
||||
|
||||
AUTH_COOKIE_URL=http://$webip:$WEB_PORT/jupyter
|
||||
#echo "set AUTH_COOKIE_URL:$AUTH_COOKIE_URL in etcd with key:$CLUSTER_NAME/web/authurl"
|
||||
curl -XPUT http://$ETCD/v2/keys/$CLUSTER_NAME/web/authurl -d value="$AUTH_COOKIE_URL" > /dev/null 2>&1
|
||||
[ $? != 0 ] && echo "set AUTH_COOKIE_URL failed in etcd" && exit 1
|
||||
}
|
||||
|
||||
do_start_web () {
|
||||
pre_start_web
|
||||
|
||||
DAEMON_OPTS_WEB="-p $WEB_PORT"
|
||||
|
||||
start-stop-daemon --start --background --pidfile $PIDFILE_WEB --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_WEB -- $DAEMON_OPTS_WEB
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
|
||||
do_stop_master () {
|
||||
log_daemon_msg "Stopping $DAEMON_NAME_MASTER daemon"
|
||||
start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_MASTER --retry 10
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
do_stop_proxy () {
|
||||
log_daemon_msg "Stopping $DAEMON_NAME_PROXY daemon"
|
||||
start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_PROXY --retry 10
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
|
||||
do_stop_web () {
|
||||
log_daemon_msg "Stopping $DAEMON_NAME_WEB daemon"
|
||||
start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_WEB --retry 10
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
init)
|
||||
do_start_master "new"
|
||||
do_start_proxy
|
||||
do_start_web
|
||||
;;
|
||||
start)
|
||||
do_start_master "recovery"
|
||||
do_start_proxy
|
||||
do_start_web
|
||||
;;
|
||||
|
||||
stop)
|
||||
do_stop_web
|
||||
do_stop_proxy
|
||||
do_stop_master
|
||||
;;
|
||||
|
||||
restart)
|
||||
do_stop_web
|
||||
do_stop_proxy
|
||||
do_stop_master
|
||||
do_start_master "recovery"
|
||||
do_start_proxy
|
||||
do_start_web
|
||||
;;
|
||||
|
||||
start_proxy)
|
||||
do_start_proxy
|
||||
;;
|
||||
|
||||
stop_proxy)
|
||||
do_stop_proxy
|
||||
;;
|
||||
|
||||
start_web)
|
||||
do_start_web
|
||||
;;
|
||||
|
||||
stop_web)
|
||||
do_stop_web
|
||||
;;
|
||||
|
||||
reinit)
|
||||
do_stop_web
|
||||
do_stop_proxy
|
||||
do_stop_master
|
||||
do_start_master "new"
|
||||
do_start_proxy
|
||||
do_start_web
|
||||
;;
|
||||
|
||||
status)
|
||||
status=0
|
||||
status_of_proc -p $PIDFILE_MASTER "$DAEMON_MASTER" "$DAEMON_NAME_MASTER" || status=$?
|
||||
status_of_proc -p $PIDFILE_PROXY "$DAEMON_PROXY" "$DAEMON_NAME_PROXY" || status=$?
|
||||
status_of_proc -p $PIDFILE_WEB "$DAEMON_WEB" "$DAEMON_NAME_WEB" || status=$?
|
||||
exit $status
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $DAEMON_NAME_MASTER {init|start|stop|restart|reinit|status|start_proxy|stop_proxy|start_web|stop_web}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit 0
|
|
@ -0,0 +1,124 @@
|
|||
#!/bin/sh
|
||||
|
||||
[ $(id -u) != '0' ] && echo "root is needed" && exit 1
|
||||
|
||||
# get some path of docklet
|
||||
|
||||
bindir=${0%/*}
|
||||
# $bindir may look like /opt/docklet/src/../bin
|
||||
# the command below normalizes $bindir to an absolute path
|
||||
DOCKLET_BIN=$(cd $bindir; pwd)
|
||||
DOCKLET_HOME=${DOCKLET_BIN%/*}
|
||||
DOCKLET_CONF=$DOCKLET_HOME/conf
|
||||
LXC_SCRIPT=$DOCKLET_CONF/lxc-script
|
||||
DOCKLET_SRC=$DOCKLET_HOME/src
|
||||
DOCKLET_LIB=$DOCKLET_SRC
|
||||
DOCKLET_WEB=$DOCKLET_HOME/web
|
||||
|
||||
# working directory, default to /opt/docklet
|
||||
FS_PREFIX=/opt/docklet
|
||||
|
||||
# cluster net ip range, default is 172.16.0.1/16
|
||||
CLUSTER_NET="172.16.0.1/16"
|
||||
|
||||
RUN_DIR=$FS_PREFIX/local/run
|
||||
LOG_DIR=$FS_PREFIX/local/log
|
||||
|
||||
. $DOCKLET_CONF/docklet.conf
|
||||
|
||||
export FS_PREFIX
|
||||
|
||||
# This next line determines what user the script runs as.
|
||||
DAEMON_USER=root
|
||||
|
||||
# settings for docklet worker
|
||||
DAEMON=$DOCKLET_LIB/worker.py
|
||||
DAEMON_NAME=docklet-worker
|
||||
DAEMON_OPTS=
|
||||
# The process ID of the script when it runs is stored here:
|
||||
PIDFILE=$RUN_DIR/$DAEMON_NAME.pid
|
||||
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
###########
|
||||
|
||||
pre_start () {
|
||||
log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
|
||||
|
||||
[ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global
|
||||
[ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local
|
||||
[ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users
|
||||
[ ! -d $FS_PREFIX/local/volume ] && mkdir -p $FS_PREFIX/local/volume
|
||||
[ ! -d $FS_PREFIX/local/temp ] && mkdir -p $FS_PREFIX/local/temp
|
||||
[ ! -d $FS_PREFIX/local/run ] && mkdir -p $FS_PREFIX/local/run
|
||||
[ ! -d $FS_PREFIX/local/log ] && mkdir -p $FS_PREFIX/local/log
|
||||
|
||||
tempdir=/opt/docklet/local/temp
|
||||
|
||||
RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf
|
||||
|
||||
grep -P "^[\s]*[a-zA-Z]" $DOCKLET_CONF/docklet.conf > $RUNNING_CONFIG
|
||||
|
||||
echo "DOCKLET_HOME=$DOCKLET_HOME" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_BIN=$DOCKLET_BIN" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_CONF=$DOCKLET_CONF" >> $RUNNING_CONFIG
|
||||
echo "LXC_SCRIPT=$LXC_SCRIPT" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_SRC=$DOCKLET_SRC" >> $RUNNING_CONFIG
|
||||
echo "DOCKLET_LIB=$DOCKLET_LIB" >> $RUNNING_CONFIG
|
||||
|
||||
export CONFIG=$RUNNING_CONFIG
|
||||
|
||||
# iptables for NAT network for containers to access web
|
||||
iptables -t nat -F
|
||||
iptables -t nat -A POSTROUTING -s $CLUSTER_NET -j MASQUERADE
|
||||
|
||||
if [ ! -d $FS_PREFIX/local/basefs ]; then
|
||||
log_daemon_msg "create basefs ..."
|
||||
[ ! -f $tempdir/basefs.tar.bz ] && log_daemon_msg "$tempdir/basefs.tar.bz does not exist, run prepare.sh first" && exit 1
|
||||
tar xvf $tempdir/basefs.tar.bz -C $FS_PREFIX/local > /dev/null
|
||||
fi
|
||||
}
|
||||
|
||||
do_start() {
|
||||
pre_start
|
||||
start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
do_stop () {
|
||||
log_daemon_msg "Stopping $DAEMON_NAME daemon"
|
||||
start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE --retry 10
|
||||
log_end_msg $?
|
||||
}
|
||||
|
||||
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
do_start
|
||||
;;
|
||||
|
||||
stop)
|
||||
do_stop
|
||||
;;
|
||||
|
||||
console)
|
||||
pre_start
|
||||
cprofilev $DAEMON $DAEMON_OPTS
|
||||
;;
|
||||
|
||||
restart)
|
||||
do_stop
|
||||
do_start
|
||||
;;
|
||||
|
||||
status)
|
||||
status_of_proc -p $PIDFILE "$DAEMON" "$DAEMON_NAME" && exit 0 || exit $?
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $DAEMON_NAME {start|stop|restart|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit 0
|
|
@ -0,0 +1,51 @@
|
|||
# This is the common container.conf for all containers.
|
||||
# If you want custom settings, you have two choices:
|
||||
# 1. Directly modify this file, which is not recommended, because the
|
||||
#    settings will be overridden when a new version of container.conf is released.
|
||||
# 2. Use a custom config file in this conf directory: lxc.custom.conf,
|
||||
#    it uses the same grammar as container.conf, and will be merged
|
||||
# with the default container.conf by docklet at runtime.
|
||||
#
|
||||
# The following is an example mounting user html directory
|
||||
# lxc.mount.entry = /public/home/%USERNAME%/public_html %ROOTFS%/root/public_html none bind,rw,create=dir 0 0
|
||||
#
|
||||
|
||||
#### include /usr/share/lxc/config/ubuntu.common.conf
|
||||
lxc.include = /usr/share/lxc/config/ubuntu.common.conf
|
||||
|
||||
############## DOCKLET CONFIG ##############
|
||||
|
||||
# Setup 0 tty devices
|
||||
lxc.tty = 0
|
||||
|
||||
lxc.rootfs = %ROOTFS%
|
||||
lxc.utsname = %HOSTNAME%
|
||||
|
||||
lxc.network.type = veth
|
||||
lxc.network.name = eth0
|
||||
lxc.network.veth.pair = %LXCNAME%
|
||||
lxc.network.script.up = Bridge=docklet-br VLANID=%VLANID% %LXCSCRIPT%/lxc-ifup
|
||||
lxc.network.script.down = Bridge=docklet-br %LXCSCRIPT%/lxc-ifdown
|
||||
lxc.network.ipv4 = %IP%
|
||||
lxc.network.ipv4.gateway = %GATEWAY%
|
||||
lxc.network.flags = up
|
||||
lxc.network.mtu = 1420
|
||||
|
||||
lxc.cgroup.memory.limit_in_bytes = %CONTAINER_MEMORY%M
|
||||
#lxc.cgroup.memory.kmem.limit_in_bytes = 512M
|
||||
#lxc.cgroup.memory.soft_limit_in_bytes = 4294967296
|
||||
#lxc.cgroup.memory.memsw.limit_in_bytes = 8589934592
|
||||
|
||||
# lxc.cgroup.cpu.cfs_period_us : period time of cpu, default 100000, means 100ms
|
||||
# lxc.cgroup.cpu.cfs_quota_us : quota time of this process
|
||||
lxc.cgroup.cpu.cfs_quota_us = %CONTAINER_CPU%
|
||||
|
||||
lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/data %ROOTFS%/root/nfs none bind,rw,create=dir 0 0
|
||||
lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/hosts/%CLUSTERID%.hosts %ROOTFS%/etc/hosts none bind,ro,create=file 0 0
|
||||
lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/ssh %ROOTFS%/root/.ssh none bind,ro,create=dir 0 0
|
||||
|
||||
# setting hostname
|
||||
lxc.hook.pre-start = HNAME=%HOSTNAME% %LXCSCRIPT%/lxc-prestart
|
||||
|
||||
# setting nfs softlink
|
||||
#lxc.hook.mount = %LXCSCRIPT%/lxc-mount
|
|
@ -0,0 +1,133 @@
|
|||
|
||||
# ==================================================
|
||||
#
|
||||
# [Local config example]
|
||||
#
|
||||
# ==================================================
|
||||
|
||||
# CLUSTER_NAME: name of host cluster, every host cluster should have
|
||||
# a unique name, default is docklet-vc
|
||||
# CLUSTER_NAME=docklet-vc
|
||||
|
||||
# FS_PREFIX: path to store global and local data for docklet
|
||||
# default is /opt/docklet.
|
||||
#
|
||||
# Note: $FS_PREFIX/global is for storing persistent data, e.g.,
|
||||
# custom container images, user data, etc. For a multi hosts
|
||||
# environment, it is the mountpoint of the distributed filesystem
|
||||
# that all physical hosts (master and slave) share.
|
||||
# E.g., for a system with three hosts: computing hosts A and B,
|
||||
# storage host C. Host C exports its storage filesystem through NFS
|
||||
# as C:/data, then hosts A and B should mount C:/data to $FS_PREFIX/global.
|
||||
# Please make sure that the mount is OK before launching docklet.
|
||||
#
|
||||
# FS_PREFIX=/opt/docklet
|
||||
|
||||
# STORAGE: local storage type, file or disk, default is file
|
||||
# note lvm is required for either case
|
||||
#
|
||||
# file : a large file simulating raw disk storing container runtime
|
||||
# data, located in FS_PREFIX/local, for single machine testing purpose.
|
||||
#
|
||||
# disk : raw disk for storing container files, for production purpose.
|
||||
# If using disk, a partition must be allocated to docklet
|
||||
# - a disk device name must be specified by DISK , e.g, /dev/sdc9
|
||||
# - this device must be formatted as Linux-LVM, and initialized
|
||||
# as a physical volume (pvcreate /dev/sdc9) in advance.
|
||||
# TAKE CARE to ensure the disk is OK before launching docklet.
|
||||
#
|
||||
# STORAGE=file
|
||||
#
|
||||
# DISK: disk device name if STORAGE is disk
|
||||
# DISK=/dev/sdc9
|
||||
|
||||
# CLUSTER_SIZE: virtual cluster size, default is 1
|
||||
# CLUSTER_SIZE=1
|
||||
|
||||
# CLUSTER_NET: cluster network ip address range, default is 172.16.0.1/16
|
||||
# CLUSTER_NET=172.16.0.1/16
|
||||
|
||||
# CONTAINER_CPU: CPU quota of container, default is 100000
|
||||
# A single CPU core has total=100000 (100ms), so the default 100000
|
||||
# means a single container can occupy a whole core.
|
||||
# For a CPU with two cores, this can be set to 200000
|
||||
# CONTAINER_CPU=100000
|
||||
|
||||
# CONTAINER_DISK: disk quota of container image upper layer, count in MB,
|
||||
# default is 1000
|
||||
# CONTAINER_DISK=1000
|
||||
|
||||
# CONTAINER_MEMORY: memory quota of container, count in MB, default is 1000
|
||||
# CONTAINER_MEMORY=1000
|
||||
|
||||
# DISKPOOL_SIZE: lvm group size, count in MB, default is 5000
|
||||
# Only valid with STORAGE=file
|
||||
# DISKPOOL_SIZE=5000
|
||||
|
||||
# ETCD: etcd address, default is localhost:2379
|
||||
# For a multi-host environment, the administrator should configure how
|
||||
# the etcd cluster works together
|
||||
# ETCD=localhost:2379
|
||||
|
||||
# NETWORK_DEVICE: specify the network interface docklet uses,
|
||||
# Default is eth0
|
||||
# NETWORK_DEVICE=eth0
|
||||
|
||||
# PORTAL_URL: the public docklet portal url. for a production system,
|
||||
# it should be a valid URL, like http://docklet.info
|
||||
# default is MASTER_IP:PROXY_PORT
|
||||
# PORTAL_URL=http://localhost:8000
|
||||
|
||||
# MASTER_IP: master listen ip, default listens on all interfaces
|
||||
# MASTER_IP=0.0.0.0
|
||||
|
||||
# MASTER_PORT: master listen port, default is 9000
|
||||
# MASTER_PORT=9000
|
||||
|
||||
# WORKER_PORT: worker listen port, default is 9001
|
||||
# WORKER_PORT=9001
|
||||
|
||||
# PROXY_PORT: the access port of the public portal, default is 8000
|
||||
# it is also the listen port of configurable-http-proxy, which
|
||||
# proxies connections from the external public network to the internal private
|
||||
# container networks. Usually 80 is recommended for a production environment.
|
||||
# PROXY_PORT=8000
|
||||
|
||||
# PROXY_API_PORT: configurable-http-proxy api port, default is 8001
|
||||
# Admins can query the proxy table by calling:
|
||||
# curl http://localhost:8001/api/routes
|
||||
# PROXY_API_PORT=8001
|
||||
|
||||
# WEB_PORT: docklet web listening port, default is 8888
|
||||
# Note: docklet web server is located behind the docklet proxy.
|
||||
# Users access docklet first through proxy, then docklet web server.
|
||||
# Therefore, it is not for direct user access. In most cases,
|
||||
# admins need not change the default value.
|
||||
# WEB_PORT=8888
|
||||
|
||||
# LOG_LEVEL: logging level, of DEBUG, INFO, WARNING, ERROR, CRITICAL
|
||||
# default is DEBUG
|
||||
# LOG_LEVEL=DEBUG
|
||||
|
||||
# LOG_LIFE: how many days the logs will be kept, default is 10
|
||||
# LOG_LIFE=10
|
||||
|
||||
# WEB_LOG_LEVEL: logging level, of DEBUG, INFO, WARNING, ERROR, CRITICAL
|
||||
# default is DEBUG
|
||||
# WEB_LOG_LEVEL=DEBUG
|
||||
|
||||
# EXTERNAL_LOGIN: whether docklet will use external accounts to log in
|
||||
# True or False, default is False
|
||||
# default: authenticate local and PAM users
|
||||
# EXTERNAL_LOGIN=False
|
||||
|
||||
# EMAIL_FROM_ADDRESS : the e-mail address used to send activation e-mails to users
|
||||
# If this address is "", no email will be sent out.
|
||||
# default: ""
|
||||
# EMAIL_FROM_ADDRESS=""
|
||||
|
||||
# ADMIN_EMAIL_ADDRESS : when an activation request is sent, an e-mail will
|
||||
# be sent to this address to remind the admin.
|
||||
# If this address is "", no email will be sent to the admin.
|
||||
# default: ""
|
||||
# ADMIN_EMAIL_ADDRESS=""
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/sh
|
||||
|
||||
ovs-vsctl --if-exists del-port $Bridge $5
|
|
@ -0,0 +1,10 @@
|
|||
#!/bin/sh
|
||||
|
||||
|
||||
# $1 : name of container ( name in lxc-start with -n)
|
||||
# $2 : net
|
||||
# $3 : network flags, up or down
|
||||
# $4 : network type, for example, veth
|
||||
# $5 : value of lxc.network.veth.pair
|
||||
|
||||
ovs-vsctl --may-exist add-port $Bridge $5 tag=$VLANID
|
|
@ -0,0 +1,7 @@
|
|||
#!/bin/sh
|
||||
|
||||
# $1 Container name.
|
||||
# $2 Section (always 'lxc').
|
||||
# $3 The hook type (i.e. 'clone' or 'pre-mount').
|
||||
|
||||
#cd $LXC_ROOTFS_PATH/root ; rm -rf nfs && ln -s ../nfs nfs
|
|
@ -0,0 +1,8 @@
|
|||
#!/bin/sh
|
||||
|
||||
# $1 Container id
|
||||
# $2 Container name.
|
||||
# $3 Section (always 'lxc').
|
||||
# $4 The hook type (i.e. 'clone' or 'pre-mount').
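# HNAME is passed as an environment assignment on the lxc.hook.pre-start line in
# conf/container.conf; LXC_ROOTFS_PATH is an environment variable LXC provides to hooks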
|
||||
|
||||
echo $HNAME > $LXC_ROOTFS_PATH/etc/hostname
|
|
@ -0,0 +1,93 @@
|
|||
# NOTE
|
||||
|
||||
## here are some thoughts and notes from coding
|
||||
|
||||
* path : a script needs to know its own path in order to call/import other scripts -- use environment variables
|
||||
|
||||
* FS_PREFIX : docklet filesystem path to put data
|
||||
|
||||
* overlay : run " modprobe overlay " to load the overlay module
|
||||
|
||||
* after reboot :
|
||||
* bridges are lost -- that's OK, just recreate them
|
||||
* the loop device is lost -- run losetup /dev/loop0 BLOCK_FILE again, and LVM will get the group and volumes back automatically (sketch below)
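
    A rough recovery sketch (the loop file path follows the FS_PREFIX layout described in these docs; adjust as needed):

        losetup /dev/loop0 /opt/docklet/local/docklet-storage   # re-attach the loop file
        vgchange -ay                                            # re-activate whatever volume groups LVM finds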
|
||||
|
||||
* LVM can do snapshots, so image management could use LVM snapshots -- No! An LVM snapshot consumes capacity from the LVM group.
|
||||
|
||||
* cgroup memory control may not work; run the command below:
|
||||
echo 'GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"' >> /etc/default/grub && update-grub && reboot
|
||||
|
||||
* Debian does not support the cpu.cfs_quota_us cgroup option; the Linux kernel needs to be recompiled with the CONFIG_CFS_BANDWIDTH option
|
||||
|
||||
* ip can add bridges/links/GRE; maybe we should test whether ip can replace ovs-vsctl and brctl. ( see "man ip-link" )
|
||||
|
||||
* lxc.mount.entry :
|
||||
* do not use relative paths; use absolute paths, like :
|
||||
lxc.mount.entry = /root/from-dir /root/rootfs/to-dir none bind 0 0 # lxc.rootfs = /root/rootfs
|
||||
if a relative path is used, the container path will be mounted on /usr/lib/x86_64..../ , a non-existent path
|
||||
* the host path and the container path should both exist; if the container path does not exist, it will be mounted on /usr/lib/x86_64....
|
||||
* if the path in the container does not exist, you can use the option create=dir/file, like :
|
||||
lxc.mount.entry = /root/from-dir /root/rootfs/to-dir none bind,create=dir 0 0 # lxc.rootfs = /root/rootfs
|
||||
|
||||
* lxc.mount.entry : bind and rbind ( see "man mount" )
|
||||
* bind means mounting a part of a filesystem somewhere else in the same filesystem
|
||||
* but bind only attaches a single filesystem. That means submounts of the source directory may disappear in the target directory.
|
||||
* if you want submounts to work, use the rbind option.
|
||||
rbind mounts the entire file hierarchy, including submounts, in the other place.
|
||||
* NOW, we use bind in container.sh; it may need rbind if FS_PREFIX/global/users/$USERNAME/nfs is under a glusterfs mountpoint
|
||||
|
||||
* the rpc server may not be secure: anyone who knows the ip address can call the rpc methods.
|
||||
* maybe we can use the "transport" option of xmlrpc.client.ServerProxy(uri, transport="http://user:pass@host:port/path") and the SimpleXMLRPCRequestHandler of xmlrpc.server.SimpleXMLRPCServer(addr, requestHandler=..) to parse and authenticate the rpc request
|
||||
xmlrpc.client.ServerProxy also supports https requests, which is another security option
|
||||
* If we use rpc with authentication, maybe we can just use an http server and http requests instead of rpc
|
||||
|
||||
* frontend and backend
|
||||
arch:
|
||||
+-----------------+
|
||||
Web -- Flask --HttpRest Core |
|
||||
+-----------------+
|
||||
Now, HttpRest and Core work as backend
|
||||
Web and Flask work as frontend
|
||||
all modules are in backend
|
||||
Flask just dispatches urls and renders web pages
|
||||
(Maybe Flask could be merged into Core and work as the http server)
|
||||
(Then Flask would need to render pages, parse urls, respond to requests, ...)
|
||||
(That may not be ideal)
|
||||
|
||||
* httprest.py :
|
||||
httphandler needs to call vclustermgr/nodemgr/... to handle requests
|
||||
we need to use these classes in httphandler
|
||||
Way-1: create these classes in httphandler's init function (httphandler needs to init its parent class) -- wrong : the httpserver creates a new httphandler instance for every http request ( see /usr/lib/python3.4/socketserver.py )
|
||||
Way-2: use global variables -- this is the way used now
|
||||
|
||||
* in the shell, running a python script or any other non-built-in command starts it in a new process and a new process group ( see the csapp shell lab )
|
||||
so the environment variables set in the shell can not be seen in python/...
|
||||
but a command like the one below works :
|
||||
A=ab B=ba ./python.py
|
||||
|
||||
* maybe we need to parse argv in python
|
||||
some modules to parse argv : sys.argv, optparse, getopt, argparse
|
||||
|
||||
* in the shell, { command; } runs the command in the current shell; the ";" is necessary
|
||||
( command; ) runs the command in a subshell
|
||||
|
||||
* functions registered in the rpc server must return a value;
|
||||
without a return value, the rpc client will raise an exception
|
||||
|
||||
* ** NEEDS TO BE FIXED **
|
||||
we add a prefix in etcdlib,
|
||||
so when we getkey, the key may be an absolute path from the base url;
|
||||
when we setkey using that key, etcdlib will append the absolute path to the prefix, which is wrong
|
||||
|
||||
* overlay : upperdir and workdir must be in the same mounted filesystem;
|
||||
that means we should mount the LV first and then mkdir upperdir and workdir inside the LV mountpoint (see the sketch below)
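
    A minimal sketch with hypothetical device and directory names:

        mount /dev/mapper/some-lv /mnt/lv        # mount the LV first
        mkdir -p /mnt/lv/upper /mnt/lv/work /mnt/merged
        mount -t overlay overlay -o lowerdir=/opt/docklet/local/basefs,upperdir=/mnt/lv/upper,workdir=/mnt/lv/work /mnt/merged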
|
||||
|
||||
* when 'worker.py > log' is used to redirect the output of a python script, the log may stay empty,
|
||||
because the python interpreter buffers its output.
|
||||
the ways below can fix this problem:
|
||||
stdbuf -o 0 worker.py > log # but it failed in my test; not sure why
|
||||
python3 -u worker.py > log # recommended, -u option of python3
|
||||
print('output', flush=True) # flush option of print
|
||||
sys.stdout.flush() # flush by hand
|
||||
|
||||
* CPU QUOTA should not be too small; if it is, the container will run very slowly
|
|
@ -0,0 +1,77 @@
|
|||
# Info of docklet
|
||||
|
||||
## container info
|
||||
container name : username-clusterid-nodeid
|
||||
hostname : host-nodeid
|
||||
lxc config : /var/lib/lxc/username-clusterid-nodeid/config
|
||||
lxc rootfs : /var/lib/lxc/username-clusterid-nodeid/rootfs
|
||||
lxc rootfs
|
||||
|__ / : aufs : basefs + volume/username-clusterid-nodeid
|
||||
|__ /nfs : global/users/username/data
|
||||
|__ /etc/hosts : global/users/username/clusters/clusterid/hosts
|
||||
|__ /root/.ssh : global/users/username/ssh
|
||||
|
||||
|
||||
## ETCD Table
|
||||
We use etcd to store configuration information for our clusters; here are some details.
|
||||
|
||||
Every cluster has a CLUSTER_NAME, and all data of a cluster is put in a directory named CLUSTER_NAME in etcd, just like a table.
|
||||
|
||||
So different clusters should have different CLUSTER_NAMEs.
|
||||
|
||||
Below is the content of the cluster info in the CLUSTER_NAME 'table' in etcd (an example query follows the table):
|
||||
|
||||
<type> <name> <content> <description>
|
||||
key token random code token for checking whether the master and workers have the same global filesystem
|
||||
|
||||
dir machines ... info of physical clusters
|
||||
dir machines/allnodes ip:ok record all nodes, for recovery and checks
|
||||
dir machines/runnodes ip: ? record running node for this start up.
|
||||
when startup: ETCD
|
||||
| IP:waiting | 1. worker write worker-ip:waiting
|
||||
2. master update IP:init-mode | IP:init-mode | 3. worker init itself by init-mode
|
||||
| IP:work | 4. worker finish init and update IP:work
|
||||
5. master add workerip and update IP:ok | IP:ok |
|
||||
|
||||
key service/master master-ip
|
||||
key service/mode new,recovery start mode of cluster
|
||||
|
||||
key vcluster/nextid ID next available ID
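
For example, with the default CLUSTER_NAME=docklet-vc and etcd on localhost:2379, the table can be inspected with:

    curl http://localhost:2379/v2/keys/docklet-vc/service/master
    curl http://localhost:2379/v2/keys/docklet-vc/machines/allnodes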
|
||||
|
||||
|
||||
|
||||
## filesystem
|
||||
Here is a description of the paths and contents of the docklet filesystem:
|
||||
|
||||
FS_PREFIX
|
||||
|__ global/users/{username}
|
||||
| |__ clusters/clustername : clusterid, cluster size, status, containers, ... in json format
|
||||
| |__ hosts/id.hosts : ip host-nodeid host-nodeid.clustername
|
||||
| |__ data : directory in the distributed filesystem for users to put their data
|
||||
| |__ ssh : ssh keys
|
||||
|
|
||||
|__ local
|
||||
|__ docklet-storage : loop file for lvm
|
||||
|__ basefs : base image
|
||||
|__ volume / { username-clusterid-nodeid } : upper layer of container
|
||||
|
||||
|
||||
|
||||
## vcluster files
|
||||
|
||||
### hosts file:(raw)
|
||||
IP-0 host-0 host-0.clustername
|
||||
IP-1 host-1 host-1.clustername
|
||||
...
|
||||
|
||||
### info file:(json)
|
||||
{
|
||||
clusterid: ID ,
|
||||
status: stopped/running ,
|
||||
size: size ,
|
||||
containers: [
|
||||
{ containername: lxc_name, hostname: hostname, ip: lxc_ip, host: host_ip },
|
||||
{ containername: lxc_name, hostname: hostname, ip: lxc_ip, host: host_ip },
|
||||
...
|
||||
]
|
||||
}
|
|
@ -0,0 +1,67 @@
|
|||
# Network Manager
|
||||
|
||||
## About
|
||||
The network manager is the module that provides network management for docklet.
|
||||
|
||||
There are two main requirements:
|
||||
* a central management pool that allocates network pools to users by network segment (IP/CIDR)
|
||||
* many per-user network pools that allocate one or several network addresses to a user's cluster
|
||||
|
||||
## Data Structure
|
||||
To meet these two requirements, two data structures are designed to manage network addresses.
|
||||
* interval pool : allocates and reclaims network segments
|
||||
|
||||
|
||||
The elements of the interval pool are intervals; the pool consists of many of them.
|
||||
A naive interval pool looks like this : interval pool : [A1,A2],[B1,B2],[C1,C2],...[X1,X2]
|
||||
Each time a range of addresses is requested, one interval is chosen for the allocation, and the remaining part of that interval is put back into the pool.
|
||||
|
||||
Since a network segment (IP/CIDR) has a power-of-two structure, the interval pool can be further organized as follows:
|
||||
interval pool:
|
||||
... ...
|
||||
cidr=16 : [A1,A2], [A3,A4], ...
|
||||
cidr=17 : [B1,B2], [B3,B4], ...
|
||||
cidr=18 : [C1,C2], [C3,C4], ...
|
||||
... ...
|
||||
This structure can be optimized further: the end address of each interval can be computed from its start address and the CIDR, so only the start address needs to be stored for each interval.
|
||||
So:
|
||||
interval pool:
|
||||
... ...
|
||||
cidr=16 : A1, A3, ...
|
||||
cidr=17 : B1, B3, ...
|
||||
cidr=18 : C1, C3, ...
|
||||
... ...
|
||||
Here each element, e.g. A1, actually represents the interval [A1, A1+2^16-1].
|
||||
The advantage of this power-of-two interval design is that splitting and merging intervals is convenient, which makes the operations more efficient.
|
||||
|
||||
* enumeration pool : allocates and reclaims one or several network addresses
|
||||
|
||||
|
||||
The elements of the enum pool are single network addresses, for example:
|
||||
enum pool : A, B, C, D, ... X
|
||||
|
||||
## API
|
||||
The APIs that operate on these two data structures are omitted here.
|
||||
|
||||
## Network Manager Storage Design
|
||||
* center : the central pool, which allocates and reclaims user network segments
|
||||
|
||||
|
||||
info : IP/CIDR
|
||||
intervalpool :
|
||||
cidr16 : ...
|
||||
cidr17 : ...
|
||||
... ...
|
||||
|
||||
* system : system-reserved addresses, providing allocation and reclamation of network addresses used internally by the system
|
||||
|
||||
|
||||
info : IP/CIDR
|
||||
enumpool : ...
|
||||
|
||||
* vlan/<username> : provides address allocation and reclamation for a specific user
|
||||
|
||||
|
||||
info : IP/CIDR
|
||||
enumpool : ...
|
||||
vlanid : id
|
|
@ -0,0 +1,266 @@
|
|||
# Test of VLAN on openvswitch
|
||||
|
||||
## Note 1
|
||||
Basic operations: create a bridge, configure an address, and bring the bridge up
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ip address add 172.0.0.1/8 dev br0
|
||||
ip link set br0 up
|
||||
|
||||
## Note 2
|
||||
Specify the veth pair name in the LXC conf so that the network connection is easier to control.
|
||||
|
||||
So the conf file needs to be modified to achieve this:
|
||||
|
||||
lxc.network.type = veth
|
||||
lxc.network.name = eth0
|
||||
lxc.network.script.up = Bridge=br0 /home/leebaok/Container/lxc-ifup
|
||||
lxc.network.script.down = Bridge=br0 /home/leebaok/Container/lxc-ifdown
|
||||
lxc.network.veth.pair = base
|
||||
lxc.network.ipv4 = 172.0.0.10/8
|
||||
lxc.network.ipv4.gateway = 172.0.0.1
|
||||
lxc.network.flags = up
|
||||
lxc.network.mtu = 1420
|
||||
|
||||
Some explanation of the configuration above:
|
||||
* lxc.network.link is no longer needed
|
||||
* lxc.network.script.up/down specify the network setup before the container starts and the teardown after it stops; the script path is a path on the physical host, because the script is executed by the host, and "Bridge=br0" passes a parameter to the script that follows
|
||||
* lxc.network.veth.pair is the name of the network connection, i.e. which host-side port the container is attached to
|
||||
|
||||
Having configured the script paths in the network settings, we still need to implement the two scripts themselves:
|
||||
* /home/leebaok/Container/lxc-ifup
|
||||
|
||||
|
||||
#!/bin/bash
|
||||
# $1 : name of container ( name in lxc-start with -n )
|
||||
# $2 : net
|
||||
# $3 : network flags, up or down
|
||||
# $4 : network type, for example, veth
|
||||
# $5 : value of lxc.network.veth.pair
|
||||
ovs-vsctl --may-exist add-port $Bridge $5
|
||||
# ovs-vsctl set port $5 tag=$Tag
|
||||
|
||||
* /home/leebaok/Container/lxc-ifdown
|
||||
|
||||
|
||||
#!/bin/bash
|
||||
# $1 : name of container ( name in lxc-start with -n )
|
||||
# $2 : net
|
||||
# $3 : network flags, up or down
|
||||
# $4 : network type, for example, veth
|
||||
# $5 : value of lxc.network.veth.pair
|
||||
ovs-vsctl --if-exists del-port $Bridge $5
|
||||
|
||||
## Note 3
|
||||
VLAN tag operations:
|
||||
|
||||
ovs-vsctl set port <port-name> tag=<tag-id>
|
||||
ovs-vsctl clear port <port-name> tag
|
||||
|
||||
A patch port is used to connect two bridges; the operations are as follows:
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ovs-vsctl add-br br1
|
||||
ovs-vsctl add-port br0 patch0 -- set interface patch0 type=patch options:peer=patch1
|
||||
ovs-vsctl add-port br1 patch1 -- set interface patch1 type=patch options:peer=patch0
|
||||
# NOW : two bridges are connected by patch
|
||||
|
||||
|
||||
## Note 4
|
||||
A machine can only have one bridge per network domain. For example, on host-0, create two bridges:
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ip address add 172.0.0.1/8 dev br0
|
||||
ip link set br0 up
|
||||
|
||||
ovs-vsctl add-br br1
|
||||
ip address add 172.0.0.2/8 dev br1
|
||||
ip link set br1 up
|
||||
|
||||
Then the bridge configured later will not work,
|
||||
|
||||
because the system assumes that all machines within 172.0.0.1/8 should be reached via br0.
|
||||
|
||||
The following configuration, however, is correct:
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ip address add 172.0.0.1/24 dev br0
|
||||
ip link set br0 up
|
||||
|
||||
ovs-vsctl add-br br1
|
||||
ip address add 172.0.1.1/24 dev br1
|
||||
ip link set br1 up
|
||||
|
||||
## Note 5
|
||||
About gateways: bridges/switches are layer-2 devices while a gateway is a layer-3 component, so we can connect bridges together and let multiple bridges share one gateway.
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ip link set br0 up
|
||||
ovs-vsctl add-br br1
|
||||
ip address add 172.0.0.1/24 dev br1
|
||||
ip link set br1 up
|
||||
ovs-vsctl add-port br0 patch0 -- set interface patch0 type=patch options:peer=patch1
|
||||
ovs-vsctl add-port br1 patch1 -- set interface patch1 type=patch options:peer=patch0
|
||||
|
||||
# lxc config :
|
||||
# ip -- 172.0.0.11/24
|
||||
# gateway -- 172.0.0.1
|
||||
# lxc.network.veth.pair -- base , base is connected on br0
|
||||
lxc-start -f container.conf -n base -F -- /bin/bash
|
||||
# NOW : lxc network is running ok
|
||||
|
||||
## Note 6
|
||||
Implementing VLANs on top of multiple bridges
|
||||
|
||||
### Scheme 1
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ip link set br0 up
|
||||
ovs-vsctl add-br br1
|
||||
ip address add 172.0.0.1/24 dev br1
|
||||
ip link set br1 up
|
||||
ovs-vsctl add-port br0 patch0 -- set interface patch0 type=patch options:peer=patch1
|
||||
ovs-vsctl add-port br1 patch1 -- set interface patch1 type=patch options:peer=patch0
|
||||
|
||||
# lxc config :
|
||||
# ip -- 172.0.0.11/24
|
||||
# gateway -- 172.0.0.1
|
||||
# lxc.network.veth.pair -- base , base is connected on br0
|
||||
lxc-start -f container.conf -n base -F -- /bin/bash
|
||||
# NOW : lxc network is running ok
|
||||
## above is the same as before
|
||||
|
||||
ovs-vsctl set port base tag=5
|
||||
ovs-vsctl set port patch0 tag=5
|
||||
# NOW : lxc network is running ok
|
||||
|
||||
# ARCH
|
||||
+-----------------------+ +----------------------+
|
||||
| br0 | | br1 : 172.0.0.1/24 |
|
||||
+--+-----tag=5---tag=5--+ +---+-------+----------+
|
||||
| | | patch | |
|
||||
| | +-------------------+ |
|
||||
| | |
|
||||
internal base:172.0.0.11/24 internal
|
||||
(gateway:172.0.0.1)
|
||||
|
||||
# flow : base --> patch --> br1/internal
|
||||
|
||||
* This scheme works.
|
||||
* However, each VLAN needs its own gateway.
|
||||
|
||||
### Scheme 2 (not feasible)
|
||||
|
||||
# ARCH
|
||||
+-------------------------------------------------------------+
|
||||
| br0 |
|
||||
+--+-----tag=5---tag=5---------+-----tag=6---tag=6---------+--+
|
||||
| | | +-----+ | | | +-----+ |
|
||||
| | +--| br1 |--+ | +--| br2 |--+
|
||||
| | +-----+ | +-----+
|
||||
internal base1:172.0.0.11/24 base2:172.0.0.12/24
|
||||
|
||||
# flow 1 : base1 --> br1 --> internal
|
||||
# flow 2 : base1 --> br1 --> br2 --> base2
|
||||
|
||||
* This scheme is not feasible, because the flows above let base1 and base2 communicate at layer 2, so they cannot be isolated.
|
||||
|
||||
## Note 7
|
||||
Simplified versions of the workable scheme above
|
||||
### Simplified version 1
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ip link set br0 up
|
||||
# add a fake bridge connected to br0 with vlan tag=5
|
||||
ovs-vsctl add-br fakebr br0 5
|
||||
ip address add 172.0.0.1/24 dev fakebr
|
||||
ip link set fakebr up
|
||||
|
||||
# lxc config:
|
||||
# ip : 172.0.0.11/24
|
||||
# gateway : 172.0.0.1/24
|
||||
# lxc.network.veth.pair -- base , base is connected on br0
|
||||
lxc-start -f container.conf -n base -F -- /bin/bash
|
||||
|
||||
ovs-vsctl set port base tag=5
|
||||
|
||||
# ARCH
|
||||
+-----------------------+
|
||||
| br0 |
|
||||
+--+-----tag=5---tag=5--+
|
||||
| | |
|
||||
| | fakebr:172.0.0.1/24
|
||||
| |
|
||||
internal base:172.0.0.11/24
|
||||
(gateway:172.0.0.1)
|
||||
|
||||
# flow : base --> fakebr
|
||||
|
||||
### Simplified version 2
|
||||
|
||||
ovs-vsctl add-br br0
|
||||
ip link set br0 up
|
||||
# add an internal interface for vlan
|
||||
ovs-vsctl add-port br0 vlanif tag=5 -- set interface vlanif type=internal
|
||||
ip address add 172.0.0.1/24 dev vlanif
|
||||
ip link set vlanif up
|
||||
|
||||
# lxc config:
|
||||
# ip : 172.0.0.11/24
|
||||
# gateway : 172.0.0.1/24
|
||||
# lxc.network.veth.pair -- base , base is connected on br0
|
||||
lxc-start -f container.conf -n base -F -- /bin/bash
|
||||
|
||||
ovs-vsctl set port base tag=5
|
||||
|
||||
# ARCH
|
||||
+-----------------------+
|
||||
| br0 |
|
||||
+--+-----tag=5---tag=5--+
|
||||
| | |
|
||||
| | vlanif:172.0.0.1/24
|
||||
| |
|
||||
internal base:172.0.0.11/24
|
||||
(gateway:172.0.0.1)
|
||||
|
||||
# flow : base --> vlanif
|
||||
|
||||
### Simplified version 1 & simplified version 2
|
||||
When inspected with ovs-vsctl show, the two versions display the same information, which suggests that fakebr is essentially just an internal interface.
|
||||
|
||||
In fact, in Scheme 1, configuring the IP (172.0.0.1/24) on br1 really configures br1's internal interface, so the extra bridge is not necessary; the interface is what is actually needed.
|
||||
|
||||
An internal interface is effectively a virtual NIC attached to the local Linux host, with its other end connected to the OVS virtual bridge.
|
||||
|
||||
The Linux network stack in turn manages the physical and virtual NICs and handles forwarding, routing and other processing of the packets on them.
|
||||
|
||||
In a sense, the Linux network stack itself becomes one big switch/bridge, with the internal interfaces and the physical NICs attached to it.
|
||||
|
||||
## Note 8
|
||||
Based on the practice and exploration above, **we need to give each VLAN a gateway/NIC through which it can reach the outside.**
|
||||
|
||||
A simple and workable scheme could then look like this (a command sketch follows the diagram):
|
||||
|
||||
+------------------------------------------------------------------------------+
|
||||
| bridge |
|
||||
| <------- VLAN ID=5 ---------> <---- VLAN ID=6 ------> |
|
||||
+--+-----tag=5---tag=5------------tag=5-------------tag=6-------------tag=6----+
|
||||
| | | | | |
|
||||
| | lxc-2:172.0.0.12/24 | | |
|
||||
internal | (gateway:172.0.0.1) | | |
|
||||
| | | |
|
||||
lxc-1:172.0.0.11/24 gw5:172.0.0.1/24 lxc-3:172.0.1.11/24 gw6:172.0.1.1/24
|
||||
(gateway:172.0.0.1) internal (gateway:172.0.1.1) internal
|
||||
| |
|
||||
| |
|
||||
+----------- NAT / iptables --------+
|
||||
||||
|
||||
||||
|
||||
\\\///
|
||||
\\//
|
||||
\/
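
A command sketch of one such per-VLAN gateway (VLAN 5; br0 is the test bridge from the earlier notes, the addresses follow the diagram, and the NAT rule mirrors the one in bin/docklet-master):

    ovs-vsctl add-port br0 gw5 tag=5 -- set interface gw5 type=internal
    ip address add 172.0.0.1/24 dev gw5
    ip link set gw5 up
    iptables -t nat -A POSTROUTING -s 172.0.0.0/24 -j MASQUERADE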
|
||||
|
||||
|
||||
|
||||
|
||||
# end
|
|
@ -0,0 +1,33 @@
|
|||
# Some Note for configurable-http-proxy usage
|
||||
|
||||
## install
|
||||
sudo apt-get install nodejs nodejs-legacy npm
|
||||
sudo npm install -g configurable-http-proxy
|
||||
|
||||
## start
|
||||
configurable-http-proxy -h : for help
|
||||
configurable-http-proxy --ip IP \
|
||||
--port PORT \
|
||||
--api-ip IP \
|
||||
--api-port PORT \
|
||||
--default-target http://IP:PORT \
|
||||
--log-level debug/info/warn/error
|
||||
default ip:port is 0.0.0.0:8000,
|
||||
default api-ip:api-port is localhost:8001
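
For example, bin/docklet-master starts the proxy roughly like this:

    configurable-http-proxy --port 8000 --api-port 8001 --default-target=http://localhost:8888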
|
||||
|
||||
## control route table
|
||||
### get route table
|
||||
* without token:
|
||||
curl http://localhost:8001/api/routes
|
||||
* with token:
|
||||
curl -H "Authorization: token TOKEN" http://localhost:8001/api/routes
|
||||
### add/set route table
|
||||
* without token:
|
||||
curl -XPOST --data '{"target":"http://TARGET-IP:TARGET-PORT"}' http://localhost:8001/api/routes/PROXY-URL
|
||||
* with token:
|
||||
curl -H "Authorization: token TOKEN" -XPOST --data '{"target":"http://TARGET-IP:TARGET-PORT"}' http://localhost:8001/api/routes/PROXY-URL
|
||||
### delete route table line
|
||||
* without token:
|
||||
curl -XDELETE http://localhost:8001/api/routes/PROXY-URL
|
||||
* with token:
|
||||
curl -H "Authorization: token TOKEN" -XDELETE http://localhost:8001/api/routes/PROXY-URL
|
|
@ -0,0 +1,45 @@
|
|||
# startup mode
|
||||
|
||||
## new mode
|
||||
#### step 1 : data
|
||||
<Master>
|
||||
clean etcd table
|
||||
write token
|
||||
init etcd table
|
||||
clean global directory of user clusters
|
||||
#### step 2 : nodemgr
|
||||
<Master> <Slave>
|
||||
init network
|
||||
wait for all nodes starts
|
||||
|_____ listen node joins IP:waiting <--- worker starts
|
||||
update etcd ----> IP:init-mode ---> worker init
|
||||
|____ stop all containers
|
||||
|____ umount mountpoint, delete lxc files, delete LV
|
||||
|____ delete VG, umount loop dev, delete loop file
|
||||
|____ init loop file, loop dev, create VG
|
||||
add node to list <--- IP:work <---- init done, begin work
|
||||
check all nodes begin work
|
||||
#### step 3 : vclustermgr
|
||||
Nothing to do
|
||||
|
||||
|
||||
|
||||
|
||||
## recovery mode
|
||||
#### step 1 : data
|
||||
<Master>
|
||||
write token
|
||||
init some of etcd table
|
||||
#### step 2 : nodemgr
|
||||
<Master> <Slave>
|
||||
init network
|
||||
wait for all nodes starts
|
||||
|_____ listen node joins IP:waiting <--- worker starts
|
||||
update etcd ----> IP:init-mode ---> worker init
|
||||
|____ check loop file, loop dev, VG
|
||||
|____ check all containers and mountpoint
|
||||
add node to list <--- IP:work <---- init done, begin work
|
||||
check all nodes begin work
|
||||
#### step 3 : vclustermgr
|
||||
<Master> <Slave>
|
||||
recover vclusters:some need start ---------------> recover containers: some need start
|
|
@ -0,0 +1,40 @@
|
|||
# import package
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from sklearn import linear_model, datasets
|
||||
%matplotlib inline
|
||||
|
||||
# load data : we only use target==0 and target==1 (binary classification) and features 0 and 2
|
||||
iris = datasets.load_iris()
|
||||
X = iris.data[iris.target!=2][:, [0,2]]
|
||||
Y = iris.target[iris.target!=2]
|
||||
|
||||
h = .02 # step size in the mesh
|
||||
|
||||
logreg = linear_model.LogisticRegression(C=1e5)
|
||||
logreg.fit(X, Y)
|
||||
|
||||
# Plot the decision boundary. For that, we will assign a color to each
|
||||
# point in the mesh [x_min, x_max]x[y_min, y_max].
|
||||
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
|
||||
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
|
||||
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
|
||||
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
|
||||
|
||||
# Put the result into a color plot
|
||||
Z = Z.reshape(xx.shape)
|
||||
#plt.figure(1, figsize=(4, 3))
|
||||
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
|
||||
plt.xlabel('Sepal length')
|
||||
plt.ylabel('Sepal width')
|
||||
|
||||
# Plot also the training points
|
||||
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
|
||||
plt.xlabel('Sepal length')
|
||||
plt.ylabel('Sepal width')
|
||||
|
||||
plt.xlim(xx.min(), xx.max())
|
||||
plt.ylim(yy.min(), yy.max())
|
||||
plt.xticks(())
|
||||
plt.yticks(())
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
#!/bin/bash
|
||||
|
||||
##################################################
|
||||
# before-start.sh
|
||||
# when you first use docklet, you should run this script to
|
||||
# check and prepare the environment
|
||||
# *important* : you may need to run this script again and again until it succeeds
|
||||
##################################################
|
||||
|
||||
if [[ "`whoami`" != "root" ]]; then
|
||||
echo "FAILED: Require root previledge !" > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# check cgroup control
|
||||
which cgm &> /dev/null || { echo "FAILED : cgmanager is required, please install cgmanager" && exit 1; }
|
||||
cpucontrol=$(cgm listkeys cpu)
|
||||
[[ -z $(echo $cpucontrol | grep cfs_quota_us) ]] && echo "FAILED : cpu.cfs_quota_us of cgroup is not supported, you may need to recompile kernel" && exit 1
|
||||
memcontrol=$(cgm listkeys memory)
|
||||
if [[ -z $(echo $memcontrol | grep limit_in_bytes) ]]; then
|
||||
echo "FAILED : memory.limit_in_bytes of cgroup is not supported"
|
||||
echo "Try : "
|
||||
echo -e " echo 'GRUB_CMDLINE_LINUX=\"cgroup_enable=memory swapaccount=1\"' >> /etc/default/grub; update-grub; reboot" > /dev/stderr
|
||||
echo "Info : if not success, you may need to recompile kernel"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# install packages that docklet needs (in ubuntu)
|
||||
# some packages' name maybe different in debian
|
||||
apt-get install -y cgmanager lxc lvm2 bridge-utils curl exim4 openssh-server openvswitch-switch
|
||||
apt-get install -y python3 python3-netifaces python3-flask python3-flask-sqlalchemy python3-pampy
|
||||
apt-get install -y python3-psutil
|
||||
apt-get install -y python3-lxc
|
||||
apt-get install -y python3-requests python3-suds
|
||||
apt-get install -y nodejs nodejs-legacy npm
|
||||
apt-get install -y etcd
|
||||
|
||||
# check and install configurable-http-proxy
|
||||
which configurable-http-proxy &>/dev/null || npm install -g configurable-http-proxy
|
||||
which configurable-http-proxy &>/dev/null || { echo "Error : install configurable-http-proxy failed, you should try again" && exit 1; }
|
||||
|
||||
[[ -f conf/docklet.conf ]] || { echo "Generating docklet.conf from template" && cp conf/docklet.conf.template conf/docklet.conf; }
|
||||
|
||||
echo ""
|
||||
echo "All preparation installation is done."
|
||||
echo "****************************************"
|
||||
echo "* Please Read Lines Below Before Start *"
|
||||
echo "****************************************"
|
||||
echo ""
|
||||
|
||||
echo "Before staring : you need a basefs image. "
|
||||
echo "basefs images are provided at: "
|
||||
echo " http://docklet.unias.org/download"
|
||||
echo "Please download it to FS_PREFIX/local and then extract it. (defalut FS_PRERIX is /opt/docklet)"
|
||||
echo "Probably you will get a dicectory structure like"
|
||||
echo " /opt/docklet/local/basefs/etc "
|
||||
echo " /opt/docklet/local/basefs/bin "
|
||||
echo " /opt/docklet/local/basefs/..."
|
||||
echo " "
|
||||
|
||||
echo "Next, make sure exim4 can deliver mail out. To enable, run:"
|
||||
echo "dpkg-reconfigure exim4-config"
|
||||
echo "select internet site"
|
||||
|
||||
echo ""
|
||||
|
||||
|
||||
echo "Then start docklet as described in README.md"
|
||||
|
|
@ -0,0 +1,348 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import subprocess, os, json
|
||||
import imagemgr
|
||||
from log import logger
|
||||
import env
|
||||
from lvmtool import *
|
||||
|
||||
class Container(object):
|
||||
def __init__(self, addr, etcdclient):
|
||||
self.addr = addr
|
||||
self.etcd=etcdclient
|
||||
self.libpath = env.getenv('DOCKLET_LIB')
|
||||
self.confpath = env.getenv('DOCKLET_CONF')
|
||||
self.fspath = env.getenv('FS_PREFIX')
|
||||
# set jupyter running dir in container
|
||||
self.rundir = "/home/jupyter"
|
||||
# set root running dir in container
|
||||
self.nodehome = "/root"
|
||||
|
||||
self.lxcpath = "/var/lib/lxc"
|
||||
self.imgmgr = imagemgr.ImageMgr()
|
||||
|
||||
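# create_container (descriptive note): prepare the per-container filesystem via
# imgmgr.prepareFS (LVM volume + union mount over basefs), render the LXC config
# from conf/lxc.custom.conf or conf/container.conf by substituting the %...% placeholders,
# and write the jupyter.config that is used when the container's services are started.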
def create_container(self, lxc_name, username, user_info, clustername, clusterid, hostname, ip, gateway, vlanid, image):
|
||||
logger.info("create container %s of %s for %s" %(lxc_name, clustername, username))
|
||||
try:
|
||||
user_info = json.loads(user_info)
|
||||
cpu = user_info["data"]["groupinfo"]["cpu"]
|
||||
memory = user_info["data"]["groupinfo"]["memory"]
|
||||
image = json.loads(image)
|
||||
status = self.imgmgr.prepareFS(username,image,lxc_name)
|
||||
if not status:
|
||||
return [False, "Create container failed when preparing filesystem, possibly insufficient space"]
|
||||
|
||||
#Ret = subprocess.run([self.libpath+"/lxc_control.sh",
|
||||
# "create", lxc_name, username, str(clusterid), hostname,
|
||||
# ip, gateway, str(vlanid), str(cpu), str(memory)], stdout=subprocess.PIPE,
|
||||
# stderr=subprocess.STDOUT,shell=False, check=True)
|
||||
|
||||
rootfs = "/var/lib/lxc/%s/rootfs" % lxc_name
|
||||
|
||||
if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)):
|
||||
logger.error("user %s directory not found" % username)
|
||||
return [False, "user directory not found"]
|
||||
sys_run("mkdir -p /var/lib/lxc/%s" % lxc_name)
|
||||
logger.info("generate config file for %s" % lxc_name)
|
||||
|
||||
if os.path.exists(self.confpath+"/lxc.custom.conf"):
|
||||
conffile = open(self.confpath+"/lxc.custom.conf",'r')
|
||||
else:
|
||||
conffile = open(self.confpath+"/container.conf",'r')
|
||||
|
||||
conftext = conffile.read()
|
||||
conffile.close()
|
||||
conftext = conftext.replace("%ROOTFS%",rootfs)
|
||||
conftext = conftext.replace("%HOSTNAME%",hostname)
|
||||
conftext = conftext.replace("%IP%",ip)
|
||||
conftext = conftext.replace("%GATEWAY%",gateway)
|
||||
conftext = conftext.replace("%CONTAINER_MEMORY%",str(memory))
|
||||
conftext = conftext.replace("%CONTAINER_CPU%",str(cpu))
|
||||
conftext = conftext.replace("%FS_PREFIX%",self.fspath)
|
||||
conftext = conftext.replace("%USERNAME%",username)
|
||||
conftext = conftext.replace("%CLUSTERID%",str(clusterid))
|
||||
conftext = conftext.replace("%LXCSCRIPT%",env.getenv("LXC_SCRIPT"))
|
||||
conftext = conftext.replace("%LXCNAME%",lxc_name)
|
||||
conftext = conftext.replace("%VLANID%",str(vlanid))
|
||||
conftext = conftext.replace("%CLUSTERNAME%", clustername)
|
||||
|
||||
conffile = open("/var/lib/lxc/%s/config" % lxc_name,"w")
|
||||
conffile.write(conftext)
|
||||
conffile.close()
|
||||
|
||||
#logger.debug(Ret.stdout.decode('utf-8'))
|
||||
logger.info("create container %s success" % lxc_name)
|
||||
|
||||
# get AUTH COOKIE URL for jupyter
|
||||
[status, authurl] = self.etcd.getkey("web/authurl")
|
||||
if not status:
|
||||
[status, masterip] = self.etcd.getkey("service/master")
|
||||
if status:
|
||||
webport = env.getenv("WEB_PORT")
|
||||
authurl = "http://%s:%s/jupyter" % (masterip,
|
||||
webport)
|
||||
else:
|
||||
logger.error ("get AUTH COOKIE URL failed for jupyter")
|
||||
authurl = "error"
|
||||
if (username=='guest'):
|
||||
cookiename='guest-cookie'
|
||||
else:
|
||||
cookiename='docklet-jupyter-cookie'
|
||||
|
||||
rundir = self.lxcpath+'/'+lxc_name+'/rootfs' + self.rundir
|
||||
|
||||
logger.debug(rundir)
|
||||
|
||||
if not os.path.exists(rundir):
|
||||
os.makedirs(rundir)
|
||||
else:
|
||||
if not os.path.isdir(rundir):
|
||||
os.remove(rundir)
|
||||
os.makedirs(rundir)
|
||||
|
||||
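# Write the jupyter startup parameters into the container's /home/jupyter/jupyter.config;
# start_services() later runs start_jupyter.sh from that same directory, which presumably
# reads this file (the script itself is not shown here).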
jconfigpath = rundir + '/jupyter.config'
|
||||
config = open(jconfigpath, 'w')
|
||||
jconfigs="""USER=%s
|
||||
PORT=%d
|
||||
COOKIE_NAME=%s
|
||||
BASE_URL=%s
|
||||
HUB_PREFIX=%s
|
||||
HUB_API_URL=%s
|
||||
IP=%s
|
||||
""" % (username, 10000, cookiename, '/go/'+username+'/'+clustername, '/jupyter',
|
||||
authurl, ip.split('/')[0])
|
||||
config.write(jconfigs)
|
||||
config.close()
|
||||
|
||||
except subprocess.CalledProcessError as sube:
|
||||
logger.error('create container %s failed: %s' % (lxc_name,
|
||||
sube.stdout.decode('utf-8')))
|
||||
return [False, "create container failed"]
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
return [False, "create container failed"]
|
||||
return [True, "create container success"]
|
||||
|
||||
def delete_container(self, lxc_name):
|
||||
logger.info ("delete container:%s" % lxc_name)
|
||||
if self.imgmgr.deleteFS(lxc_name):
|
||||
logger.info("delete container %s success" % lxc_name)
|
||||
return [True, "delete container success"]
|
||||
else:
|
||||
logger.info("delete container %s failed" % lxc_name)
|
||||
return [False, "delete container failed"]
|
||||
#status = subprocess.call([self.libpath+"/lxc_control.sh", "delete", lxc_name])
|
||||
#if int(status) == 1:
|
||||
# logger.error("delete container %s failed" % lxc_name)
|
||||
# return [False, "delete container failed"]
|
||||
#else:
|
||||
# logger.info ("delete container %s success" % lxc_name)
|
||||
# return [True, "delete container success"]
|
||||
|
||||
# start container, if running, restart it
|
||||
def start_container(self, lxc_name):
|
||||
logger.info ("start container:%s" % lxc_name)
|
||||
#status = subprocess.call([self.libpath+"/lxc_control.sh", "start", lxc_name])
|
||||
#if int(status) == 1:
|
||||
# logger.error ("start container %s failed" % lxc_name)
|
||||
# return [False, "start container failed"]
|
||||
#else:
|
||||
# logger.info ("start container %s success" % lxc_name)
|
||||
# return [True, "start container success"]
|
||||
#subprocess.run(["lxc-stop -k -n %s" % lxc_name],
|
||||
# stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, check=True)
|
||||
try :
|
||||
subprocess.run(["lxc-start -n %s" % lxc_name],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, check=True)
|
||||
logger.info ("start container %s success" % lxc_name)
|
||||
return [True, "start container success"]
|
||||
except subprocess.CalledProcessError as sube:
|
||||
logger.error('start container %s failed: %s' % (lxc_name,
|
||||
sube.stdout.decode('utf-8')))
|
||||
return [False, "start container failed"]
|
||||
|
||||
# start container services
|
||||
# for the master node, jupyter must be started,
|
||||
# for other nodes, ssh must be started.
|
||||
# container must be RUNNING before calling this service
|
||||
def start_services(self, lxc_name, services=[]):
|
||||
logger.info ("start services for container %s: %s" % (lxc_name, services))
|
||||
try:
|
||||
#Ret = subprocess.run(["lxc-attach -n %s -- ln -s /nfs %s" %
|
||||
#(lxc_name, self.nodehome)],
|
||||
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||
#shell=True, check=False)
|
||||
#logger.debug ("prepare nfs for %s: %s" % (lxc_name,
|
||||
#Ret.stdout.decode('utf-8')))
|
||||
# not sure whether should execute this
|
||||
#Ret = subprocess.run(["lxc-attach -n %s -- service ssh start" % lxc_name],
|
||||
# stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||
#shell=True, check=False)
|
||||
#logger.debug(Ret.stdout.decode('utf-8'))
|
||||
if len(services) == 0: # master node
|
||||
Ret = subprocess.run(["lxc-attach -n %s -- su -c %s/start_jupyter.sh" % (lxc_name, self.rundir)],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, check=True)
|
||||
logger.debug (Ret)
|
||||
logger.info ("start services for container %s success" % lxc_name)
|
||||
return [True, "start container services success"]
|
||||
except subprocess.CalledProcessError as sube:
|
||||
logger.error('start services for container %s failed: %s' % (lxc_name,
|
||||
sube.output.decode('utf-8')))
|
||||
return [False, "start services for container failed"]
|
||||
|
||||
# recover container: if running, do nothing. if stopped, start it
|
||||
def recover_container(self, lxc_name):
|
||||
logger.info ("recover container:%s" % lxc_name)
|
||||
#status = subprocess.call([self.libpath+"/lxc_control.sh", "status", lxc_name])
|
||||
[success, status] = self.container_status(lxc_name)
|
||||
if not success:
|
||||
return [False, status]
|
||||
if status == 'stopped':
|
||||
logger.info("%s stopped, recover it to running" % lxc_name)
|
||||
if self.start_container(lxc_name)[0]:
|
||||
if self.start_services(lxc_name)[0]:
|
||||
logger.info("%s recover success" % lxc_name)
|
||||
return [True, "recover success"]
|
||||
else:
|
||||
logger.error("%s recover failed with services not start" % lxc_name)
|
||||
return [False, "recover failed for services not start"]
|
||||
else:
|
||||
logger.error("%s recover failed for container starting failed" % lxc_name)
|
||||
return [False, "recover failed for container starting failed"]
|
||||
else:
|
||||
logger.info("%s recover success" % lxc_name)
|
||||
return [True, "recover success"]
|
||||
|
||||
def stop_container(self, lxc_name):
|
||||
logger.info ("stop container:%s" % lxc_name)
|
||||
#status = subprocess.call([self.libpath+"/lxc_control.sh", "stop", lxc_name])
|
||||
[success, status] = self.container_status(lxc_name)
|
||||
if not success:
|
||||
return [False, status]
|
||||
if status == "running":
|
||||
sys_run("lxc-stop -k -n %s" % lxc_name)
|
||||
[success, status] = self.container_status(lxc_name)
|
||||
if status == "running":
|
||||
logger.error("stop container %s failed" % lxc_name)
|
||||
return [False, "stop container failed"]
|
||||
else:
|
||||
logger.info("stop container %s success" % lxc_name)
|
||||
return [True, "stop container success"]
|
||||
#if int(status) == 1:
|
||||
# logger.error ("stop container %s failed" % lxc_name)
|
||||
# return [False, "stop container failed"]
|
||||
#else:
|
||||
# logger.info ("stop container %s success" % lxc_name)
|
||||
# return [True, "stop container success"]
|
||||
|
||||
# check container: check LV and mountpoints, if wrong, try to repair it
|
||||
def check_container(self, lxc_name):
|
||||
logger.info ("check container:%s" % lxc_name)
|
||||
if not check_volume("docklet-group", lxc_name):
|
||||
logger.error("check container %s failed" % lxc_name)
|
||||
return [False, "check container failed"]
|
||||
#status = subprocess.call([self.libpath+"/lxc_control.sh", "check", lxc_name])
|
||||
self.imgmgr.checkFS(lxc_name)
|
||||
logger.info ("check container %s success" % lxc_name)
|
||||
return [True, "check container success"]
|
||||
|
||||
def is_container(self, lxc_name):
|
||||
if os.path.isdir(self.lxcpath+"/"+lxc_name):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def container_status(self, lxc_name):
|
||||
if not self.is_container(lxc_name):
|
||||
return [False, "container not found"]
|
||||
Ret = sys_run("lxc-info -n %s | grep RUNNING")
|
||||
#status = subprocess.call([self.libpath+"/lxc_control.sh", "status", lxc_name])
|
||||
if Ret.returncode == 0:
|
||||
return [True, 'running']
|
||||
else:
|
||||
return [True, 'stopped']
|
||||
|
||||
def list_containers(self):
|
||||
if not os.path.isdir(self.lxcpath):
|
||||
return [True, []]
|
||||
lxclist = []
|
||||
for onedir in os.listdir(self.lxcpath):
|
||||
if os.path.isfile(self.lxcpath+"/"+onedir+"/config"):
|
||||
lxclist.append(onedir)
|
||||
else:
|
||||
logger.warning ("%s in lxc directory, but not container directory" % onedir)
|
||||
return [True, lxclist]
|
||||
|
||||
def delete_allcontainers(self):
|
||||
logger.info ("deleting all containers...")
|
||||
[status, containers] = self.list_containers()
|
||||
result = True
|
||||
for container in containers:
|
||||
[success, status] = self.container_status(container)
|
||||
if status=='running':
|
||||
self.stop_container(container)
|
||||
result = result & self.delete_container(container)[0]
|
||||
if result:
|
||||
logger.info ("deleted all containers success")
|
||||
return [True, 'all deleted']
|
||||
else:
|
||||
logger.error ("deleted all containers failed")
|
||||
return [False, 'some containers delete failed']
|
||||
|
||||
# list containers in /var/lib/lxc/ as local
|
||||
# list containers in FS_PREFIX/global/... on this host as global
|
||||
def diff_containers(self):
|
||||
[status, localcontainers] = self.list_containers()
|
||||
globalpath = self.fspath+"/global/users/"
|
||||
users = os.listdir(globalpath)
|
||||
globalcontainers = []
|
||||
for user in users:
|
||||
clusters = os.listdir(globalpath+user+"/clusters")
|
||||
for cluster in clusters:
|
||||
clusterfile = open(globalpath+user+"/clusters/"+cluster, 'r')
|
||||
clusterinfo = json.loads(clusterfile.read())
|
||||
for container in clusterinfo['containers']:
|
||||
if container['host'] == self.addr:
|
||||
globalcontainers.append(container['containername'])
|
||||
both = []
|
||||
onlylocal = []
|
||||
onlyglobal = []
|
||||
for container in localcontainers:
|
||||
if container in globalcontainers:
|
||||
both.append(container)
|
||||
else:
|
||||
onlylocal.append(container)
|
||||
for container in globalcontainers:
|
||||
if container not in localcontainers:
|
||||
onlyglobal.append(container)
|
||||
return [both, onlylocal, onlyglobal]
|
||||
|
||||
def create_image(self,username,imagename,containername,description="nothing",isforce = False):
|
||||
return self.imgmgr.createImage(username,imagename,containername,description,isforce)
|
||||
|
||||
def flush_container(self,username,imagename,containername):
|
||||
self.imgmgr.flush_one(username,imagename,containername)
|
||||
logger.info("container: %s has been flushed" % containername)
|
||||
return 0
|
||||
# check all local containers
|
||||
def check_allcontainers(self):
|
||||
[both, onlylocal, onlyglobal] = self.diff_containers()
|
||||
logger.info("check all containers and repair them")
|
||||
status = True
|
||||
result = True
|
||||
for container in both:
|
||||
logger.info ("%s in LOCAL and GLOBAL checks..." % container)
|
||||
[status, msg] = self.check_container(container)
|
||||
result = result & status
|
||||
if len(onlylocal) > 0:
|
||||
result = False
|
||||
logger.error ("some container only exists in LOCAL: %s" % onlylocal)
|
||||
if len(onlyglobal) > 0:
|
||||
result = False
|
||||
logger.error ("some container only exists in GLOBAL: %s" % onlyglobal)
|
||||
if result:
|
||||
logger.info ("check all containers success")
|
||||
return [True, 'all is ok']
|
||||
else:
|
||||
logger.error ("check all containers failed")
|
||||
return [False, 'not ok']
|
|
@ -0,0 +1,54 @@
|
|||
import os
|
||||
|
||||
def getenv(key):
|
||||
if key == "CLUSTER_NAME":
|
||||
return os.environ.get("CLUSTER_NAME", "docklet-vc")
|
||||
elif key == "FS_PREFIX":
|
||||
return os.environ.get("FS_PREFIX", "/opt/docklet")
|
||||
elif key == "CLUSTER_SIZE":
|
||||
return int(os.environ.get("CLUSTER_SIZE", 1))
|
||||
elif key == "CLUSTER_NET":
|
||||
return os.environ.get("CLUSTER_NET", "172.16.0.1/16")
|
||||
elif key == "CONTAINER_CPU":
|
||||
return int(os.environ.get("CONTAINER_CPU", 100000))
|
||||
elif key == "CONTAINER_DISK":
|
||||
return int(os.environ.get("CONTAINER_DISK", 1000))
|
||||
elif key == "CONTAINER_MEMORY":
|
||||
return int(os.environ.get("CONTAINER_MEMORY", 1000))
|
||||
elif key == "DISKPOOL_SIZE":
|
||||
return int(os.environ.get("DISKPOOL_SIZE", 5000))
|
||||
elif key == "ETCD":
|
||||
return os.environ.get("ETCD", "localhost:2379")
|
||||
elif key == "NETWORK_DEVICE":
|
||||
return os.environ.get("NETWORK_DEVICE", "eth0")
|
||||
elif key == "MASTER_IP":
|
||||
return os.environ.get("MASTER_IP", "0.0.0.0")
|
||||
elif key == "MASTER_PORT":
|
||||
return int(os.environ.get("MASTER_PORT", 9000))
|
||||
elif key == "WORKER_PORT":
|
||||
return int(os.environ.get("WORKER_PORT", 9001))
|
||||
elif key == "PROXY_PORT":
|
||||
return int(os.environ.get("PROXY_PORT", 8000))
|
||||
elif key == "PROXY_API_PORT":
|
||||
return int(os.environ.get("PROXY_API_PORT", 8001))
|
||||
elif key == "WEB_PORT":
|
||||
return int(os.environ.get("WEB_PORT", 8888))
|
||||
elif key == "PORTAL_URL":
|
||||
return os.environ.get("PORTAL_URL",
|
||||
"http://"+getenv("MASTER_IP") + ":" + str(getenv("PROXY_PORT")))
|
||||
elif key == "LOG_LEVEL":
|
||||
return os.environ.get("LOG_LEVEL", "DEBUG")
|
||||
elif key == "LOG_LIFE":
|
||||
return int(os.environ.get("LOG_LIFE", 10))
|
||||
elif key == "WEB_LOG_LEVEL":
|
||||
return os.environ.get("WEB_LOG_LEVEL", "DEBUG")
|
||||
elif key == "STORAGE":
|
||||
return os.environ.get("STORAGE", "file")
|
||||
elif key =="EXTERNAL_LOGIN":
|
||||
return os.environ.get("EXTERNAL_LOGIN", "False")
|
||||
elif key =="EMAIL_FROM_ADDRESS":
|
||||
return os.environ.get("EMAIL_FROM_ADDRESS", "")
|
||||
elif key =="ADMIN_EMAIL_ADDRESS":
|
||||
return os.environ.get("ADMIN_EMAIL_ADDRESS", "")
|
||||
else:
|
||||
return os.environ[key]
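# Example (a sketch): configuration is read from the process environment, so defaults
# can be overridden before launching any docklet component, e.g.
#   FS_PREFIX=/srv/docklet WEB_PORT=9999 python3 <component>.py
# after which getenv("FS_PREFIX") returns "/srv/docklet" instead of "/opt/docklet".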
|
|
@ -0,0 +1,202 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
############################################################
|
||||
# etcdlib.py -- etcdlib provides a python etcd client
|
||||
# author : Bao Li <libao14@pku.edu.cn>, UniAS, SEI, PKU
|
||||
# license : BSD License
|
||||
############################################################
|
||||
|
||||
import urllib.request, urllib.error
|
||||
import random, json, time
|
||||
#import sys
|
||||
|
||||
# send http request to etcd server and get the json result
|
||||
# url : url
|
||||
# data : data to send by POST/PUT
|
||||
# method : method used by http request
|
||||
def dorequest(url, data = "", method = 'GET'):
|
||||
try:
|
||||
if method == 'GET':
|
||||
response = urllib.request.urlopen(url, timeout=10).read()
|
||||
else:
|
||||
# use PUT/DELETE/POST, data should be encoded in ascii/bytes
|
||||
request = urllib.request.Request(url, data = data.encode('ascii'), method = method)
|
||||
response = urllib.request.urlopen(request, timeout=10).read()
|
||||
# etcd may return json result with response http error code
|
||||
# http error code will raise exception in urlopen
|
||||
# catch the HTTPError and get the json result
|
||||
except urllib.error.HTTPError as e:
|
||||
# e.fp must be read() in this except block.
|
||||
# the e will be deleted and e.fp will be closed after this block
|
||||
response = e.fp.read()
|
||||
# response is encoded in bytes.
|
||||
# recoded in utf-8 and loaded in json
|
||||
result = json.loads(str(response, encoding='utf-8'))
|
||||
return result
|
||||
|
||||
|
||||
# client to use etcd
|
||||
# not all APIs are implemented below. just implement what we want
|
||||
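# Example usage (a sketch, assuming an etcd v2 server reachable at localhost:2379):
#   cli = Client("localhost:2379", prefix="docklet-vc")
#   cli.setkey("service/master", "192.168.4.12")   # -> [True, "192.168.4.12"]
#   cli.getkey("service/master")                   # -> [True, "192.168.4.12"]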
class Client(object):
|
||||
# server is a string of one server IP and PORT, like 192.168.4.12:2379
|
||||
def __init__(self, server, prefix = ""):
|
||||
self.clientid = str(random.random())
|
||||
self.server = "http://"+server
|
||||
prefix = prefix.strip("/")
|
||||
if prefix == "":
|
||||
self.keysurl = self.server+"/v2/keys/"
|
||||
else:
|
||||
self.keysurl = self.server+"/v2/keys/"+prefix+"/"
|
||||
self.members = self.getmembers()
|
||||
|
||||
def getmembers(self):
|
||||
out = dorequest(self.server+"/v2/members")
|
||||
result = []
|
||||
for one in out['members']:
|
||||
result.append(one['clientURLs'][0])
|
||||
return result
|
||||
|
||||
# list etcd servers
|
||||
def listmembers(self):
|
||||
return self.members
|
||||
|
||||
def clean(self):
|
||||
[baseurl, dirname] = self.keysurl.split("/v2/keys/", maxsplit=1)
|
||||
dirname = dirname.strip("/")
|
||||
if dirname == '': # clean root content
|
||||
[status, result] = self.listdir("")
|
||||
if status:
|
||||
for one in result:
|
||||
if 'dir' in one:
|
||||
self.deldir(one['key'])
|
||||
else:
|
||||
self.delkey(one['key'])
|
||||
if self.isdir("_lock"):
|
||||
self.deldir("_lock")
|
||||
else: # clean a directory
|
||||
if self.isdir("")[0]:
|
||||
self.deldir("")
|
||||
self.createdir("")
|
||||
|
||||
def getkey(self, key):
|
||||
key = key.strip("/")
|
||||
out = dorequest(self.keysurl+key)
|
||||
if 'action' not in out:
|
||||
return [False, "key not found"]
|
||||
else:
|
||||
return [True, out['node']['value']]
|
||||
|
||||
def setkey(self, key, value, ttl=0):
|
||||
key = key.strip("/")
|
||||
if ttl == 0:
|
||||
out = dorequest(self.keysurl+key, 'value='+str(value), 'PUT')
|
||||
else:
|
||||
out = dorequest(self.keysurl+key, 'value='+str(value)+"&ttl="+str(ttl), 'PUT')
|
||||
if 'action' not in out:
|
||||
return [False, 'set key failed']
|
||||
else:
|
||||
return [True, out['node']['value']]
|
||||
|
||||
def delkey(self, key):
|
||||
key = key.strip("/")
|
||||
out = dorequest(self.keysurl+key, method='DELETE')
|
||||
if 'action' not in out:
|
||||
return [False, 'delete key failed']
|
||||
else:
|
||||
return [True, out['node']['key']]
|
||||
|
||||
def isdir(self, dirname):
|
||||
dirname = dirname.strip("/")
|
||||
out = dorequest(self.keysurl+dirname)
|
||||
if 'action' not in out:
|
||||
return [False, dirname+" not found"]
|
||||
if 'dir' not in out['node']:
|
||||
return [False, dirname+" is a key"]
|
||||
return [True, dirname]
|
||||
|
||||
def createdir(self, dirname):
|
||||
dirname = dirname.strip("/")
|
||||
out = dorequest(self.keysurl+dirname, 'dir=true', 'PUT')
|
||||
if 'action' not in out:
|
||||
return [False, 'create dir failed']
|
||||
else:
|
||||
return [True, out['node']['key']]
|
||||
|
||||
# list key-value in the directory. BUT not recursive.
|
||||
# if necessary, recursion can be supported by adding ?recursive=true to the url
|
||||
def listdir(self, dirname):
|
||||
dirname = dirname.strip("/")
|
||||
out = dorequest(self.keysurl+dirname)
|
||||
if 'action' not in out:
|
||||
return [False, 'list directory failed']
|
||||
else:
|
||||
if "dir" not in out['node']:
|
||||
return [False, dirname+" is a key"]
|
||||
if 'nodes' not in out['node']:
|
||||
return [True, []]
|
||||
result=[]
|
||||
for kv in out['node']['nodes']:
|
||||
if 'dir' in kv:
|
||||
result.append({"key":kv['key'], 'dir':True})
|
||||
else:
|
||||
result.append({"key":kv['key'], 'value':kv['value']})
|
||||
return [True, result]
|
||||
|
||||
# del directory with recursive=true
|
||||
def deldir(self, dirname):
|
||||
dirname = dirname.strip("/")
|
||||
out = dorequest(self.keysurl+dirname+"?recursive=true", method='DELETE')
|
||||
if 'action' not in out:
|
||||
return [False, 'delete directory failed']
|
||||
else:
|
||||
return [True, out['node']['key']]
|
||||
|
||||
# watch a key or directory when it changes.
|
||||
# recursive=true means that when anything in the directory changes, it will return
|
||||
def watch(self, key):
|
||||
key = key.strip("/")
|
||||
out = dorequest(self.keysurl+key+"?wait=true&recursive=true")
|
||||
if 'action' not in out:
|
||||
return [False, 'watch key failed']
|
||||
else:
|
||||
return [True, out['node']['value']]
|
||||
|
||||
# atomic create a key. return immediately with True or False
|
||||
def atomiccreate(self, key, value='atom'):
|
||||
key = key.strip("/")
|
||||
out = dorequest(self.keysurl+key+"?prevExist=false", 'value='+value, method='PUT')
|
||||
if 'action' not in out:
|
||||
return [False, 'atomic create key failed']
|
||||
else:
|
||||
return [True, out['node']['key']]
|
||||
|
||||
################# Lock ##################
|
||||
# lockref(key) : get a reference of a lock named key in etcd.
|
||||
# no need to create this lock explicitly; it is created automatically.
|
||||
# acquire(lockref) : acquire this lock by lockref.
|
||||
# blocks if the lock is held by others
|
||||
# release(lockref) : release this lock by lockref
|
||||
# can only be released by the holder
|
||||
#########################################
|
||||
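# Example (a sketch): protecting a critical section across docklet processes
#   lock = cli.lockref("vcluster")   # -> "_lock/vcluster"
#   cli.acquire(lock)                # retries (10 ms sleep) until the atomic create succeeds
#   ... critical section ...
#   cli.release(lock)                # only the client holding the lock may release it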
def lockref(self, key):
|
||||
key = key.strip("/")
|
||||
return "_lock/"+key
|
||||
|
||||
def acquire(self, lockref):
|
||||
while(True):
|
||||
if self.atomiccreate(lockref, self.clientid)[0]:
|
||||
return [True, 'get lock']
|
||||
else:
|
||||
time.sleep(0.01)
|
||||
|
||||
def release(self, lockref):
|
||||
value = self.getkey(lockref)
|
||||
if value[0]:
|
||||
if value[1] == self.clientid:
|
||||
self.delkey(lockref)
|
||||
return [True, 'release lock']
|
||||
else:
|
||||
return [False, 'you are not lock holder']
|
||||
else:
|
||||
return [False, 'no one holds this lock']
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import os,time,subprocess
|
||||
import env
|
||||
import json
|
||||
|
||||
class Guest(object):
|
||||
def __init__(self,vclusterMgr,nodemgr):
|
||||
self.libpath = env.getenv('DOCKLET_LIB')
|
||||
self.fspath = env.getenv('FS_PREFIX')
|
||||
self.lxcpath = "/var/lib/lxc"
|
||||
self.G_vclustermgr = vclusterMgr
|
||||
self.nodemgr = nodemgr
|
||||
|
||||
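# work(): wait until at least one worker has registered, create the shared "guestspace"
# vcluster for user "guest" (initializing the guest account via userinit.sh if needed),
# then loop forever: keep it started, and once an hour stop it and wipe its volume so the
# next guest gets a clean workspace.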
def work(self):
|
||||
image = {}
|
||||
image['name'] = "base"
|
||||
image['type'] = "base"
|
||||
image['owner'] = "docklet"
|
||||
while len(self.nodemgr.get_rpcs()) < 1:
|
||||
time.sleep(10)
|
||||
if not os.path.isdir(self.fspath+"/global/users/guest"):
|
||||
subprocess.getoutput(self.libpath+"/userinit.sh guest")
|
||||
user_info = {}
|
||||
user_info["data"] = {}
|
||||
user_info["data"]["groupinfo"] = {}
|
||||
user_info["data"]["groupinfo"]["cpu"] = 100000
|
||||
user_info["data"]["groupinfo"]["memory"] = 2000
|
||||
user_info = json.dumps(user_info)
|
||||
self.G_vclustermgr.create_cluster("guestspace", "guest", image, user_info)
|
||||
while True:
|
||||
self.G_vclustermgr.start_cluster("guestspace", "guest")
|
||||
time.sleep(3600)
|
||||
self.G_vclustermgr.stop_cluster("guestspace", "guest")
|
||||
fspath = self.fspath + "/global/local/volume/guest-1-0/"
|
||||
subprocess.getoutput("(cd %s && rm -rf *)" % fspath)
|
|
@ -0,0 +1,585 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
# load environment variables in the beginning
|
||||
# because some modules need variables when import
|
||||
# for example, userManager/model.py
|
||||
|
||||
# must first init loadenv
|
||||
import tools, env
|
||||
config = env.getenv("CONFIG")
|
||||
tools.loadenv(config)
|
||||
|
||||
# second init logging
|
||||
# must import logger after initlogging, ugly
|
||||
from log import initlogging
|
||||
initlogging("docklet-master")
|
||||
from log import logger
|
||||
|
||||
import os
|
||||
import http.server, cgi, json, sys, shutil
|
||||
from socketserver import ThreadingMixIn
|
||||
import nodemgr, vclustermgr, etcdlib, network, imagemgr
|
||||
import userManager
|
||||
import monitor
|
||||
import guest_control, threading
|
||||
|
||||
external_login = env.getenv('EXTERNAL_LOGIN')
|
||||
if (external_login == 'TRUE'):
|
||||
from userDependence import external_auth
|
||||
|
||||
class DockletHttpHandler(http.server.BaseHTTPRequestHandler):
|
||||
def response(self, code, output):
|
||||
self.send_response(code)
|
||||
self.send_header("Content-type", "application/json")
|
||||
self.end_headers()
|
||||
# wfile/rfile are byte/binary encoded. need to recode
|
||||
self.wfile.write(json.dumps(output).encode('ascii'))
|
||||
self.wfile.write("\n".encode('ascii'))
|
||||
# do not wfile.close()
|
||||
# because self.handle_one_request will call wfile.flush after calling do_*
|
||||
# and self.handle_one_request will close this wfile after timeout automatically
|
||||
# (see /usr/lib/python3.4/http/server.py handle_one_request function)
|
||||
#self.wfile.close()
|
||||
|
||||
# override log_request to not print default request log
|
||||
# we use the log info by ourselves in our style
|
||||
def log_request(code = '-', size = '-'):
|
||||
pass
|
||||
|
||||
def do_PUT(self):
|
||||
self.response(400, {'success':'false', 'message':'Not supported method'})
|
||||
|
||||
def do_GET(self):
|
||||
self.response(400, {'success':'false', 'message':'Not supported method'})
|
||||
|
||||
def do_DELETE(self):
|
||||
self.response(400, {'success':'false', 'message':'Not supported method'})
|
||||
|
||||
# handle POST request
|
||||
def do_POST(self):
|
||||
global G_vclustermgr
|
||||
global G_usermgr
|
||||
#logger.info ("get request, header content:\n%s" % self.headers)
|
||||
#logger.info ("read request content:\n%s" % self.rfile.read(int(self.headers["Content-Length"])))
|
||||
logger.info ("get request, path: %s" % self.path)
|
||||
# for test
|
||||
if self.path == '/test':
|
||||
logger.info ("return welcome for test")
|
||||
self.response(200, {'success':'true', 'message':'welcome to docklet'})
|
||||
return [True, 'test ok']
|
||||
|
||||
# check for non-null content
|
||||
if 'Content-Length' not in self.headers:
|
||||
logger.info ("request content is null")
|
||||
self.response(401, {'success':'false', 'message':'request content is null'})
|
||||
return [False, 'content is null']
|
||||
|
||||
# auth the user
|
||||
# cgi.FieldStorage need fp/headers/environ. (see /usr/lib/python3.4/cgi.py)
|
||||
form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,environ={'REQUEST_METHOD':'POST'})
|
||||
cmds = self.path.strip('/').split('/')
|
||||
if cmds[0] == 'register' and form.getvalue('activate', None) == None:
|
||||
logger.info ("handle request : user register")
|
||||
username = form.getvalue('username', '')
|
||||
password = form.getvalue('password', '')
|
||||
email = form.getvalue('email', '')
|
||||
description = form.getvalue('description','')
|
||||
if (username == '' or password == '' or email == ''):
|
||||
self.response(500, {'success':'false'})
return [False, "register parameters are null"]
|
||||
newuser = G_usermgr.newuser()
|
||||
newuser.username = form.getvalue('username')
|
||||
newuser.password = form.getvalue('password')
|
||||
newuser.e_mail = form.getvalue('email')
|
||||
newuser.student_number = form.getvalue('studentnumber')
|
||||
newuser.department = form.getvalue('department')
|
||||
newuser.nickname = form.getvalue('truename')
|
||||
newuser.truename = form.getvalue('truename')
|
||||
newuser.description = form.getvalue('description')
|
||||
newuser.status = "init"
|
||||
newuser.auth_method = "local"
|
||||
result = G_usermgr.register(user = newuser)
|
||||
self.response(200, result)
|
||||
return [True, "register succeed"]
|
||||
if cmds[0] == 'login':
|
||||
logger.info ("handle request : user login")
|
||||
user = form.getvalue("user")
|
||||
key = form.getvalue("key")
|
||||
if user == None or key == None:
|
||||
self.response(401, {'success':'false', 'message':'user or key is null'})
|
||||
return [False, "auth failed"]
|
||||
auth_result = G_usermgr.auth(user, key)
|
||||
if auth_result['success'] == 'false':
|
||||
self.response(401, {'success':'false', 'message':'auth failed'})
|
||||
return [False, "auth failed"]
|
||||
self.response(200, {'success':'true', 'action':'login', 'data': auth_result['data']})
|
||||
return [True, "auth succeeded"]
|
||||
if cmds[0] == 'external_login':
|
||||
logger.info ("handle request : external user login")
|
||||
try:
|
||||
result = G_usermgr.auth_external(form)
|
||||
self.response(200, result)
|
||||
return result
|
||||
except:
|
||||
result = {'success': 'false', 'reason': 'Something went wrong when authenticating an external account'}
|
||||
self.response(200, result)
|
||||
return result
|
||||
|
||||
token = form.getvalue("token")
|
||||
if token == None:
|
||||
self.response(401, {'success':'false', 'message':'user or key is null'})
|
||||
return [False, "auth failed"]
|
||||
cur_user = G_usermgr.auth_token(token)
|
||||
if cur_user == None:
|
||||
self.response(401, {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'})
|
||||
return [False, "auth failed"]
|
||||
|
||||
|
||||
|
||||
user = cur_user.username
|
||||
# parse the url and get to do actions
|
||||
# /cluster/list
|
||||
# /cluster/create & clustername
|
||||
# /cluster/start & clustername
|
||||
# /cluster/stop & clustername
|
||||
# /cluster/delete & clustername
|
||||
# /cluster/info & clustername
|
||||
|
||||
|
||||
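# Example request (a sketch; the master address and port 9000 are assumptions taken from the
# MASTER_PORT default, and <token> comes from a prior POST to /login):
#   curl -X POST -F token=<token> -F clustername=c1 http://<master-ip>:9000/cluster/start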
if cmds[0] == 'cluster':
|
||||
clustername = form.getvalue('clustername')
|
||||
# check for 'clustername' : all actions except 'list' need 'clustername'
|
||||
if (cmds[1] != 'list') and clustername == None:
|
||||
self.response(401, {'success':'false', 'message':'clustername is null'})
|
||||
return [False, "clustername is null"]
|
||||
if cmds[1] == 'create':
|
||||
image = {}
|
||||
image['name'] = form.getvalue("imagename")
|
||||
image['type'] = form.getvalue("imagetype")
|
||||
image['owner'] = form.getvalue("imageowner")
|
||||
user_info = G_usermgr.selfQuery(cur_user = cur_user)
|
||||
user_info = json.dumps(user_info)
|
||||
logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name']))
|
||||
[status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'create cluster', 'message':result})
|
||||
else:
|
||||
self.response(200, {'success':'false', 'action':'create cluster', 'message':result})
|
||||
elif cmds[1] == 'scaleout':
|
||||
logger.info("handle request : scale out %s" % clustername)
|
||||
image = {}
|
||||
image['name'] = form.getvalue("imagename")
|
||||
image['type'] = form.getvalue("imagetype")
|
||||
image['owner'] = form.getvalue("imageowner")
|
||||
logger.debug("imagename:" + image['name'])
|
||||
logger.debug("imagetype:" + image['type'])
|
||||
logger.debug("imageowner:" + image['owner'])
|
||||
user_info = G_usermgr.selfQuery(cur_user = cur_user)
|
||||
user_info = json.dumps(user_info)
|
||||
[status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'scale out', 'message':result})
|
||||
else:
|
||||
self.response(200, {'success':'false', 'action':'scale out', 'message':result})
|
||||
elif cmds[1] == 'scalein':
|
||||
logger.info("handle request : scale in %s" % clustername)
|
||||
containername = form.getvalue("containername")
|
||||
[status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'scale in', 'message':result})
|
||||
else:
|
||||
self.response(200, {'success':'false', 'action':'scale in', 'message':result})
|
||||
elif cmds[1] == 'start':
|
||||
logger.info ("handle request : start cluster %s" % clustername)
|
||||
[status, result] = G_vclustermgr.start_cluster(clustername, user)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'start cluster', 'message':result})
|
||||
else:
|
||||
self.response(200, {'success':'false', 'action':'start cluster', 'message':result})
|
||||
elif cmds[1] == 'stop':
|
||||
logger.info ("handle request : stop cluster %s" % clustername)
|
||||
[status, result] = G_vclustermgr.stop_cluster(clustername, user)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'stop cluster', 'message':result})
|
||||
else:
|
||||
self.response(200, {'success':'false', 'action':'stop cluster', 'message':result})
|
||||
elif cmds[1] == 'delete':
|
||||
logger.info ("handle request : delete cluster %s" % clustername)
|
||||
[status, result] = G_vclustermgr.delete_cluster(clustername, user)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'delete cluster', 'message':result})
|
||||
else:
|
||||
self.response(200, {'success':'false', 'action':'delete cluster', 'message':result})
|
||||
elif cmds[1] == 'info':
|
||||
logger.info ("handle request : info cluster %s" % clustername)
|
||||
[status, result] = G_vclustermgr.get_clusterinfo(clustername, user)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'info cluster', 'message':result})
|
||||
else:
|
||||
self.response(200, {'success':'false', 'action':'info cluster', 'message':result})
|
||||
elif cmds[1] == 'list':
|
||||
logger.info ("handle request : list clusters for %s" % user)
|
||||
[status, clusterlist] = G_vclustermgr.list_clusters(user)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'action':'list cluster', 'clusters':clusterlist})
|
||||
else:
|
||||
self.response(400, {'success':'false', 'action':'list cluster', 'message':clusterlist})
|
||||
|
||||
elif cmds[1] == 'flush':
|
||||
from_lxc = form.getvalue('from_lxc')
|
||||
G_vclustermgr.flush_cluster(user,clustername,from_lxc)
|
||||
self.response(200, {'success':'true', 'action':'flush'})
|
||||
|
||||
elif cmds[1] == 'save':
|
||||
imagename = form.getvalue("image")
|
||||
description = form.getvalue("description")
|
||||
containername = form.getvalue("containername")
|
||||
isforce = form.getvalue("isforce")
|
||||
if isforce == "true":
|
||||
isforce = True
|
||||
else:
|
||||
isforce = False
|
||||
[status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,isforce)
|
||||
if status:
|
||||
logger.info("image has been saved")
|
||||
self.response(200, {'success':'true', 'action':'save'})
|
||||
else:
|
||||
logger.debug(message)
|
||||
self.response(400, {'success':'false', 'message':message})
|
||||
|
||||
else:
|
||||
logger.warning ("request not supported ")
|
||||
self.response(400, {'success':'false', 'message':'not supported request'})
|
||||
|
||||
# Request for Image
|
||||
elif cmds[0] == 'image':
|
||||
if cmds[1] == 'list':
|
||||
images = G_imagemgr.list_images(user)
|
||||
self.response(200, {'success':'true', 'images': images})
|
||||
elif cmds[1] == 'description':
|
||||
image = {}
|
||||
image['name'] = form.getvalue("imagename")
|
||||
image['type'] = form.getvalue("imagetype")
|
||||
image['owner'] = form.getvalue("imageowner")
|
||||
description = G_imagemgr.get_image_description(user,image)
|
||||
self.response(200, {'success':'true', 'message':description})
|
||||
elif cmds[1] == 'share':
|
||||
image = form.getvalue('image')
|
||||
G_imagemgr.shareImage(user,image)
|
||||
self.response(200, {'success':'true', 'action':'share'})
|
||||
elif cmds[1] == 'unshare':
|
||||
image = form.getvalue('image')
|
||||
G_imagemgr.unshareImage(user,image)
|
||||
self.response(200, {'success':'true', 'action':'unshare'})
|
||||
elif cmds[1] == 'delete':
|
||||
image = form.getvalue('image')
|
||||
G_imagemgr.removeImage(user,image)
|
||||
self.response(200, {'success':'true', 'action':'delete'})
|
||||
else:
|
||||
logger.warning("request not supported ")
|
||||
self.response(400, {'success':'false', 'message':'not supported request'})
|
||||
|
||||
# Add Proxy
|
||||
elif cmds[0] == 'addproxy':
|
||||
logger.info ("handle request : add proxy")
|
||||
proxy_ip = form.getvalue("ip")
|
||||
proxy_port = form.getvalue("port")
|
||||
clustername = form.getvalue("clustername")
|
||||
[status, message] = G_vclustermgr.addproxy(user,clustername,proxy_ip,proxy_port)
|
||||
if status is True:
|
||||
self.response(200, {'success':'true', 'action':'addproxy'})
|
||||
else:
|
||||
self.response(400, {'success':'false', 'message': message})
|
||||
# Delete Proxy
|
||||
elif cmds[0] == 'deleteproxy':
|
||||
logger.info ("handle request : delete proxy")
|
||||
clustername = form.getvalue("clustername")
|
||||
G_vclustermgr.deleteproxy(user,clustername)
|
||||
self.response(200, {'success':'true', 'action':'deleteproxy'})
|
||||
|
||||
# Request for Monitor
|
||||
elif cmds[0] == 'monitor':
|
||||
logger.info("handle request: monitor")
|
||||
res = {}
|
||||
if cmds[1] == 'hosts':
|
||||
com_id = cmds[2]
|
||||
fetcher = monitor.Fetcher(etcdaddr,G_clustername,com_id)
|
||||
if cmds[3] == 'meminfo':
|
||||
res['meminfo'] = fetcher.get_meminfo()
|
||||
elif cmds[3] == 'cpuinfo':
|
||||
res['cpuinfo'] = fetcher.get_cpuinfo()
|
||||
elif cmds[3] == 'cpuconfig':
|
||||
res['cpuconfig'] = fetcher.get_cpuconfig()
|
||||
elif cmds[3] == 'diskinfo':
|
||||
res['diskinfo'] = fetcher.get_diskinfo()
|
||||
elif cmds[3] == 'osinfo':
|
||||
res['osinfo'] = fetcher.get_osinfo()
|
||||
elif cmds[3] == 'containers':
|
||||
res['containers'] = fetcher.get_containers()
|
||||
elif cmds[3] == 'status':
|
||||
res['status'] = fetcher.get_status()
|
||||
elif cmds[3] == 'containerslist':
|
||||
res['containerslist'] = fetcher.get_containerslist()
|
||||
elif cmds[3] == 'containersinfo':
|
||||
res = []
|
||||
conlist = fetcher.get_containerslist()
|
||||
for container in conlist:
|
||||
ans = {}
|
||||
confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername)
|
||||
ans = confetcher.get_basic_info(container)
|
||||
ans['cpu_use'] = confetcher.get_cpu_use(container)
|
||||
ans['mem_use'] = confetcher.get_mem_use(container)
|
||||
res.append(ans)
|
||||
else:
|
||||
self.response(400, {'success':'false', 'message':'not supported request'})
|
||||
return
|
||||
|
||||
self.response(200, {'success':'true', 'monitor':res})
|
||||
elif cmds[1] == 'vnodes':
|
||||
fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername)
|
||||
if cmds[3] == 'cpu_use':
|
||||
res['cpu_use'] = fetcher.get_cpu_use(cmds[2])
|
||||
elif cmds[3] == 'mem_use':
|
||||
res['mem_use'] = fetcher.get_mem_use(cmds[2])
|
||||
elif cmds[3] == 'basic_info':
|
||||
res['basic_info'] = fetcher.get_basic_info(cmds[2])
|
||||
self.response(200, {'success':'true', 'monitor':res})
|
||||
elif cmds[1] == 'user':
|
||||
if not user == 'root':
|
||||
self.response(400, {'success':'false', 'message':'Root Required'})
return [False, "Root required"]
|
||||
if cmds[3] == 'clustercnt':
|
||||
flag = True
|
||||
clutotal = 0
|
||||
clurun = 0
|
||||
contotal = 0
|
||||
conrun = 0
|
||||
[status, clusterlist] = G_vclustermgr.list_clusters(cmds[2])
|
||||
if status:
|
||||
for clustername in clusterlist:
|
||||
clutotal += 1
|
||||
[status2, result] = G_vclustermgr.get_clusterinfo(clustername, cmds[2])
|
||||
if status2:
|
||||
contotal += result['size']
|
||||
if result['status'] == 'running':
|
||||
clurun += 1
|
||||
conrun += result['size']
|
||||
else:
|
||||
flag = False
|
||||
if flag:
|
||||
res = {}
|
||||
res['clutotal'] = clutotal
|
||||
res['clurun'] = clurun
|
||||
res['contotal'] = contotal
|
||||
res['conrun'] = conrun
|
||||
self.response(200, {'success':'true', 'monitor':{'clustercnt':res}})
|
||||
else:
|
||||
self.response(200, {'success':'false','message':clusterlist})
|
||||
elif cmds[3] == 'cluster':
|
||||
if cmds[4] == 'list':
|
||||
[status, clusterlist] = G_vclustermgr.list_clusters(cmds[2])
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'monitor':{'clusters':clusterlist}})
|
||||
else:
|
||||
self.response(400, {'success':'false', 'message':clusterlist})
|
||||
elif cmds[4] == 'info':
|
||||
clustername = form.getvalue('clustername')
|
||||
logger.info ("handle request : info cluster %s" % clustername)
|
||||
[status, result] = G_vclustermgr.get_clusterinfo(clustername, user)
|
||||
if status:
|
||||
self.response(200, {'success':'true', 'monitor':{'info':result}})
|
||||
else:
|
||||
self.response(200, {'success':'false','message':result})
|
||||
else:
|
||||
self.response(400, {'success':'false', 'message':'not supported request'})
|
||||
|
||||
elif cmds[1] == 'listphynodes':
|
||||
res['allnodes'] = G_nodemgr.get_allnodes()
|
||||
self.response(200, {'success':'true', 'monitor':res})
|
||||
# Request for User
|
||||
elif cmds[0] == 'user':
|
||||
logger.info("handle request: user")
|
||||
if cmds[1] == 'modify':
|
||||
#user = G_usermgr.query(username = form.getvalue("username"), cur_user = cur_user).get('token', None)
|
||||
result = G_usermgr.modify(newValue = form, cur_user = cur_user)
|
||||
self.response(200, result)
|
||||
if cmds[1] == 'groupModify':
|
||||
result = G_usermgr.groupModify(newValue = form, cur_user = cur_user)
|
||||
self.response(200, result)
|
||||
if cmds[1] == 'query':
|
||||
result = G_usermgr.query(ID = form.getvalue("ID"), cur_user = cur_user)
|
||||
if (result.get('success', None) == None or result.get('success', None) == "false"):
|
||||
self.response(301,result)
|
||||
else:
|
||||
result = G_usermgr.queryForDisplay(user = result['token'])
|
||||
self.response(200,result)
|
||||
|
||||
elif cmds[1] == 'add':
|
||||
user = G_usermgr.newuser(cur_user = cur_user)
|
||||
user.username = form.getvalue('username')
|
||||
user.password = form.getvalue('password')
|
||||
user.e_mail = form.getvalue('e_mail', '')
|
||||
user.status = "normal"
|
||||
result = G_usermgr.register(user = user, cur_user = cur_user)
|
||||
self.response(200, result)
|
||||
elif cmds[1] == 'groupadd':
|
||||
result = G_usermgr.groupadd(name = form.getvalue('name', None), cur_user = cur_user)
|
||||
self.response(200, result)
|
||||
elif cmds[1] == 'data':
|
||||
logger.info("handle request: user/data")
|
||||
result = G_usermgr.userList(cur_user = cur_user)
|
||||
self.response(200, result)
|
||||
elif cmds[1] == 'groupNameList':
|
||||
result = G_usermgr.groupListName(cur_user = cur_user)
|
||||
self.response(200, result)
|
||||
elif cmds[1] == 'groupList':
|
||||
result = G_usermgr.groupList(cur_user = cur_user)
|
||||
self.response(200, result)
|
||||
elif cmds[1] == 'groupQuery':
|
||||
result = G_usermgr.groupQuery(ID = form.getvalue("ID", '3'), cur_user = cur_user)
|
||||
if (result.get('success', None) == None or result.get('success', None) == "false"):
|
||||
self.response(301,result)
|
||||
else:
|
||||
self.response(200,result)
|
||||
elif cmds[1] == 'selfQuery':
|
||||
result = G_usermgr.selfQuery(cur_user = cur_user)
|
||||
self.response(200,result)
|
||||
elif cmds[1] == 'selfModify':
|
||||
result = G_usermgr.selfModify(cur_user = cur_user, newValue = form)
|
||||
self.response(200,result)
|
||||
elif cmds[0] == 'register' :
|
||||
#activate
|
||||
logger.info("handle request: user/activate")
|
||||
newuser = G_usermgr.newuser()
|
||||
newuser.username = cur_user.username
|
||||
newuser.nickname = cur_user.truename
|
||||
newuser.status = 'applying'
|
||||
newuser.user_group = cur_user.user_group
|
||||
newuser.auth_method = cur_user.auth_method
|
||||
newuser.e_mail = form.getvalue('email','')
|
||||
newuser.student_number = form.getvalue('studentnumber', '')
|
||||
newuser.department = form.getvalue('department', '')
|
||||
newuser.truename = form.getvalue('truename', '')
|
||||
newuser.tel = form.getvalue('tel', '')
|
||||
newuser.description = form.getvalue('description', '')
|
||||
result = G_usermgr.register(user = newuser)
|
||||
userManager.send_remind_activating_email(newuser.username)
|
||||
self.response(200,result)
|
||||
else:
|
||||
logger.warning ("request not supported ")
|
||||
self.response(400, {'success':'false', 'message':'not supported request'})
|
||||
|
||||
class ThreadingHttpServer(ThreadingMixIn, http.server.HTTPServer):
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
global G_nodemgr
|
||||
global G_vclustermgr
|
||||
global G_usermgr
|
||||
global etcdclient
|
||||
global G_networkmgr
|
||||
global G_clustername
|
||||
# move 'tools.loadenv' to the beginning of this file
|
||||
|
||||
fs_path = env.getenv("FS_PREFIX")
|
||||
logger.info("using FS_PREFIX %s" % fs_path)
|
||||
|
||||
etcdaddr = env.getenv("ETCD")
|
||||
logger.info("using ETCD %s" % etcdaddr)
|
||||
|
||||
G_clustername = env.getenv("CLUSTER_NAME")
|
||||
logger.info("using CLUSTER_NAME %s" % G_clustername)
|
||||
|
||||
# get network interface
|
||||
net_dev = env.getenv("NETWORK_DEVICE")
|
||||
logger.info("using NETWORK_DEVICE %s" % net_dev)
|
||||
|
||||
ipaddr = network.getip(net_dev)
|
||||
if ipaddr==False:
|
||||
logger.error("network device is not correct")
|
||||
sys.exit(1)
|
||||
else:
|
||||
logger.info("using ipaddr %s" % ipaddr)
|
||||
|
||||
# init etcdlib client
|
||||
try:
|
||||
etcdclient = etcdlib.Client(etcdaddr, prefix = G_clustername)
|
||||
except Exception:
|
||||
logger.error ("connect etcd failed, maybe etcd address not correct...")
|
||||
sys.exit(1)
|
||||
mode = 'recovery'
|
||||
if len(sys.argv) > 1 and sys.argv[1] == "new":
|
||||
mode = 'new'
|
||||
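# Example (a sketch; the master script name is whatever this file is installed as):
#   python3 <this-file> new    -> 'new' mode: wipe the etcd tree and all users' cluster/host files
#   python3 <this-file>        -> 'recovery' mode: reuse the existing etcd state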
|
||||
# do some initialization for mode: new/recovery
|
||||
if mode == 'new':
|
||||
# clean and initialize the etcd table
|
||||
if etcdclient.isdir(""):
|
||||
etcdclient.clean()
|
||||
else:
|
||||
etcdclient.createdir("")
|
||||
token = tools.gen_token()
|
||||
tokenfile = open(fs_path+"/global/token", 'w')
|
||||
tokenfile.write(token)
|
||||
tokenfile.write("\n")
|
||||
tokenfile.close()
|
||||
etcdclient.setkey("token", token)
|
||||
etcdclient.setkey("service/master", ipaddr)
|
||||
etcdclient.setkey("service/mode", mode)
|
||||
etcdclient.createdir("machines/allnodes")
|
||||
etcdclient.createdir("machines/runnodes")
|
||||
etcdclient.setkey("vcluster/nextid", "1")
|
||||
# clean all users vclusters files : FS_PREFIX/global/users/<username>/clusters/<clusterid>
|
||||
usersdir = fs_path+"/global/users/"
|
||||
for user in os.listdir(usersdir):
|
||||
shutil.rmtree(usersdir+user+"/clusters")
|
||||
shutil.rmtree(usersdir+user+"/hosts")
|
||||
os.mkdir(usersdir+user+"/clusters")
|
||||
os.mkdir(usersdir+user+"/hosts")
|
||||
else:
|
||||
# check whether cluster exists
|
||||
if not etcdclient.isdir("")[0]:
|
||||
logger.error ("cluster not exists, you should use mode:new ")
|
||||
sys.exit(1)
|
||||
# initialize the etcd table for recovery
|
||||
token = tools.gen_token()
|
||||
tokenfile = open(fs_path+"/global/token", 'w')
|
||||
tokenfile.write(token)
|
||||
tokenfile.write("\n")
|
||||
tokenfile.close()
|
||||
etcdclient.setkey("token", token)
|
||||
etcdclient.setkey("service/master", ipaddr)
|
||||
etcdclient.setkey("service/mode", mode)
|
||||
if etcdclient.isdir("_lock")[0]:
|
||||
etcdclient.deldir("_lock")
|
||||
if etcdclient.isdir("machines/runnodes")[0]:
|
||||
etcdclient.deldir("machines/runnodes")
|
||||
etcdclient.createdir("machines/runnodes")
|
||||
|
||||
G_usermgr = userManager.userManager('root')
|
||||
clusternet = env.getenv("CLUSTER_NET")
|
||||
logger.info("using CLUSTER_NET %s" % clusternet)
|
||||
|
||||
G_networkmgr = network.NetworkMgr(clusternet, etcdclient, mode)
|
||||
G_networkmgr.printpools()
|
||||
|
||||
# start NodeMgr and NodeMgr will wait for all nodes to start ...
|
||||
G_nodemgr = nodemgr.NodeMgr(G_networkmgr, etcdclient, addr = ipaddr, mode=mode)
|
||||
logger.info("nodemgr started")
|
||||
G_vclustermgr = vclustermgr.VclusterMgr(G_nodemgr, G_networkmgr, etcdclient, ipaddr, mode)
|
||||
logger.info("vclustermgr started")
|
||||
G_imagemgr = imagemgr.ImageMgr()
|
||||
logger.info("imagemgr started")
|
||||
Guest_control = guest_control.Guest(G_vclustermgr,G_nodemgr)
|
||||
logger.info("guest control started")
|
||||
threading.Thread(target=Guest_control.work, args=()).start()
|
||||
|
||||
logger.info("startting to listen on: ")
|
||||
masterip = env.getenv('MASTER_IP')
|
||||
logger.info("using MASTER_IP %s", masterip)
|
||||
|
||||
masterport = env.getenv('MASTER_PORT')
|
||||
logger.info("using MASTER_PORT %d", int(masterport))
|
||||
|
||||
# server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler)
|
||||
server = ThreadingHttpServer((masterip, int(masterport)), DockletHttpHandler)
|
||||
logger.info("starting master server")
|
||||
server.serve_forever()
|
|
@ -0,0 +1,284 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
"""
|
||||
design:
|
||||
1. When a user creates an image, it is uploaded to an image server and, at the same time, the local host
|
||||
keeps a copy. A time file is created with them. Every time a container is started from this
|
||||
image, the time file is updated.
|
||||
2. When a user saves an image, updating an existing image is faster than creating a new one.
|
||||
3. On the image server and on every physical host, a shell script runs to delete images that are
|
||||
out of date.
|
||||
4. Every user is shown their own images and the images shared by others. Users can create a new
|
||||
cluster or scale out a new node from them, and can remove their own images.
|
||||
5. When a remove operation occurs, the image server deletes the image. Some physical hosts may
|
||||
still keep a copy; this does not matter.
|
||||
6. The management of LVM is also included in this module.
|
||||
"""
|
||||
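# On-disk layout implied by the methods below (a sketch; FS_PREFIX assumed to be /opt/docklet):
#   /opt/docklet/global/images/private/<user>/<image>/             rootfs snapshot of the image
#   /opt/docklet/global/images/private/<user>/.<image>.info        "<create time>\n<shared|unshare>"
#   /opt/docklet/global/images/private/<user>/.<image>.description free-text description
#   /opt/docklet/global/images/public/<owner>/...                  same files for shared images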
|
||||
|
||||
from configparser import ConfigParser
|
||||
from io import StringIO
|
||||
import os,sys,subprocess,time,re,datetime,threading
|
||||
|
||||
from log import logger
|
||||
import env
|
||||
from lvmtool import *
|
||||
|
||||
class ImageMgr():
|
||||
def sys_call(self,command):
|
||||
output = subprocess.getoutput(command).strip()
|
||||
return None if output == '' else output
|
||||
|
||||
def sys_return(self,command):
|
||||
return_value = subprocess.call(command,shell=True)
|
||||
return return_value
|
||||
|
||||
def __init__(self):
|
||||
self.NFS_PREFIX = env.getenv('FS_PREFIX')
|
||||
self.imgpath = self.NFS_PREFIX + "/global/images/"
|
||||
self.srcpath = env.getenv('DOCKLET_LIB') + "/"
|
||||
self.imageserver = "192.168.6.249"
|
||||
|
||||
def datetime_toString(self,dt):
|
||||
return dt.strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
def string_toDatetime(self,string):
|
||||
return datetime.datetime.strptime(string, "%Y-%m-%d %H:%M:%S")
|
||||
|
||||
def updateinfo(self,imgpath,image,description):
|
||||
image_info_file = open(imgpath+"."+image+".info",'w')
|
||||
image_info_file.writelines([self.datetime_toString(datetime.datetime.now()) + "\n", "unshare"])
|
||||
image_info_file.close()
|
||||
image_description_file = open(imgpath+"."+image+".description", 'w')
|
||||
image_description_file.write(description)
|
||||
image_description_file.close()
|
||||
|
||||
def dealpath(self,fspath):
|
||||
if fspath[-1:] == "/":
|
||||
return self.dealpath(fspath[:-1])
|
||||
else:
|
||||
return fspath
|
||||
|
||||
def createImage(self,user,image,lxc,description="Nothing",isforce = False):
|
||||
fspath = self.NFS_PREFIX + "/local/volume/" + lxc
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
if isforce is False:
|
||||
logger.info("this save operation is not force")
|
||||
if os.path.exists(imgpath+image):
|
||||
return [False,"target image is exists"]
|
||||
self.sys_call("mkdir -p %s" % imgpath+image)
|
||||
self.sys_call("rsync -a --delete --exclude=lost+found/ --exclude=nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (self.dealpath(fspath),imgpath+image))
|
||||
self.sys_call("rm -f %s" % (imgpath+"."+image+"_docklet_share"))
|
||||
self.updateinfo(imgpath,image,description)
|
||||
logger.info("image:%s from LXC:%s create success" % (image,lxc))
|
||||
return [True, "create image success"]
|
||||
|
||||
def prepareImage(self,user,image,fspath):
|
||||
imagename = image['name']
|
||||
imagetype = image['type']
|
||||
imageowner = image['owner']
|
||||
if imagename == "base" and imagetype == "base":
|
||||
return
|
||||
if imagetype == "private":
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
else:
|
||||
imgpath = self.imgpath + "public/" + imageowner + "/"
|
||||
self.sys_call("rsync -a --delete --exclude=lost+found/ --exclude=nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (imgpath+imagename,self.dealpath(fspath)))
|
||||
#self.sys_call("rsync -a --delete --exclude=nfs/ %s/ %s/" % (imgpath+image,self.dealpath(fspath)))
|
||||
#self.updatetime(imgpath,image)
|
||||
return
|
||||
|
||||
def prepareFS(self,user,image,lxc,size="1000",vgname="docklet-group"):
|
||||
rootfs = "/var/lib/lxc/%s/rootfs" % lxc
|
||||
layer = self.NFS_PREFIX + "/local/volume/" + lxc
|
||||
#check mountpoint
|
||||
Ret = sys_run("mountpoint %s" % rootfs)
|
||||
if Ret.returncode == 0:
|
||||
logger.info("%s not clean" % rootfs)
|
||||
sys_run("umount -l %s" % rootfs)
|
||||
Ret = sys_run("mountpoint %s" % layer)
|
||||
if Ret.returncode == 0:
|
||||
logger.info("%s not clean" % layer)
|
||||
sys_run("umount -l %s" % layer)
|
||||
sys_run("rm -rf %s %s" % (rootfs, layer))
|
||||
sys_run("mkdir -p %s %s" % (rootfs, layer))
|
||||
|
||||
#prepare volume
|
||||
if check_volume(vgname,lxc):
|
||||
logger.info("volume %s already exists, delete it")
|
||||
delete_volume(vgname,lxc)
|
||||
if not new_volume(vgname,lxc,size):
|
||||
logger.error("volume %s create failed" % lxc)
|
||||
return False
|
||||
sys_run("mkfs.ext4 /dev/%s/%s" % (vgname,lxc))
|
||||
sys_run("mount /dev/%s/%s %s" %(vgname,lxc,layer))
|
||||
#self.sys_call("mountpoint %s &>/dev/null && umount -l %s" % (rootfs,rootfs))
|
||||
#self.sys_call("mountpoint %s &>/dev/null && umount -l %s" % (layer,layer))
|
||||
#self.sys_call("rm -rf %s %s && mkdir -p %s %s" % (rootfs,layer,rootfs,layer))
|
||||
#rv = self.sys_return(self.srcpath+"lvmtool.sh check volume %s %s" % (vgname,lxc))
|
||||
#if rv == 1:
|
||||
# self.sys_call(self.srcpath+"lvmtool.sh newvolume %s %s %s %s" % (vgname,lxc,size,layer))
|
||||
#else:
|
||||
# self.sys_call(self.srcpath+"lvmtool.sh mount volume %s %s %s" % (vgname,lxc,layer))
|
||||
#self.sys_call("mkdir -p %s/overlay %s/work" % (layer,layer))
|
||||
#self.sys_call("mount -t overlay overlay -olowerdir=%s/local/basefs,upperdir=%s/overlay,workdir=%s/work %s" % (self.NFS_PREFIX,layer,layer,rootfs))
|
||||
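# Union mount: the per-container LVM-backed layer is writable on top, the shared basefs is
# read-only below, so container writes land in the layer while the base image stays untouched.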
self.sys_call("mount -t aufs -o br=%s=rw:%s/local/basefs=ro+wh none %s/" % (layer,self.NFS_PREFIX,rootfs))
|
||||
logger.info("FS has been prepared for user:%s lxc:%s" % (user,lxc))
|
||||
#self.prepareImage(user,image,layer+"/overlay")
|
||||
self.prepareImage(user,image,layer)
|
||||
logger.info("image has been prepared")
|
||||
return True
|
||||
|
||||
def deleteFS(self,lxc,vgname="docklet-group"):
|
||||
rootfs = "/var/lib/lxc/%s/rootfs" % lxc
|
||||
layer = self.NFS_PREFIX + "/local/volume/" + lxc
|
||||
lxcpath = "/var/lib/lxc/%s" % lxc
|
||||
sys_run("lxc-stop -k -n %s" % lxc)
|
||||
#check mountpoint
|
||||
Ret = sys_run("mountpoint %s" % rootfs)
|
||||
if Ret.returncode == 0:
|
||||
sys_run("umount -l %s" % rootfs)
|
||||
Ret = sys_run("mountpoint %s" % layer)
|
||||
if Ret.returncode == 0:
|
||||
sys_run("umount -l %s" % layer)
|
||||
if check_volume(vgname, lxc):
|
||||
delete_volume(vgname, lxc)
|
||||
sys_run("rm -rf %s %s" % (layer,lxcpath))
|
||||
return True
|
||||
|
||||
def checkFS(self, lxc, vgname="docklet-group"):
|
||||
rootfs = "/var/lib/lxc/%s/rootfs" % lxc
|
||||
layer = self.NFS_PREFIX + "/local/volume/" + lxc
|
||||
if not os.path.isdir(layer):
|
||||
sys_run("mkdir -p %s" % layer)
|
||||
#check mountpoint
|
||||
Ret = sys_run("mountpoint %s" % layer)
|
||||
if Ret.returncode != 0:
|
||||
sys_run("mount /dev/%s/%s %s" % (vgname,lxc,layer))
|
||||
Ret = sys_run("mountpoint %s" % rootfs)
|
||||
if Ret.returncode != 0:
|
||||
self.sys_call("mount -t aufs -o br=%s=rw:%s/local/basefs=ro+wh none %s/" % (layer,self.NFS_PREFIX,rootfs))
|
||||
return True
|
||||
|
||||
|
||||
def removeImage(self,user,image):
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
self.sys_call("rm -rf %s/" % imgpath+image)
|
||||
self.sys_call("rm -f %s" % imgpath+"."+image+".info")
|
||||
self.sys_call("rm -f %s" % (imgpath+"."+image+".description"))
|
||||
|
||||
def shareImage(self,user,image):
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
share_imgpath = self.imgpath + "public/" + user + "/"
|
||||
image_info_file = open(imgpath+"."+image+".info", 'r')
|
||||
[createtime, isshare] = image_info_file.readlines()
|
||||
isshare = "shared"
|
||||
image_info_file.close()
|
||||
image_info_file = open(imgpath+"."+image+".info", 'w')
|
||||
image_info_file.writelines([createtime, isshare])
|
||||
image_info_file.close()
|
||||
self.sys_call("mkdir -p %s" % (share_imgpath + image))
|
||||
self.sys_call("rsync -a --delete %s/ %s/" % (imgpath+image,share_imgpath+image))
|
||||
self.sys_call("cp %s %s" % (imgpath+"."+image+".info",share_imgpath+"."+image+".info"))
|
||||
self.sys_call("cp %s %s" % (imgpath+"."+image+".description",share_imgpath+"."+image+".description"))
|
||||
|
||||
|
||||
|
||||
def unshareImage(self,user,image):
|
||||
public_imgpath = self.imgpath + "public/" + user + "/"
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
if os.path.exists(imgpath + image):
|
||||
image_info_file = open(imgpath+"."+image+".info", 'r')
|
||||
[createtime, isshare] = image_info_file.readlines()
|
||||
isshare = "unshare"
|
||||
image_info_file.close()
|
||||
image_info_file = open(imgpath+"."+image+".info", 'w')
|
||||
image_info_file.writelines([createtime, isshare])
|
||||
image_info_file.close()
|
||||
self.sys_call("rm -rf %s/" % public_imgpath+image)
|
||||
self.sys_call("rm -f %s" % public_imgpath+"."+image+".info")
|
||||
self.sys_call("rm -f %s" % public_imgpath+"."+image+".description")
|
||||
|
||||
|
||||
def get_image_info(self, user, image, imagetype):
|
||||
if imagetype == "private":
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
else:
|
||||
imgpath = self.imgpath + "public/" + user + "/"
|
||||
image_info_file = open(imgpath+"."+image+".info",'r')
|
||||
time = image_info_file.readline()
|
||||
image_info_file.close()
|
||||
image_description_file = open(imgpath+"."+image+".description",'r')
|
||||
description = image_description_file.read()
|
||||
image_description_file.close()
|
||||
if len(description) > 15:
|
||||
description = description[:15] + "......"
|
||||
return [time, description]
|
||||
|
||||
def get_image_description(self, user, image):
|
||||
if image['type'] == "private":
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
else:
|
||||
imgpath = self.imgpath + "public/" + image['owner'] + "/"
|
||||
image_description_file = open(imgpath+"."+image['name']+".description", 'r')
|
||||
description = image_description_file.read()
|
||||
image_description_file.close()
|
||||
return description
|
||||
|
||||
def list_images(self,user):
|
||||
images = {}
|
||||
images["private"] = []
|
||||
images["public"] = {}
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
private_images = self.sys_call("ls %s" % imgpath)
|
||||
if private_images is not None and private_images[:3] != "ls:":
|
||||
private_images = private_images.split("\n")
|
||||
for image in private_images:
|
||||
fimage={}
|
||||
fimage["name"] = image
|
||||
fimage["isshared"] = self.isshared(user,image)
|
||||
[time, description] = self.get_image_info(user, image, "private")
|
||||
fimage["time"] = time
|
||||
fimage["description"] = description
|
||||
images["private"].append(fimage)
|
||||
else:
|
||||
pass
|
||||
imgpath = self.imgpath + "public" + "/"
|
||||
public_users = self.sys_call("ls %s" % imgpath)
|
||||
if public_users is not None and public_users[:3] != "ls:":
|
||||
public_users = public_users.split("\n")
|
||||
for public_user in public_users:
|
||||
imgpath = self.imgpath + "public/" + public_user + "/"
|
||||
public_images = self.sys_call("ls %s" % imgpath)
|
||||
if public_images is not None and public_images[:3] != "ls:":
|
||||
public_images = public_images.split("\n")
|
||||
images["public"][public_user] = []
|
||||
for image in public_images:
|
||||
fimage = {}
|
||||
fimage["name"] = image
|
||||
[time, description] = self.get_image_info(public_user, image, "public")
|
||||
fimage["time"] = time
|
||||
fimage["description"] = description
|
||||
images["public"][public_user].append(fimage)
|
||||
else:
|
||||
pass
|
||||
return images
|
||||
|
||||
def isshared(self,user,image):
|
||||
imgpath = self.imgpath + "private/" + user + "/"
|
||||
image_info_file = open(imgpath+"."+image+".info",'r')
|
||||
[time, isshare] = image_info_file.readlines()
|
||||
image_info_file.close()
|
||||
if isshare == "shared":
|
||||
return "true"
|
||||
else:
|
||||
return "false"
|
||||
|
||||
if __name__ == '__main__':
|
||||
mgr = ImageMgr()
|
||||
if sys.argv[1] == "prepareImage":
|
||||
mgr.prepareImage(sys.argv[2],sys.argv[3],sys.argv[4])
|
||||
elif sys.argv[1] == "create":
|
||||
mgr.createImage(sys.argv[2],sys.argv[3],sys.argv[4])
|
||||
else:
|
||||
logger.warning("unknown option")
|
|
@ -0,0 +1,70 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import logging
|
||||
import logging.handlers
|
||||
import argparse
|
||||
import sys
|
||||
import time # this is only being used as part of the example
|
||||
import os
|
||||
import env
|
||||
|
||||
# logger should only be imported after initlogging has been called
|
||||
logger = None
|
||||
|
||||
def initlogging(name='docklet'):
|
||||
# Defaults
|
||||
global logger
|
||||
|
||||
homepath = env.getenv('FS_PREFIX')
|
||||
LOG_FILENAME = homepath + '/local/log/' + name + '.log'
|
||||
|
||||
LOG_LIFE = env.getenv('LOG_LIFE')
|
||||
LOG_LEVEL = env.getenv('LOG_LEVEL')
|
||||
if LOG_LEVEL == "DEBUG":
|
||||
LOG_LEVEL = logging.DEBUG
|
||||
elif LOG_LEVEL == "INFO":
|
||||
LOG_LEVEL = logging.INFO
|
||||
elif LOG_LEVEL == "WARNING":
|
||||
LOG_LEVEL = logging.WARNING
|
||||
elif LOG_LEVEL == "ERROR":
|
||||
LOG_LEVEL = logging.ERROR
|
||||
elif LOG_LEVEL == "CRITICAL":
|
||||
LOG_LEVEL = logging.CRITICAL
|
||||
else:
|
||||
LOG_LEVEL = logging.DEBUG
|
||||
|
||||
logger = logging.getLogger(name)
|
||||
# Configure logging to log to a file, making a new file at midnight and keeping the last LOG_LIFE days of data
|
||||
# Give the logger a unique name (good practice)
|
||||
# Set the log level to LOG_LEVEL
|
||||
logger.setLevel(LOG_LEVEL)
|
||||
# Make a handler that writes to a file, making a new file at midnight and keeping LOG_LIFE backups
|
||||
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME,
|
||||
when="midnight", backupCount=LOG_LIFE)
|
||||
# Format each log message like this
|
||||
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(module)s[%(lineno)d] %(message)s')
|
||||
# Attach the formatter to the handler
|
||||
handler.setFormatter(formatter)
|
||||
# Attach the handler to the logger
|
||||
logger.addHandler(handler)
|
||||
|
||||
# Replace stdout with logging to file at INFO level
|
||||
sys.stdout = RedirectLogger(logger, logging.INFO)
|
||||
# Replace stderr with logging to file at ERROR level
|
||||
sys.stderr = RedirectLogger(logger, logging.ERROR)
|
||||
|
||||
# Make a class we can use to capture stdout and stderr in the log
|
||||
class RedirectLogger(object):
|
||||
def __init__(self, logger, level):
|
||||
"""Needs a logger and a logger level."""
|
||||
self.logger = logger
|
||||
self.level = level
|
||||
|
||||
def write(self, message):
|
||||
# Only log if there is a message (not just a new line)
|
||||
if message.rstrip() != "":
|
||||
self.logger.log(self.level, message.rstrip())
|
||||
|
||||
def flush(self):
|
||||
for handler in self.logger.handlers:
|
||||
handler.flush()
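# Minimal usage sketch (assumes FS_PREFIX, LOG_LEVEL and LOG_LIFE are set in the environment):
#   import log
#   log.initlogging('docklet-master')
#   from log import logger      # import logger only after initlogging(), as noted above
#   logger.info("service started")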
|
|
@ -0,0 +1,159 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import env,subprocess,os,time
|
||||
from log import logger
|
||||
|
||||
def sys_run(command):
|
||||
Ret = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True, check=False)
|
||||
return Ret
|
||||
|
||||
def new_group(group_name, size = "5000", file_path = "/opt/docklet/local/docklet-storage"):
|
||||
storage = env.getenv("STORAGE")
|
||||
logger.info("begin initialize lvm group:%s with size %sM" % (group_name,size))
|
||||
if storage == "file":
|
||||
#check vg
|
||||
Ret = sys_run("vgdisplay " + group_name)
|
||||
if Ret.returncode == 0:
|
||||
logger.info("lvm group: " + group_name + " already exists, delete it")
|
||||
Ret = sys_run("vgremove -f " + group_name)
|
||||
if Ret.returncode != 0:
|
||||
logger.error("delete VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
|
||||
#check pv
|
||||
Ret = sys_run("pvdisplay /dev/loop0")
|
||||
if Ret.returncode == 0:
|
||||
Ret = sys_run("pvremove -ff /dev/loop0")
|
||||
if Ret.returncode != 0:
|
||||
logger.error("remove pv failed:%s" % Ret.stdout.decode('utf-8'))
|
||||
#check mountpoint
|
||||
Ret = sys_run("losetup /dev/loop0")
|
||||
if Ret.returncode == 0:
|
||||
logger.info("/dev/loop0 already exists, detach it")
|
||||
Ret = sys_run("losetup -d /dev/loop0")
|
||||
if Ret.returncode != 0:
|
||||
logger.error("losetup -d failed:%s" % Ret.stdout.decode('utf-8'))
|
||||
#check file_path
|
||||
if os.path.exists(file_path):
|
||||
logger.info(file_path + " for lvm group already exists, delete it")
|
||||
os.remove(file_path)
|
||||
if not os.path.isdir(file_path[:file_path.rindex("/")]):
|
||||
os.makedirs(file_path[:file_path.rindex("/")])
|
||||
sys_run("dd if=/dev/zero of=%s bs=1M seek=%s count=0" % (file_path,size))
|
||||
sys_run("losetup /dev/loop0 " + file_path)
|
||||
sys_run("vgcreate %s /dev/loop0" % group_name)
|
||||
logger.info("initialize lvm group:%s with size %sM success" % (group_name,size))
|
||||
return True
|
||||
|
||||
elif storage == "disk":
|
||||
disk = env.getenv("DISK")
|
||||
if disk is None:
|
||||
logger.error("use disk for story without a physical disk")
|
||||
return False
|
||||
#check vg
|
||||
Ret = sys_run("vgdisplay " + group_name)
|
||||
if Ret.returncode == 0:
|
||||
logger.info("lvm group: " + group_name + " already exists, delete it")
|
||||
Ret = sys_run("vgremove -f " + group_name)
|
||||
if Ret.returncode != 0:
|
||||
logger.error("delete VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
|
||||
sys_run("vgcreate %s %s" % (group_name,disk))
|
||||
logger.info("initialize lvm group:%s with size %sM success" % (group_name,size))
|
||||
return True
|
||||
|
||||
else:
|
||||
logger.info("unknown storage type:" + storage)
|
||||
return False
|
||||
|
||||
def recover_group(group_name,file_path="/opt/docklet/local/docklet-storage"):
|
||||
storage = env.getenv("STORAGE")
|
||||
if storage == "file":
|
||||
if not os.path.exists(file_path):
|
||||
logger.error("%s not found, unable to recover VG" % file_path)
|
||||
return False
|
||||
#recover mountpoint
|
||||
Ret = sys_run("losetup /dev/loop0")
|
||||
if Ret.returncode != 0:
|
||||
Ret = sys_run("losetup /dev/loop0 " + file_path)
|
||||
if Ret.returncode != 0:
|
||||
logger.error("losetup failed:%s" % Ret.stdout.decode('utf-8'))
|
||||
return False
|
||||
time.sleep(1)
|
||||
#recover vg
|
||||
Ret = sys_run("vgdisplay " + group_name)
|
||||
if Ret.returncode != 0:
|
||||
Ret = sys_run("vgcreate %s /dev/loop0" % group_name)
|
||||
if Ret.returncode != 0:
|
||||
logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
|
||||
return False
|
||||
logger.info("recover VG %s success" % group_name)
|
||||
|
||||
elif storage == "disk":
|
||||
disk = env.getenv("DISK")
|
||||
if disk is None:
|
||||
logger.error("use disk for story without a physical disk")
|
||||
return False
|
||||
#recover vg
|
||||
Ret = sys_run("vgdisplay " + group_name)
|
||||
if Ret.returncode != 0:
|
||||
Ret = sys_run("vgcreate %s %s" % (group_name,disk))
|
||||
if Ret.returncode != 0:
|
||||
logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
|
||||
return False
|
||||
logger.info("recover VG %s success" % group_name)
|
||||
|
||||
def new_volume(group_name,volume_name,size):
|
||||
Ret = sys_run("lvdisplay %s/%s" % (group_name,volume_name))
|
||||
if Ret.returncode == 0:
|
||||
logger.info("logical volume already exists, delete it")
|
||||
Ret = sys_run("lvremove -f %s/%s" % (group_name,volume_name))
|
||||
if Ret.returncode != 0:
|
||||
logger.error("delete logical volume %s failed: %s" %
|
||||
(volume_name, Ret.stdout.decode('utf-8')))
|
||||
Ret = sys_run("lvcreate -L %sM -n %s %s" % (size,volume_name,group_name))
|
||||
if Ret.returncode != 0:
|
||||
logger.error("lvcreate failed: %s" % Ret.stdout.decode('utf-8'))
|
||||
return False
|
||||
logger.info("create lv success")
|
||||
return True
|
||||
|
||||
def check_group(group_name):
|
||||
Ret = sys_run("vgdisplay %s" % group_name)
|
||||
if Ret.returncode == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def check_volume(group_name,volume_name):
|
||||
Ret = sys_run("lvdisplay %s/%s" % (group_name,volume_name))
|
||||
if Ret.returncode == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def delete_group(group_name):
|
||||
Ret = sys_run("vgdisplay %s" % group_name)
|
||||
if Ret.returncode == 0:
|
||||
Ret = sys_run("vgremove -f %s" % group_name)
|
||||
if Ret.returncode == 0:
|
||||
logger.info("delete vg %s success" % group_name)
|
||||
return True
|
||||
else:
|
||||
logger.error("delete vg %s failed:%s" % (group_name,Ret.stdout.decode('utf-8')))
|
||||
return False
|
||||
else:
|
||||
logger.info("vg %s does not exists" % group_name)
|
||||
return True
|
||||
|
||||
def delete_volume(group_name, volume_name):
|
||||
Ret = sys_run("lvdisplay %s/%s" % (group_name, volume_name))
|
||||
if Ret.returncode == 0:
|
||||
Ret = sys_run("lvremove -f %s/%s" % (group_name, volume_name))
|
||||
if Ret.returncode == 0:
|
||||
logger.info("delete lv %s in vg %s success" % (volume_name,group_name))
|
||||
return True
|
||||
else:
|
||||
logger.error("delete lv %s in vg %s failed:%s" % (volume_name,group_name,Ret.stdout.decode('utf-8')))
|
||||
return False
|
||||
else:
|
||||
logger.info("lv %s in vg %s does not exists" % (volume_name,group_name))
|
||||
|
||||
|
|
@ -0,0 +1,144 @@
|
|||
#coding=utf-8
|
||||
'''
|
||||
2 tables: users, usergroup
|
||||
User:
|
||||
id
|
||||
username
|
||||
password
|
||||
avatar
|
||||
nickname
|
||||
description
|
||||
status
|
||||
student_number
|
||||
department
|
||||
truename
|
||||
tel
|
||||
e_mail
|
||||
register_date
|
||||
user_group
|
||||
auth_method
|
||||
|
||||
Usergroup
|
||||
id
|
||||
name
|
||||
|
||||
Token expiration can be set in User.generate_auth_token
|
||||
'''
|
||||
from flask import Flask
|
||||
from flask.ext.sqlalchemy import SQLAlchemy
|
||||
from datetime import datetime
|
||||
from base64 import b64encode, b64decode
|
||||
import os
|
||||
|
||||
#this class from itsdangerous implements token<->user
|
||||
#from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
|
||||
from itsdangerous import JSONWebSignatureSerializer as Serializer
|
||||
from itsdangerous import SignatureExpired, BadSignature
|
||||
|
||||
import env
|
||||
|
||||
fsdir = env.getenv('FS_PREFIX')
|
||||
|
||||
app = Flask(__name__)
|
||||
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+fsdir+'/local/UserTable.db'
|
||||
try:
|
||||
secret_key_file = open(env.getenv('FS_PREFIX') + '/local/token_secret_key.txt')
|
||||
app.secret_key = secret_key_file.read()
|
||||
secret_key_file.close()
|
||||
except:
|
||||
from os import urandom
|
||||
secret_key = urandom(24)
|
||||
secret_key = b64encode(secret_key).decode('utf-8')
|
||||
app.secret_key = secret_key
|
||||
secret_key_file = open(env.getenv('FS_PREFIX') + '/local/token_secret_key.txt', 'w')
|
||||
secret_key_file.write(secret_key)
|
||||
secret_key_file.close()
|
||||
|
||||
db = SQLAlchemy(app)
|
||||
|
||||
class User(db.Model):
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
username = db.Column(db.String(10), unique=True)
|
||||
password = db.Column(db.String(100))
|
||||
avatar = db.Column(db.String(30))
|
||||
nickname = db.Column(db.String(10))
|
||||
description = db.Column(db.String(15))
|
||||
status = db.Column(db.String(10))
|
||||
e_mail = db.Column(db.String(20))
|
||||
student_number = db.Column(db.String(20))
|
||||
department = db.Column(db.String(20))
|
||||
truename = db.Column(db.String(20))
|
||||
tel = db.Column(db.String(20))
|
||||
register_date = db.Column(db.String(10))
|
||||
user_group = db.Column(db.String(50))
|
||||
auth_method = db.Column(db.String(10))
|
||||
|
||||
|
||||
def __init__(self, username, password, avatar="default.png", nickname = "", description = "", status = "init",
|
||||
e_mail = "" , student_number = "", department = "", truename = "", tel="", date = None, usergroup = "primary"
|
||||
, auth_method = "local"):
|
||||
# using sha512
|
||||
#if (len(password) <= 6):
|
||||
# self = None
|
||||
# return None
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.avatar = avatar
|
||||
self.nickname = nickname
|
||||
self.description = description
|
||||
self.status = status
|
||||
self.e_mail = e_mail
|
||||
self.student_number = student_number
|
||||
self.department = department
|
||||
self.truename = truename
|
||||
self.tel = tel
|
||||
if (date != None):
|
||||
self.register_date = date
|
||||
else:
|
||||
self.register_date = datetime.utcnow()
|
||||
if (UserGroup.query.filter_by(name=usergroup).first() != None):
|
||||
self.user_group = usergroup
|
||||
else:
|
||||
self.user_group = "primary"
|
||||
self.auth_method = auth_method
|
||||
|
||||
def __repr__(self):
|
||||
return '<User %r>' % self.username
|
||||
|
||||
#token originally expired after 3600s (with TimedJSONWebSignatureSerializer)
|
||||
# replaced with a serializer whose tokens have no time expiration
|
||||
def generate_auth_token(self, expiration = 3600):
|
||||
s = Serializer(app.config['SECRET_KEY'])
|
||||
token = s.dumps({'id': self.id})
|
||||
return b64encode(token).decode('utf-8')
|
||||
|
||||
@staticmethod
|
||||
def verify_auth_token(token):
|
||||
s = Serializer(app.config['SECRET_KEY'])
|
||||
try:
|
||||
data = s.loads(b64decode(token))
|
||||
except SignatureExpired:
|
||||
return None # valid token, but expired
|
||||
except BadSignature:
|
||||
return None # invalid token
|
||||
user = User.query.get(data['id'])
|
||||
return user
|
||||
|
||||
|
||||
class UserGroup(db.Model):
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
name = db.Column(db.String(50))
|
||||
cpu = db.Column(db.String(10))
|
||||
memory = db.Column(db.String(10))
|
||||
imageQuantity = db.Column(db.String(10))
|
||||
lifeCycle = db.Column(db.String(10))
|
||||
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self.cpu = '100000'
|
||||
self.memory = '2000'
|
||||
self.imageQuantity = '10'
|
||||
self.lifeCycle = '24'
|
||||
|
||||
def __repr__(self):
|
||||
return '<UserGroup %r>' % self.name
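# Minimal usage sketch (illustrative; the sqlite file lives under FS_PREFIX/local as configured above):
#   db.create_all()
#   db.session.add(UserGroup("primary"))
#   db.session.add(User("alice", "<sha512 of password>", e_mail="alice@example.com"))
#   db.session.commit()
#   token = User.query.filter_by(username="alice").first().generate_auth_token()
#   assert User.verify_auth_token(token).username == "alice"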
|
|
@ -0,0 +1,331 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import subprocess,re,sys,etcdlib,psutil
|
||||
import time,threading,json,traceback,platform
|
||||
|
||||
from log import logger
|
||||
|
||||
class Container_Collector(threading.Thread):
|
||||
|
||||
def __init__(self,etcdaddr,cluster_name,host,cpu_quota,mem_quota,test=False):
|
||||
threading.Thread.__init__(self)
|
||||
self.thread_stop = False
|
||||
self.host = host
|
||||
self.etcdser = etcdlib.Client(etcdaddr,"/%s/monitor" % (cluster_name))
|
||||
self.etcdser.setkey('/vnodes/cpu_quota', cpu_quota)
|
||||
self.etcdser.setkey('/vnodes/mem_quota', mem_quota)
|
||||
self.cpu_quota = float(cpu_quota)/100000.0
|
||||
self.mem_quota = float(mem_quota)*1000000/1024
|
||||
self.interval = 2
|
||||
self.test = test
|
||||
return
|
||||
|
||||
def list_container(self):
|
||||
output = subprocess.check_output(["sudo lxc-ls"],shell=True)
|
||||
output = output.decode('utf-8')
|
||||
containers = re.split(r'\s+',output)
|
||||
return containers
|
||||
|
||||
def collect_containerinfo(self,container_name):
|
||||
output = subprocess.check_output("sudo lxc-info -n %s" % (container_name),shell=True)
|
||||
output = output.decode('utf-8')
|
||||
parts = re.split('\n',output)
|
||||
info = {}
|
||||
basic_info = {}
|
||||
for part in parts:
|
||||
if not part == '':
|
||||
key_val = re.split(':',part)
|
||||
key = key_val[0]
|
||||
val = key_val[1]
|
||||
info[key] = val.lstrip()
|
||||
basic_info['Name'] = info['Name']
|
||||
basic_info['State'] = info['State']
|
||||
if(info['State'] == 'STOPPED'):
|
||||
self.etcdser.setkey('/vnodes/%s/basic_info'%(container_name), basic_info)
|
||||
return False
|
||||
basic_info['PID'] = info['PID']
|
||||
basic_info['IP'] = info['IP']
|
||||
self.etcdser.setkey('/vnodes/%s/basic_info'%(container_name), basic_info)
|
||||
cpu_parts = re.split(' +',info['CPU use'])
|
||||
cpu_val = cpu_parts[0].strip()
|
||||
cpu_unit = cpu_parts[1].strip()
|
||||
res = self.etcdser.getkey('/vnodes/%s/cpu_use'%(container_name))
|
||||
cpu_last = 0
|
||||
if res[0] == True:
|
||||
last_use = dict(eval(res[1]))
|
||||
cpu_last = float(last_use['val'])
|
||||
cpu_use = {}
|
||||
cpu_use['val'] = cpu_val
|
||||
cpu_use['unit'] = cpu_unit
|
||||
cpu_usedp = (float(cpu_val)-float(cpu_last))/(self.cpu_quota*self.interval*1.3)
|
||||
if(cpu_usedp > 1):
|
||||
cpu_usedp = 1
|
||||
cpu_use['usedp'] = cpu_usedp
|
||||
self.etcdser.setkey('vnodes/%s/cpu_use'%(container_name), cpu_use)
|
||||
mem_parts = re.split(' +',info['Memory use'])
|
||||
mem_val = mem_parts[0].strip()
|
||||
mem_unit = mem_parts[1].strip()
|
||||
mem_use = {}
|
||||
mem_use['val'] = mem_val
|
||||
mem_use['unit'] = mem_unit
|
||||
if(mem_unit == "MiB"):
|
||||
mem_val = float(mem_val) * 1024
|
||||
mem_usedp = float(mem_val) / self.mem_quota
|
||||
mem_use['usedp'] = mem_usedp
|
||||
self.etcdser.setkey('/vnodes/%s/mem_use'%(container_name), mem_use)
|
||||
#print(output)
|
||||
#print(parts)
|
||||
return True
|
||||
|
||||
def run(self):
|
||||
cnt = 0
|
||||
while not self.thread_stop:
|
||||
containers = self.list_container()
|
||||
countR = 0
|
||||
conlist = []
|
||||
for container in containers:
|
||||
if not container == '':
|
||||
conlist.append(container)
|
||||
try:
|
||||
if(self.collect_containerinfo(container)):
|
||||
countR += 1
|
||||
except Exception as err:
|
||||
#pass
|
||||
logger.warning(err)
|
||||
containers_num = len(containers)-1
|
||||
concnt = {}
|
||||
concnt['total'] = containers_num
|
||||
concnt['running'] = countR
|
||||
self.etcdser.setkey('/hosts/%s/containers'%(self.host), concnt)
|
||||
time.sleep(self.interval)
|
||||
if cnt == 0:
|
||||
self.etcdser.setkey('/hosts/%s/containerslist'%(self.host), conlist)
|
||||
cnt = (cnt+1)%5
|
||||
if self.test:
|
||||
break
|
||||
return
|
||||
|
||||
def stop(self):
|
||||
self.thread_stop = True
|
||||
|
||||
|
||||
class Collector(threading.Thread):
|
||||
|
||||
def __init__(self,etcdaddr,cluster_name,host,test=False):
|
||||
threading.Thread.__init__(self)
|
||||
self.host = host
|
||||
self.thread_stop = False
|
||||
self.etcdser = etcdlib.Client(etcdaddr,"/%s/monitor/hosts/%s" % (cluster_name,host))
|
||||
self.interval = 1
|
||||
self.test=test
|
||||
return
|
||||
|
||||
def collect_meminfo(self):
|
||||
meminfo = psutil.virtual_memory()
|
||||
memdict = {}
|
||||
memdict['total'] = meminfo.total/1024
|
||||
memdict['used'] = meminfo.used/1024
|
||||
memdict['free'] = meminfo.free/1024
|
||||
memdict['buffers'] = meminfo.buffers/1024
|
||||
memdict['cached'] = meminfo.cached/1024
|
||||
memdict['percent'] = meminfo.percent
|
||||
self.etcdser.setkey('/meminfo',memdict)
|
||||
#print(output)
|
||||
#print(memparts)
|
||||
return
|
||||
|
||||
def collect_cpuinfo(self):
|
||||
cpuinfo = psutil.cpu_times_percent(interval=1,percpu=False)
|
||||
cpuset = {}
|
||||
cpuset['user'] = cpuinfo.user
|
||||
cpuset['system'] = cpuinfo.system
|
||||
cpuset['idle'] = cpuinfo.idle
|
||||
cpuset['iowait'] = cpuinfo.iowait
|
||||
self.etcdser.setkey('/cpuinfo',cpuset)
|
||||
output = subprocess.check_output(["cat /proc/cpuinfo"],shell=True)
|
||||
output = output.decode('utf-8')
|
||||
parts = output.split('\n')
|
||||
info = []
|
||||
idx = -1
|
||||
for part in parts:
|
||||
if not part == '':
|
||||
key_val = re.split(':',part)
|
||||
key = key_val[0].rstrip()
|
||||
if key == 'processor':
|
||||
info.append({})
|
||||
idx += 1
|
||||
val = key_val[1].lstrip()
|
||||
if key=='processor' or key=='model name' or key=='core id' or key=='cpu MHz' or key=='cache size' or key=='physical id':
|
||||
info[idx][key] = val
|
||||
self.etcdser.setkey('/cpuconfig',info)
|
||||
return
|
||||
|
||||
def collect_diskinfo(self):
|
||||
parts = psutil.disk_partitions()
|
||||
setval = []
|
||||
devices = {}
|
||||
for part in parts:
|
||||
if not part.device in devices:
|
||||
devices[part.device] = 1
|
||||
diskval = {}
|
||||
diskval['device'] = part.device
|
||||
diskval['mountpoint'] = part.mountpoint
|
||||
usage = psutil.disk_usage(part.mountpoint)
|
||||
diskval['total'] = usage.total
|
||||
diskval['used'] = usage.used
|
||||
diskval['free'] = usage.free
|
||||
diskval['percent'] = usage.percent
|
||||
setval.append(diskval)
|
||||
self.etcdser.setkey('/diskinfo', setval)
|
||||
#print(output)
|
||||
#print(diskparts)
|
||||
return
|
||||
|
||||
def collect_osinfo(self):
|
||||
uname = platform.uname()
|
||||
osinfo = {}
|
||||
osinfo['platform'] = platform.platform()
|
||||
osinfo['system'] = uname.system
|
||||
osinfo['node'] = uname.node
|
||||
osinfo['release'] = uname.release
|
||||
osinfo['version'] = uname.version
|
||||
osinfo['machine'] = uname.machine
|
||||
osinfo['processor'] = uname.processor
|
||||
self.etcdser.setkey('/osinfo',osinfo)
|
||||
return
|
||||
|
||||
def run(self):
|
||||
self.collect_osinfo()
|
||||
while not self.thread_stop:
|
||||
self.collect_meminfo()
|
||||
self.collect_cpuinfo()
|
||||
self.collect_diskinfo()
|
||||
self.etcdser.setkey('/running','True',6)
|
||||
time.sleep(self.interval)
|
||||
if self.test:
|
||||
break
|
||||
# print(self.etcdser.getkey('/meminfo/total'))
|
||||
return
|
||||
|
||||
def stop(self):
|
||||
self.thread_stop = True
|
||||
|
||||
class Container_Fetcher:
|
||||
def __init__(self,etcdaddr,cluster_name):
|
||||
self.etcdser = etcdlib.Client(etcdaddr,"/%s/monitor/vnodes" % (cluster_name))
|
||||
return
|
||||
|
||||
def get_cpu_use(self,container_name):
|
||||
res = {}
|
||||
[ret, ans] = self.etcdser.getkey('/%s/cpu_use'%(container_name))
|
||||
if ret == True :
|
||||
res = dict(eval(ans))
|
||||
res['quota'] = self.etcdser.getkey('/cpu_quota')[1]
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_mem_use(self,container_name):
|
||||
res = {}
|
||||
[ret, ans] = self.etcdser.getkey('/%s/mem_use'%(container_name))
|
||||
if ret == True :
|
||||
res = dict(eval(ans))
|
||||
res['quota'] = self.etcdser.getkey('/mem_quota')[1]
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_basic_info(self,container_name):
|
||||
res = self.etcdser.getkey("/%s/basic_info"%(container_name))
|
||||
if res[0] == False:
|
||||
return {}
|
||||
res = dict(eval(res[1]))
|
||||
return res
|
||||
|
||||
class Fetcher:
|
||||
|
||||
def __init__(self,etcdaddr,cluster_name,host):
|
||||
self.etcdser = etcdlib.Client(etcdaddr,"/%s/monitor/hosts/%s" % (cluster_name,host))
|
||||
return
|
||||
|
||||
#def get_clcnt(self):
|
||||
# return DockletMonitor.clcnt
|
||||
|
||||
#def get_nodecnt(self):
|
||||
# return DockletMonitor.nodecnt
|
||||
|
||||
#def get_meminfo(self):
|
||||
# return self.get_meminfo_('172.31.0.1')
|
||||
|
||||
def get_meminfo(self):
|
||||
res = {}
|
||||
[ret, ans] = self.etcdser.getkey('/meminfo')
|
||||
if ret == True :
|
||||
res = dict(eval(ans))
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_cpuinfo(self):
|
||||
res = {}
|
||||
[ret, ans] = self.etcdser.getkey('/cpuinfo')
|
||||
if ret == True :
|
||||
res = dict(eval(ans))
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_cpuconfig(self):
|
||||
res = {}
|
||||
[ret, ans] = self.etcdser.getkey('/cpuconfig')
|
||||
if ret == True :
|
||||
res = list(eval(ans))
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_diskinfo(self):
|
||||
res = []
|
||||
[ret, ans] = self.etcdser.getkey('/diskinfo')
|
||||
if ret == True :
|
||||
res = list(eval(ans))
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_osinfo(self):
|
||||
res = {}
|
||||
[ret, ans] = self.etcdser.getkey('/osinfo')
|
||||
if ret == True:
|
||||
res = dict(eval(ans))
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_containers(self):
|
||||
res = {}
|
||||
[ret, ans] = self.etcdser.getkey('/containers')
|
||||
if ret == True:
|
||||
res = dict(eval(ans))
|
||||
return res
|
||||
else:
|
||||
logger.warning(ans)
|
||||
return res
|
||||
|
||||
def get_status(self):
|
||||
isexist = self.etcdser.getkey('/running')[0]
|
||||
if(isexist):
|
||||
return 'RUNNING'
|
||||
else:
|
||||
return 'STOPPED'
|
||||
|
||||
def get_containerslist(self):
|
||||
res = list(eval(self.etcdser.getkey('/containerslist')[1]))
|
||||
return res
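# Rough usage sketch (the etcd address, cluster name and quotas below are assumptions, not defaults):
#   c = Collector("127.0.0.1:2379", "docklet", "worker-1")
#   c.start()                                   # pushes mem/cpu/disk/os info to etcd every second
#   m = Container_Collector("127.0.0.1:2379", "docklet", "worker-1", "100000", "2000")
#   m.start()                                   # pushes per-container cpu/mem usage every 2 seconds
#   Fetcher("127.0.0.1:2379", "docklet", "worker-1").get_meminfo()   # -> dict with sizes in KiB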
|
|
@ -0,0 +1,276 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import subprocess
|
||||
|
||||
class ipcontrol(object):
|
||||
@staticmethod
|
||||
def parse(cmdout):
|
||||
links = {}
|
||||
thislink = None
|
||||
for line in cmdout.splitlines():
|
||||
# empty line
|
||||
if len(line)==0:
|
||||
continue
|
||||
# Level 1 : first line of one link
|
||||
if line[0] != ' ':
|
||||
blocks = line.split()
|
||||
thislink = blocks[1].strip(':')
|
||||
links[thislink] = {}
|
||||
links[thislink]['state'] = blocks[blocks.index('state')+1] if 'state' in blocks else 'UNKNOWN'
|
||||
# Level 2 : line with 4 spaces
|
||||
elif line[4] != ' ':
|
||||
blocks = line.split()
|
||||
if blocks[0] == 'inet':
|
||||
if 'inet' not in links[thislink]:
|
||||
links[thislink]['inet'] = []
|
||||
links[thislink]['inet'].append(blocks[1])
|
||||
# we just need inet (IPv4)
|
||||
else:
|
||||
pass
|
||||
# Level 3 or more : no need for us
|
||||
else:
|
||||
pass
|
||||
return links
|
||||
|
||||
@staticmethod
|
||||
def list_links():
|
||||
try:
|
||||
ret = subprocess.run(['ip', 'link', 'show'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
links = ipcontrol.parse(ret.stdout.decode('utf-8'))
|
||||
return [True, list(links.keys())]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "list links failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def link_exist(linkname):
|
||||
try:
|
||||
subprocess.run(['ip', 'link', 'show', 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return True
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def link_info(linkname):
|
||||
try:
|
||||
ret = subprocess.run(['ip', 'address', 'show', 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, ipcontrol.parse(ret.stdout.decode('utf-8'))[str(linkname)]]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "get link info failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def link_state(linkname):
|
||||
try:
|
||||
ret = subprocess.run(['ip', 'link', 'show', 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, ipcontrol.parse(ret.stdout.decode('utf-8'))[str(linkname)]['state']]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "get link state failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def link_ips(linkname):
|
||||
[status, info] = ipcontrol.link_info(str(linkname))
|
||||
if status:
|
||||
if 'inet' not in info:
|
||||
return [True, []]
|
||||
else:
|
||||
return [True, info['inet']]
|
||||
else:
|
||||
return [False, info]
|
||||
|
||||
@staticmethod
|
||||
def up_link(linkname):
|
||||
try:
|
||||
subprocess.run(['ip', 'link', 'set', 'dev', str(linkname), 'up'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(linkname)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "set link up failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def down_link(linkname):
|
||||
try:
|
||||
subprocess.run(['ip', 'link', 'set', 'dev', str(linkname), 'down'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(linkname)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "set link down failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def add_addr(linkname, address):
|
||||
try:
|
||||
subprocess.run(['ip', 'address', 'add', address, 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(linkname)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "add address failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def del_addr(linkname, address):
|
||||
try:
|
||||
subprocess.run(['ip', 'address', 'del', address, 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(linkname)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "delete address failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
|
||||
# ovs-vsctl list-br
|
||||
# ovs-vsctl br-exists <Bridge>
|
||||
# ovs-vsctl add-br <Bridge>
|
||||
# ovs-vsctl del-br <Bridge>
|
||||
# ovs-vsctl list-ports <Bridge>
|
||||
# ovs-vsctl del-port <Bridge> <Port>
|
||||
# ovs-vsctl add-port <Bridge> <Port> -- set interface <Port> type=gre options:remote_ip=<RemoteIP>
|
||||
# ovs-vsctl add-port <Bridge> <Port> tag=<ID> -- set interface <Port> type=internal
|
||||
# ovs-vsctl port-to-br <Port>
|
||||
# ovs-vsctl set Port <Port> tag=<ID>
|
||||
# ovs-vsctl clear Port <Port> tag
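# The ovscontrol wrappers below shell out to the ovs-vsctl commands listed above via subprocess.run,
# returning a [status, result] pair (the existence checks return a plain bool). For example (illustrative):
#   ovscontrol.add_bridge('docklet-br')      ~  ovs-vsctl add-br docklet-br
#   ovscontrol.set_port_tag('gw-port', 42)   ~  ovs-vsctl set Port gw-port tag=42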
|
||||
|
||||
class ovscontrol(object):
|
||||
@staticmethod
|
||||
def list_bridges():
|
||||
try:
|
||||
ret = subprocess.run(['ovs-vsctl', 'list-br'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, ret.stdout.decode('utf-8').split()]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "list bridges failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def bridge_exist(bridge):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'br-exists', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return True
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def port_tobridge(port):
|
||||
try:
|
||||
ret = subprocess.run(['ovs-vsctl', 'port-to-br', str(port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, ret.stdout.decode('utf-8').strip()]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def port_exists(port):
|
||||
return ovscontrol.port_tobridge(port)[0]
|
||||
|
||||
@staticmethod
|
||||
def add_bridge(bridge):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'add-br', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(bridge)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "add bridge failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def del_bridge(bridge):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'del-br', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(bridge)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "del bridge failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def list_ports(bridge):
|
||||
try:
|
||||
ret = subprocess.run(['ovs-vsctl', 'list-ports', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, ret.stdout.decode('utf-8').split()]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "list ports failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def del_port(bridge, port):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'del-port', str(bridge), str(port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(port)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "delete port failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def add_port_internal(bridge, port):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'add-port', str(bridge), str(port), '--', 'set', 'interface', str(port), 'type=internal'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(port)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def add_port_internal_withtag(bridge, port, tag):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'add-port', str(bridge), str(port), 'tag='+str(tag), '--', 'set', 'interface', str(port), 'type=internal'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(port)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def add_port_gre(bridge, port, remote):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'add-port', str(bridge), str(port), '--', 'set', 'interface', str(port), 'type=gre', 'options:remote_ip='+str(remote)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(port)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
@staticmethod
|
||||
def set_port_tag(port, tag):
|
||||
try:
|
||||
subprocess.run(['ovs-vsctl', 'set', 'Port', str(port), 'tag='+str(tag)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True)
|
||||
return [True, str(port)]
|
||||
except subprocess.CalledProcessError as suberror:
|
||||
return [False, "set port tag failed : %s" % suberror.stdout.decode('utf-8')]
|
||||
|
||||
|
||||
class netcontrol(object):
|
||||
@staticmethod
|
||||
def bridge_exists(bridge):
|
||||
return ovscontrol.bridge_exist(bridge)
|
||||
|
||||
@staticmethod
|
||||
def del_bridge(bridge):
|
||||
return ovscontrol.del_bridge(bridge)
|
||||
|
||||
@staticmethod
|
||||
def new_bridge(bridge):
|
||||
return ovscontrol.add_bridge(bridge)
|
||||
|
||||
@staticmethod
|
||||
def gre_exists(bridge, remote):
|
||||
# port is unique, bridge is not necessary
|
||||
return ovscontrol.port_exists('gre-'+str(remote))
|
||||
|
||||
@staticmethod
|
||||
def setup_gre(bridge, remote):
|
||||
return ovscontrol.add_port_gre(bridge, 'gre-'+str(remote), remote)
|
||||
|
||||
@staticmethod
|
||||
def gw_exists(bridge, gwport):
|
||||
return ovscontrol.port_exists(gwport)
|
||||
|
||||
@staticmethod
|
||||
def setup_gw(bridge, gwport, addr, tag):
|
||||
[status, result] = ovscontrol.add_port_internal_withtag(bridge, gwport, tag)
|
||||
if not status:
|
||||
return [status, result]
|
||||
[status, result] = ipcontrol.add_addr(gwport, addr)
|
||||
if not status:
|
||||
return [status, result]
|
||||
return ipcontrol.up_link(gwport)
|
||||
|
||||
@staticmethod
|
||||
def del_gw(bridge, gwport):
|
||||
return ovscontrol.del_port(bridge, gwport)
|
||||
|
||||
@staticmethod
|
||||
def check_gw(bridge, gwport, addr, tag):
|
||||
if not netcontrol.gw_exists(bridge, gwport):
|
||||
return netcontrol.setup_gw(bridge, gwport, addr, tag)
|
||||
[status, info] = ipcontrol.link_info(gwport)
|
||||
if not status:
|
||||
return [False, "get gateway info failed"]
|
||||
if ('inet' not in info) or (addr not in info['inet']):
|
||||
ipcontrol.add_addr(gwport, addr)
|
||||
else:
|
||||
info['inet'].remove(addr)
|
||||
for otheraddr in info['inet']:
|
||||
ipcontrol.del_addr(gwport, otheraddr)
|
||||
ovscontrol.set_port_tag(gwport, tag)
|
||||
if info['state'] == 'DOWN':
|
||||
ipcontrol.up_link(gwport)
|
||||
return [True, "check gateway port %s" % gwport]
|
||||
|
||||
|
|
@ -0,0 +1,475 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import json, sys, netifaces
|
||||
from nettools import netcontrol
|
||||
|
||||
from log import logger
|
||||
|
||||
# getip : get ip from network interface
|
||||
# ifname : name of network interface
|
||||
def getip(ifname):
|
||||
if ifname not in netifaces.interfaces():
|
||||
return False # No such interface
|
||||
else:
|
||||
addrinfo = netifaces.ifaddresses(ifname)
|
||||
if 2 in addrinfo:
|
||||
return netifaces.ifaddresses(ifname)[2][0]['addr']
|
||||
else:
|
||||
return False # network interface is down
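# e.g. (illustrative) getip('eth0') -> '192.168.1.10' when eth0 is up; returns False otherwise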
|
||||
|
||||
def ip_to_int(addr):
|
||||
[a, b, c, d] = addr.split('.')
|
||||
return (int(a)<<24) + (int(b)<<16) + (int(c)<<8) + int(d)
|
||||
|
||||
def int_to_ip(num):
|
||||
return str((num>>24)&255)+"."+str((num>>16)&255)+"."+str((num>>8)&255)+"."+str(num&255)
|
||||
|
||||
# fix addr with cidr, for example, 172.16.0.10/24 --> 172.16.0.0/24
|
||||
def fix_ip(addr, cidr):
|
||||
return int_to_ip( ip_to_int(addr) & ( (-1) << (32-int(cidr)) ) )
|
||||
#return int_to_ip(ip_to_int(addr) & ( ~( (1<<(32-int(cidr)))-1 ) ) )
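# Worked example (illustrative): fix_ip('172.16.5.9', 24)
#   ip_to_int('172.16.5.9') = 0xAC100509; ANDing with (-1)<<(32-24) clears the low 8 host bits,
#   giving 0xAC100500, and int_to_ip turns that back into '172.16.5.0' -- the /24 network address.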
|
||||
|
||||
# jump to next interval address with cidr
|
||||
def next_interval(addr, cidr):
|
||||
addr = fix_ip(addr, int(cidr))
|
||||
return int_to_ip(ip_to_int(addr)+(1<<(32-int(cidr))))
|
||||
|
||||
# jump to the previous interval address with cidr
|
||||
def before_interval(addr, cidr):
|
||||
addr = fix_ip(addr, int(cidr))
|
||||
addrint = ip_to_int(addr)-(1<<(32-int(cidr)))
|
||||
# addrint may be negative
|
||||
if addrint < 0:
|
||||
return "-1.-1.-1.-1"
|
||||
else:
|
||||
return int_to_ip(addrint)
|
||||
|
||||
|
||||
# IntervalPool : manage network blocks with IP/CIDR
|
||||
# Data Structure :
|
||||
# ... ...
|
||||
# cidr=16 : A1, A2, ... # A1 is an IP, means an interval [A1, A1+2^16-1], equals to A1/16
|
||||
# cidr=17 : B1, B2, ...
|
||||
# ... ...
|
||||
# API :
|
||||
# allocate
|
||||
# free
|
||||
class IntervalPool(object):
|
||||
# cidr : 1,2, ..., 32
|
||||
def __init__(self, addr_cidr=None, copy=None):
|
||||
if addr_cidr:
|
||||
self.pool = {}
|
||||
[addr, cidr] = addr_cidr.split('/')
|
||||
cidr = int(cidr)
|
||||
# fix addr with cidr, for example, 172.16.0.10/24 --> 172.16.0.0/24
|
||||
addr = fix_ip(addr, cidr)
|
||||
self.info = addr+"/"+str(cidr)
|
||||
# init interval pool
|
||||
# cidr : [ addr ]
|
||||
# cidr+1 : [ ]
|
||||
# ...
|
||||
# 32 : [ ]
|
||||
self.pool[str(cidr)]=[addr]
|
||||
for i in range(cidr+1, 33):
|
||||
self.pool[str(i)]=[]
|
||||
elif copy:
|
||||
self.info = copy['info']
|
||||
self.pool = copy['pool']
|
||||
else:
|
||||
logger.error("IntervalPool init failed with no addr_cidr or center")
|
||||
|
||||
def __str__(self):
|
||||
return json.dumps({'info':self.info, 'pool':self.pool})
|
||||
|
||||
def printpool(self):
|
||||
cidrs = list(self.pool.keys())
|
||||
# sort with key=int(cidr)
|
||||
cidrs.sort(key=int)
|
||||
for i in cidrs:
|
||||
print (i + " : " + str(self.pool[i]))
|
||||
|
||||
# allocate an interval with CIDR
|
||||
def allocate(self, thiscidr):
|
||||
# thiscidr -- cidr for this request
|
||||
# upcidr -- up stream which has interval to allocate
|
||||
thiscidr=int(thiscidr)
|
||||
upcidr = thiscidr
|
||||
# find first cidr who can allocate enough ips
|
||||
while((str(upcidr) in self.pool) and len(self.pool[str(upcidr)])==0):
|
||||
upcidr = upcidr-1
|
||||
if str(upcidr) not in self.pool:
|
||||
return [False, 'Not Enough to Allocate']
|
||||
# get the block/interval to allocate ips
|
||||
upinterval = self.pool[str(upcidr)][0]
|
||||
self.pool[str(upcidr)].remove(upinterval)
|
||||
# split the upinterval and put the rest intervals back to interval pool
|
||||
for i in range(int(thiscidr), int(upcidr), -1):
|
||||
self.pool[str(i)].append(next_interval(upinterval, i))
|
||||
#self.pool[str(i)].sort(key=ip_to_int) # cidr between thiscidr and upcidr are null, no need to sort
|
||||
return [True, upinterval]
|
||||
|
||||
# deallocate an interval with IP/CIDR
|
||||
# ToDo : when freeing an IP/CIDR, we do not check whether the IP/CIDR is in the pool
|
||||
# maybe we check this later
|
||||
def free(self, addr, cidr):
|
||||
cidr = int(cidr)
|
||||
# cidr not in pool means CIDR out of pool range
|
||||
if str(cidr) not in self.pool:
|
||||
return [False, 'CIDR not in pool']
|
||||
addr = fix_ip(addr, cidr)
|
||||
# merge interval and move to up cidr
|
||||
while(True):
|
||||
# cidr-1 not in pool means current CIDR is the top CIDR
|
||||
if str(cidr-1) not in self.pool:
|
||||
break
|
||||
# if addr can satisfy cidr-1, and next_interval also exist,
|
||||
# merge addr with next_interval to up cidr (cidr-1)
|
||||
# if addr not satisfy cidr-1, and before_interval exist,
|
||||
# merge addr with before_interval to up cidr, and interval index is before_interval
|
||||
if addr == fix_ip(addr, cidr-1):
|
||||
if next_interval(addr, cidr) in self.pool[str(cidr)]:
|
||||
self.pool[str(cidr)].remove(next_interval(addr,cidr))
|
||||
cidr=cidr-1
|
||||
else:
|
||||
break
|
||||
else:
|
||||
if before_interval(addr, cidr) in self.pool[str(cidr)]:
|
||||
addr = before_interval(addr, cidr)
|
||||
self.pool[str(cidr)].remove(addr)
|
||||
cidr = cidr - 1
|
||||
else:
|
||||
break
|
||||
self.pool[str(cidr)].append(addr)
|
||||
# sort interval with key=ip_to_int(IP)
|
||||
self.pool[str(cidr)].sort(key=ip_to_int)
|
||||
return [True, "Free success"]
|
||||
|
||||
# EnumPool : manage network ips with ip or ip list
|
||||
# Data Structure : [ A, B, C, ... X ] , A is a IP address
|
||||
class EnumPool(object):
|
||||
def __init__(self, addr_cidr=None, copy=None):
|
||||
if addr_cidr:
|
||||
self.pool = []
|
||||
[addr, cidr] = addr_cidr.split('/')
|
||||
cidr=int(cidr)
|
||||
addr=fix_ip(addr, cidr)
|
||||
self.info = addr+"/"+str(cidr)
|
||||
# init enum pool
|
||||
# first IP is network id, last IP is network broadcast address
|
||||
# first and last IP can not be allocated
|
||||
for i in range(1, pow(2, 32-cidr)-1):
|
||||
self.pool.append(int_to_ip(ip_to_int(addr)+i))
|
||||
elif copy:
|
||||
self.info = copy['info']
|
||||
self.pool = copy['pool']
|
||||
else:
|
||||
logger.error("EnumPool init failed with no addr_cidr or copy")
|
||||
|
||||
def __str__(self):
|
||||
return json.dumps({'info':self.info, 'pool':self.pool})
|
||||
|
||||
def printpool(self):
|
||||
print (str(self.pool))
|
||||
|
||||
def acquire(self, num=1):
|
||||
if num > len(self.pool):
|
||||
return [False, "No enough IPs: %s" % self.info]
|
||||
result = []
|
||||
for i in range(0, num):
|
||||
result.append(self.pool.pop())
|
||||
return [True, result]
|
||||
|
||||
def acquire_cidr(self, num=1):
|
||||
[status, result] = self.acquire(int(num))
|
||||
if not status:
|
||||
return [status, result]
|
||||
return [True, list(map(lambda x:x+"/"+self.info.split('/')[1], result))]
|
||||
|
||||
# ToDo : when release :
|
||||
# not check whether IP is in the range of pool
|
||||
# not check whether IP is already in the pool
|
||||
def release(self, ip_or_ips):
|
||||
if type(ip_or_ips) == str:
|
||||
ips = [ ip_or_ips ]
|
||||
else:
|
||||
ips = ip_or_ips
|
||||
for ip in ips:
|
||||
# maybe ip is in format IP/CIDR
|
||||
ip = ip.split('/')[0]
|
||||
self.pool.append(ip)
|
||||
return [True, "release success"]
|
||||
|
||||
# wrap EnumPool with vlanid and gateway
|
||||
class UserPool(EnumPool):
|
||||
def __init__(self, addr_cidr=None, vlanid=None, copy=None):
|
||||
if addr_cidr and vlanid:
|
||||
EnumPool.__init__(self, addr_cidr = addr_cidr)
|
||||
self.vlanid=vlanid
|
||||
self.pool.sort(key=ip_to_int)
|
||||
self.gateway = self.pool[0]
|
||||
self.pool.remove(self.gateway)
|
||||
elif copy:
|
||||
EnumPool.__init__(self, copy = copy)
|
||||
self.vlanid = int(copy['vlanid'])
|
||||
self.gateway = copy['gateway']
|
||||
else:
|
||||
logger.error("UserPool init failed with no addr_cidr or copy")
|
||||
|
||||
def get_gateway(self):
|
||||
return self.gateway
|
||||
|
||||
def get_gateway_cidr(self):
|
||||
return self.gateway+"/"+self.info.split('/')[1]
|
||||
|
||||
def printpool(self):
|
||||
print("users ID:"+str(self.vlanid)+", net info:"+self.info+", gateway:"+self.gateway)
|
||||
print (str(self.pool))
|
||||
|
||||
# NetworkMgr : manage docklet network ip addresses
|
||||
# center : interval pool to allocate and free network block with IP/CIDR
|
||||
# system : enumeration pool to acquire and release system ip address
|
||||
# users : set of users' enumeration pools to manage users' ip address
|
||||
class NetworkMgr(object):
|
||||
def __init__(self, addr_cidr, etcdclient, mode):
|
||||
self.etcd = etcdclient
|
||||
if mode == 'new':
|
||||
logger.info("init network manager with %s" % addr_cidr)
|
||||
self.center = IntervalPool(addr_cidr=addr_cidr)
|
||||
# allocate a pool for system IPs, use CIDR=27, has 32 IPs
|
||||
syscidr = 27
|
||||
[status, sysaddr] = self.center.allocate(syscidr)
|
||||
if status == False:
|
||||
logger.error ("allocate system ips in __init__ failed")
|
||||
sys.exit(1)
|
||||
# maybe for system, the last IP address of CIDR is available
|
||||
# But EnumPool drops the last IP address from its pool -- it is not important
|
||||
self.system = EnumPool(sysaddr+"/"+str(syscidr))
|
||||
self.users = {}
|
||||
self.vlanids = {}
|
||||
self.init_vlanids(4095, 60)
|
||||
self.dump_center()
|
||||
self.dump_system()
|
||||
elif mode == 'recovery':
|
||||
logger.info("init network manager from etcd")
|
||||
self.center = None
|
||||
self.system = None
|
||||
self.users = {}
|
||||
self.vlanids = {}
|
||||
self.load_center()
|
||||
self.load_system()
|
||||
self.load_vlanids()
|
||||
else:
|
||||
logger.error("mode: %s not supported" % mode)
|
||||
|
||||
def init_vlanids(self, total, block):
|
||||
self.vlanids['block'] = block
|
||||
self.etcd.setkey("network/vlanids/info", str(total)+"/"+str(block))
|
||||
for i in range(1, int((total-1)/block)):
|
||||
self.etcd.setkey("network/vlanids/"+str(i), json.dumps(list(range(1+block*(i-1), block*i+1))))
|
||||
self.vlanids['currentpool'] = list(range(1+block*i, total+1))
|
||||
self.vlanids['currentindex'] = i+1
|
||||
self.etcd.setkey("network/vlanids/"+str(i+1), json.dumps(self.vlanids['currentpool']))
|
||||
self.etcd.setkey("network/vlanids/current", str(i+1))
|
||||
|
||||
def load_vlanids(self):
|
||||
[status, info] = self.etcd.getkey("network/vlanids/info")
|
||||
self.vlanids['block'] = int(info.split("/")[1])
|
||||
[status, current] = self.etcd.getkey("network/vlanids/current")
|
||||
self.vlanids['currentindex'] = int(current)
|
||||
if self.vlanids['currentindex'] == 0:
|
||||
self.vlanids['currentpool'] = []
|
||||
else:
|
||||
[status, pool]= self.etcd.getkey("network/vlanids/"+str(self.vlanids['currentindex']))
|
||||
self.vlanids['currentpool'] = json.loads(pool)
|
||||
|
||||
def dump_vlanids(self):
|
||||
if self.vlanids['currentpool'] == []:
|
||||
if self.vlanids['currentindex'] != 0:
|
||||
self.etcd.delkey("network/vlanids/"+str(self.vlanids['currentindex']))
|
||||
self.etcd.setkey("network/vlanids/current", str(self.vlanids['currentindex']-1))
|
||||
else:
|
||||
pass
|
||||
else:
|
||||
self.etcd.setkey("network/vlanids/"+str(self.vlanids['currentindex']), json.dumps(self.vlanids['currentpool']))
|
||||
|
||||
def load_center(self):
|
||||
[status, centerdata] = self.etcd.getkey("network/center")
|
||||
center = json.loads(centerdata)
|
||||
self.center = IntervalPool(copy = center)
|
||||
|
||||
def dump_center(self):
|
||||
self.etcd.setkey("network/center", json.dumps({'info':self.center.info, 'pool':self.center.pool}))
|
||||
|
||||
def load_system(self):
|
||||
[status, systemdata] = self.etcd.getkey("network/system")
|
||||
system = json.loads(systemdata)
|
||||
self.system = EnumPool(copy=system)
|
||||
|
||||
def dump_system(self):
|
||||
self.etcd.setkey("network/system", json.dumps({'info':self.system.info, 'pool':self.system.pool}))
|
||||
|
||||
def load_user(self, username):
|
||||
[status, userdata] = self.etcd.getkey("network/users/"+username)
|
||||
usercopy = json.loads(userdata)
|
||||
user = UserPool(copy = usercopy)
|
||||
self.users[username] = user
|
||||
|
||||
def dump_user(self, username):
|
||||
self.etcd.setkey("network/users/"+username, json.dumps({'info':self.users[username].info, 'vlanid':self.users[username].vlanid, 'gateway':self.users[username].gateway, 'pool':self.users[username].pool}))
|
||||
|
||||
def printpools(self):
|
||||
print ("<Center>")
|
||||
self.center.printpool()
|
||||
print ("<System>")
|
||||
self.system.printpool()
|
||||
print ("<users>")
|
||||
print (" users in users is in etcd, not in memory")
|
||||
print ("<vlanids>")
|
||||
print (str(self.vlanids['currentindex'])+":"+str(self.vlanids['currentpool']))
|
||||
|
||||
def acquire_vlanid(self):
|
||||
if self.vlanids['currentpool'] == []:
|
||||
if self.vlanids['currentindex'] == 0:
|
||||
return [False, "No VLAN IDs"]
|
||||
else:
|
||||
logger.error("vlanids current pool is empty with current index not zero")
|
||||
return [False, "internal error"]
|
||||
vlanid = self.vlanids['currentpool'].pop()
|
||||
self.dump_vlanids()
|
||||
if self.vlanids['currentpool'] == []:
|
||||
self.load_vlanids()
|
||||
return [True, vlanid]
|
||||
|
||||
def release_vlanid(self, vlanid):
|
||||
if len(self.vlanids['currentpool']) == self.vlanids['block']:
|
||||
self.vlanids['currentpool'] = [vlanid]
|
||||
self.vlanids['currentindex'] = self.vlanids['currentindex']+1
|
||||
self.dump_vlanids()
|
||||
else:
|
||||
self.vlanids['currentpool'].append(vlanid)
|
||||
self.dump_vlanids()
|
||||
return [True, "Release VLAN ID success"]
|
||||
|
||||
def add_user(self, username, cidr):
|
||||
logger.info ("add user %s with cidr=%s" % (username, str(cidr)))
|
||||
if self.has_user(username):
|
||||
return [False, "user already exists in users set"]
|
||||
[status, result] = self.center.allocate(cidr)
|
||||
self.dump_center()
|
||||
if status == False:
|
||||
return [False, result]
|
||||
[status, vlanid] = self.acquire_vlanid()
|
||||
if status:
|
||||
vlanid = int(vlanid)
|
||||
else:
|
||||
self.center.free(result, cidr)
|
||||
self.dump_center()
|
||||
return [False, vlanid]
|
||||
self.users[username] = UserPool(addr_cidr = result+"/"+str(cidr), vlanid=vlanid)
|
||||
logger.info("setup gateway for %s with %s and vlan=%s" % (username, self.users[username].get_gateway_cidr(), str(vlanid)))
|
||||
netcontrol.setup_gw('docklet-br', username, self.users[username].get_gateway_cidr(), str(vlanid))
|
||||
self.dump_user(username)
|
||||
del self.users[username]
|
||||
return [True, 'add user success']
|
||||
|
||||
def del_user(self, username):
|
||||
logger.info ("delete user %s with cidr=%s" % (username))
|
||||
if not self.has_user(username):
|
||||
return [False, username+" not in users set"]
|
||||
self.load_user(username)
|
||||
[addr, cidr] = self.users[username].info.split('/')
|
||||
self.center.free(addr, int(cidr))
|
||||
self.dump_center()
|
||||
self.release_vlanid(self.users[username].vlanid)
|
||||
netcontrol.del_gw('docklet-br', username)
|
||||
self.etcd.deldir("network/users/"+username)
|
||||
del self.users[username]
|
||||
return [True, 'delete user success']
|
||||
|
||||
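# A hedged usage sketch of the per-user network lifecycle implemented by the methods in
# this class (add_user / acquire_userips_cidr / release_userips / del_user).
# `networkmgr` stands for an already-initialized instance of this manager, and the
# username and CIDR below are illustrative.
def demo_user_network(networkmgr, username="alice"):
    if not networkmgr.has_user(username):
        networkmgr.add_user(username, cidr=29)                # subnet + VLAN ID + gateway bridge
    [ok, ips] = networkmgr.acquire_userips_cidr(username, 2)  # e.g. two addresses in CIDR form
    if not ok:
        return [False, ips]
    # ... containers would be created with these addresses here ...
    networkmgr.release_userips(username, ips)
    networkmgr.del_user(username)                             # frees the subnet and the VLAN ID
    return [True, "demo finished"]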
def check_usergw(self, username):
|
||||
self.load_user(username)
|
||||
netcontrol.check_gw('docklet-br', username, self.users[username].get_gateway_cidr(), str(self.users[username].vlanid))
|
||||
del self.users[username]
|
||||
return [True, 'check gw ok']
|
||||
|
||||
def has_user(self, username):
|
||||
[status, _value] = self.etcd.getkey("network/users/"+username)
|
||||
return status
|
||||
|
||||
def acquire_userips(self, username, num=1):
|
||||
logger.info ("acquire user ips of %s" % (username))
|
||||
if not self.has_user(username):
|
||||
return [False, 'username does not exist in users set']
|
||||
self.load_user(username)
|
||||
result = self.users[username].acquire(num)
|
||||
self.dump_user(username)
|
||||
del self.users[username]
|
||||
return result
|
||||
|
||||
def acquire_userips_cidr(self, username, num=1):
|
||||
logger.info ("acquire user ips of %s" % (username))
|
||||
if not self.has_user(username):
|
||||
return [False, 'username does not exist in users set']
|
||||
self.load_user(username)
|
||||
result = self.users[username].acquire_cidr(num)
|
||||
self.dump_user(username)
|
||||
del self.users[username]
|
||||
return result
|
||||
|
||||
# ip_or_ips : one IP address or a list of IPs
|
||||
def release_userips(self, username, ip_or_ips):
|
||||
logger.info ("release user ips of %s with ips: %s" % (username, str(ip_or_ips)))
|
||||
if not self.has_user(username):
|
||||
return [False, 'username does not exist in users set']
|
||||
self.load_user(username)
|
||||
result = self.users[username].release(ip_or_ips)
|
||||
self.dump_user(username)
|
||||
del self.users[username]
|
||||
return result
|
||||
|
||||
def get_usergw(self, username):
|
||||
if not self.has_user(username):
|
||||
return [False, 'username does not exist in users set']
|
||||
self.load_user(username)
|
||||
result = self.users[username].get_gateway()
|
||||
self.dump_user(username)
|
||||
del self.users[username]
|
||||
return result
|
||||
|
||||
def get_usergw_cidr(self, username):
|
||||
if not self.has_user(username):
|
||||
return [False, 'username does not exist in users set']
|
||||
self.load_user(username)
|
||||
result = self.users[username].get_gateway_cidr()
|
||||
self.dump_user(username)
|
||||
del self.users[username]
|
||||
return result
|
||||
|
||||
def get_uservlanid(self, username):
|
||||
if not self.has_user(username):
|
||||
return [False, 'username does not exist in users set']
|
||||
self.load_user(username)
|
||||
result = self.users[username].vlanid
|
||||
self.dump_user(username)
|
||||
del self.users[username]
|
||||
return result
|
||||
|
||||
def acquire_sysips(self, num=1):
|
||||
logger.info ("acquire system ips")
|
||||
result = self.system.acquire(num)
|
||||
self.dump_system()
|
||||
return result
|
||||
|
||||
def acquire_sysips_cidr(self, num=1):
|
||||
logger.info ("acquire system ips")
|
||||
result = self.system.acquire_cidr(num)
|
||||
self.dump_system()
|
||||
return result
|
||||
|
||||
def release_sysips(self, ip_or_ips):
|
||||
logger.info ("acquire system ips: %s" % str(ip_or_ips))
|
||||
result = self.system.release(ip_or_ips)
|
||||
self.dump_system()
|
||||
return result
|
||||
|
||||
|
|
@ -0,0 +1,159 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import threading, random, time, xmlrpc.client, sys
|
||||
#import network
|
||||
from nettools import netcontrol
|
||||
from log import logger
|
||||
import env
|
||||
|
||||
##########################################
|
||||
# NodeMgr
|
||||
# Description : manage the physical nodes
|
||||
# 1. list running nodes now
|
||||
# 2. update node list when new node joins
|
||||
# ETCD table :
|
||||
# machines/allnodes -- all nodes in docklet, for recovery
|
||||
# machines/runnodes -- run nodes of this start up
|
||||
##############################################
|
||||
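# The values under machines/runnodes/<ip> act as a small handshake between master and
# workers. The sequence below is reconstructed from _watchnewnode() in this class and
# from worker.py later in this commit; the dict is an illustrative stand-in for etcd and
# the IP address is hypothetical.
mock_etcd = {}
mock_etcd["machines/runnodes/10.0.0.2"] = "waiting"    # worker announces itself
mock_etcd["machines/runnodes/10.0.0.2"] = "init-new"   # master replies with its mode ("init-new"/"init-recovery")
mock_etcd["machines/runnodes/10.0.0.2"] = "work"       # worker finished init and starts serving rpc
mock_etcd["machines/runnodes/10.0.0.2"] = "ok"         # master set up GRE and registered the rpc client
mock_etcd["machines/allnodes/10.0.0.2"] = "ok"         # master also records the node for recovery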
class NodeMgr(object):
|
||||
def __init__(self, networkmgr, etcdclient, addr, mode):
|
||||
self.addr = addr
|
||||
logger.info ("begin initialize on %s" % self.addr)
|
||||
self.networkmgr = networkmgr
|
||||
self.etcd = etcdclient
|
||||
self.mode = mode
|
||||
|
||||
# initialize the network
|
||||
logger.info ("initialize network")
|
||||
|
||||
# 'docklet-br' does not need an IP address, because every user has their own gateway
|
||||
#[status, result] = self.networkmgr.acquire_sysips_cidr()
|
||||
#self.networkmgr.printpools()
|
||||
#if not status:
|
||||
# logger.info ("initialize network failed, no IP for system bridge")
|
||||
# sys.exit(1)
|
||||
#self.bridgeip = result[0]
|
||||
#logger.info ("initialize bridge wih ip %s" % self.bridgeip)
|
||||
#network.netsetup("init", self.bridgeip)
|
||||
|
||||
if self.mode == 'new':
|
||||
if netcontrol.bridge_exists('docklet-br'):
|
||||
netcontrol.del_bridge('docklet-br')
|
||||
netcontrol.new_bridge('docklet-br')
|
||||
else:
|
||||
if not netcontrol.bridge_exists('docklet-br'):
|
||||
logger.error("docklet-br not found")
|
||||
sys.exit(1)
|
||||
|
||||
# get allnodes
|
||||
self.allnodes = self._nodelist_etcd("allnodes")
|
||||
self.runnodes = self._nodelist_etcd("runnodes")
|
||||
logger.info ("all nodes are: %s" % self.allnodes)
|
||||
logger.info ("run nodes are: %s" % self.runnodes)
|
||||
if len(self.runnodes)>0:
|
||||
logger.error ("init runnodes is not null, need to be clean")
|
||||
sys.exit(1)
|
||||
# init rpc list
|
||||
self.rpcs = []
|
||||
# start new thread to watch whether a new node joins
|
||||
logger.info ("start thread to watch new nodes ...")
|
||||
self.thread_watchnewnode = threading.Thread(target=self._watchnewnode)
|
||||
self.thread_watchnewnode.start()
|
||||
# wait for all nodes joins
|
||||
while(True):
|
||||
allin = True
|
||||
for node in self.allnodes:
|
||||
if node not in self.runnodes:
|
||||
allin = False
|
||||
break
|
||||
if allin:
|
||||
logger.info("all nodes necessary joins ...")
|
||||
break
|
||||
time.sleep(0.05)
|
||||
logger.info ("run nodes are: %s" % self.runnodes)
|
||||
|
||||
|
||||
# get nodes list from etcd table
|
||||
def _nodelist_etcd(self, which):
|
||||
if which == "allnodes" or which == "runnodes":
|
||||
[status, nodeinfo]=self.etcd.listdir("machines/"+which)
|
||||
if status:
|
||||
nodelist = []
|
||||
for node in nodeinfo:
|
||||
nodelist.append(node["key"].rsplit('/', 1)[1])
|
||||
return nodelist
|
||||
return []
|
||||
|
||||
# thread target : watch whether a new node joins
|
||||
def _watchnewnode(self):
|
||||
workerport = env.getenv('WORKER_PORT')
|
||||
while(True):
|
||||
time.sleep(0.1)
|
||||
[status, runlist] = self.etcd.listdir("machines/runnodes")
|
||||
if not status:
|
||||
logger.warning ("get runnodes list failed from etcd ")
|
||||
continue
|
||||
for node in runlist:
|
||||
nodeip = node['key'].rsplit('/',1)[1]
|
||||
if node['value']=='waiting':
|
||||
logger.info ("%s want to joins, call it to init first" % nodeip)
|
||||
# the worker's 'docklet-br' does not need an IP address, so none is allocated for it
|
||||
#if nodeip != self.addr:
|
||||
# [status, result] = self.networkmgr.acquire_sysips_cidr()
|
||||
# self.networkmgr.printpools()
|
||||
# if not status:
|
||||
# logger.error("no IP for worker bridge, please check network system pool")
|
||||
# continue
|
||||
# bridgeip = result[0]
|
||||
# self.etcd.setkey("network/workbridge", bridgeip)
|
||||
if nodeip in self.allnodes:
|
||||
######## HERE MAYBE NEED TO FIX ###############
|
||||
# here we must use "machines/runnodes/nodeip"
|
||||
# we cannot use node['key'], node['key'] is absolute
|
||||
# path, etcd client will append the path to prefix,
|
||||
# which is wrong
|
||||
###############################################
|
||||
self.etcd.setkey("machines/runnodes/"+nodeip, "init-"+self.mode)
|
||||
else:
|
||||
self.etcd.setkey('machines/runnodes/'+nodeip, "init-new")
|
||||
elif node['value']=='work':
|
||||
logger.info ("new node %s joins" % nodeip)
|
||||
# setup GRE tunnels for new nodes
|
||||
if self.addr == nodeip:
|
||||
logger.debug ("worker start on master node. not need to setup GRE")
|
||||
else:
|
||||
logger.debug ("setup GRE for %s" % nodeip)
|
||||
if netcontrol.gre_exists('docklet-br', nodeip):
|
||||
logger.debug("GRE for %s already exists, reuse it" % nodeip)
|
||||
else:
|
||||
netcontrol.setup_gre('docklet-br', nodeip)
|
||||
self.runnodes.append(nodeip)
|
||||
self.etcd.setkey("machines/runnodes/"+nodeip, "ok")
|
||||
if nodeip not in self.allnodes:
|
||||
self.allnodes.append(nodeip)
|
||||
self.etcd.setkey("machines/allnodes/"+nodeip, "ok")
|
||||
logger.debug ("all nodes are: %s" % self.allnodes)
|
||||
logger.debug ("run nodes are: %s" % self.runnodes)
|
||||
self.rpcs.append(xmlrpc.client.ServerProxy("http://%s:%s"
|
||||
% (nodeip, workerport)))
|
||||
logger.info ("add %s:%s in rpc client list" %
|
||||
(nodeip, workerport))
|
||||
|
||||
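# self.rpcs above holds xmlrpc.client.ServerProxy handles, one per worker, and the master
# calls worker functions through them. A minimal self-contained sketch of that pairing;
# the port and the registered function are illustrative, not the worker's real rpc surface.
def demo_rpc_pair():
    from xmlrpc.server import SimpleXMLRPCServer
    server = SimpleXMLRPCServer(("127.0.0.1", 18999), logRequests=False, allow_none=True)
    server.register_function(lambda name: "created " + name, "create_container")
    threading.Thread(target=server.serve_forever, daemon=True).start()
    proxy = xmlrpc.client.ServerProxy("http://127.0.0.1:18999")
    print(proxy.create_container("alice-1-0"))   # -> "created alice-1-0"
    server.shutdown()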
# get all known nodes' IP addresses (allnodes)
|
||||
def get_nodeips(self):
|
||||
return self.allnodes
|
||||
|
||||
def get_rpcs(self):
|
||||
return self.rpcs
|
||||
|
||||
def get_onerpc(self):
|
||||
return self.rpcs[random.randint(0, len(self.rpcs)-1)]
|
||||
|
||||
def rpc_to_ip(self, rpcclient):
|
||||
return self.runnodes[self.rpcs.index(rpcclient)]
|
||||
|
||||
def ip_to_rpc(self, nodeip):
|
||||
return self.rpcs[self.runnodes.index(nodeip)]
|
||||
|
||||
def get_allnodes(self):
|
||||
return self.allnodes
|
|
@ -0,0 +1,31 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import requests, json
|
||||
|
||||
proxy_control="http://localhost:8001/api/routes"
|
||||
|
||||
def get_routes():
|
||||
try:
|
||||
resp = requests.get(proxy_control)
|
||||
except:
|
||||
return [False, 'Connect Failed']
|
||||
return [True, resp.json()]
|
||||
|
||||
def set_route(path, target):
|
||||
path='/'+path.strip('/')
|
||||
if path=='' or target=='':
|
||||
return [False, 'input not valid']
|
||||
try:
|
||||
resp = requests.post(proxy_control+path, data=json.dumps({'target':target}))
|
||||
except:
|
||||
return [False, 'Connect Failed']
|
||||
return [True, 'set ok']
|
||||
|
||||
def delete_route(path):
|
||||
path='/'+path.strip('/')
|
||||
try:
|
||||
resp = requests.delete(proxy_control+path)
|
||||
except:
|
||||
return [False, 'Connect Failed']
|
||||
# if exist and delete, status_code=204, if not exist, status_code=404
|
||||
return [True, 'delete ok']
|
|
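# A short usage sketch of the helpers above. proxy_control points at the proxy's REST
# API on localhost:8001; the route path and target below are illustrative, and a
# configurable-http-proxy-style /api/routes interface is assumed.
def demo_proxy_route():
    [ok, msg] = set_route("go/alice/mycluster", "http://10.0.8.2:10000")
    if not ok:
        return [False, msg]
    [ok, routes] = get_routes()     # JSON table of all routes currently installed
    print(routes)
    return delete_route("go/alice/mycluster")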
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import os, random
|
||||
|
||||
#from log import logger
|
||||
|
||||
def loadenv(configpath):
|
||||
configfile = open(configpath)
|
||||
#logger.info ("load environment from %s" % configpath)
|
||||
for line in configfile:
|
||||
line = line.strip()
|
||||
if line == '':
|
||||
continue
|
||||
keyvalue = line.split("=")
|
||||
if len(keyvalue) < 2:
|
||||
continue
|
||||
key = keyvalue[0].strip()
|
||||
value = keyvalue[1].strip()
|
||||
#logger.info ("load env and put env %s:%s" % (key, value))
|
||||
os.environ[key] = value
|
||||
|
||||
def gen_token():
|
||||
return str(random.randint(10000, 99999))+"-"+str(random.randint(10000, 99999))
|
|
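# loadenv() above expects a flat KEY=VALUE file and copies every pair into os.environ.
# A minimal, self-contained sketch with an illustrative temporary config (the keys are
# examples of settings used elsewhere in docklet, not a complete configuration):
import tempfile

def demo_loadenv():
    with tempfile.NamedTemporaryFile("w", suffix=".conf", delete=False) as f:
        f.write("FS_PREFIX = /opt/docklet\nWORKER_PORT = 9001\n")
        path = f.name
    loadenv(path)
    print(os.environ["FS_PREFIX"], os.environ["WORKER_PORT"])
    os.remove(path)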
@ -0,0 +1,643 @@
|
|||
'''
|
||||
userManager for Docklet
|
||||
provide a class for managing users and usergroups in Docklet
|
||||
Warning: in some early versions, "token" stand for the instance of class model.User
|
||||
now it stands for a string that can be parsed to get that instance.
|
||||
in all functions decorated with "@administration_required" or "@administration_or_self_required", "token" is the instance
|
||||
Original author: Liu Peidong
|
||||
'''
|
||||
|
||||
from model import db, User, UserGroup
|
||||
from functools import wraps
|
||||
import os, subprocess
|
||||
import hashlib
|
||||
import pam
|
||||
from base64 import b64encode
|
||||
import env
|
||||
import smtplib
|
||||
from email.mime.text import MIMEText
|
||||
from email.mime.multipart import MIMEMultipart
|
||||
from email.header import Header
|
||||
from datetime import datetime
|
||||
|
||||
email_from_address = env.getenv('EMAIL_FROM_ADDRESS')
|
||||
admin_email_address = env.getenv('ADMIN_EMAIL_ADDRESS')
|
||||
PAM = pam.pam()
|
||||
|
||||
if (env.getenv('EXTERNAL_LOGIN').lower() == 'true'):
|
||||
from plugin import external_receive
|
||||
|
||||
def administration_required(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
if ( ('cur_user' in kwargs) == False):
|
||||
return {"success":'false', "reason":"Cannot get cur_user"}
|
||||
cur_user = kwargs['cur_user']
|
||||
if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root')):
|
||||
return func(*args, **kwargs)
|
||||
else:
|
||||
return {"success": 'false', "reason": 'Unauthorized Action'}
|
||||
|
||||
return wrapper
|
||||
|
||||
def administration_or_self_required(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
if ( (not ('cur_user' in kwargs)) or (not ('user' in kwargs))):
|
||||
return {"success":'false', "reason":"Cannot get cur_user or user"}
|
||||
cur_user = kwargs['cur_user']
|
||||
user = kwargs['user']
|
||||
if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root') or (cur_user.username == user.username)):
|
||||
return func(*args, **kwargs)
|
||||
else:
|
||||
return {"success": 'false', "reason": 'Unauthorized Action'}
|
||||
|
||||
return wrapper
|
||||
|
||||
def token_required(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
if ( ('cur_user' in kwargs) == False):
|
||||
return {"success":'false', "reason":"Cannot get cur_user"}
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
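# The decorators above only inspect the keyword arguments; a minimal standalone sketch of
# how the gating works. _DummyUser is an illustrative stand-in for model.User, and
# delete_everything is a hypothetical decorated function, not part of this module.
class _DummyUser:
    def __init__(self, username, user_group):
        self.username = username
        self.user_group = user_group

@administration_required
def delete_everything(*args, **kwargs):
    return {"success": 'true'}

# delete_everything(cur_user=_DummyUser("alice", "primary"))  -> {"success": 'false', "reason": 'Unauthorized Action'}
# delete_everything(cur_user=_DummyUser("root", "root"))      -> {"success": 'true'}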
def send_activated_email(to_address, username):
|
||||
if (email_from_address in ['\'\'', '\"\"', '']):
|
||||
return
|
||||
#text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
|
||||
text = '<html><h4>Dear '+ username + ':</h4>'
|
||||
text += '''<p> Your account in <a href='%s'>%s</a> has been activated</p>
|
||||
<p> Enjoy your personal workspace in the cloud !</p>
|
||||
<br>
|
||||
<p> Note: DO NOT reply to this email!</p>
|
||||
<br><br>
|
||||
<p> <a href='http://docklet.unias.org'>Docklet Team</a>, SEI, PKU</p>
|
||||
''' % (env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
|
||||
text += '<p>'+ str(datetime.utcnow()) + '</p>'
|
||||
text += '</html>'
|
||||
subject = 'Docklet account activated'
|
||||
msg = MIMEMultipart()
|
||||
textmsg = MIMEText(text,'html','utf-8')
|
||||
msg['Subject'] = Header(subject, 'utf-8')
|
||||
msg['From'] = email_from_address
|
||||
msg['To'] = to_address
|
||||
msg.attach(textmsg)
|
||||
s = smtplib.SMTP()
|
||||
s.connect()
|
||||
s.sendmail(email_from_address, to_address, msg.as_string())
|
||||
s.close()
|
||||
|
||||
def send_remind_activating_email(username):
|
||||
nulladdr = ['\'\'', '\"\"', '']
|
||||
if (email_from_address in nulladdr or admin_email_address in nulladdr):
|
||||
return
|
||||
#text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
|
||||
text = '<html><h4>Dear '+ 'admin' + ':</h4>'
|
||||
text += '''<p> An activating request for %s in <a href='%s'>%s</a> has been sent</p>
|
||||
<p> Please check it !</p>
|
||||
<br/><br/>
|
||||
<p> Docklet Team, SEI, PKU</p>
|
||||
''' % (username, env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
|
||||
text += '<p>'+ str(datetime.utcnow()) + '</p>'
|
||||
text += '</html>'
|
||||
subject = 'An activating request in Docklet has been sent'
|
||||
msg = MIMEMultipart()
|
||||
textmsg = MIMEText(text,'html','utf-8')
|
||||
msg['Subject'] = Header(subject, 'utf-8')
|
||||
msg['From'] = email_from_address
|
||||
msg['To'] = admin_email_address
|
||||
msg.attach(textmsg)
|
||||
s = smtplib.SMTP()
|
||||
s.connect()
|
||||
s.sendmail(email_from_address, admin_email_address, msg.as_string())
|
||||
s.close()
|
||||
|
||||
|
||||
class userManager:
|
||||
def __init__(self, username = 'root', password = None):
|
||||
'''
|
||||
Try to create the database when there is none
|
||||
initialize 'root' user and 'root' & 'primary' group
|
||||
'''
|
||||
try:
|
||||
User.query.all()
|
||||
UserGroup.query.all()
|
||||
except:
|
||||
db.create_all()
|
||||
root = UserGroup('root')
|
||||
db.session.add(root)
|
||||
db.session.commit()
|
||||
if password == None:
|
||||
#set a random password
|
||||
password = os.urandom(16)
|
||||
password = b64encode(password).decode('utf-8')
|
||||
fsdir = env.getenv('FS_PREFIX')
|
||||
f = open(fsdir + '/local/generated_password.txt', 'w')
|
||||
f.write("User=%s\nPass=%s\n"%(username, password))
|
||||
f.close()
|
||||
sys_admin = User(username, hashlib.sha512(password.encode('utf-8')).hexdigest())
|
||||
sys_admin.status = 'normal'
|
||||
sys_admin.nickname = 'root'
|
||||
sys_admin.description = 'Root_User'
|
||||
sys_admin.user_group = 'root'
|
||||
sys_admin.auth_method = 'local'
|
||||
db.session.add(sys_admin)
|
||||
path = env.getenv('DOCKLET_LIB')
|
||||
subprocess.call([path+"/userinit.sh", username])
|
||||
db.session.commit()
|
||||
admin = UserGroup('admin')
|
||||
primary = UserGroup('primary')
|
||||
db.session.add(admin)
|
||||
db.session.add(primary)
|
||||
db.session.commit()
|
||||
|
||||
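# Passwords in this class are stored as the hex SHA-512 digest of the plain text, and the
# bootstrap root password generated in __init__ above is 16 random bytes encoded with
# base64. A minimal standalone sketch of both steps (the values are of course random):
def demo_password_bootstrap():
    plain = b64encode(os.urandom(16)).decode('utf-8')           # written to generated_password.txt
    stored = hashlib.sha512(plain.encode('utf-8')).hexdigest()  # what the User row keeps
    return plain, stored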
def auth_local(self, username, password):
|
||||
password = hashlib.sha512(password.encode('utf-8')).hexdigest()
|
||||
user = User.query.filter_by(username = username).first()
|
||||
if (user == None):
|
||||
return {"success":'false', "reason": "User did not exist"}
|
||||
if (user.password != password):
|
||||
return {"success":'false', "reason": "Wrong password"}
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"group" : user.user_group,
|
||||
"token" : user.generate_auth_token(),
|
||||
}
|
||||
}
|
||||
return result
|
||||
|
||||
def auth_pam(self, username, password):
|
||||
user = User.query.filter_by(username = username).first()
|
||||
pamresult = PAM.authenticate(username, password)
|
||||
if (pamresult == False or (user != None and user.auth_method != 'pam')):
|
||||
return {"success":'false', "reason": "Wrong password or wrong login method"}
|
||||
if (user == None):
|
||||
newuser = self.newuser()
|
||||
newuser.username = username
|
||||
newuser.password = "no_password"
|
||||
newuser.nickname = username
|
||||
newuser.status = "init"
|
||||
newuser.user_group = "primary"
|
||||
newuser.auth_method = "pam"
|
||||
self.register(user = newuser)
|
||||
user = User.query.filter_by(username = username).first()
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"group" : user.user_group,
|
||||
"token" : user.generate_auth_token(),
|
||||
}
|
||||
}
|
||||
return result
|
||||
|
||||
def auth_external(self, form):
|
||||
|
||||
if (env.getenv('EXTERNAL_LOGIN') != 'True'):
|
||||
failed_result = {'success': 'false', 'reason' : 'external auth disabled'}
|
||||
return failed_result
|
||||
|
||||
result = external_receive.external_auth_receive_request(form)
|
||||
|
||||
if (result['success'] != 'True'):
|
||||
failed_result = {'success':'false', 'result': result}
|
||||
return failed_result
|
||||
|
||||
username = result['username']
|
||||
user = User.query.filter_by(username = username).first()
|
||||
if (user != None and user.auth_method == result['auth_method']):
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"group" : user.user_group,
|
||||
"token" : user.generate_auth_token(),
|
||||
}
|
||||
}
|
||||
return result
|
||||
if (user != None and user.auth_method != result['auth_method']):
|
||||
result = {'success': 'false', 'reason': 'other kinds of account already exists'}
|
||||
return result
|
||||
#user == None , register an account for external user
|
||||
newuser = self.newuser()
|
||||
newuser.username = result['username']
|
||||
newuser.password = result['password']
|
||||
newuser.avatar = result['avatar']
|
||||
newuser.nickname = result['nickname']
|
||||
newuser.description = result['description']
|
||||
newuser.e_mail = result['e_mail']
|
||||
newuser.truename = result['truename']
|
||||
newuser.student_number = result['student_number']
|
||||
newuser.status = result['status']
|
||||
newuser.user_group = result['user_group']
|
||||
newuser.auth_method = result['auth_method']
|
||||
newuser.department = result['department']
|
||||
newuser.tel = result['tel']
|
||||
self.register(user = newuser)
|
||||
user = User.query.filter_by(username = username).first()
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"group" : user.user_group,
|
||||
"token" : user.generate_auth_token(),
|
||||
}
|
||||
}
|
||||
return result
|
||||
|
||||
def auth(self, username, password):
|
||||
'''
|
||||
authenticate a user by username & password
|
||||
return a token as well as some user information
|
||||
'''
|
||||
user = User.query.filter_by(username = username).first()
|
||||
if (user == None or user.auth_method =='pam'):
|
||||
return self.auth_pam(username, password)
|
||||
elif (user.auth_method == 'local'):
|
||||
return self.auth_local(username, password)
|
||||
else:
|
||||
result = {'success':'false', 'reason':'auth_method error'}
|
||||
return result
|
||||
|
||||
def auth_token(self, token):
|
||||
'''
|
||||
authenticate a user by a token
|
||||
when it succeeds, return the user instance from the database
|
||||
otherwise return None
|
||||
'''
|
||||
user = User.verify_auth_token(token)
|
||||
return user
|
||||
|
||||
@administration_required
|
||||
def query(*args, **kwargs):
|
||||
'''
|
||||
Usage: query(username = 'xxx', cur_user = token_from_auth)
|
||||
|| query(ID = a_integer, cur_user = token_from_auth)
|
||||
Provide information about one user that administrators need to use
|
||||
'''
|
||||
if ( 'ID' in kwargs):
|
||||
user = User.query.filter_by(id = kwargs['ID']).first()
|
||||
if (user == None):
|
||||
return {"success":False, "reason":"User does not exist"}
|
||||
result = {
|
||||
"success":'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"password" : user.password,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"e_mail" : user.e_mail,
|
||||
"student_number": user.student_number,
|
||||
"department" : user.department,
|
||||
"truename" : user.truename,
|
||||
"tel" : user.tel,
|
||||
"register_date" : "%s"%(user.register_date),
|
||||
"group" : user.user_group,
|
||||
"description" : user.description,
|
||||
},
|
||||
"token": user
|
||||
}
|
||||
return result
|
||||
|
||||
if ( 'username' not in kwargs):
|
||||
return {"success":'false', "reason":"Cannot get 'username'"}
|
||||
username = kwargs['username']
|
||||
user = User.query.filter_by(username = username).first()
|
||||
if (user == None):
|
||||
return {"success":'false', "reason":"User does not exist"}
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"password" : user.password,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"e_mail" : user.e_mail,
|
||||
"student_number": user.student_number,
|
||||
"department" : user.department,
|
||||
"truename" : user.truename,
|
||||
"tel" : user.tel,
|
||||
"register_date" : "%s"%(user.register_date),
|
||||
"group" : user.user_group,
|
||||
},
|
||||
"token": user
|
||||
}
|
||||
return result
|
||||
|
||||
@token_required
|
||||
def selfQuery(*args, **kwargs):
|
||||
'''
|
||||
Usage: selfQuery(cur_user = token_from_auth)
|
||||
List information for oneself
|
||||
'''
|
||||
user = kwargs['cur_user']
|
||||
group = UserGroup.query.filter_by(name = user.user_group).first()
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"password" : user.password,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"e_mail" : user.e_mail,
|
||||
"student_number": user.student_number,
|
||||
"department" : user.department,
|
||||
"truename" : user.truename,
|
||||
"tel" : user.tel,
|
||||
"register_date" : "%s"%(user.register_date),
|
||||
"group" : user.user_group,
|
||||
"groupinfo": {
|
||||
"cpu": group.cpu,
|
||||
"memory": group.memory,
|
||||
"imageQuantity": group.imageQuantity,
|
||||
"lifeCycle":group.lifeCycle,
|
||||
},
|
||||
},
|
||||
}
|
||||
return result
|
||||
|
||||
@token_required
|
||||
def selfModify(*args, **kwargs):
|
||||
'''
|
||||
Usage: selfModify(cur_user = token_from_auth, newValue = form)
|
||||
Modify information for oneself
|
||||
'''
|
||||
form = kwargs['newValue']
|
||||
name = form.getvalue('name', None)
|
||||
value = form.getvalue('value', None)
|
||||
if (name == None or value == None):
|
||||
result = {'success': 'false'}
|
||||
return result
|
||||
user = User.query.filter_by(username = kwargs['cur_user'].username).first()
|
||||
if (name == 'nickname'):
|
||||
user.nickname = value
|
||||
elif (name == 'description'):
|
||||
user.description = value
|
||||
elif (name == 'department'):
|
||||
user.department = value
|
||||
elif (name == 'e_mail'):
|
||||
user.e_mail = value
|
||||
elif (name == 'tel'):
|
||||
user.tel = value
|
||||
else:
|
||||
result = {'success': 'false'}
|
||||
return result
|
||||
db.session.commit()
|
||||
result = {'success': 'true'}
|
||||
return result
|
||||
|
||||
|
||||
@administration_required
|
||||
def userList(*args, **kwargs):
|
||||
'''
|
||||
Usage: userList(cur_user = token_from_auth)
|
||||
List all users for an administrator
|
||||
'''
|
||||
alluser = User.query.all()
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":[]
|
||||
}
|
||||
for user in alluser:
|
||||
userinfo = [
|
||||
user.id,
|
||||
user.username,
|
||||
user.truename,
|
||||
user.e_mail,
|
||||
user.tel,
|
||||
"%s"%(user.register_date),
|
||||
user.status,
|
||||
user.user_group,
|
||||
'',
|
||||
]
|
||||
result["data"].append(userinfo)
|
||||
return result
|
||||
|
||||
@administration_required
|
||||
def groupList(*args, **kwargs):
|
||||
'''
|
||||
Usage: groupList(cur_user = token_from_auth)
|
||||
List all groups for an administrator
|
||||
'''
|
||||
allgroup = UserGroup.query.all()
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":[]
|
||||
}
|
||||
for group in allgroup:
|
||||
groupinfo = [
|
||||
group.id,
|
||||
group.name,
|
||||
group.cpu,
|
||||
group.memory,
|
||||
group.imageQuantity,
|
||||
group.lifeCycle,
|
||||
'',
|
||||
]
|
||||
result["data"].append(groupinfo)
|
||||
return result
|
||||
|
||||
@administration_required
|
||||
def groupQuery(*args, **kwargs):
|
||||
'''
|
||||
Usage: groupQuery(ID = XXX, cur_user = token_from_auth)
|
||||
List a group for an administrator
|
||||
'''
|
||||
group = UserGroup.query.filter_by(id = kwargs['ID']).first()
|
||||
if (group == None):
|
||||
return {"success":False, "reason":"Group does not exist"}
|
||||
result = {
|
||||
"success":'true',
|
||||
"data":{
|
||||
"name" : group.name ,
|
||||
"cpu" : group.cpu ,
|
||||
"memory" : group.memory,
|
||||
"imageQuantity" : group.imageQuantity,
|
||||
"lifeCycle" : group.lifeCycle,
|
||||
}
|
||||
}
|
||||
return result
|
||||
|
||||
@administration_required
|
||||
def groupListName(*args, **kwargs):
|
||||
'''
|
||||
Usage: groupListName(cur_user = token_from_auth)
|
||||
List all group names for an administrator
|
||||
'''
|
||||
groups = UserGroup.query.all()
|
||||
result = {
|
||||
"groups": [],
|
||||
}
|
||||
for group in groups:
|
||||
result["groups"].append(group.name)
|
||||
return result
|
||||
|
||||
@administration_required
|
||||
def groupModify(*args, **kwargs):
|
||||
'''
|
||||
Usage: groupModify(newValue = dict_from_form, cur_user = token_from_auth)
|
||||
'''
|
||||
group_modify = UserGroup.query.filter_by(name = kwargs['newValue'].getvalue('groupname', None)).first()
|
||||
if (group_modify == None):
|
||||
return {"success":'false', "reason":"UserGroup does not exist"}
|
||||
form = kwargs['newValue']
|
||||
group_modify.cpu = form.getvalue('cpu', '')
|
||||
group_modify.memory = form.getvalue('memory', '')
|
||||
group_modify.imageQuantity = form.getvalue('image', '')
|
||||
group_modify.lifeCycle = form.getvalue('lifecycle', '')
|
||||
db.session.commit()
|
||||
return {"success":'true'}
|
||||
|
||||
@administration_required
|
||||
def modify(*args, **kwargs):
|
||||
'''
|
||||
modify a user's information in database
|
||||
will send an e-mail when status is changed from 'applying' to 'normal'
|
||||
Usage: modify(newValue = dict_from_form, cur_user = token_from_auth)
|
||||
'''
|
||||
user_modify = User.query.filter_by(username = kwargs['newValue'].getvalue('username', None)).first()
|
||||
if (user_modify == None):
|
||||
|
||||
return {"success":'false', "reason":"User does not exist"}
|
||||
|
||||
#try:
|
||||
form = kwargs['newValue']
|
||||
user_modify.truename = form.getvalue('truename', '')
|
||||
user_modify.e_mail = form.getvalue('e_mail', '')
|
||||
user_modify.department = form.getvalue('department', '')
|
||||
user_modify.student_number = form.getvalue('student_number', '')
|
||||
user_modify.tel = form.getvalue('tel', '')
|
||||
user_modify.user_group = form.getvalue('group', '')
|
||||
user_modify.auth_method = form.getvalue('auth_method', '')
|
||||
if (user_modify.status == 'applying' and form.getvalue('status', '') == 'normal'):
|
||||
send_activated_email(user_modify.e_mail, user_modify.username)
|
||||
user_modify.status = form.getvalue('status', '')
|
||||
if (form.getvalue('Chpassword', '') == 'Yes'):
|
||||
new_password = form.getvalue('password','no_password')
|
||||
new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
|
||||
user_modify.password = new_password
|
||||
#self.chpassword(cur_user = user_modify, password = form.getvalue('password','no_password'))
|
||||
|
||||
db.session.commit()
|
||||
return {"success":'true'}
|
||||
#except:
|
||||
#return {"success":'false', "reason":"Something happened"}
|
||||
|
||||
@token_required
|
||||
def chpassword(*args, **kwargs):
|
||||
'''
|
||||
Usage: chpassword(cur_user = token_from_auth, password = 'your_password')
|
||||
'''
|
||||
cur_user = kwargs['cur_user']
|
||||
cur_user.password = hashlib.sha512(kwargs['password'].encode('utf-8')).hexdigest()
|
||||
|
||||
def newuser(*args, **kwargs):
|
||||
'''
|
||||
Usage : newuser()
|
||||
The only method to create a new user
|
||||
call this method first, modify the return value (a database row instance), then call self.register()
|
||||
'''
|
||||
user_new = User('newuser', 'asdf1234')
|
||||
user_new.user_group = 'primary'
|
||||
user_new.avatar = 'default.png'
|
||||
return user_new
|
||||
|
||||
def register(*args, **kwargs):
|
||||
'''
|
||||
Usage: register(user = modified_from_newuser())
|
||||
'''
|
||||
|
||||
if (kwargs['user'].username == None or kwargs['user'].username == ''):
|
||||
return {"success":'false', "reason": "Empty username"}
|
||||
user_check = User.query.filter_by(username = kwargs['user'].username).first()
|
||||
if (user_check != None and user_check.status != "init"):
|
||||
#for the activating form
|
||||
return {"success":'false', "reason": "Unauthorized action"}
|
||||
if (user_check != None and (user_check.status == "init")):
|
||||
db.session.delete(user_check)
|
||||
db.session.commit()
|
||||
newuser = kwargs['user']
|
||||
newuser.password = hashlib.sha512(newuser.password.encode('utf-8')).hexdigest()
|
||||
db.session.add(newuser)
|
||||
db.session.commit()
|
||||
|
||||
# if newuser status is normal, init some data for this user
|
||||
# now initialize for all kind of users
|
||||
#if newuser.status == 'normal':
|
||||
path = env.getenv('DOCKLET_LIB')
|
||||
subprocess.call([path+"/userinit.sh", newuser.username])
|
||||
return {"success":'true'}
|
||||
|
||||
@administration_required
|
||||
def groupadd(*args, **kwargs):
|
||||
name = kwargs.get('name', None)
|
||||
if (name == None):
|
||||
return {"success":'false', "reason": "Empty group name"}
|
||||
group_new = UserGroup(name)
|
||||
db.session.add(group_new)
|
||||
db.session.commit()
|
||||
return {"success":'true'}
|
||||
|
||||
def queryForDisplay(*args, **kwargs):
|
||||
'''
|
||||
Usage: queryForDisplay(user = token_from_auth)
|
||||
Provide information about one user that administrators need to use
|
||||
'''
|
||||
|
||||
if ( 'user' not in kwargs):
|
||||
return {"success":'false', "reason":"Cannot get 'user'"}
|
||||
user = kwargs['user']
|
||||
if (user == None):
|
||||
return {"success":'false', "reason":"User does not exist"}
|
||||
result = {
|
||||
"success": 'true',
|
||||
"data":{
|
||||
"username" : user.username,
|
||||
"password" : user.password,
|
||||
"avatar" : user.avatar,
|
||||
"nickname" : user.nickname,
|
||||
"description" : user.description,
|
||||
"status" : user.status,
|
||||
"e_mail" : user.e_mail,
|
||||
"student_number": user.student_number,
|
||||
"department" : user.department,
|
||||
"truename" : user.truename,
|
||||
"tel" : user.tel,
|
||||
"register_date" : "%s"%(user.register_date),
|
||||
"group" : user.user_group,
|
||||
"auth_method": user.auth_method,
|
||||
}
|
||||
}
|
||||
return result
|
||||
|
||||
# def usermodify(rowID, columnID, newValue, cur_user):
|
||||
# '''not used now'''
|
||||
# user = um.query(ID = request.form["rowID"], cur_user = root).get('token', None)
|
||||
# result = um.modify(user = user, columnID = request.form["columnID"], newValue = request.form["newValue"], cur_user = root)
|
||||
# return json.dumps(result)
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/bash
|
||||
|
||||
# initialize for a new user
|
||||
# initialize directory : clusters, data, ssh
|
||||
# generate ssh keys for new user
|
||||
|
||||
[ -z $FS_PREFIX ] && FS_PREFIX="/opt/docklet"
|
||||
|
||||
USERNAME=$1
|
||||
|
||||
[ -z $USERNAME ] && echo "[userinit.sh] USERNAME is needed" && exit 1
|
||||
|
||||
echo "[Info] [userinit.sh] initialize for user $USERNAME"
|
||||
|
||||
USER_DIR=$FS_PREFIX/global/users/$USERNAME
|
||||
[ -d $USER_DIR ] && echo "[userinit.sh] user directory already exists, delete it" && rm -r $USER_DIR
|
||||
|
||||
mkdir -p $USER_DIR/{clusters,hosts,data,ssh}
|
||||
|
||||
SSH_DIR=$USER_DIR/ssh
|
||||
# the generated id_rsa.pub has "user@hostname" at the end
|
||||
# maybe it should be deleted
|
||||
ssh-keygen -t rsa -P '' -f $SSH_DIR/id_rsa &>/dev/null
|
||||
cp $SSH_DIR/id_rsa.pub $SSH_DIR/authorized_keys
|
|
@ -0,0 +1,397 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import os, random, json, sys, imagemgr
|
||||
import datetime
|
||||
|
||||
from log import logger
|
||||
import env
|
||||
import proxytool
|
||||
|
||||
##################################################
|
||||
# VclusterMgr
|
||||
# Description : VclusterMgr start/stop/manage virtual clusters
|
||||
#
|
||||
##################################################
|
||||
|
||||
class VclusterMgr(object):
|
||||
def __init__(self, nodemgr, networkmgr, etcdclient, addr, mode):
|
||||
self.mode = mode
|
||||
self.nodemgr = nodemgr
|
||||
self.imgmgr = imagemgr.ImageMgr()
|
||||
self.networkmgr = networkmgr
|
||||
self.addr = addr
|
||||
self.etcd = etcdclient
|
||||
self.defaultsize = env.getenv("CLUSTER_SIZE")
|
||||
self.fspath = env.getenv("FS_PREFIX")
|
||||
|
||||
logger.info ("vcluster start on %s" % (self.addr))
|
||||
if self.mode == 'new':
|
||||
logger.info ("starting in new mode on %s" % (self.addr))
|
||||
# check if all clusters data are deleted in httprest.py
|
||||
clean = True
|
||||
usersdir = self.fspath+"/global/users/"
|
||||
for user in os.listdir(usersdir):
|
||||
if len(os.listdir(usersdir+user+"/clusters")) > 0 or len(os.listdir(usersdir+user+"/hosts")) > 0:
|
||||
clean = False
|
||||
if not clean:
|
||||
logger.error ("clusters files not clean, start failed")
|
||||
sys.exit(1)
|
||||
elif self.mode == "recovery":
|
||||
logger.info ("starting in recovery mode on %s" % (self.addr))
|
||||
self.recover_allclusters()
|
||||
else:
|
||||
logger.error ("not supported mode:%s" % self.mode)
|
||||
sys.exit(1)
|
||||
|
||||
def recover_allclusters(self):
|
||||
logger.info("recovering all vclusters for all users...")
|
||||
usersdir = self.fspath+"/global/users/"
|
||||
for user in os.listdir(usersdir):
|
||||
for cluster in self.list_clusters(user)[1]:
|
||||
logger.info ("recovering cluster:%s for user:%s ..." % (cluster, user))
|
||||
self.recover_cluster(cluster, user)
|
||||
logger.info("recovered all vclusters for all users")
|
||||
|
||||
def create_cluster(self, clustername, username, image, user_info):
|
||||
if self.is_cluster(clustername, username):
|
||||
return [False, "cluster:%s already exists" % clustername]
|
||||
clustersize = int(self.defaultsize)
|
||||
logger.info ("starting cluster %s with %d containers for %s" % (clustername, int(clustersize), username))
|
||||
workers = self.nodemgr.get_rpcs()
|
||||
image_json = json.dumps(image)
|
||||
if (len(workers) == 0):
|
||||
logger.warning ("no workers to start containers, start cluster failed")
|
||||
return [False, "no workers are running"]
|
||||
# check user IP pool status, should be moved to user init later
|
||||
if not self.networkmgr.has_user(username):
|
||||
self.networkmgr.add_user(username, cidr=29)
|
||||
[status, result] = self.networkmgr.acquire_userips_cidr(username, clustersize)
|
||||
gateway = self.networkmgr.get_usergw(username)
|
||||
vlanid = self.networkmgr.get_uservlanid(username)
|
||||
logger.info ("create cluster with gateway : %s" % gateway)
|
||||
self.networkmgr.printpools()
|
||||
if not status:
|
||||
logger.info ("create cluster failed: %s" % result)
|
||||
return [False, result]
|
||||
ips = result
|
||||
clusterid = self._acquire_id()
|
||||
clusterpath = self.fspath+"/global/users/"+username+"/clusters/"+clustername
|
||||
hostpath = self.fspath+"/global/users/"+username+"/hosts/"+str(clusterid)+".hosts"
|
||||
hosts = "127.0.0.1\tlocalhost\n"
|
||||
containers = []
|
||||
for i in range(0, clustersize):
|
||||
onework = workers[random.randint(0, len(workers)-1)]
|
||||
lxc_name = username + "-" + str(clusterid) + "-" + str(i)
|
||||
hostname = "host-"+str(i)
|
||||
logger.info ("create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, image-%s" % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, image_json))
|
||||
[success,message] = onework.create_container(lxc_name, username, user_info , clustername, str(clusterid), hostname, ips[i], gateway, str(vlanid), image_json)
|
||||
if success is False:
|
||||
logger.info("container create failed, so vcluster create failed")
|
||||
return [False, message]
|
||||
logger.info("container create success")
|
||||
hosts = hosts + ips[i].split("/")[0] + "\t" + hostname + "\t" + hostname + "."+clustername + "\n"
|
||||
containers.append({ 'containername':lxc_name, 'hostname':hostname, 'ip':ips[i], 'host':self.nodemgr.rpc_to_ip(onework), 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") })
|
||||
hostfile = open(hostpath, 'w')
|
||||
hostfile.write(hosts)
|
||||
hostfile.close()
|
||||
clusterfile = open(clusterpath, 'w')
|
||||
proxy_url = env.getenv("PORTAL_URL") + "/_web/" + username + "/" + clustername
|
||||
info = {'clusterid':clusterid, 'status':'stopped', 'size':clustersize, 'containers':containers, 'nextcid': clustersize, 'create_time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'start_time':"------" , 'proxy_url':proxy_url}
|
||||
clusterfile.write(json.dumps(info))
|
||||
clusterfile.close()
|
||||
return [True, info]
|
||||
|
||||
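# create_cluster above serializes one JSON object per vcluster into
# FS_PREFIX/global/users/<user>/clusters/<clustername>. A hedged sketch of that record,
# following the `info` dict built above; every value here is illustrative.
example_cluster_info = {
    "clusterid": 3,
    "status": "stopped",                      # "stopped" or "running"
    "size": 2,                                # number of containers
    "nextcid": 2,                             # next container index used by scale_out_cluster
    "create_time": "2016-04-01 12:00:00",
    "start_time": "------",
    "proxy_url": "http://portal.example.com/_web/alice/mycluster",
    "containers": [
        {"containername": "alice-3-0", "hostname": "host-0", "ip": "10.0.8.2/29",
         "host": "10.0.0.2", "image": "base", "lastsave": "2016-04-01 12:00:00"},
        {"containername": "alice-3-1", "hostname": "host-1", "ip": "10.0.8.3/29",
         "host": "10.0.0.3", "image": "base", "lastsave": "2016-04-01 12:00:00"},
    ],
}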
def scale_out_cluster(self,clustername,username,image,user_info):
|
||||
if not self.is_cluster(clustername,username):
|
||||
return [False, "cluster:%s not found" % clustername]
|
||||
workers = self.nodemgr.get_rpcs()
|
||||
if (len(workers) == 0):
|
||||
logger.warning("no workers to start containers, scale out failed")
|
||||
return [False, "no workers are running"]
|
||||
image_json = json.dumps(image)
|
||||
[status, result] = self.networkmgr.acquire_userips_cidr(username)
|
||||
gateway = self.networkmgr.get_usergw(username)
|
||||
vlanid = self.networkmgr.get_uservlanid(username)
|
||||
self.networkmgr.printpools()
|
||||
if not status:
|
||||
return [False, result]
|
||||
ip = result[0]
|
||||
[status, clusterinfo] = self.get_clusterinfo(clustername,username)
|
||||
clusterid = clusterinfo['clusterid']
|
||||
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
|
||||
hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts"
|
||||
cid = clusterinfo['nextcid']
|
||||
onework = workers[random.randint(0, len(workers)-1)]
|
||||
lxc_name = username + "-" + str(clusterid) + "-" + str(cid)
|
||||
hostname = "host-" + str(cid)
|
||||
[success, message] = onework.create_container(lxc_name, username, user_info, clustername, clusterid, hostname, ip, gateway, str(vlanid), image_json)
|
||||
if success is False:
|
||||
logger.info("create container failed, so scale out failed")
|
||||
return [False, message]
|
||||
if clusterinfo['status'] == "running":
|
||||
onework.start_container(lxc_name)
|
||||
onework.start_services(lxc_name, ["ssh"]) # TODO: need fix
|
||||
logger.info("scale out success")
|
||||
hostfile = open(hostpath, 'a')
|
||||
hostfile.write(ip.split("/")[0] + "\t" + hostname + "\t" + hostname + "." + clustername + "\n")
|
||||
hostfile.close()
|
||||
clusterinfo['nextcid'] = int(clusterinfo['nextcid']) + 1
|
||||
clusterinfo['size'] = int(clusterinfo['size']) + 1
|
||||
clusterinfo['containers'].append({'containername':lxc_name, 'hostname':hostname, 'ip':ip, 'host':self.nodemgr.rpc_to_ip(onework), 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") })
|
||||
clusterfile = open(clusterpath, 'w')
|
||||
clusterfile.write(json.dumps(clusterinfo))
|
||||
clusterfile.close()
|
||||
return [True, clusterinfo]
|
||||
|
||||
def addproxy(self,username,clustername,ip,port):
|
||||
[status, clusterinfo] = self.get_clusterinfo(clustername, username)
|
||||
if 'proxy_ip' in clusterinfo:
|
||||
return [False, "proxy already exists"]
|
||||
target = "http://" + ip + ":" + port
|
||||
clusterinfo['proxy_ip'] = ip + ":" + port
|
||||
clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
|
||||
clusterfile.write(json.dumps(clusterinfo))
|
||||
clusterfile.close()
|
||||
proxytool.set_route("/_web/" + username + "/" + clustername, target)
|
||||
return [True, clusterinfo]
|
||||
|
||||
def deleteproxy(self, username, clustername):
|
||||
[status, clusterinfo] = self.get_clusterinfo(clustername, username)
|
||||
if 'proxy_ip' not in clusterinfo:
|
||||
return [False, "proxy not exists"]
|
||||
clusterinfo.pop('proxy_ip')
|
||||
clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
|
||||
clusterfile.write(json.dumps(clusterinfo))
|
||||
clusterfile.close()
|
||||
proxytool.delete_route("/_web/" + username + "/" + clustername)
|
||||
return [True, clusterinfo]
|
||||
|
||||
def flush_cluster(self,username,clustername,containername):
|
||||
begintime = datetime.datetime.now()
|
||||
[status, info] = self.get_clusterinfo(clustername, username)
|
||||
if not status:
|
||||
return [False, "cluster not found"]
|
||||
containers = info['containers']
|
||||
imagetmp = username + "_tmp_docklet"
|
||||
for container in containers:
|
||||
if container['containername'] == containername:
|
||||
logger.info("container: %s found" % containername)
|
||||
onework = self.nodemgr.ip_to_rpc(container['host'])
|
||||
onework.create_image(username,imagetmp,containername)
|
||||
fimage = container['image']
|
||||
logger.info("image: %s created" % imagetmp)
|
||||
break
|
||||
else:
|
||||
logger.error("container: %s not found" % containername)
|
||||
for container in containers:
|
||||
if container['containername'] != containername:
|
||||
logger.info("container: %s now flush" % container['containername'])
|
||||
onework = self.nodemgr.ip_to_rpc(container['host'])
|
||||
#t = threading.Thread(target=onework.flush_container,args=(username,imagetmp,container['containername']))
|
||||
#threads.append(t)
|
||||
onework.flush_container(username,imagetmp,container['containername'])
|
||||
container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
container['image'] = fimage
|
||||
logger.info("thread for container: %s has been prepared" % container['containername'])
|
||||
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
|
||||
infofile = open(clusterpath,'w')
|
||||
infofile.write(json.dumps(info))
|
||||
infofile.close()
|
||||
self.imgmgr.removeImage(username,imagetmp)
|
||||
endtime = datetime.datetime.now()
|
||||
dtime = (endtime - begintime).seconds
|
||||
logger.info("flush spend %s seconds" % dtime)
|
||||
logger.info("flush success")
|
||||
|
||||
|
||||
def create_image(self,username,clustername,containername,imagename,description,isforce=False):
|
||||
[status, info] = self.get_clusterinfo(clustername,username)
|
||||
if not status:
|
||||
return [False, "cluster not found"]
|
||||
containers = info['containers']
|
||||
for container in containers:
|
||||
if container['containername'] == containername:
|
||||
logger.info("container: %s found" % containername)
|
||||
onework = self.nodemgr.ip_to_rpc(container['host'])
|
||||
res = onework.create_image(username,imagename,containername,description,isforce)
|
||||
container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
container['image'] = imagename
|
||||
break
|
||||
else:
|
||||
res = [False, "container not found"]
|
||||
logger.error("container: %s not found" % containername)
|
||||
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
|
||||
infofile = open(clusterpath, 'w')
|
||||
infofile.write(json.dumps(info))
|
||||
infofile.close()
|
||||
return res
|
||||
|
||||
def delete_cluster(self, clustername, username):
|
||||
[status, info] = self.get_clusterinfo(clustername, username)
|
||||
if not status:
|
||||
return [False, "cluster not found"]
|
||||
if info['status']=='running':
|
||||
return [False, "cluster is still running, you need to stop it and then delete"]
|
||||
ips = []
|
||||
for container in info['containers']:
|
||||
worker = self.nodemgr.ip_to_rpc(container['host'])
|
||||
worker.delete_container(container['containername'])
|
||||
ips.append(container['ip'])
|
||||
logger.info("delete vcluster and release vcluster ips")
|
||||
self.networkmgr.release_userips(username, ips)
|
||||
self.networkmgr.printpools()
|
||||
os.remove(self.fspath+"/global/users/"+username+"/clusters/"+clustername)
|
||||
os.remove(self.fspath+"/global/users/"+username+"/hosts/"+str(info['clusterid'])+".hosts")
|
||||
return [True, "cluster delete"]
|
||||
|
||||
def scale_in_cluster(self, clustername, username, containername):
|
||||
[status, info] = self.get_clusterinfo(clustername, username)
|
||||
if not status:
|
||||
return [False, "cluster not found"]
|
||||
new_containers = []
|
||||
for container in info['containers']:
|
||||
if container['containername'] == containername:
|
||||
worker = self.nodemgr.ip_to_rpc(container['host'])
|
||||
worker.delete_container(containername)
|
||||
self.networkmgr.release_userips(username, container['ip'])
|
||||
self.networkmgr.printpools()
|
||||
else:
|
||||
new_containers.append(container)
|
||||
info['containers'] = new_containers
|
||||
info['size'] -= 1
|
||||
cid = containername[containername.rindex("-")+1:]
|
||||
clusterid = info['clusterid']
|
||||
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
|
||||
hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts"
|
||||
clusterfile = open(clusterpath, 'w')
|
||||
clusterfile.write(json.dumps(info))
|
||||
clusterfile.close()
|
||||
hostfile = open(hostpath, 'r')
|
||||
hostinfo = hostfile.readlines()
|
||||
hostfile.close()
|
||||
hostfile = open(hostpath, 'w')
|
||||
new_hostinfo = []
|
||||
new_hostinfo.append(hostinfo[0])
|
||||
for host in hostinfo[1:]:
|
||||
parts = host.split("\t")
|
||||
if parts[1][parts[1].rindex("-")+1:] == cid:
|
||||
pass
|
||||
else:
|
||||
new_hostinfo.append(host)
|
||||
hostfile.writelines(new_hostinfo)
|
||||
hostfile.close()
|
||||
return [True, info]
|
||||
|
||||
|
||||
def start_cluster(self, clustername, username):
|
||||
[status, info] = self.get_clusterinfo(clustername, username)
|
||||
if not status:
|
||||
return [False, "cluster not found"]
|
||||
if info['status'] == 'running':
|
||||
return [False, "cluster is already running"]
|
||||
# check gateway for user
|
||||
# after a reboot the user gateway goes down and loses its configuration
|
||||
# so, check is necessary
|
||||
self.networkmgr.check_usergw(username)
|
||||
# set proxy
|
||||
try:
|
||||
target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
|
||||
proxytool.set_route('/go/'+username+'/'+clustername, target)
|
||||
except:
|
||||
return [False, "start cluster failed with setting proxy failed"]
|
||||
for container in info['containers']:
|
||||
worker = self.nodemgr.ip_to_rpc(container['host'])
|
||||
worker.start_container(container['containername'])
|
||||
worker.start_services(container['containername'])
|
||||
info['status']='running'
|
||||
info['start_time']=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
infofile = open(self.fspath+"/global/users/"+username+"/clusters/"+clustername, 'w')
|
||||
infofile.write(json.dumps(info))
|
||||
infofile.close()
|
||||
return [True, "start cluster"]
|
||||
|
||||
def recover_cluster(self, clustername, username):
|
||||
[status, info] = self.get_clusterinfo(clustername, username)
|
||||
if not status:
|
||||
return [False, "cluster not found"]
|
||||
if info['status'] == 'stopped':
|
||||
return [True, "cluster no need to start"]
|
||||
# need to check and recover gateway of this user
|
||||
self.networkmgr.check_usergw(username)
|
||||
# recover proxy of cluster
|
||||
try:
|
||||
target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
|
||||
proxytool.set_route('/go/'+username+'/'+clustername, target)
|
||||
except:
|
||||
return [False, "start cluster failed with setting proxy failed"]
|
||||
|
||||
# recover containers of this cluster
|
||||
for container in info['containers']:
|
||||
worker = self.nodemgr.ip_to_rpc(container['host'])
|
||||
worker.recover_container(container['containername'])
|
||||
return [True, "start cluster"]
|
||||
|
||||
|
||||
# maybe here should use cluster id
|
||||
def stop_cluster(self, clustername, username):
|
||||
[status, info] = self.get_clusterinfo(clustername, username)
|
||||
if not status:
|
||||
return [False, "cluster not found"]
|
||||
if info['status'] == 'stopped':
|
||||
return [False, 'cluster is already stopped']
|
||||
for container in info['containers']:
|
||||
worker = self.nodemgr.ip_to_rpc(container['host'])
|
||||
worker.stop_container(container['containername'])
|
||||
info['status']='stopped'
|
||||
info['start_time']="------"
|
||||
infofile = open(self.fspath+"/global/users/"+username+"/clusters/"+clustername, 'w')
|
||||
infofile.write(json.dumps(info))
|
||||
infofile.close()
|
||||
return [True, "start cluster"]
|
||||
|
||||
def list_clusters(self, user):
|
||||
if not os.path.exists(self.fspath+"/global/users/"+user+"/clusters"):
|
||||
return [True, []]
|
||||
clusters = os.listdir(self.fspath+"/global/users/"+user+"/clusters")
|
||||
full_clusters = []
|
||||
for cluster in clusters:
|
||||
single_cluster = {}
|
||||
single_cluster['name'] = cluster
|
||||
[status, info] = self.get_clusterinfo(cluster,user)
|
||||
if info['status'] == 'running':
|
||||
single_cluster['status'] = 'running'
|
||||
else:
|
||||
single_cluster['status'] = 'stopping'
|
||||
full_clusters.append(single_cluster)
|
||||
return [True, clusters]
|
||||
|
||||
def is_cluster(self, clustername, username):
|
||||
[status, clusters] = self.list_clusters(username)
|
||||
if clustername in clusters:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
# get id from name
|
||||
def get_clusterid(self, clustername, username):
|
||||
[status, info] = self.get_clusterinfo(clustername, username)
|
||||
if not status:
|
||||
return -1
|
||||
if 'clusterid' in info:
|
||||
return int(info['clusterid'])
|
||||
logger.error ("internal error: cluster:%s info file has no clusterid " % clustername)
|
||||
return -1
|
||||
|
||||
def get_clusterinfo(self, clustername, username):
|
||||
clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
|
||||
if not os.path.isfile(clusterpath):
|
||||
return [False, "cluster not found"]
|
||||
infofile = open(clusterpath, 'r')
|
||||
info = json.loads(infofile.read())
|
||||
return [True, info]
|
||||
|
||||
# acquire cluster id from etcd
|
||||
def _acquire_id(self):
|
||||
clusterid = self.etcd.getkey("vcluster/nextid")[1]
|
||||
self.etcd.setkey("vcluster/nextid", str(int(clusterid)+1))
|
||||
return int(clusterid)
|
|
@ -0,0 +1,201 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
# first init env
|
||||
import env, tools
|
||||
config = env.getenv("CONFIG")
|
||||
tools.loadenv(config)
|
||||
|
||||
# must import logger after initlogging, ugly
|
||||
from log import initlogging
|
||||
initlogging("docklet-worker")
|
||||
from log import logger
|
||||
|
||||
import xmlrpc.server, sys, time
|
||||
from socketserver import ThreadingMixIn
|
||||
import etcdlib, network, container
|
||||
from nettools import netcontrol
|
||||
import monitor
|
||||
from lvmtool import *
|
||||
|
||||
##################################################################
|
||||
# Worker
|
||||
# Description : Worker starts on a worker node to listen for rpc requests and do the work
|
||||
# Init() :
|
||||
# get master ip
|
||||
# initialize rpc server
|
||||
# register rpc functions
|
||||
# initialize network
|
||||
# initialize lvm group
|
||||
# Start() :
|
||||
# register in etcd
|
||||
# setup GRE tunnel
|
||||
# start rpc service
|
||||
##################################################################
|
||||
|
||||
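# Seen from the worker's side, the Init()/Start() steps listed above reduce to a few etcd
# operations; a compressed, illustrative trace (the IP address is hypothetical):
#
#   setkey("machines/runnodes/10.0.0.2", "waiting")       # __init__: announce this node to the master
#   getkey("machines/runnodes/10.0.0.2") -> "init-new"    # master answered with the init mode
#   getkey("token") == FS_PREFIX/global/token ?           # verify the shared global filesystem
#   setkey("machines/runnodes/10.0.0.2", "work")          # start(): ready, begin serving rpc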
class ThreadXMLRPCServer(ThreadingMixIn,xmlrpc.server.SimpleXMLRPCServer):
|
||||
pass
|
||||
|
||||
class Worker(object):
|
||||
def __init__(self, etcdclient, addr, port):
|
||||
self.addr = addr
|
||||
self.port = port
|
||||
logger.info ("begin initialize on %s" % self.addr)
|
||||
|
||||
self.fspath = env.getenv('FS_PREFIX')
|
||||
self.poolsize = env.getenv('DISKPOOL_SIZE')
|
||||
|
||||
self.etcd = etcdclient
|
||||
self.master = self.etcd.getkey("service/master")[1]
|
||||
self.mode=None
|
||||
|
||||
# register self to master
|
||||
self.etcd.setkey("machines/runnodes/"+self.addr, "waiting")
|
||||
for f in range (0, 3):
|
||||
[status, value] = self.etcd.getkey("machines/runnodes/"+self.addr)
|
||||
if not value.startswith("init"):
|
||||
# master wakes up every 0.1s to check registrations
|
||||
logger.debug("worker % register to master failed %d \
|
||||
time, sleep %fs" % (self.addr, f+1, 0.1))
|
||||
time.sleep(0.1)
|
||||
else:
|
||||
break
|
||||
|
||||
if value.startswith("init"):
|
||||
# check token to check global directory
|
||||
[status, token_1] = self.etcd.getkey("token")
|
||||
tokenfile = open(self.fspath+"/global/token", 'r')
|
||||
token_2 = tokenfile.readline().strip()
|
||||
if token_1 != token_2:
|
||||
logger.error("check token failed, global directory is not a shared filesystem")
|
||||
sys.exit(1)
|
||||
else:
|
||||
logger.error ("worker register in machines/runnodes failed, maybe master not start")
|
||||
sys.exit(1)
|
||||
logger.info ("worker registered in master and checked the token")
|
||||
|
||||
Containers = container.Container(self.addr, etcdclient)
|
||||
if value == 'init-new':
|
||||
logger.info ("init worker with mode:new")
|
||||
self.mode='new'
|
||||
# check that the global directory has no containers recorded for this worker
|
||||
[both, onlylocal, onlyglobal] = Containers.diff_containers()
|
||||
if len(both+onlyglobal) > 0:
|
||||
logger.error ("mode:new will clean containers recorded in global, please check")
|
||||
sys.exit(1)
|
||||
[status, info] = Containers.delete_allcontainers()
|
||||
if not status:
|
||||
logger.error ("delete all containers failed")
|
||||
sys.exit(1)
|
||||
# create new lvm VG at last
|
||||
new_group("docklet-group",self.poolsize,self.fspath+"/local/docklet-storage")
|
||||
#subprocess.call([self.libpath+"/lvmtool.sh", "new", "group", "docklet-group", self.poolsize, self.fspath+"/local/docklet-storage"])
|
||||
elif value == 'init-recovery':
|
||||
logger.info ("init worker with mode:recovery")
|
||||
self.mode='recovery'
|
||||
# recover lvm VG first
|
||||
recover_group("docklet-group",self.fspath+"/local/docklet-storage")
|
||||
#subprocess.call([self.libpath+"/lvmtool.sh", "recover", "group", "docklet-group", self.fspath+"/local/docklet-storage"])
|
||||
[status, msg] = Containers.check_allcontainers()
|
||||
if status:
|
||||
logger.info ("all containers check ok")
|
||||
else:
|
||||
logger.info ("not all containers check ok")
|
||||
#sys.exit(1)
|
||||
else:
|
||||
logger.error ("worker init mode:%s not supported" % value)
|
||||
sys.exit(1)
|
||||
# initialize rpc
|
||||
# xmlrpc.server.SimpleXMLRPCServer(addr) -- addr : (ip-addr, port)
|
||||
# if ip-addr is "", it will listen on the given port on all IPs of this host
|
||||
logger.info ("initialize rpcserver %s:%d" % (self.addr, int(self.port)))
|
||||
# logRequests=False : do not print rpc request logs
|
||||
#self.rpcserver = xmlrpc.server.SimpleXMLRPCServer((self.addr, self.port), logRequests=False)
|
||||
self.rpcserver = ThreadXMLRPCServer((self.addr, int(self.port)), allow_none=True)
|
||||
self.rpcserver.register_introspection_functions()
|
||||
self.rpcserver.register_instance(Containers)
|
||||
# register functions or instances to server for rpc
|
||||
#self.rpcserver.register_function(function_name)
|
||||
|
||||
# initialize the network
|
||||
# if worker and master run on the same node, reuse bridges
|
||||
# don't need to create new bridges
|
||||
if (self.addr == self.master):
|
||||
logger.info ("master also on this node. reuse master's network")
|
||||
else:
|
||||
logger.info ("initialize network")
|
||||
# the worker's 'docklet-br' does not need an IP address.
|
||||
#[status, result] = self.etcd.getkey("network/workbridge")
|
||||
#if not status:
|
||||
# logger.error ("get bridge IP failed, please check whether master set bridge IP for worker")
|
||||
#self.bridgeip = result
|
||||
# create bridges for worker
|
||||
#network.netsetup("init", self.bridgeip)
|
||||
if self.mode == 'new':
|
||||
if netcontrol.bridge_exists('docklet-br'):
|
||||
netcontrol.del_bridge('docklet-br')
|
||||
netcontrol.new_bridge('docklet-br')
|
||||
else:
|
||||
if not netcontrol.bridge_exists('docklet-br'):
|
||||
logger.error("docklet-br not found")
|
||||
sys.exit(1)
|
||||
logger.info ("setup GRE tunnel to master %s" % self.master)
|
||||
#network.netsetup("gre", self.master)
|
||||
if not netcontrol.gre_exists('docklet-br', self.master):
|
||||
netcontrol.setup_gre('docklet-br', self.master)
|
||||
|
||||
# start service of worker
|
||||
def start(self):
|
||||
self.etcd.setkey("machines/runnodes/"+self.addr, "work")
|
||||
# start serving for rpc
|
||||
logger.info ("begins to work")
|
||||
self.rpcserver.serve_forever()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
etcdaddr = env.getenv("ETCD")
|
||||
logger.info ("using ETCD %s" % etcdaddr )
|
||||
|
||||
clustername = env.getenv("CLUSTER_NAME")
|
||||
logger.info ("using CLUSTER_NAME %s" % clustername )
|
||||
|
||||
# get network interface
|
||||
net_dev = env.getenv("NETWORK_DEVICE")
|
||||
logger.info ("using NETWORK_DEVICE %s" % net_dev )
|
||||
|
||||
ipaddr = network.getip(net_dev)
|
||||
if ipaddr is False:
|
||||
logger.error("network device is not correct")
|
||||
sys.exit(1)
|
||||
else:
|
||||
logger.info("using ipaddr %s" % ipaddr)
|
||||
# init etcdlib client
|
||||
try:
|
||||
etcdclient = etcdlib.Client(etcdaddr, prefix = clustername)
|
||||
except Exception:
|
||||
logger.error ("connect etcd failed, maybe etcd address not correct...")
|
||||
sys.exit(1)
|
||||
else:
|
||||
logger.info("etcd connected")
|
||||
|
||||
# init collector to collect monitoring information
|
||||
collector = monitor.Collector(etcdaddr,clustername,ipaddr)
|
||||
collector.start()
|
||||
|
||||
cpu_quota = env.getenv('CONTAINER_CPU')
|
||||
logger.info ("using CONTAINER_CPU %s" % cpu_quota )
|
||||
|
||||
mem_quota = env.getenv('CONTAINER_MEMORY')
|
||||
logger.info ("using CONTAINER_MEMORY %s" % mem_quota )
|
||||
|
||||
worker_port = env.getenv('WORKER_PORT')
|
||||
logger.info ("using WORKER_PORT %s" % worker_port )
|
||||
|
||||
con_collector = monitor.Container_Collector(etcdaddr, clustername,
|
||||
ipaddr, cpu_quota, mem_quota)
|
||||
con_collector.start()
|
||||
logger.info("CPU and Memory usage monitor started")
|
||||
|
||||
logger.info("Starting worker")
|
||||
worker = Worker(etcdclient, addr=ipaddr, port=worker_port)
|
||||
worker.start()
|
|
@ -0,0 +1,17 @@
|
|||
** MUST READ **
|
||||
|
||||
1. Please keep your important data in ~/nfs directory. It will not be
|
||||
destroyed even if the workspace is deleted.
|
||||
|
||||
2. If you delete your workspace, all data in your Home directory will
|
||||
be lost, except those in ~/nfs directory.
|
||||
|
||||
3. You can save your workspace as a private image if you have modified
|
||||
the system and do not want to repeat it in your new workspace or new
|
||||
container.
|
||||
|
||||
4. Your containers are distributed across hosts by default, so they are ideal for simple
|
||||
parallel jobs.
|
||||
|
||||
5. If the Web Terminal does not align correctly, choosing a monospace
|
||||
font may help.
|
|
@ -0,0 +1,42 @@
|
|||
#!/bin/bash
|
||||
|
||||
# more details at https://coreos.com/etcd/docs/latest
|
||||
|
||||
which etcd &>/dev/null || { echo "etcd not installed, please install etcd first" && exit 1; }
|
||||
|
||||
if [ $# -eq 0 ] ; then
|
||||
echo "Usage: `basename $0` ip1 ip2 ip3"
|
||||
echo " ip1 ip2 ip3 are the ip address of node etcd_1 etcd_2 etcd_3"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# remember the IP of this node (etcd_1); the etcd command below uses $etcd_1
etcd_1=$1
index=1
|
||||
while [ $# -gt 0 ] ; do
|
||||
h="etcd_$index"
|
||||
if [ $index -eq 1 ] ; then
|
||||
CLUSTER="$h=http://$1:2380"
|
||||
else
|
||||
CLUSTER="$CLUSTER,$h=http://$1:2380"
|
||||
fi
|
||||
index=$(($index+1))
|
||||
shift
|
||||
done
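# After the loop, CLUSTER lists every member; with three IPs it looks like, for example:
#   etcd_1=http://10.0.0.1:2380,etcd_2=http://10.0.0.2:2380,etcd_3=http://10.0.0.3:2380
# (the IPs above are only an illustration)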
|
||||
|
||||
# -initial-advertise-peer-urls : the peer urls advertised to the other members
|
||||
# -listen-peer-urls : the peer urls this member listens on
|
||||
|
||||
# -listen-client-urls : the client urls this member listens on
|
||||
# -advertise-client-urls : the client urls advertised to clients
|
||||
|
||||
# -initial-cluster-state : new means join a new cluster; existing means join an existing cluster
|
||||
#                        : new does not mean the data dir is cleared
|
||||
|
||||
|
||||
etcd --name etcd_1 \
|
||||
--initial-advertise-peer-urls http://$etcd_1:2380 \
|
||||
--listen-peer-urls http://$etcd_1:2380 \
|
||||
--listen-client-urls http://$etcd_1:2379 \
|
||||
--advertise-client-urls http://$etcd_1:2379 \
|
||||
--initial-cluster-token etcd-cluster \
|
||||
--initial-cluster $CLUSTER \
|
||||
--initial-cluster-state new
|
|
@ -0,0 +1,45 @@
|
|||
#!/bin/sh
|
||||
|
||||
# more details at https://coreos.com/etcd/docs/latest
|
||||
|
||||
#which etcd &>/dev/null || { echo "etcd not installed, please install etcd first" && exit 1; }
|
||||
which etcd >/dev/null || { echo "etcd not installed, please install etcd first" && exit 1; }
|
||||
|
||||
etcd_1=localhost
|
||||
|
||||
if [ $# -gt 0 ] ; then
|
||||
etcd_1=$1
|
||||
fi
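# Usage: run without arguments to start a single-node etcd on localhost,
# or pass the IP of this node as the first argument, e.g.
#   sh this_script.sh 192.168.1.10   (script name and IP here are only illustrative)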
|
||||
|
||||
|
||||
# -initial-advertise-peer-urls : the peer urls advertised to the other members
|
||||
# -listen-peer-urls : the peer urls this member listens on
|
||||
|
||||
# -listen-client-urls : the client urls this member listens on
|
||||
# -advertise-client-urls : the client urls advertised to clients
|
||||
|
||||
# -initial-cluster-state : new means join a new cluster; existing means a new node joins an existing cluster
|
||||
#                        : new does not mean the data is cleared, old data is still alive
|
||||
|
||||
depdir=${0%/*}
|
||||
tempdir=/opt/docklet/local
|
||||
[ ! -d $tempdir/log ] && mkdir -p $tempdir/log
|
||||
[ ! -d $tempdir/run ] && mkdir -p $tempdir/run
|
||||
|
||||
echo "starting etcd on $etcd_1"
|
||||
|
||||
#stdbuf -o0 -e0 $tempdir/etcd --name etcd_1 \
|
||||
etcd --name etcd_1 \
|
||||
--data-dir $tempdir/etcd_data \
|
||||
--initial-advertise-peer-urls http://$etcd_1:2380 \
|
||||
--listen-peer-urls http://$etcd_1:2380 \
|
||||
--listen-client-urls http://$etcd_1:2379 \
|
||||
--advertise-client-urls http://$etcd_1:2379 \
|
||||
--initial-cluster-token etcd_cluster \
|
||||
--initial-cluster etcd_1=http://$etcd_1:2380 \
|
||||
--initial-cluster-state new > $tempdir/log/etcd.log 2>&1 &
|
||||
|
||||
etcdpid=$!
|
||||
echo "etcd start with pid: $etcdpid and log:$tempdir/log/etcd.log"
|
||||
echo $etcdpid > $tempdir/run/etcd.pid
|
||||
|
|
@ -0,0 +1 @@
|
|||
registry = https://registry.npm.taobao.org
|
|
@ -0,0 +1,2 @@
|
|||
[global]
|
||||
index-url=https://pypi.mirrors.ustc.edu.cn/simple/
|
|
@ -0,0 +1,2 @@
|
|||
nameserver 162.105.129.26
|
||||
nameserver 162.105.129.27
|
|
@ -0,0 +1 @@
|
|||
deb https://mirrors.ustc.edu.cn/ubuntu/ xenial main restricted universe multiverse
|
|
@ -0,0 +1,56 @@
|
|||
#!/bin/sh
|
||||
|
||||
#
|
||||
# this script should be placed in basefs/home/jupyter
|
||||
#
|
||||
|
||||
# This next line determines what user the script runs as.
|
||||
DAEMON_USER=root
|
||||
|
||||
# settings for docklet worker
|
||||
DAEMON=/usr/local/bin/jupyterhub-singleuser
|
||||
DAEMON_NAME=jupyter
|
||||
# The process ID of the script when it runs is stored here:
|
||||
PIDFILE=/home/jupyter/$DAEMON_NAME.pid
|
||||
|
||||
RUN_DIR=/root
|
||||
|
||||
#export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
|
||||
|
||||
#export HOME=/home
|
||||
|
||||
#export SHELL=/bin/bash
|
||||
|
||||
#export LOGNAME=root
|
||||
|
||||
# JPY_API_TOKEN is needed by jupyterhub-singleuser
|
||||
# it will send this token in request header to hub-api-url for authorization
|
||||
# but we don't use it for now
|
||||
export JPY_API_TOKEN=not-use
|
||||
|
||||
# user for this notebook
|
||||
USER=root
|
||||
# port to start service
|
||||
PORT=10000
|
||||
# cookie name to get from http request and send to hub_api_url for authorization
|
||||
COOKIE_NAME=docklet-jupyter-cookie
|
||||
# base url of this server. client will use this url for request
|
||||
BASE_URL=/workspace/$USER
|
||||
# prefix for login and logout
|
||||
HUB_PREFIX=/jupyter
|
||||
# URL for authorising cookie
|
||||
HUB_API_URL=http://192.168.192.64:9000/jupyter
|
||||
# IP for listening request
|
||||
IP=0.0.0.0
|
||||
|
||||
[ -f /home/jupyter/jupyter.config ] && . /home/jupyter/jupyter.config
|
||||
|
||||
[ -z $IP ] && IP=$(ip address show dev eth0 | grep -P -o '10\.[0-9]*\.[0-9]*\.[0-9]*(?=/)')
|
||||
|
||||
DAEMON_OPTS="--no-browser --user=$USER --port=$PORT --cookie-name=$COOKIE_NAME --base-url=$BASE_URL --hub-prefix=$HUB_PREFIX --hub-api-url=$HUB_API_URL --ip=$IP --debug"
|
||||
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
###########
|
||||
|
||||
start-stop-daemon --start --oknodo --background -d $RUN_DIR --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
|
|
@ -0,0 +1,106 @@
|
|||
#!/bin/sh
|
||||
|
||||
## WARNING
|
||||
## This script is just for my own convenience. My image is
|
||||
## based on Ubuntu xenial. I did not test it for other distros.
|
||||
## Therefore this script may not work for your basefs image.
|
||||
##
|
||||
|
||||
|
||||
if [ "$1" != "-y" ] ; then
|
||||
echo "This script will update your basefs. backup it first."
|
||||
echo "then run: $0 -y"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# READ docklet.conf
|
||||
|
||||
FS_PREFIX=/opt/docklet
|
||||
|
||||
BASEFS=$FS_PREFIX/local/basefs
|
||||
|
||||
CONF=../conf/docklet.conf
|
||||
|
||||
echo "Reading $CONF"
|
||||
|
||||
if [ -f $CONF ] ; then
|
||||
. $CONF
|
||||
BASEFS=$FS_PREFIX/local/basefs
|
||||
echo "$CONF exit, basefs=$BASEFS"
|
||||
else
|
||||
echo "$CONF not exist, default basefs=$BASEFS"
|
||||
fi
|
||||
|
||||
if [ ! -d $BASEFS ] ; then
|
||||
echo "Checking $BASEFS: not exist, FAIL"
|
||||
exit 1
|
||||
else
|
||||
echo "Checking $BASEFS: exist. "
|
||||
fi
|
||||
|
||||
echo "[*] Copying start_jupyter.sh to $BASEFS/home/jupyter"
|
||||
|
||||
mkdir -p $BASEFS/home/jupyter
|
||||
|
||||
cp start_jupyter.sh $BASEFS/home/jupyter
|
||||
|
||||
echo ""
|
||||
|
||||
echo "[*] Changing $BASEFS/etc/network/interfaces using static"
|
||||
|
||||
echo "Original network/interfaces is"
|
||||
|
||||
cat $BASEFS/etc/network/interfaces | sed 's/^/OLD /'
|
||||
|
||||
sed -i -- 's/dhcp/static/g' $BASEFS/etc/network/interfaces
|
||||
|
||||
# setting resolv.conf, use your own resolv.conf for your image
|
||||
echo "[*] Setting $BASEFS/etc/resolv.conf"
|
||||
cp resolv.conf $BASEFS/etc/resolvconf/resolv.conf.d/base
|
||||
|
||||
echo "[*] Masking console-getty.service"
|
||||
chroot $BASEFS systemctl mask console-getty.service
|
||||
|
||||
echo "[*] Masking system-journald.service"
|
||||
chroot $BASEFS systemctl mask systemd-journald.service
|
||||
|
||||
echo "[*] Masking system-logind.service"
|
||||
chroot $BASEFS systemctl mask systemd-logind.service
|
||||
|
||||
echo "[*] Masking dbus.service"
|
||||
chroot $BASEFS systemctl mask dbus.service
|
||||
|
||||
echo "[*] Disabling apache2 service(if installed)"
|
||||
chroot $BASEFS update-rc.d apache2 disable
|
||||
|
||||
echo "[*] Disabling ondemand service(if installed)"
|
||||
chroot $BASEFS update-rc.d ondemand disable
|
||||
|
||||
echo "[*] Disabling dbus service(if installed)"
|
||||
chroot $BASEFS update-rc.d dbus disable
|
||||
|
||||
echo "[*] Disabling mysql service(if installed)"
|
||||
chroot $BASEFS update-rc.d mysql disable
|
||||
|
||||
echo "[*] Disabling nginx service(if installed)"
|
||||
chroot $BASEFS update-rc.d nginx disable
|
||||
|
||||
echo "[*] Setting worker_processes of nginx to 1(if installed)"
|
||||
[ -f $BASEFS/etc/nginx/nginx.conf ] && sed -i -- 's/worker_processes\ auto/worker_processes\ 1/g' $BASEFS/etc/nginx/nginx.conf
|
||||
|
||||
echo "[*] Deleting default /etc/nginx/sites-enabled/default"
|
||||
rm -f $BASEFS/etc/nginx/sites-enabled/default
|
||||
|
||||
echo "[*] Copying vimrc.local to $BASEFS/etc/vim/"
|
||||
cp vimrc.local $BASEFS/etc/vim
|
||||
|
||||
echo "[*] Copying pip.conf to $BASEFS/root/.pip/"
|
||||
mkdir -p $BASEFS/root/.pip/
|
||||
cp pip.conf $BASEFS/root/.pip
|
||||
|
||||
echo "[*] Copying npmrc to $BASEFS/root/.npmrc"
|
||||
cp npmrc $BASEFS/root/.npmrc
|
||||
|
||||
echo "[*] Copying DOCKLET_NOTES.txt to $BASEFS/root/DOCKLET_NOTES.txt"
|
||||
cp DOCKLET_NOTES.txt $BASEFS/root/
|
|
@ -0,0 +1,15 @@
|
|||
syntax on
|
||||
|
||||
set smarttab expandtab sw=4 ts=4
|
||||
|
||||
set sm ai
|
||||
|
||||
set hlsearch
|
||||
|
||||
set wildchar=<Tab> wildmenu wildmode=full
|
||||
|
||||
set enc=utf-8
|
||||
set fileencoding=utf-8
|
||||
set fileencodings=utf-8,cp936,euc-cn,ascii
|
||||
|
||||
filetype indent on
|
After Width: | Height: | Size: 100 KiB |
|
@ -0,0 +1,54 @@
|
|||
.btn-outline, .btn-outline-default, .badge-outline, .badge-outline-default, .label-outline, .label-outline-default {
|
||||
border: 1px solid #AAB2BD;
|
||||
background-color: transparent;
|
||||
color: #434A54;
|
||||
}
|
||||
.btn-outline-success, .badge-outline-success, .label-outline-success {
|
||||
border: 1px solid #1C84C6;
|
||||
background-color: transparent;
|
||||
color: #1C84C6;
|
||||
}
|
||||
.btn-outline-warning, .badge-outline-warning, .label-outline-warning {
|
||||
border: 1px solid #F8AC59;
|
||||
background-color: transparent;
|
||||
color: #F8AC59;
|
||||
}
|
||||
|
||||
.btn-outline-default:hover,
|
||||
.btn-outline:hover {
|
||||
border: 1px solid #AAB2BD;
|
||||
background-color: #AAB2BD;
|
||||
color: #434A54;
|
||||
}
|
||||
|
||||
.btn-outline-success:hover {
|
||||
border: 1px solid #1C84C6;
|
||||
background-color: #1C84C6;
|
||||
color: #FFFFFF;
|
||||
}
|
||||
|
||||
.btn-outline-warning:hover {
|
||||
border: 1px solid #F8AC59;
|
||||
background-color: #F8AC59;
|
||||
color: #FFFFFF;
|
||||
}
|
||||
.docklet-red-block{
|
||||
background-color: #EB4235;
|
||||
color: #FFFFFF;
|
||||
}
|
||||
|
||||
.docklet-green-block{
|
||||
background-color: #7DB600;
|
||||
color: #FFFFFF;
|
||||
}
|
||||
|
||||
.docklet-yellow-block{
|
||||
background-color: #FABC05;
|
||||
color: #FFFFFF;
|
||||
}
|
||||
|
||||
.docklet-blue-block{
|
||||
background-color: #4185F6;
|
||||
color: #FFFFFF;
|
||||
}
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
/* FLOT CHART */
|
||||
.flot-chart {
|
||||
display: block;
|
||||
height: 200px;
|
||||
}
|
||||
.widget .flot-chart.dashboard-chart {
|
||||
display: block;
|
||||
height: 120px;
|
||||
margin-top: 40px;
|
||||
}
|
||||
.flot-chart.dashboard-chart {
|
||||
display: block;
|
||||
height: 180px;
|
||||
margin-top: 40px;
|
||||
}
|
||||
.flot-chart-content {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
}
|
||||
.flot-chart-pie-content {
|
||||
width: 200px;
|
||||
height: 200px;
|
||||
margin: auto;
|
||||
}
|
||||
.jqstooltip {
|
||||
position: absolute;
|
||||
display: block;
|
||||
left: 0px;
|
||||
top: 0px;
|
||||
visibility: hidden;
|
||||
background: #2b303a;
|
||||
background-color: rgba(43, 48, 58, 0.8);
|
||||
color: white;
|
||||
text-align: left;
|
||||
white-space: nowrap;
|
||||
z-index: 10000;
|
||||
padding: 5px 5px 5px 5px;
|
||||
min-height: 22px;
|
||||
border-radius: 3px;
|
||||
}
|
||||
.jqsfield {
|
||||
color: white;
|
||||
text-align: left;
|
||||
}
|
||||
.h-200 {
|
||||
min-height: 200px;
|
||||
}
|
||||
.legendLabel {
|
||||
padding-left: 5px;
|
||||
}
|
||||
.stat-list li:first-child {
|
||||
margin-top: 0;
|
||||
}
|
||||
.stat-list {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
}
|
||||
.stat-percent {
|
||||
float: right;
|
||||
}
|
||||
.stat-list li {
|
||||
margin-top: 15px;
|
||||
position: relative;
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
/* MODAL */
|
||||
.modal-content {
|
||||
background-clip: padding-box;
|
||||
background-color: #FFFFFF;
|
||||
border: 1px solid rgba(0, 0, 0, 0);
|
||||
border-radius: 4px;
|
||||
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.3);
|
||||
outline: 0 none;
|
||||
position: relative;
|
||||
}
|
||||
.modal-dialog {
|
||||
z-index: 2200;
|
||||
}
|
||||
.modal-body {
|
||||
padding: 20px 30px 30px 30px;
|
||||
}
|
||||
.inmodal .modal-body {
|
||||
background: #f8fafb;
|
||||
}
|
||||
.inmodal .modal-header {
|
||||
padding: 30px 15px;
|
||||
text-align: center;
|
||||
}
|
||||
.animated.modal.fade .modal-dialog {
|
||||
-webkit-transform: none;
|
||||
-ms-transform: none;
|
||||
-o-transform: none;
|
||||
transform: none;
|
||||
}
|
||||
.inmodal .modal-title {
|
||||
font-size: 26px;
|
||||
}
|
||||
.inmodal .modal-icon {
|
||||
font-size: 84px;
|
||||
color: #e2e3e3;
|
||||
}
|
||||
.modal-footer {
|
||||
margin-top: 0;
|
||||
}
|
|
@ -0,0 +1,142 @@
|
|||
/*
|
||||
* Skin: Blue
|
||||
* ----------
|
||||
*/
|
||||
.skin-blue .main-header .navbar {
|
||||
background-color: #3c8dbc;
|
||||
}
|
||||
.skin-blue .main-header .navbar .nav > li > a {
|
||||
color: #ffffff;
|
||||
}
|
||||
.skin-blue .main-header .navbar .nav > li > a:hover,
|
||||
.skin-blue .main-header .navbar .nav > li > a:active,
|
||||
.skin-blue .main-header .navbar .nav > li > a:focus,
|
||||
.skin-blue .main-header .navbar .nav .open > a,
|
||||
.skin-blue .main-header .navbar .nav .open > a:hover,
|
||||
.skin-blue .main-header .navbar .nav .open > a:focus,
|
||||
.skin-blue .main-header .navbar .nav > .active > a {
|
||||
background: rgba(0, 0, 0, 0.1);
|
||||
color: #f6f6f6;
|
||||
}
|
||||
.skin-blue .main-header .navbar .sidebar-toggle {
|
||||
color: #ffffff;
|
||||
}
|
||||
.skin-blue .main-header .navbar .sidebar-toggle:hover {
|
||||
color: #f6f6f6;
|
||||
background: rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
.skin-blue .main-header .navbar .sidebar-toggle {
|
||||
color: #fff;
|
||||
}
|
||||
.skin-blue .main-header .navbar .sidebar-toggle:hover {
|
||||
background-color: #367fa9;
|
||||
}
|
||||
@media (max-width: 767px) {
|
||||
.skin-blue .main-header .navbar .dropdown-menu li.divider {
|
||||
background-color: rgba(255, 255, 255, 0.1);
|
||||
}
|
||||
.skin-blue .main-header .navbar .dropdown-menu li a {
|
||||
color: #fff;
|
||||
}
|
||||
.skin-blue .main-header .navbar .dropdown-menu li a:hover {
|
||||
background: #367fa9;
|
||||
}
|
||||
}
|
||||
.skin-blue .main-header .logo {
|
||||
background-color: #367fa9;
|
||||
color: #ffffff;
|
||||
border-bottom: 0 solid transparent;
|
||||
}
|
||||
.skin-blue .main-header .logo:hover {
|
||||
background-color: #357ca5;
|
||||
}
|
||||
.skin-blue .main-header li.user-header {
|
||||
background-color: #3c8dbc;
|
||||
}
|
||||
.skin-blue .content-header {
|
||||
background: transparent;
|
||||
}
|
||||
.skin-blue .wrapper,
|
||||
.skin-blue .main-sidebar,
|
||||
.skin-blue .left-side {
|
||||
background-color: #222d32;
|
||||
}
|
||||
.skin-blue .user-panel > .info,
|
||||
.skin-blue .user-panel > .info > a {
|
||||
color: #fff;
|
||||
}
|
||||
.skin-blue .sidebar-menu > li.header {
|
||||
color: #4b646f;
|
||||
background: #1a2226;
|
||||
}
|
||||
.skin-blue .sidebar-menu > li > a {
|
||||
border-left: 3px solid transparent;
|
||||
}
|
||||
.skin-blue .sidebar-menu > li:hover > a,
|
||||
.skin-blue .sidebar-menu > li.active > a {
|
||||
color: #ffffff;
|
||||
background: #1e282c;
|
||||
border-left-color: #3c8dbc;
|
||||
}
|
||||
.skin-blue .sidebar-menu > li > .treeview-menu {
|
||||
margin: 0 1px;
|
||||
background: #2c3b41;
|
||||
}
|
||||
.skin-blue .sidebar a {
|
||||
color: #b8c7ce;
|
||||
}
|
||||
.skin-blue .sidebar a:hover {
|
||||
text-decoration: none;
|
||||
}
|
||||
.skin-blue .treeview-menu > li > a {
|
||||
color: #8aa4af;
|
||||
}
|
||||
.skin-blue .treeview-menu > li.active > a,
|
||||
.skin-blue .treeview-menu > li > a:hover {
|
||||
color: #ffffff;
|
||||
}
|
||||
.skin-blue .sidebar-form {
|
||||
border-radius: 3px;
|
||||
border: 1px solid #374850;
|
||||
margin: 10px 10px;
|
||||
}
|
||||
.skin-blue .sidebar-form input[type="text"],
|
||||
.skin-blue .sidebar-form .btn {
|
||||
box-shadow: none;
|
||||
background-color: #374850;
|
||||
border: 1px solid transparent;
|
||||
height: 35px;
|
||||
-webkit-transition: all 0.3s ease-in-out;
|
||||
-o-transition: all 0.3s ease-in-out;
|
||||
transition: all 0.3s ease-in-out;
|
||||
}
|
||||
.skin-blue .sidebar-form input[type="text"] {
|
||||
color: #666;
|
||||
border-top-left-radius: 2px;
|
||||
border-top-right-radius: 0;
|
||||
border-bottom-right-radius: 0;
|
||||
border-bottom-left-radius: 2px;
|
||||
}
|
||||
.skin-blue .sidebar-form input[type="text"]:focus,
|
||||
.skin-blue .sidebar-form input[type="text"]:focus + .input-group-btn .btn {
|
||||
background-color: #fff;
|
||||
color: #666;
|
||||
}
|
||||
.skin-blue .sidebar-form input[type="text"]:focus + .input-group-btn .btn {
|
||||
border-left-color: #fff;
|
||||
}
|
||||
.skin-blue .sidebar-form .btn {
|
||||
color: #999;
|
||||
border-top-left-radius: 0;
|
||||
border-top-right-radius: 2px;
|
||||
border-bottom-right-radius: 2px;
|
||||
border-bottom-left-radius: 0;
|
||||
}
|
||||
.skin-blue.layout-top-nav .main-header > .logo {
|
||||
background-color: #3c8dbc;
|
||||
color: #ffffff;
|
||||
border-bottom: 0 solid transparent;
|
||||
}
|
||||
.skin-blue.layout-top-nav .main-header > .logo:hover {
|
||||
background-color: #3b8ab8;
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
.skin-blue .main-header .navbar{background-color:#3c8dbc}.skin-blue .main-header .navbar .nav>li>a{color:#fff}.skin-blue .main-header .navbar .nav>li>a:hover,.skin-blue .main-header .navbar .nav>li>a:active,.skin-blue .main-header .navbar .nav>li>a:focus,.skin-blue .main-header .navbar .nav .open>a,.skin-blue .main-header .navbar .nav .open>a:hover,.skin-blue .main-header .navbar .nav .open>a:focus,.skin-blue .main-header .navbar .nav>.active>a{background:rgba(0,0,0,0.1);color:#f6f6f6}.skin-blue .main-header .navbar .sidebar-toggle{color:#fff}.skin-blue .main-header .navbar .sidebar-toggle:hover{color:#f6f6f6;background:rgba(0,0,0,0.1)}.skin-blue .main-header .navbar .sidebar-toggle{color:#fff}.skin-blue .main-header .navbar .sidebar-toggle:hover{background-color:#367fa9}@media (max-width:767px){.skin-blue .main-header .navbar .dropdown-menu li.divider{background-color:rgba(255,255,255,0.1)}.skin-blue .main-header .navbar .dropdown-menu li a{color:#fff}.skin-blue .main-header .navbar .dropdown-menu li a:hover{background:#367fa9}}.skin-blue .main-header .logo{background-color:#367fa9;color:#fff;border-bottom:0 solid transparent}.skin-blue .main-header .logo:hover{background-color:#357ca5}.skin-blue .main-header li.user-header{background-color:#3c8dbc}.skin-blue .content-header{background:transparent}.skin-blue .wrapper,.skin-blue .main-sidebar,.skin-blue .left-side{background-color:#222d32}.skin-blue .user-panel>.info,.skin-blue .user-panel>.info>a{color:#fff}.skin-blue .sidebar-menu>li.header{color:#4b646f;background:#1a2226}.skin-blue .sidebar-menu>li>a{border-left:3px solid transparent}.skin-blue .sidebar-menu>li:hover>a,.skin-blue .sidebar-menu>li.active>a{color:#fff;background:#1e282c;border-left-color:#3c8dbc}.skin-blue .sidebar-menu>li>.treeview-menu{margin:0 1px;background:#2c3b41}.skin-blue .sidebar a{color:#b8c7ce}.skin-blue .sidebar a:hover{text-decoration:none}.skin-blue .treeview-menu>li>a{color:#8aa4af}.skin-blue .treeview-menu>li.active>a,.skin-blue .treeview-menu>li>a:hover{color:#fff}.skin-blue .sidebar-form{border-radius:3px;border:1px solid #374850;margin:10px 10px}.skin-blue .sidebar-form input[type="text"],.skin-blue .sidebar-form .btn{box-shadow:none;background-color:#374850;border:1px solid transparent;height:35px;-webkit-transition:all .3s ease-in-out;-o-transition:all .3s ease-in-out;transition:all .3s ease-in-out}.skin-blue .sidebar-form input[type="text"]{color:#666;border-top-left-radius:2px;border-top-right-radius:0;border-bottom-right-radius:0;border-bottom-left-radius:2px}.skin-blue .sidebar-form input[type="text"]:focus,.skin-blue .sidebar-form input[type="text"]:focus+.input-group-btn .btn{background-color:#fff;color:#666}.skin-blue .sidebar-form input[type="text"]:focus+.input-group-btn .btn{border-left-color:#fff}.skin-blue .sidebar-form .btn{color:#999;border-top-left-radius:0;border-top-right-radius:2px;border-bottom-right-radius:2px;border-bottom-left-radius:0}.skin-blue.layout-top-nav .main-header>.logo{background-color:#3c8dbc;color:#fff;border-bottom:0 solid transparent}.skin-blue.layout-top-nav .main-header>.logo:hover{background-color:#3b8ab8}
|
|
@ -0,0 +1,758 @@
|
|||
/*! AdminLTE app.js
|
||||
* ================
|
||||
* Main JS application file for AdminLTE v2. This file
|
||||
* should be included in all pages. It controls some layout
|
||||
* options and implements exclusive AdminLTE plugins.
|
||||
*
|
||||
* @Author Almsaeed Studio
|
||||
* @Support <http://www.almsaeedstudio.com>
|
||||
* @Email <support@almsaeedstudio.com>
|
||||
* @version 2.3.2
|
||||
* @license MIT <http://opensource.org/licenses/MIT>
|
||||
*/
|
||||
|
||||
//Make sure jQuery has been loaded before app.js
|
||||
if (typeof jQuery === "undefined") {
|
||||
throw new Error("AdminLTE requires jQuery");
|
||||
}
|
||||
|
||||
/* AdminLTE
|
||||
*
|
||||
* @type Object
|
||||
* @description $.AdminLTE is the main object for the template's app.
|
||||
* It's used for implementing functions and options related
|
||||
* to the template. Keeping everything wrapped in an object
|
||||
* prevents conflict with other plugins and is a better
|
||||
* way to organize our code.
|
||||
*/
|
||||
$.AdminLTE = {};
|
||||
|
||||
/* --------------------
|
||||
* - AdminLTE Options -
|
||||
* --------------------
|
||||
* Modify these options to suit your implementation
|
||||
*/
|
||||
$.AdminLTE.options = {
|
||||
//Add slimscroll to navbar menus
|
||||
//This requires you to load the slimscroll plugin
|
||||
//in every page before app.js
|
||||
navbarMenuSlimscroll: true,
|
||||
navbarMenuSlimscrollWidth: "3px", //The width of the scroll bar
|
||||
navbarMenuHeight: "200px", //The height of the inner menu
|
||||
//General animation speed for JS animated elements such as box collapse/expand and
|
||||
//sidebar treeview slide up/down. This options accepts an integer as milliseconds,
|
||||
//'fast', 'normal', or 'slow'
|
||||
animationSpeed: 500,
|
||||
//Sidebar push menu toggle button selector
|
||||
sidebarToggleSelector: "[data-toggle='offcanvas']",
|
||||
//Activate sidebar push menu
|
||||
sidebarPushMenu: true,
|
||||
//Activate sidebar slimscroll if the fixed layout is set (requires SlimScroll Plugin)
|
||||
sidebarSlimScroll: true,
|
||||
//Enable sidebar expand on hover effect for sidebar mini
|
||||
//This option is forced to true if both the fixed layout and sidebar mini
|
||||
//are used together
|
||||
sidebarExpandOnHover: false,
|
||||
//BoxRefresh Plugin
|
||||
enableBoxRefresh: true,
|
||||
//Bootstrap.js tooltip
|
||||
enableBSToppltip: true,
|
||||
BSTooltipSelector: "[data-toggle='tooltip']",
|
||||
//Enable Fast Click. Fastclick.js creates a more
|
||||
//native touch experience with touch devices. If you
|
||||
//choose to enable the plugin, make sure you load the script
|
||||
//before AdminLTE's app.js
|
||||
enableFastclick: true,
|
||||
//Control Sidebar Options
|
||||
enableControlSidebar: true,
|
||||
controlSidebarOptions: {
|
||||
//Which button should trigger the open/close event
|
||||
toggleBtnSelector: "[data-toggle='control-sidebar']",
|
||||
//The sidebar selector
|
||||
selector: ".control-sidebar",
|
||||
//Enable slide over content
|
||||
slide: true
|
||||
},
|
||||
//Box Widget Plugin. Enable this plugin
|
||||
//to allow boxes to be collapsed and/or removed
|
||||
enableBoxWidget: true,
|
||||
//Box Widget plugin options
|
||||
boxWidgetOptions: {
|
||||
boxWidgetIcons: {
|
||||
//Collapse icon
|
||||
collapse: 'fa-minus',
|
||||
//Open icon
|
||||
open: 'fa-plus',
|
||||
//Remove icon
|
||||
remove: 'fa-times'
|
||||
},
|
||||
boxWidgetSelectors: {
|
||||
//Remove button selector
|
||||
remove: '[data-widget="remove"]',
|
||||
//Collapse button selector
|
||||
collapse: '[data-widget="collapse"]'
|
||||
}
|
||||
},
|
||||
//Direct Chat plugin options
|
||||
directChat: {
|
||||
//Enable direct chat by default
|
||||
enable: true,
|
||||
//The button to open and close the chat contacts pane
|
||||
contactToggleSelector: '[data-widget="chat-pane-toggle"]'
|
||||
},
|
||||
//Define the set of colors to use globally around the website
|
||||
colors: {
|
||||
lightBlue: "#3c8dbc",
|
||||
red: "#f56954",
|
||||
green: "#00a65a",
|
||||
aqua: "#00c0ef",
|
||||
yellow: "#f39c12",
|
||||
blue: "#0073b7",
|
||||
navy: "#001F3F",
|
||||
teal: "#39CCCC",
|
||||
olive: "#3D9970",
|
||||
lime: "#01FF70",
|
||||
orange: "#FF851B",
|
||||
fuchsia: "#F012BE",
|
||||
purple: "#8E24AA",
|
||||
maroon: "#D81B60",
|
||||
black: "#222222",
|
||||
gray: "#d2d6de"
|
||||
},
|
||||
//The standard screen sizes that bootstrap uses.
|
||||
//If you change these in the variables.less file, change
|
||||
//them here too.
|
||||
screenSizes: {
|
||||
xs: 480,
|
||||
sm: 768,
|
||||
md: 992,
|
||||
lg: 1200
|
||||
}
|
||||
};
|
||||
|
||||
/* ------------------
|
||||
* - Implementation -
|
||||
* ------------------
|
||||
* The next block of code implements AdminLTE's
|
||||
* functions and plugins as specified by the
|
||||
* options above.
|
||||
*/
|
||||
$(function () {
|
||||
"use strict";
|
||||
|
||||
//Fix for IE page transitions
|
||||
$("body").removeClass("hold-transition");
|
||||
|
||||
//Extend options if external options exist
|
||||
if (typeof AdminLTEOptions !== "undefined") {
|
||||
$.extend(true,
|
||||
$.AdminLTE.options,
|
||||
AdminLTEOptions);
|
||||
}
|
||||
|
||||
//Easy access to options
|
||||
var o = $.AdminLTE.options;
|
||||
|
||||
//Set up the object
|
||||
_init();
|
||||
|
||||
//Activate the layout maker
|
||||
$.AdminLTE.layout.activate();
|
||||
|
||||
//Enable sidebar tree view controls
|
||||
$.AdminLTE.tree('.sidebar');
|
||||
|
||||
//Enable control sidebar
|
||||
if (o.enableControlSidebar) {
|
||||
$.AdminLTE.controlSidebar.activate();
|
||||
}
|
||||
|
||||
//Add slimscroll to navbar dropdown
|
||||
if (o.navbarMenuSlimscroll && typeof $.fn.slimscroll != 'undefined') {
|
||||
$(".navbar .menu").slimscroll({
|
||||
height: o.navbarMenuHeight,
|
||||
alwaysVisible: false,
|
||||
size: o.navbarMenuSlimscrollWidth
|
||||
}).css("width", "100%");
|
||||
}
|
||||
|
||||
//Activate sidebar push menu
|
||||
if (o.sidebarPushMenu) {
|
||||
$.AdminLTE.pushMenu.activate(o.sidebarToggleSelector);
|
||||
}
|
||||
|
||||
//Activate Bootstrap tooltip
|
||||
if (o.enableBSToppltip) {
|
||||
$('body').tooltip({
|
||||
selector: o.BSTooltipSelector
|
||||
});
|
||||
}
|
||||
|
||||
//Activate box widget
|
||||
if (o.enableBoxWidget) {
|
||||
$.AdminLTE.boxWidget.activate();
|
||||
}
|
||||
|
||||
//Activate fast click
|
||||
if (o.enableFastclick && typeof FastClick != 'undefined') {
|
||||
FastClick.attach(document.body);
|
||||
}
|
||||
|
||||
//Activate direct chat widget
|
||||
if (o.directChat.enable) {
|
||||
$(document).on('click', o.directChat.contactToggleSelector, function () {
|
||||
var box = $(this).parents('.direct-chat').first();
|
||||
box.toggleClass('direct-chat-contacts-open');
|
||||
});
|
||||
}
|
||||
|
||||
/*
|
||||
* INITIALIZE BUTTON TOGGLE
|
||||
* ------------------------
|
||||
*/
|
||||
$('.btn-group[data-toggle="btn-toggle"]').each(function () {
|
||||
var group = $(this);
|
||||
$(this).find(".btn").on('click', function (e) {
|
||||
group.find(".btn.active").removeClass("active");
|
||||
$(this).addClass("active");
|
||||
e.preventDefault();
|
||||
});
|
||||
|
||||
});
|
||||
});
|
||||
|
||||
/* ----------------------------------
|
||||
* - Initialize the AdminLTE Object -
|
||||
* ----------------------------------
|
||||
* All AdminLTE functions are implemented below.
|
||||
*/
|
||||
function _init() {
|
||||
'use strict';
|
||||
/* Layout
|
||||
* ======
|
||||
* Fixes the layout height in case min-height fails.
|
||||
*
|
||||
* @type Object
|
||||
* @usage $.AdminLTE.layout.activate()
|
||||
* $.AdminLTE.layout.fix()
|
||||
* $.AdminLTE.layout.fixSidebar()
|
||||
*/
|
||||
$.AdminLTE.layout = {
|
||||
activate: function () {
|
||||
var _this = this;
|
||||
_this.fix();
|
||||
_this.fixSidebar();
|
||||
$(window, ".wrapper").resize(function () {
|
||||
_this.fix();
|
||||
_this.fixSidebar();
|
||||
});
|
||||
},
|
||||
fix: function () {
|
||||
//Get window height and the wrapper height
|
||||
var neg = $('.main-header').outerHeight() + $('.main-footer').outerHeight();
|
||||
var window_height = $(window).height();
|
||||
var sidebar_height = $(".sidebar").height();
|
||||
//Set the min-height of the content and sidebar based on the
|
||||
//the height of the document.
|
||||
if ($("body").hasClass("fixed")) {
|
||||
$(".content-wrapper, .right-side").css('min-height', window_height - $('.main-footer').outerHeight());
|
||||
} else {
|
||||
var postSetWidth;
|
||||
if (window_height >= sidebar_height) {
|
||||
$(".content-wrapper, .right-side").css('min-height', window_height - neg);
|
||||
postSetWidth = window_height - neg;
|
||||
} else {
|
||||
$(".content-wrapper, .right-side").css('min-height', sidebar_height);
|
||||
postSetWidth = sidebar_height;
|
||||
}
|
||||
|
||||
//Fix for the control sidebar height
|
||||
var controlSidebar = $($.AdminLTE.options.controlSidebarOptions.selector);
|
||||
if (typeof controlSidebar !== "undefined") {
|
||||
if (controlSidebar.height() > postSetWidth)
|
||||
$(".content-wrapper, .right-side").css('min-height', controlSidebar.height());
|
||||
}
|
||||
|
||||
}
|
||||
},
|
||||
fixSidebar: function () {
|
||||
//Make sure the body tag has the .fixed class
|
||||
if (!$("body").hasClass("fixed")) {
|
||||
if (typeof $.fn.slimScroll != 'undefined') {
|
||||
$(".sidebar").slimScroll({destroy: true}).height("auto");
|
||||
}
|
||||
return;
|
||||
} else if (typeof $.fn.slimScroll == 'undefined' && window.console) {
|
||||
window.console.error("Error: the fixed layout requires the slimscroll plugin!");
|
||||
}
|
||||
//Enable slimscroll for fixed layout
|
||||
if ($.AdminLTE.options.sidebarSlimScroll) {
|
||||
if (typeof $.fn.slimScroll != 'undefined') {
|
||||
//Destroy if it exists
|
||||
$(".sidebar").slimScroll({destroy: true}).height("auto");
|
||||
//Add slimscroll
|
||||
$(".sidebar").slimscroll({
|
||||
height: ($(window).height() - $(".main-header").height()) + "px",
|
||||
color: "rgba(0,0,0,0.2)",
|
||||
size: "3px"
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/* PushMenu()
|
||||
* ==========
|
||||
* Adds the push menu functionality to the sidebar.
|
||||
*
|
||||
* @type Function
|
||||
* @usage: $.AdminLTE.pushMenu("[data-toggle='offcanvas']")
|
||||
*/
|
||||
$.AdminLTE.pushMenu = {
|
||||
activate: function (toggleBtn) {
|
||||
//Get the screen sizes
|
||||
var screenSizes = $.AdminLTE.options.screenSizes;
|
||||
|
||||
//Enable sidebar toggle
|
||||
$(document).on('click', toggleBtn, function (e) {
|
||||
e.preventDefault();
|
||||
|
||||
//Enable sidebar push menu
|
||||
if ($(window).width() > (screenSizes.sm - 1)) {
|
||||
if ($("body").hasClass('sidebar-collapse')) {
|
||||
$("body").removeClass('sidebar-collapse').trigger('expanded.pushMenu');
|
||||
} else {
|
||||
$("body").addClass('sidebar-collapse').trigger('collapsed.pushMenu');
|
||||
}
|
||||
}
|
||||
//Handle sidebar push menu for small screens
|
||||
else {
|
||||
if ($("body").hasClass('sidebar-open')) {
|
||||
$("body").removeClass('sidebar-open').removeClass('sidebar-collapse').trigger('collapsed.pushMenu');
|
||||
} else {
|
||||
$("body").addClass('sidebar-open').trigger('expanded.pushMenu');
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
$(".content-wrapper").click(function () {
|
||||
//Enable hide menu when clicking on the content-wrapper on small screens
|
||||
if ($(window).width() <= (screenSizes.sm - 1) && $("body").hasClass("sidebar-open")) {
|
||||
$("body").removeClass('sidebar-open');
|
||||
}
|
||||
});
|
||||
|
||||
//Enable expand on hover for sidebar mini
|
||||
if ($.AdminLTE.options.sidebarExpandOnHover
|
||||
|| ($('body').hasClass('fixed')
|
||||
&& $('body').hasClass('sidebar-mini'))) {
|
||||
this.expandOnHover();
|
||||
}
|
||||
},
|
||||
expandOnHover: function () {
|
||||
var _this = this;
|
||||
var screenWidth = $.AdminLTE.options.screenSizes.sm - 1;
|
||||
//Expand sidebar on hover
|
||||
$('.main-sidebar').hover(function () {
|
||||
if ($('body').hasClass('sidebar-mini')
|
||||
&& $("body").hasClass('sidebar-collapse')
|
||||
&& $(window).width() > screenWidth) {
|
||||
_this.expand();
|
||||
}
|
||||
}, function () {
|
||||
if ($('body').hasClass('sidebar-mini')
|
||||
&& $('body').hasClass('sidebar-expanded-on-hover')
|
||||
&& $(window).width() > screenWidth) {
|
||||
_this.collapse();
|
||||
}
|
||||
});
|
||||
},
|
||||
expand: function () {
|
||||
$("body").removeClass('sidebar-collapse').addClass('sidebar-expanded-on-hover');
|
||||
},
|
||||
collapse: function () {
|
||||
if ($('body').hasClass('sidebar-expanded-on-hover')) {
|
||||
$('body').removeClass('sidebar-expanded-on-hover').addClass('sidebar-collapse');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
/* Tree()
|
||||
* ======
|
||||
* Converts the sidebar into a multilevel
|
||||
* tree view menu.
|
||||
*
|
||||
* @type Function
|
||||
* @Usage: $.AdminLTE.tree('.sidebar')
|
||||
*/
|
||||
$.AdminLTE.tree = function (menu) {
|
||||
var _this = this;
|
||||
var animationSpeed = $.AdminLTE.options.animationSpeed;
|
||||
$(document).on('click', menu + ' li a', function (e) {
|
||||
//Get the clicked link and the next element
|
||||
var $this = $(this);
|
||||
var checkElement = $this.next();
|
||||
|
||||
//Check if the next element is a menu and is visible
|
||||
if ((checkElement.is('.treeview-menu')) && (checkElement.is(':visible')) && (!$('body').hasClass('sidebar-collapse'))) {
|
||||
//Close the menu
|
||||
checkElement.slideUp(animationSpeed, function () {
|
||||
checkElement.removeClass('menu-open');
|
||||
//Fix the layout in case the sidebar stretches over the height of the window
|
||||
//_this.layout.fix();
|
||||
});
|
||||
checkElement.parent("li").removeClass("active");
|
||||
}
|
||||
//If the menu is not visible
|
||||
else if ((checkElement.is('.treeview-menu')) && (!checkElement.is(':visible'))) {
|
||||
//Get the parent menu
|
||||
var parent = $this.parents('ul').first();
|
||||
//Close all open menus within the parent
|
||||
var ul = parent.find('ul:visible').slideUp(animationSpeed);
|
||||
//Remove the menu-open class from the parent
|
||||
ul.removeClass('menu-open');
|
||||
//Get the parent li
|
||||
var parent_li = $this.parent("li");
|
||||
|
||||
//Open the target menu and add the menu-open class
|
||||
checkElement.slideDown(animationSpeed, function () {
|
||||
//Add the class active to the parent li
|
||||
checkElement.addClass('menu-open');
|
||||
parent.find('li.active').removeClass('active');
|
||||
parent_li.addClass('active');
|
||||
//Fix the layout in case the sidebar stretches over the height of the window
|
||||
_this.layout.fix();
|
||||
});
|
||||
}
|
||||
//if this isn't a link, prevent the page from being redirected
|
||||
if (checkElement.is('.treeview-menu')) {
|
||||
e.preventDefault();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
/* ControlSidebar
|
||||
* ==============
|
||||
* Adds functionality to the right sidebar
|
||||
*
|
||||
* @type Object
|
||||
* @usage $.AdminLTE.controlSidebar.activate(options)
|
||||
*/
|
||||
$.AdminLTE.controlSidebar = {
|
||||
//instantiate the object
|
||||
activate: function () {
|
||||
//Get the object
|
||||
var _this = this;
|
||||
//Update options
|
||||
var o = $.AdminLTE.options.controlSidebarOptions;
|
||||
//Get the sidebar
|
||||
var sidebar = $(o.selector);
|
||||
//The toggle button
|
||||
var btn = $(o.toggleBtnSelector);
|
||||
|
||||
//Listen to the click event
|
||||
btn.on('click', function (e) {
|
||||
e.preventDefault();
|
||||
//If the sidebar is not open
|
||||
if (!sidebar.hasClass('control-sidebar-open')
|
||||
&& !$('body').hasClass('control-sidebar-open')) {
|
||||
//Open the sidebar
|
||||
_this.open(sidebar, o.slide);
|
||||
} else {
|
||||
_this.close(sidebar, o.slide);
|
||||
}
|
||||
});
|
||||
|
||||
//If the body has a boxed layout, fix the sidebar bg position
|
||||
var bg = $(".control-sidebar-bg");
|
||||
_this._fix(bg);
|
||||
|
||||
//If the body has a fixed layout, make the control sidebar fixed
|
||||
if ($('body').hasClass('fixed')) {
|
||||
_this._fixForFixed(sidebar);
|
||||
} else {
|
||||
//If the content height is less than the sidebar's height, force max height
|
||||
if ($('.content-wrapper, .right-side').height() < sidebar.height()) {
|
||||
_this._fixForContent(sidebar);
|
||||
}
|
||||
}
|
||||
},
|
||||
//Open the control sidebar
|
||||
open: function (sidebar, slide) {
|
||||
//Slide over content
|
||||
if (slide) {
|
||||
sidebar.addClass('control-sidebar-open');
|
||||
} else {
|
||||
//Push the content by adding the open class to the body instead
|
||||
//of the sidebar itself
|
||||
$('body').addClass('control-sidebar-open');
|
||||
}
|
||||
},
|
||||
//Close the control sidebar
|
||||
close: function (sidebar, slide) {
|
||||
if (slide) {
|
||||
sidebar.removeClass('control-sidebar-open');
|
||||
} else {
|
||||
$('body').removeClass('control-sidebar-open');
|
||||
}
|
||||
},
|
||||
_fix: function (sidebar) {
|
||||
var _this = this;
|
||||
if ($("body").hasClass('layout-boxed')) {
|
||||
sidebar.css('position', 'absolute');
|
||||
sidebar.height($(".wrapper").height());
|
||||
$(window).resize(function () {
|
||||
_this._fix(sidebar);
|
||||
});
|
||||
} else {
|
||||
sidebar.css({
|
||||
'position': 'fixed',
|
||||
'height': 'auto'
|
||||
});
|
||||
}
|
||||
},
|
||||
_fixForFixed: function (sidebar) {
|
||||
sidebar.css({
|
||||
'position': 'fixed',
|
||||
'max-height': '100%',
|
||||
'overflow': 'auto',
|
||||
'padding-bottom': '50px'
|
||||
});
|
||||
},
|
||||
_fixForContent: function (sidebar) {
|
||||
$(".content-wrapper, .right-side").css('min-height', sidebar.height());
|
||||
}
|
||||
};
|
||||
|
||||
/* BoxWidget
|
||||
* =========
|
||||
* BoxWidget is a plugin to handle collapsing and
|
||||
* removing boxes from the screen.
|
||||
*
|
||||
* @type Object
|
||||
* @usage $.AdminLTE.boxWidget.activate()
|
||||
* Set all your options in the main $.AdminLTE.options object
|
||||
*/
|
||||
$.AdminLTE.boxWidget = {
|
||||
selectors: $.AdminLTE.options.boxWidgetOptions.boxWidgetSelectors,
|
||||
icons: $.AdminLTE.options.boxWidgetOptions.boxWidgetIcons,
|
||||
animationSpeed: $.AdminLTE.options.animationSpeed,
|
||||
activate: function (_box) {
|
||||
var _this = this;
|
||||
if (!_box) {
|
||||
_box = document; // activate all boxes per default
|
||||
}
|
||||
//Listen for collapse event triggers
|
||||
$(_box).on('click', _this.selectors.collapse, function (e) {
|
||||
e.preventDefault();
|
||||
_this.collapse($(this));
|
||||
});
|
||||
|
||||
//Listen for remove event triggers
|
||||
$(_box).on('click', _this.selectors.remove, function (e) {
|
||||
e.preventDefault();
|
||||
_this.remove($(this));
|
||||
});
|
||||
},
|
||||
collapse: function (element) {
|
||||
var _this = this;
|
||||
//Find the box parent
|
||||
var box = element.parents(".box").first();
|
||||
//Find the body and the footer
|
||||
var box_content = box.find("> .box-body, > .box-footer, > form >.box-body, > form > .box-footer");
|
||||
if (!box.hasClass("collapsed-box")) {
|
||||
//Convert minus into plus
|
||||
element.children(":first")
|
||||
.removeClass(_this.icons.collapse)
|
||||
.addClass(_this.icons.open);
|
||||
//Hide the content
|
||||
box_content.slideUp(_this.animationSpeed, function () {
|
||||
box.addClass("collapsed-box");
|
||||
});
|
||||
} else {
|
||||
//Convert plus into minus
|
||||
element.children(":first")
|
||||
.removeClass(_this.icons.open)
|
||||
.addClass(_this.icons.collapse);
|
||||
//Show the content
|
||||
box_content.slideDown(_this.animationSpeed, function () {
|
||||
box.removeClass("collapsed-box");
|
||||
});
|
||||
}
|
||||
},
|
||||
remove: function (element) {
|
||||
//Find the box parent
|
||||
var box = element.parents(".box").first();
|
||||
box.slideUp(this.animationSpeed);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/* ------------------
|
||||
* - Custom Plugins -
|
||||
* ------------------
|
||||
* All custom plugins are defined below.
|
||||
*/
|
||||
|
||||
/*
|
||||
* BOX REFRESH BUTTON
|
||||
* ------------------
|
||||
* This is a custom plugin to use with the component BOX. It allows you to add
|
||||
* a refresh button to the box. It converts the box's state to a loading state.
|
||||
*
|
||||
* @type plugin
|
||||
* @usage $("#box-widget").boxRefresh( options );
|
||||
*/
|
||||
(function ($) {
|
||||
|
||||
"use strict";
|
||||
|
||||
$.fn.boxRefresh = function (options) {
|
||||
|
||||
// Render options
|
||||
var settings = $.extend({
|
||||
//Refresh button selector
|
||||
trigger: ".refresh-btn",
|
||||
//File source to be loaded (e.g: ajax/src.php)
|
||||
source: "",
|
||||
//Callbacks
|
||||
onLoadStart: function (box) {
|
||||
return box;
|
||||
}, //Right after the button has been clicked
|
||||
onLoadDone: function (box) {
|
||||
return box;
|
||||
} //When the source has been loaded
|
||||
|
||||
}, options);
|
||||
|
||||
//The overlay
|
||||
var overlay = $('<div class="overlay"><div class="fa fa-refresh fa-spin"></div></div>');
|
||||
|
||||
return this.each(function () {
|
||||
//if a source is specified
|
||||
if (settings.source === "") {
|
||||
if (window.console) {
|
||||
window.console.log("Please specify a source first - boxRefresh()");
|
||||
}
|
||||
return;
|
||||
}
|
||||
//the box
|
||||
var box = $(this);
|
||||
//the button
|
||||
var rBtn = box.find(settings.trigger).first();
|
||||
|
||||
//On trigger click
|
||||
rBtn.on('click', function (e) {
|
||||
e.preventDefault();
|
||||
//Add loading overlay
|
||||
start(box);
|
||||
|
||||
//Perform ajax call
|
||||
box.find(".box-body").load(settings.source, function () {
|
||||
done(box);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
function start(box) {
|
||||
//Add overlay and loading img
|
||||
box.append(overlay);
|
||||
|
||||
settings.onLoadStart.call(box);
|
||||
}
|
||||
|
||||
function done(box) {
|
||||
//Remove overlay and loading img
|
||||
box.find(overlay).remove();
|
||||
|
||||
settings.onLoadDone.call(box);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
})(jQuery);
|
||||
|
||||
/*
|
||||
* EXPLICIT BOX CONTROLS
|
||||
* -----------------------
|
||||
* This is a custom plugin to use with the component BOX. It allows you to activate
|
||||
* a box inserted in the DOM after the app.js was loaded, toggle and remove box.
|
||||
*
|
||||
* @type plugin
|
||||
* @usage $("#box-widget").activateBox();
|
||||
* @usage $("#box-widget").toggleBox();
|
||||
* @usage $("#box-widget").removeBox();
|
||||
*/
|
||||
(function ($) {
|
||||
|
||||
'use strict';
|
||||
|
||||
$.fn.activateBox = function () {
|
||||
$.AdminLTE.boxWidget.activate(this);
|
||||
};
|
||||
|
||||
$.fn.toggleBox = function(){
|
||||
var button = $($.AdminLTE.boxWidget.selectors.collapse, this);
|
||||
$.AdminLTE.boxWidget.collapse(button);
|
||||
};
|
||||
|
||||
$.fn.removeBox = function(){
|
||||
var button = $($.AdminLTE.boxWidget.selectors.remove, this);
|
||||
$.AdminLTE.boxWidget.remove(button);
|
||||
};
|
||||
|
||||
})(jQuery);
|
||||
|
||||
/*
|
||||
* TODO LIST CUSTOM PLUGIN
|
||||
* -----------------------
|
||||
* This plugin depends on iCheck plugin for checkbox and radio inputs
|
||||
*
|
||||
* @type plugin
|
||||
* @usage $("#todo-widget").todolist( options );
|
||||
*/
|
||||
(function ($) {
|
||||
|
||||
'use strict';
|
||||
|
||||
$.fn.todolist = function (options) {
|
||||
// Render options
|
||||
var settings = $.extend({
|
||||
//When the user checks the input
|
||||
onCheck: function (ele) {
|
||||
return ele;
|
||||
},
|
||||
//When the user unchecks the input
|
||||
onUncheck: function (ele) {
|
||||
return ele;
|
||||
}
|
||||
}, options);
|
||||
|
||||
return this.each(function () {
|
||||
|
||||
if (typeof $.fn.iCheck != 'undefined') {
|
||||
$('input', this).on('ifChecked', function () {
|
||||
var ele = $(this).parents("li").first();
|
||||
ele.toggleClass("done");
|
||||
settings.onCheck.call(ele);
|
||||
});
|
||||
|
||||
$('input', this).on('ifUnchecked', function () {
|
||||
var ele = $(this).parents("li").first();
|
||||
ele.toggleClass("done");
|
||||
settings.onUncheck.call(ele);
|
||||
});
|
||||
} else {
|
||||
$('input', this).on('change', function () {
|
||||
var ele = $(this).parents("li").first();
|
||||
ele.toggleClass("done");
|
||||
if ($('input', ele).is(":checked")) {
|
||||
settings.onCheck.call(ele);
|
||||
} else {
|
||||
settings.onUncheck.call(ele);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
};
|
||||
}(jQuery));
|
After Width: | Height: | Size: 32 KiB |
After Width: | Height: | Size: 9.4 KiB |
After Width: | Height: | Size: 480 KiB |
After Width: | Height: | Size: 70 KiB |
After Width: | Height: | Size: 52 KiB |
After Width: | Height: | Size: 100 KiB |
After Width: | Height: | Size: 27 KiB |
After Width: | Height: | Size: 80 KiB |
|
@ -0,0 +1,156 @@
|
|||
var mem_usedp = 0;
|
||||
var cpu_usedp = 0;
|
||||
|
||||
|
||||
function processMemData(data)
|
||||
{
|
||||
mem_usedp = data.monitor.mem_use.usedp;
|
||||
var usedp = data.monitor.mem_use.usedp;
|
||||
var unit = data.monitor.mem_use.unit;
|
||||
var quota = data.monitor.mem_use.quota;
|
||||
var val = data.monitor.mem_use.val;
|
||||
var out = "("+val+unit+"/"+quota+unit+")";
|
||||
$("#con_mem").html((usedp/0.01).toFixed(2)+"%<br/>"+out);
|
||||
}
|
||||
function getMemY()
|
||||
{
|
||||
return mem_usedp*100;
|
||||
}
|
||||
function processCpuData(data)
|
||||
{
|
||||
cpu_usedp = data.monitor.cpu_use.usedp;
|
||||
var val = data.monitor.cpu_use.val;
|
||||
var unit = data.monitor.cpu_use.unit;
|
||||
$("#con_cpu").html(val +" "+ unit);
|
||||
}
|
||||
function getCpuY()
|
||||
{
|
||||
return cpu_usedp*100;
|
||||
}
|
||||
|
||||
function plot_graph(container,url,processData,getY) {
|
||||
|
||||
//var container = $("#flot-line-chart-moving");
|
||||
|
||||
// Determine how many data points to keep based on the placeholder's initial size;
|
||||
// this gives us a nice high-res plot while avoiding more than one point per pixel.
|
||||
|
||||
var maximum = container.outerWidth() / 2 || 300;
|
||||
|
||||
//
|
||||
|
||||
var data = [];
|
||||
|
||||
|
||||
|
||||
function getBaseData() {
|
||||
|
||||
while (data.length < maximum) {
|
||||
data.push(0)
|
||||
}
|
||||
|
||||
// zip the generated y values with the x values
|
||||
|
||||
var res = [];
|
||||
for (var i = 0; i < data.length; ++i) {
|
||||
res.push([i, data[i]])
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
function getData() {
|
||||
|
||||
if (data.length) {
|
||||
data = data.slice(1);
|
||||
}
|
||||
|
||||
if (data.length < maximum) {
|
||||
$.post(url,{user:"root",key:"root"},processData,"json");
|
||||
var y = getY();
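// clamp the sample to [0, 100] so it always fits the fixed y-axis configured below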
|
||||
data.push(y < 0 ? 0 : y > 100 ? 100 : y);
|
||||
}
|
||||
|
||||
// zip the generated y values with the x values
|
||||
|
||||
var res = [];
|
||||
for (var i = 0; i < data.length; ++i) {
|
||||
res.push([i, data[i]])
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
|
||||
series = [{
|
||||
data: getBaseData(),
|
||||
lines: {
|
||||
fill: true
|
||||
}
|
||||
}];
|
||||
|
||||
|
||||
var plot = $.plot(container, series, {
|
||||
grid: {
|
||||
|
||||
color: "#999999",
|
||||
tickColor: "#D4D4D4",
|
||||
borderWidth:0,
|
||||
minBorderMargin: 20,
|
||||
labelMargin: 10,
|
||||
backgroundColor: {
|
||||
colors: ["#ffffff", "#ffffff"]
|
||||
},
|
||||
margin: {
|
||||
top: 8,
|
||||
bottom: 20,
|
||||
left: 20
|
||||
},
|
||||
markings: function(axes) {
|
||||
var markings = [];
|
||||
var xaxis = axes.xaxis;
|
||||
for (var x = Math.floor(xaxis.min); x < xaxis.max; x += xaxis.tickSize * 2) {
|
||||
markings.push({
|
||||
xaxis: {
|
||||
from: x,
|
||||
to: x + xaxis.tickSize
|
||||
},
|
||||
color: "#fff"
|
||||
});
|
||||
}
|
||||
return markings;
|
||||
}
|
||||
},
|
||||
colors: ["#1ab394"],
|
||||
xaxis: {
|
||||
tickFormatter: function() {
|
||||
return "";
|
||||
}
|
||||
},
|
||||
yaxis: {
|
||||
min: 0,
|
||||
max: 110
|
||||
},
|
||||
legend: {
|
||||
show: true
|
||||
}
|
||||
});
|
||||
|
||||
// Refresh the chart with the latest monitor sample once per second
|
||||
|
||||
setInterval(function updateRandom() {
|
||||
series[0].data = getData();
|
||||
plot.setData(series);
|
||||
plot.draw();
|
||||
}, 1000);
|
||||
|
||||
}
|
||||
|
||||
var host = window.location.host;
|
||||
|
||||
var node_name = $("#node_name").html();
|
||||
var url = "http://" + host + "/monitor/vnodes/" + node_name;
|
||||
|
||||
plot_graph($("#mem-chart"),url + "/mem_use",processMemData,getMemY);
|
||||
plot_graph($("#cpu-chart"),url + "/cpu_use",processCpuData,getCpuY);
|
|
@ -0,0 +1,197 @@
|
|||
|
||||
var used = 0;
|
||||
var total = 0;
|
||||
var idle = 0;
|
||||
var disk_usedp = 0;
|
||||
var count = 0;
|
||||
var MB = 1024;
|
||||
|
||||
function processMemData(data)
|
||||
{
|
||||
used = data.monitor.meminfo.used;
|
||||
total = data.monitor.meminfo.total;
|
||||
var used2 = ((data.monitor.meminfo.used)/MB).toFixed(2);
|
||||
var total2 = ((data.monitor.meminfo.total)/MB).toFixed(2);
|
||||
var free2 = ((data.monitor.meminfo.free)/MB).toFixed(2);
|
||||
$("#mem_used").html(used2);
|
||||
$("#mem_total").html(total2);
|
||||
$("#mem_free").html(free2);
|
||||
}
|
||||
function getMemY()
|
||||
{
|
||||
if(total == 0)
|
||||
return 0;
|
||||
else
|
||||
return (used/total)*100;
|
||||
}
|
||||
function processCpuData(data)
|
||||
{
|
||||
idle = data.monitor.cpuinfo.idle;
|
||||
var us = data.monitor.cpuinfo.user;
|
||||
var sy = data.monitor.cpuinfo.system;
|
||||
var wa = data.monitor.cpuinfo.iowait;
|
||||
$("#cpu_user").html(us);
|
||||
$("#cpu_system").html(sy);
|
||||
$("#cpu_iowait").html(wa);
|
||||
$("#cpu_idle").html(idle);
|
||||
}
|
||||
function getCpuY()
|
||||
{
|
||||
count++;
|
||||
//alert(idle);
|
||||
if(count <= 3 && idle <= 10)
|
||||
return 0;
|
||||
else
|
||||
return (100-idle);
|
||||
}
|
||||
function processDiskData(data)
|
||||
{
|
||||
var vals = data.monitor.diskinfo;
|
||||
disk_usedp = vals[0].usedp;
|
||||
for(var idx = 0; idx < vals.length; ++idx)
|
||||
{
|
||||
var used = (vals[idx].used/MB/MB).toFixed(2);
|
||||
var total = (vals[idx].total/MB/MB).toFixed(2);
|
||||
var free = (vals[idx].free/MB/MB).toFixed(2);
|
||||
var usedp = (vals[idx].percent);
|
||||
var name = "#disk_" + (idx+1) + "_";
|
||||
$(name+"device").html(vals[idx].device);
|
||||
$(name+"used").html(used);
|
||||
$(name+"total").html(total);
|
||||
$(name+"free").html(free);
|
||||
$(name+"usedp").html(usedp);
|
||||
}
|
||||
}
|
||||
function getDiskY()
|
||||
{
|
||||
return disk_usedp;
|
||||
}
|
||||
|
||||
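// Draw a moving flot line chart in container: poll url with $.post, let processData update the cached values, and sample getY() once per second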
function plot_graph(container,url,processData,getY) {
|
||||
|
||||
//var container = $("#flot-line-chart-moving");
|
||||
|
||||
// Determine how many data points to keep based on the placeholder's initial size;
|
||||
// this gives us a nice high-res plot while avoiding more than one point per pixel.
|
||||
|
||||
var maximum = container.outerWidth() / 2 || 300;
|
||||
|
||||
//
|
||||
|
||||
var data = [];
|
||||
|
||||
|
||||
|
||||
function getBaseData() {
|
||||
|
||||
while (data.length < maximum) {
|
||||
data.push(0)
|
||||
}
|
||||
|
||||
// zip the generated y values with the x values
|
||||
|
||||
var res = [];
|
||||
for (var i = 0; i < data.length; ++i) {
|
||||
res.push([i, data[i]])
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
function getData() {
|
||||
|
||||
if (data.length) {
|
||||
data = data.slice(1);
|
||||
}
|
||||
|
||||
if (data.length < maximum) {
|
||||
$.post(url,{user:"root",key:"unias"},processData,"json");
|
||||
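// $.post is asynchronous, so getY() reads the value cached by a previous response rather than this one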
var y = getY();
|
||||
data.push(y < 0 ? 0 : y > 100 ? 100 : y);
|
||||
}
|
||||
|
||||
// zip the generated y values with the x values
|
||||
|
||||
var res = [];
|
||||
for (var i = 0; i < data.length; ++i) {
|
||||
res.push([i, data[i]])
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
|
||||
series = [{
|
||||
data: getBaseData(),
|
||||
lines: {
|
||||
fill: true
|
||||
}
|
||||
}];
|
||||
|
||||
|
||||
var plot = $.plot(container, series, {
|
||||
grid: {
|
||||
|
||||
color: "#999999",
|
||||
tickColor: "#D4D4D4",
|
||||
borderWidth:0,
|
||||
minBorderMargin: 20,
|
||||
labelMargin: 10,
|
||||
backgroundColor: {
|
||||
colors: ["#ffffff", "#ffffff"]
|
||||
},
|
||||
margin: {
|
||||
top: 8,
|
||||
bottom: 20,
|
||||
left: 20
|
||||
},
|
||||
markings: function(axes) {
|
||||
var markings = [];
|
||||
var xaxis = axes.xaxis;
|
||||
for (var x = Math.floor(xaxis.min); x < xaxis.max; x += xaxis.tickSize * 2) {
|
||||
markings.push({
|
||||
xaxis: {
|
||||
from: x,
|
||||
to: x + xaxis.tickSize
|
||||
},
|
||||
color: "#fff"
|
||||
});
|
||||
}
|
||||
return markings;
|
||||
}
|
||||
},
|
||||
colors: ["#1ab394"],
|
||||
xaxis: {
|
||||
tickFormatter: function() {
|
||||
return "";
|
||||
}
|
||||
},
|
||||
yaxis: {
|
||||
min: 0,
|
||||
max: 110
|
||||
},
|
||||
legend: {
|
||||
show: true
|
||||
}
|
||||
});
|
||||
|
||||
// Poll the monitor API and redraw the chart once per second
|
||||
|
||||
setInterval(function updateRandom() {
|
||||
series[0].data = getData();
|
||||
plot.setData(series);
|
||||
plot.draw();
|
||||
}, 1000);
|
||||
|
||||
}
|
||||
var host = window.location.host;
|
||||
|
||||
var com_ip = $("#com_ip").html();
|
||||
var url = "http://" + host + "/monitor/hosts/"+com_ip;
|
||||
|
||||
plot_graph($("#mem-chart"), url + "/meminfo",processMemData,getMemY);
|
||||
plot_graph($("#cpu-chart"), url + "/cpuinfo",processCpuData,getCpuY);
|
||||
//plot_graph($("#disk-chart"), url + "/diskinfo",processDiskData,getDiskY);
|
||||
$.post(url+"/diskinfo",{user:"root",key:"unias"},processDiskData,"json");
|
||||
|
|
@ -0,0 +1,130 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Create Workspace{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
<!--<style>
|
||||
.divcontent { overflow-y:scroll; height:200px;}
|
||||
</style>-->
|
||||
{% endblock %}
|
||||
|
||||
{% block panel_title %}Workspace Info{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-lg-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Workspace Add</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<form id="form" class="form-hrizontal" action="/workspace/add/" method="POST">
|
||||
|
||||
<div class="row">
|
||||
<div class="form-group"><label class="col-sm-2 control-label">Workspace Name</label>
|
||||
<div class="col-sm-10"><input type="text" class="form-control" name="clusterName" id="clusterName"></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="hr-line-dashed"></div>
|
||||
<div class="row">
|
||||
<div class="form-group"><label class="col-sm-2 control-label">Image Choose</label>
|
||||
<div class="col-sm-10">
|
||||
<table class="table table-striped table-bordered table-hover table-image" >
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ImageName</th>
|
||||
<th>Type</th>
|
||||
<th>Owner</th>
|
||||
<th>Description</th>
|
||||
<th>Choose</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>base</td>
|
||||
<td><div class="label label-outline-success">public</div></td>
|
||||
<td>docklet</td>
|
||||
<td>A base image for you</td>
|
||||
<td><div class="i-checks"><label><input type="radio" name="image" value="base_base_base" checked="checked"></label></div></td>
|
||||
</tr>
|
||||
{% for image in images['private'] %}
|
||||
<tr>
|
||||
<td>{{image['name']}}</td>
|
||||
<td><div class="label label-outline-warning">{{"private"}}</div></td>
|
||||
<td>{{user}}</td>
|
||||
<td><a href="/image/description/{{image['name']}}_{{user}}_private/" target="_blank">{{image['description']}}</a></td>
|
||||
<td><div class="i-checks"><label><input type="radio" name="image" value="{{image['name']}}_{{user}}_private"></label></div></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% for p_user,p_images in images['public'].items() %}
|
||||
{% for image in p_images %}
|
||||
<tr>
|
||||
<td>{{image['name']}}</td>
|
||||
<td><div class="label label-outline-success">{{"public"}}</div></td>
|
||||
<td>{{p_user}}</td>
|
||||
<td><a href="/image/description/{{image['name']}}_{{p_user}}_public" target="_blank">{{image['description']}}</a></td>
|
||||
<td><div class="i-checks"><label><input type="radio" name="image" value="{{image['name']}}_{{p_user}}_public"></label></div></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="hr-line-dashed"></div>
|
||||
<div class="row">
|
||||
<div class="form-group">
|
||||
<div class="col-sm-4 col-sm-offset-2">
|
||||
<button class="btn btn-primary" type="submit">Create</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<!-- Custom and plugin javascript -->
|
||||
<script src="/static/js/inspinia.js"></script>
|
||||
|
||||
<script src="http://cdn.bootcss.com/pace/1.0.2/pace.min.js"></script>
|
||||
|
||||
<!-- Steps -->
|
||||
<script src="http://cdn.bootcss.com/jquery-steps/1.1.0/jquery.steps.min.js"></script>
|
||||
|
||||
<!-- Jquery Validate -->
|
||||
<script src="http://cdn.bootcss.com/jquery-validate/1.15.0/jquery.validate.min.js"></script>
|
||||
|
||||
|
||||
<script src="http://cdn.bootcss.com/datatables/1.10.11/js/jquery.dataTables.js"></script>
|
||||
<script src="http://cdn.bootcss.com/datatables/1.10.11/js/dataTables.bootstrap.js"></script>
|
||||
<script src="http://cdn.bootcss.com/datatables-tabletools/2.1.5/js/TableTools.min.js"></script>
|
||||
|
||||
|
||||
<script>
|
||||
$(document).ready(function(){
|
||||
$(".table-image").DataTable();
|
||||
});
|
||||
</script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,173 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
{% block title %}Docklet | Admin{% endblock %}
|
||||
|
||||
{% block panel_title %}Admin{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/">Home</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
|
||||
<link href="//cdn.bootcss.com/datatables/1.10.11/css/dataTables.bootstrap.min.css" rel="stylesheet">
|
||||
<link href="//cdn.bootcss.com/datatables/1.10.11/css/jquery.dataTables_themeroller.css" rel="stylesheet">
|
||||
<link href="/static/dist/css/modalconfig.css" rel="stylesheet">
|
||||
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Quota</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<button type="button" class="btn btn-primary btn-sm" data-toggle="modal" data-target="#AddGroupModal"><i class="fa fa-plus"></i> Add Quota Group</button>
|
||||
<div class="modal inmodal" id="AddGroupModal" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-laptop modal-icon"></i>
|
||||
<h4 class="modal-title">Add Group</h4>
|
||||
<small class="font-bold">Add a group to Docklet</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
|
||||
<form action="/group/add/" method="POST" id="addGroupForm">
|
||||
<div class="form-group">
|
||||
<label>Group Name</label>
|
||||
<input type = "text" placeholder="Enter GroupName" class="form-control" name="name" id="mymyname">
|
||||
</div>
|
||||
|
||||
</form>
|
||||
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="button" class="btn btn-primary" onClick="javascript:sendAddGroup();">Submit</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<table id="myGroupTable" class="table table-striped table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ID</th>
|
||||
<th>Name</th>
|
||||
<th>CPU</th>
|
||||
<th>Memory</th>
|
||||
<th>ImageQuantity</th>
|
||||
<th>LifeCycle</th>
|
||||
<th>Command</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
</tbody>
|
||||
<div class="modal inmodal" id="ModifyGroupModal" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-laptop modal-icon"></i>
|
||||
<h4 class="modal-title">Modify Group</h4>
|
||||
<small class="font-bold">Modify a group in Docklet</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<form action="/group/modify/" method="POST" id="modifyGroupForm">
|
||||
<div class="form-group">
|
||||
<label>Group Name</label>
|
||||
<input type = "text" placeholder="Enter Groupname" class="form-control" name="groupname" id="mGroupname" readonly="readonly">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>CPU Quota</label>
|
||||
<input type = "text" placeholder="Enter CPU Quota" class="form-control" name="cpu" id="mCpu">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Memory Quota</label>
|
||||
<input type="text" placeholder="Enter Memory Quota" class="form-control" name="memory" id="mMemory">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Image Quantity</label>
|
||||
<input type = "text" placeholder="Enter Image Quantity" class="form-control" name="image" id="mImage">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Life Cycle</label>
|
||||
<input type = "text" placeholder="Enter Life Cycle" class="form-control" name="lifecycle" id="mLifecycle">
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="button" class="btn btn-primary" onClick="javascript:sendModifyGroup();">Submit</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<script src="//cdn.bootcss.com/datatables/1.10.11/js/jquery.dataTables.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/datatables/1.10.11/js/dataTables.bootstrap.min.js"></script>
|
||||
|
||||
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function() {
|
||||
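// Fill the quota table from POST /group/detail/ and render an Edit button (opens the modify modal) in the Command column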
var gTable = $('#myGroupTable').dataTable({
|
||||
"ajax": {
|
||||
"url": "/group/detail/",
|
||||
"type": "POST"
|
||||
},
|
||||
//"scrollX": true,
|
||||
"columnDefs": [
|
||||
{
|
||||
"render": function ( data, type, row ) {
|
||||
return '<a class="btn btn-info btn-sm" data-toggle="modal" data-target="#ModifyGroupModal" onClick="javascript:setFormGroup(' + row[0] + ');">' + 'Edit' + '</a>';
|
||||
},
|
||||
"targets": 6
|
||||
},
|
||||
]
|
||||
|
||||
});
|
||||
});
|
||||
function sendAddGroup(){
|
||||
document.getElementById("addGroupForm").submit();
|
||||
}
|
||||
function sendModifyGroup(){
|
||||
document.getElementById("modifyGroupForm").submit();
|
||||
}
|
||||
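// Query the selected group's quota values and pre-fill the Modify Group form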
function setFormGroup(arg){
|
||||
$.post("/group/query/",
|
||||
{
|
||||
ID: arg,
|
||||
},
|
||||
function(data,status){
|
||||
var result = JSON.parse(data).data;  // parse the JSON response instead of eval-ing server output
|
||||
$("#mGroupname").val(result.name);
|
||||
$("#mCpu").val(result.cpu);
|
||||
$("#mMemory").val(result.memory);
|
||||
$("#mImage").val(result.imageQuantity);
|
||||
$("#mLifecycle").val(result.lifeCycle);
|
||||
});
|
||||
}
|
||||
</script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,290 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<title>{% block title %}Docklet | Dashboard{% endblock %}</title>
|
||||
<!-- Tell the browser to be responsive to screen width -->
|
||||
<meta content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" name="viewport">
|
||||
<link rel="shortcut icon" href="/static/img/favicon.ico">
|
||||
|
||||
<link href="//cdn.bootcss.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet">
|
||||
|
||||
<!-- Font Awesome -->
|
||||
<link href="//cdn.bootcss.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet">
|
||||
|
||||
<!-- Ionicons -->
|
||||
<link href="//cdn.bootcss.com/ionicons/2.0.1/css/ionicons.min.css" rel="stylesheet">
|
||||
|
||||
<link href="//cdn.bootcss.com/animate.css/3.5.1/animate.min.css" rel="stylesheet">
|
||||
<link href="//cdn.bootcss.com/toastr.js/latest/css/toastr.min.css" rel="stylesheet">
|
||||
|
||||
<!-- Theme style -->
|
||||
|
||||
<link rel="stylesheet" href="/static/dist/css/AdminLTE.min.css">
|
||||
|
||||
<link rel="stylesheet" href="/static/dist/css/skins/skin-blue.min.css">
|
||||
|
||||
|
||||
{% block css_src %}{% endblock %}
|
||||
|
||||
|
||||
</head>
|
||||
|
||||
<body class="hold-transition skin-blue sidebar-mini">
|
||||
<div class="wrapper">
|
||||
|
||||
<!-- Main Header -->
|
||||
<header class="main-header">
|
||||
|
||||
<!-- Logo -->
|
||||
<a href="" class="logo">
|
||||
<!-- mini logo for sidebar mini 50x50 pixels -->
|
||||
<span class="logo-mini"></span>
|
||||
<!-- logo for regular state and mobile devices -->
|
||||
<span class="logo-lg"><b>Docklet</b></span>
|
||||
</a>
|
||||
|
||||
<!-- Header Navbar -->
|
||||
<nav class="navbar navbar-static-top" role="navigation">
|
||||
<!-- Sidebar toggle button-->
|
||||
<a href="#" class="sidebar-toggle" data-toggle="offcanvas" role="button">
|
||||
<span class="sr-only">Toggle navigation</span>
|
||||
</a>
|
||||
<!-- Navbar Right Menu -->
|
||||
<div class="navbar-custom-menu">
|
||||
<ul class="nav navbar-nav">
|
||||
<!-- Messages: style can be found in dropdown.less-->
|
||||
|
||||
<li class="dropdown user user-menu">
|
||||
<!-- Menu Toggle Button -->
|
||||
<a href="#" class="dropdown-toggle" data-toggle="dropdown">
|
||||
<!-- The user image in the navbar-->
|
||||
<img src="{{ mysession['avatar'] }}" class="user-image" alt="User Image">
|
||||
<!-- hidden-xs hides the username on small devices so only the image appears. -->
|
||||
<span class="hidden-xs">{{ mysession['nickname'] }}</span>
|
||||
</a>
|
||||
<ul class="dropdown-menu">
|
||||
<!-- The user image in the menu -->
|
||||
<li class="user-header">
|
||||
<img src="{{ mysession['avatar'] }}" class="img-circle" alt="User Image">
|
||||
|
||||
<p>
|
||||
{{ mysession['nickname'] }}
|
||||
<small>{{ mysession['description'] }}</small>
|
||||
</p>
|
||||
</li>
|
||||
<!-- Menu Body -->
|
||||
|
||||
<!-- Menu Footer-->
|
||||
<li class="user-footer">
|
||||
<div class="pull-left">
|
||||
<a href="/user/info/" class="btn btn-default btn-flat">Profile</a>
|
||||
</div>
|
||||
<div class="pull-right">
|
||||
<a href="/logout/" class="btn btn-default btn-flat">Sign out</a>
|
||||
</div>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<!-- Control Sidebar Toggle Button -->
|
||||
<li>
|
||||
<a href="/document/" target="_blank"><strong>Help</strong></a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="/logout/" ><strong>Logout</strong></a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
</header>
|
||||
<!-- Left side column. contains the logo and sidebar -->
|
||||
<aside class="main-sidebar">
|
||||
|
||||
<!-- sidebar: style can be found in sidebar.less -->
|
||||
<section class="sidebar">
|
||||
|
||||
<!-- Sidebar user panel (optional) -->
|
||||
<div class="user-panel">
|
||||
<div class="pull-left image">
|
||||
<img src="{{ mysession['avatar'] }}" class="img-circle" alt="User Image">
|
||||
</div>
|
||||
<div class="pull-left info">
|
||||
<p>{{ mysession['nickname'] }}</p>
|
||||
<!-- Status -->
|
||||
<a href="#"><i class="fa fa-circle text-success"></i> {{ mysession['status']}}</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Sidebar Menu -->
|
||||
<ul class="sidebar-menu">
|
||||
<li class="header">USER OPERATIONS</li>
|
||||
<!-- Optionally, you can add icons to the links -->
|
||||
<li class="active" id="nav_Dashboard">
|
||||
<a href="/dashboard/"><i class="fa fa-th-large"></i> <span class="nav-label">Dashboard</span></a>
|
||||
</li>
|
||||
<li id="nav_Config">
|
||||
<a href="/config/"><i class="fa fa-gears"></i> <span class="nav-label">Config</span></a>
|
||||
</li>
|
||||
|
||||
<li id="nav_Status">
|
||||
<a href='/vclusters/'><i class="fa fa-bar-chart"></i> <span class="nav-label">Status</span></a>
|
||||
</li>
|
||||
|
||||
|
||||
{% if mysession['usergroup'] == 'root' or mysession['usergroup'] == 'admin'%}
|
||||
<li class="header">ADMIN OPERATIONS</li>
|
||||
<li id="nav_Hosts">
|
||||
<a href='/hosts/'><i class="fa fa-sitemap"></i> <span class="nav-label">Hosts</span></a>
|
||||
</li>
|
||||
<li id="user_List">
|
||||
<a href='/user/list/'><i class="fa fa-users"></i> <span class="nav-label">Users</span></a>
|
||||
</li>
|
||||
<li id="admin">
|
||||
<a href='/admin/'><i class="fa fa-gears"></i> <span class="nav-label">Admin</span></a>
|
||||
</li>
|
||||
{% endif %}
|
||||
|
||||
</ul>
|
||||
<!-- /.sidebar-menu -->
|
||||
</section>
|
||||
<!-- /.sidebar -->
|
||||
</aside>
|
||||
|
||||
<!-- Content Wrapper. Contains page content -->
|
||||
<div class="content-wrapper">
|
||||
<!-- Content Header (Page header) -->
|
||||
<section class="content-header">
|
||||
<h1>
|
||||
<strong>{% block panel_title %}Dashboard{% endblock %}</strong>
|
||||
</h1>
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>Dashboard</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
</section>
|
||||
<!-- Main content -->
|
||||
<section class="content">
|
||||
|
||||
{% block content %}
|
||||
{% endblock %}
|
||||
|
||||
</section>
|
||||
<!-- /.content -->
|
||||
</div>
|
||||
<!-- /.content-wrapper -->
|
||||
|
||||
<!-- Main Footer -->
|
||||
<footer class="main-footer">
|
||||
<!-- To the right -->
|
||||
<div class="pull-right hidden-xs">
|
||||
<i>Docklet 0.2.6</i>
|
||||
</div>
|
||||
<!-- Default to the left -->
|
||||
<strong>Copyright</strong>© 2016 <a href="http://docklet.unias.org">UniAS</a>@<a href="http://www.sei.pku.edu.cn"> SEI, PKU</a>
|
||||
|
||||
</footer>
|
||||
|
||||
</div>
|
||||
<!-- ./wrapper -->
|
||||
|
||||
<!-- REQUIRED JS SCRIPTS -->
|
||||
|
||||
<!-- jQuery 2.2.1 -->
|
||||
<script src="//cdn.bootcss.com/jquery/2.2.1/jquery.min.js"></script>
|
||||
<!-- Bootstrap 3.3.5 -->
|
||||
<script src="//cdn.bootcss.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
<!-- AdminLTE App -->
|
||||
<script src="/static/dist/js/app.min.js"></script>
|
||||
|
||||
<script src="//cdn.bootcss.com/fastclick/1.0.6/fastclick.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/jQuery-slimScroll/1.3.7/jquery.slimscroll.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/toastr.js/latest/js/toastr.min.js"></script>
|
||||
|
||||
|
||||
|
||||
<script type="text/javascript">
|
||||
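// Highlight the sidebar entry that matches the first segment of the current URL path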
var pathname = window.location.pathname;
|
||||
pathname = pathname.split(/\//);
|
||||
if(pathname[1] != 'dashboard')
|
||||
$("#nav_Dashboard").removeClass("active");
|
||||
if(pathname[1] == 'vclusters')
|
||||
$("#nav_Status").addClass("active");
|
||||
else if(pathname[1] == 'hosts')
|
||||
$("#nav_Hosts").addClass("active");
|
||||
else if(pathname[1] == 'config')
|
||||
$("#nav_Config").addClass("active");
|
||||
else if(pathname[1] == 'user')
|
||||
{
|
||||
if (pathname[2] == 'list')
|
||||
$("#user_List").addClass("active");
|
||||
}
|
||||
|
||||
</script>
|
||||
|
||||
|
||||
{% if mysession['status'] == 'init' %}
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function() {
|
||||
toastr.options = {
|
||||
"closeButton": false,
|
||||
"debug": true,
|
||||
"progressBar": false,
|
||||
"preventDuplicates": false,
|
||||
"positionClass": "toast-top-left",
|
||||
"onclick": function(){
|
||||
window.location.href="/activate/";
|
||||
},
|
||||
"showDuration": "0",
|
||||
"hideDuration": "0",
|
||||
"timeOut": "0",
|
||||
"extendedTimeOut": "0",
|
||||
"showEasing": "swing",
|
||||
"hideEasing": "linear",
|
||||
"showMethod": "fadeIn",
|
||||
"hideMethod": "fadeOut"
|
||||
};
|
||||
toastr.error("You are not activated. Click this notification to activate your account.");
|
||||
});
|
||||
</script>
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% if mysession['status'] == 'applying' %}
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function() {
|
||||
toastr.options = {
|
||||
"closeButton": false,
|
||||
"debug": true,
|
||||
"progressBar": false,
|
||||
"preventDuplicates": false,
|
||||
"positionClass": "toast-top-left",
|
||||
"onclick": function(){
|
||||
},
|
||||
"showDuration": "0",
|
||||
"hideDuration": "0",
|
||||
"timeOut": "0",
|
||||
"extendedTimeOut": "0",
|
||||
"showEasing": "swing",
|
||||
"hideEasing": "linear",
|
||||
"showMethod": "fadeIn",
|
||||
"hideMethod": "fadeOut"
|
||||
};
|
||||
toastr.warning("You applying is being checked.");
|
||||
});
|
||||
</script>
|
||||
|
||||
{% endif %}
|
||||
|
||||
|
||||
{% block script_src %}
|
||||
{% endblock %}
|
||||
</body>
|
||||
|
||||
</html>
|
|
@ -0,0 +1,315 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
|
||||
<!--
|
||||
Config Page :
|
||||
1. images
|
||||
2. workspace templates
|
||||
|
||||
-->
|
||||
|
||||
{% block title %}Docklet | Config{% endblock %}
|
||||
|
||||
{% block panel_title %}Config{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
<link href="/static/dist/css/modalconfig.css" rel="stylesheet">
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content %}
|
||||
{% for clustername, clusterinfo in clusters.items() %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">WorkSpace Name: {{ clustername }}</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h4 class="box-title">VCLUSTER</h4>
|
||||
<h5>create_time:{{clusterinfo['create_time']}}      start_time:{{clusterinfo['start_time']}}</h5>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<button type="button" class="btn btn-primary btn-sm" data-toggle="modal" data-target="#Scaleout_{{ clustername }}"><i class="fa fa-plus"></i>Add Node</button>
|
||||
<div class="modal inmodal" id="Scaleout_{{ clustername }}" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-plus modal-icon"></i>
|
||||
<h4 class="modal-title">Choose Image</h4>
|
||||
<small class="font-bold">Choose an image to add node</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<div class="form-group">
|
||||
<form action="/workspace/scaleout/{{ clustername }}/" method="POST" id="scaleout">
|
||||
<table class="table table-striped table-bordered table-hover table-image">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ImageName</th>
|
||||
<th>Type</th>
|
||||
<th>Owner</th>
|
||||
<th>Choose</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>base</td>
|
||||
<td><div class="label label-outline-success">public</div></td>
|
||||
<td>docklet</td>
|
||||
<td><input type="radio" name="image" value="base_base_base" checked="checked"></td>
|
||||
</tr>
|
||||
{% for image in images['private'] %}
|
||||
<tr>
|
||||
<td>{{image['name']}}</td>
|
||||
<td><div class="label label-outline-warning">private</div></td>
|
||||
<td>{{mysession['username']}}</td>
|
||||
<td><input type="radio" name="image" value="{{image['name']}}_{{mysession['username']}}_private"></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% for p_user, p_images in images['public'].items() %}
|
||||
{% for image in p_images %}
|
||||
<tr>
|
||||
<td>{{image['name']}}</td>
|
||||
<td><div class="label label-outline-success">public</div></td>
|
||||
<td>{{p_user}}</td>
|
||||
<td><input type="radio" name="image" value="{{image['name']}}_{{p_user}}_public"></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="submit" class="btn btn-success">Add</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Node ID</th>
|
||||
<th>Node Name</th>
|
||||
<th>IP Address</th>
|
||||
<th>Status</th>
|
||||
<th>Image</th>
|
||||
<th>Save</th>
|
||||
<th>Delete</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for container in clusterinfo['containers'] %}
|
||||
<tr>
|
||||
<td>{{ loop.index }}</td>
|
||||
<td>{{ container['containername'] }}</td>
|
||||
<td>{{ container['ip'] }}</td>
|
||||
|
||||
{% if clusterinfo['status'] == 'stopped' %}
|
||||
<td><div class="text-warning"><i class="fa fa-stop"></i> Stopped</div></td>
|
||||
{% else %}
|
||||
<td><div class="text-success"><i class="fa fa-play"></i> Running</div></td>
|
||||
{% endif %}
|
||||
|
||||
<td>{{ container['image'] }}</td>
|
||||
<td><button type="button" class="btn btn-success btn-xs" data-toggle="modal" data-target="#DelModal_{{ container['containername'] }}">Save</button></td>
|
||||
{% if container['containername'][-2:] == '-0' %}
|
||||
<td><button class="btn btn-xs btn-default">Delete</button></td>
|
||||
{% else %}
|
||||
<td><a class="btn btn-xs btn-danger" href="/workspace/scalein/{{ clustername }}/{{ container['containername'] }}/">Delete</a></td>
|
||||
{% endif %}
|
||||
<div class="modal inmodal" id="DelModal_{{ container['containername'] }}" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-save modal-icon"></i>
|
||||
<h4 class="modal-title">Save Image</h4>
|
||||
<small class="font-bold">Save Your Environment As a Image</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<div class="form-group">
|
||||
<form action="/workspace/save/{{ clustername }}/{{ container['containername'] }}/" method="POST" id="saveImage">
|
||||
<label>Image Name</label>
|
||||
<input type="text" placeholder="Enter Image Name" class="form-control" name="ImageName" id="ImageName"/>
|
||||
<br/>
|
||||
<label>Description</label>
|
||||
<textarea rows="5" cols="60" name="description" id="description">please input your description</textarea>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="submit" class="btn btn-success">Save</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<br/>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">SERVICE</h3>
|
||||
<h5><a href="/_web/{{ mysession['username'] }}/{{ clustername }}/" title="click here jump to your proxy server">{{ clusterinfo['proxy_url'] }}</a></h5>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<form action="/addproxy/{{ clustername }}/" id="addproxy" method="POST">
|
||||
{% if 'proxy_ip' in clusterinfo %}
|
||||
<p>ip:<input type="text" id="proxy_ip" name="proxy_ip" value={{ clusterinfo['proxy_ip'][:clusterinfo['proxy_ip'].index(':')] }} readonly="true"/>port:<input type="text" id="proxy_port" name="proxy_port" value={{ clusterinfo['proxy_ip'][clusterinfo['proxy_ip'].index(':')+1:] }} readonly="true"/>
|
||||
<button type="button" class="btn-xs btn-default">enable</button>
|
||||
<a href="/deleteproxy/{{ clustername }}/"><button type="button" class="btn-xs btn-danger">disable</button></a></p>
|
||||
{% else %}
|
||||
<p>ip:<input type="text" id="proxy_ip" name="proxy_ip" value={{ clusterinfo["containers"][0]["ip"][:clusterinfo["containers"][0]["ip"].index("/")] }} />port:<input type="text" id="proxy_port" name="proxy_port" value="8000"/>
|
||||
<button type="submit" class="btn-xs btn-success">enable</button>
|
||||
<button type="button" class="btn-xs btn-default">disable</button></p>
|
||||
{% endif %}
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<div class="row">
|
||||
<div class="col-lg-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Image Info</h3>
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<table class="table table-striped table-bordered table-hover table-image" >
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ImageName</th>
|
||||
<th>Type</th>
|
||||
<th>Owner</th>
|
||||
<th>CreateTime</th>
|
||||
<th>Description</th>
|
||||
<th>Status</th>
|
||||
<th>Operation</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>base</td>
|
||||
<td><div class="label label-outline-success">public</div></td>
|
||||
<td>docklet</td>
|
||||
<td>2015-01-01 00:00:00</td>
|
||||
<td>A Base Image For You</td>
|
||||
<td></td>
|
||||
<td></td>
|
||||
</tr>
|
||||
{% for image in images['private'] %}
|
||||
<tr>
|
||||
<td>{{image['name']}}</td>
|
||||
<td><div class="label label-outline-warning">{{"private"}}</div></td>
|
||||
<td>{{mysession['username']}}</td>
|
||||
<td>{{image['time']}}</td>
|
||||
<td><a href="/image/description/{{image['name']}}_{{mysession['username']}}_private/" target="_blank">{{image['description']}}</a></td>
|
||||
{% if image['isshared'] == 'false' %}
|
||||
<td><div class="label label-outline-default">unshared</div></td>
|
||||
<td>
|
||||
<a href="/image/share/{{ image['name'] }}/"><button type="button" class="btn btn-xs btn-success">share</button></a>
|
||||
<a href="/image/delete/{{ image['name'] }}/"><button type="button" class="btn btn-xs btn-danger">delete</button></a>
|
||||
</td>
|
||||
{% else %}
|
||||
<td><div class="label label-outline-default">shared</div></td>
|
||||
<td>
|
||||
<a href="/image/unshare/{{ image['name'] }}/"><button type="button" class="btn btn-xs btn-warning">unshare</button></a>
|
||||
<a href="/image/delete/{{ image['name'] }}/"><button type="button" class="btn btn-xs btn-danger">delete</button></a>
|
||||
</td>
|
||||
{% endif %}
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% for p_user,p_images in images['public'].items() %}
|
||||
{% for image in p_images %}
|
||||
<tr>
|
||||
<td>{{image['name']}}</td>
|
||||
<td><div class="label label-outline-success">{{"public"}}</div></td>
|
||||
<td>{{p_user}}</td>
|
||||
<td>{{image['time']}}</td>
|
||||
<td><a href="/image/description/{{image['name']}}_{{p_user}}_public/" target="_blank">{{image['description']}}</a></td>
|
||||
<td></td>
|
||||
{% if p_user == mysession['username'] %}
|
||||
<td><a href="/image/unshare/{{ image['name'] }}/"><button type="button" class="btn btn-xs btn-warning">unshare</button></a></td>
|
||||
{% else %}
|
||||
<td></td>
|
||||
{% endif %}
|
||||
</tr>
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
|
||||
<script src="http://cdn.bootcss.com/datatables/1.10.11/js/jquery.dataTables.js"></script>
|
||||
<script src="http://cdn.bootcss.com/datatables/1.10.11/js/dataTables.bootstrap.js"></script>
|
||||
<script src="http://cdn.bootcss.com/datatables-tabletools/2.1.5/js/TableTools.min.js"></script>
|
||||
|
||||
<script>
|
||||
$(document).ready(function() {
|
||||
$(".table-image").DataTable();
|
||||
});
|
||||
|
||||
</script>
|
||||
|
||||
{% endblock %}
|
|
@ -0,0 +1,89 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
{% block title %}Docklet | Dashboard{% endblock %}
|
||||
|
||||
{% block panel_title %}Dashboard{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<a href='/dashboard/'>Dashboard</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-lg-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Workspaces</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<p>
|
||||
<a href="/workspace/create/"><button type="button" class="btn btn-primary btn-sm"><i class="fa fa-plus"></i> Add Workspace</button></a>
|
||||
</p>
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ID</th>
|
||||
<th>Name</th>
|
||||
<th>Status</th>
|
||||
<th>Operation</th>
|
||||
<th>WorkSpace</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for cluster in clusters %}
|
||||
<tr>
|
||||
<td>{{ cluster['id'] }}</td>
|
||||
<td><a href="/config/">{{ cluster['name'] }}</a></td>
|
||||
{% if cluster['status'] == 'running' %}
|
||||
<td><a href="/vclusters/"><div class="text-success"><i class="fa fa-play"></i> Running</div></a></td>
|
||||
<td>
|
||||
<a href="/workspace/stop/{{ cluster['name'] }}/"><button type="button" class="btn btn-xs btn-warning"> Stop </button></a>
|
||||
<button type="button" class="btn btn-xs btn-default"> Delete </button>
|
||||
</td>
|
||||
<td>
|
||||
<a href="/go/{{ mysession['username'] }}/{{ cluster['name'] }}" target="_blank"><button type="button" class="btn btn-xs btn-success"> Go </button></a>
|
||||
</td>
|
||||
{% else %}
|
||||
<td><a href="/monitor/Node/"><div class="text-warning"><i class="fa fa-stop "></i> Stopped</div></a></td>
|
||||
<td>
|
||||
<a href="/workspace/start/{{ cluster['name'] }}/"><button type="button" class="btn btn-xs btn-success"> Start </button></a>
|
||||
<a href="/workspace/delete/{{ cluster['name'] }}/"><button type="button" class="btn btn-xs btn-danger">Delete</button></a>
|
||||
</td>
|
||||
<td>
|
||||
<button type="button" class="btn btn-xs btn-default"> Go </button>
|
||||
</td>
|
||||
{% endif %}
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
{% block script_src %}
|
||||
|
||||
<script type="text/javascript">
|
||||
function sendAdd(){
|
||||
document.getElementById("addForm").submit();
|
||||
}
|
||||
function sendDel(){
|
||||
document.getElementById("delForm").submit();
|
||||
}
|
||||
</script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,329 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<title>Docklet | Dashboard</title>
|
||||
<!-- Tell the browser to be responsive to screen width -->
|
||||
<meta content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" name="viewport">
|
||||
<link rel="shortcut icon" href="/static/img/favicon.ico">
|
||||
|
||||
<link href="//cdn.bootcss.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet">
|
||||
|
||||
<!-- Font Awesome -->
|
||||
<link href="//cdn.bootcss.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet">
|
||||
|
||||
<!-- Ionicons -->
|
||||
<link href="//cdn.bootcss.com/ionicons/2.0.1/css/ionicons.min.css" rel="stylesheet">
|
||||
|
||||
<link href="//cdn.bootcss.com/animate.css/3.5.1/animate.min.css" rel="stylesheet">
|
||||
<link href="//cdn.bootcss.com/toastr.js/latest/css/toastr.min.css" rel="stylesheet">
|
||||
|
||||
<!-- Theme style -->
|
||||
|
||||
<link rel="stylesheet" href="/static/dist/css/AdminLTE.min.css">
|
||||
|
||||
<link rel="stylesheet" href="/static/dist/css/skins/skin-blue.min.css">
|
||||
|
||||
|
||||
</head>
|
||||
|
||||
<body class="hold-transition skin-blue sidebar-mini">
|
||||
<div class="wrapper">
|
||||
|
||||
<!-- Main Header -->
|
||||
<header class="main-header">
|
||||
|
||||
<!-- Logo -->
|
||||
<a href="" class="logo">
|
||||
<!-- mini logo for sidebar mini 50x50 pixels -->
|
||||
<span class="logo-mini"></span>
|
||||
<!-- logo for regular state and mobile devices -->
|
||||
<span class="logo-lg"><b>Docklet</b></span>
|
||||
</a>
|
||||
|
||||
<!-- Header Navbar -->
|
||||
<nav class="navbar navbar-static-top" role="navigation">
|
||||
<!-- Sidebar toggle button-->
|
||||
<a href="#" class="sidebar-toggle" data-toggle="offcanvas" role="button">
|
||||
<span class="sr-only">Toggle navigation</span>
|
||||
</a>
|
||||
<!-- Navbar Right Menu -->
|
||||
<div class="navbar-custom-menu">
|
||||
<ul class="nav navbar-nav">
|
||||
<!-- Messages: style can be found in dropdown.less-->
|
||||
|
||||
<li class="dropdown user user-menu">
|
||||
<!-- Menu Toggle Button -->
|
||||
<a href="#" class="dropdown-toggle" data-toggle="dropdown">
|
||||
<!-- The user image in the navbar-->
|
||||
<img src="{{ mysession['avatar'] }}" class="user-image" alt="User Image">
|
||||
<!-- hidden-xs hides the username on small devices so only the image appears. -->
|
||||
<span class="hidden-xs">{{ mysession['nickname'] }}</span>
|
||||
</a>
|
||||
<ul class="dropdown-menu">
|
||||
<!-- The user image in the menu -->
|
||||
<li class="user-header">
|
||||
<img src="{{ mysession['avatar'] }}" class="img-circle" alt="User Image">
|
||||
|
||||
<p>
|
||||
{{ mysession['nickname'] }}
|
||||
<small>{{ mysession['description'] }}</small>
|
||||
</p>
|
||||
</li>
|
||||
<!-- Menu Body -->
|
||||
|
||||
<!-- Menu Footer-->
|
||||
<li class="user-footer">
|
||||
<div class="pull-left">
|
||||
Profile
|
||||
</div>
|
||||
<div class="pull-right">
|
||||
Sign out
|
||||
</div>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<!-- Control Sidebar Toggle Button -->
|
||||
<li>
|
||||
<a href="/document/" target="_blank"><strong>Help</strong></a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
</header>
|
||||
<!-- Left side column. contains the logo and sidebar -->
|
||||
<aside class="main-sidebar">
|
||||
|
||||
<!-- sidebar: style can be found in sidebar.less -->
|
||||
<section class="sidebar">
|
||||
|
||||
<!-- Sidebar user panel (optional) -->
|
||||
<div class="user-panel">
|
||||
<div class="pull-left image">
|
||||
<img src="{{ mysession['avatar'] }}" class="img-circle" alt="User Image">
|
||||
</div>
|
||||
<div class="pull-left info">
|
||||
<p>{{ mysession['nickname'] }}</p>
|
||||
<!-- Status -->
|
||||
<i class="fa fa-circle text-success"></i> {{ mysession['status']}}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Sidebar Menu -->
|
||||
<ul class="sidebar-menu">
|
||||
<li class="header">USER OPERATIONS</li>
|
||||
<!-- Optionally, you can add icons to the links -->
|
||||
<li class="active" id="nav_Dashboard">
|
||||
<a href="javascript:void(0)"><i class="fa fa-th-large"></i> <span class="nav-label">Dashboard</span></a>
|
||||
</li>
|
||||
<li id="nav_Config">
|
||||
<a href="javascript:void(0)"><i class="fa fa-gears"></i> <span class="nav-label">Config</span></a>
|
||||
</li>
|
||||
|
||||
<li id="nav_Status">
|
||||
<a href="javascript:void(0)"><i class="fa fa-bar-chart"></i> <span class="nav-label">Status</span></a>
|
||||
</li>
|
||||
|
||||
|
||||
{% if mysession['usergroup'] == 'root' or mysession['usergroup'] == 'admin'%}
|
||||
<li class="header">ADMIN OPERATIONS</li>
|
||||
<li id="nav_Hosts">
|
||||
<i class="fa fa-sitemap"></i> <span class="nav-label">Hosts</span>
|
||||
</li>
|
||||
<li id="user_List">
|
||||
<i class="fa fa-users"></i> <span class="nav-label">Users</span>
|
||||
</li>
|
||||
<li id="admin">
|
||||
<i class="fa fa-gears"></i> <span class="nav-label">Admin</span>
|
||||
</li>
|
||||
{% endif %}
|
||||
|
||||
</ul>
|
||||
<!-- /.sidebar-menu -->
|
||||
</section>
|
||||
<!-- /.sidebar -->
|
||||
</aside>
|
||||
|
||||
<!-- Content Wrapper. Contains page content -->
|
||||
<div class="content-wrapper">
|
||||
<!-- Content Header (Page header) -->
|
||||
<section class="content-header">
|
||||
<h1>
|
||||
<strong>Dashboard</strong>
|
||||
</h1>
|
||||
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<i class="fa fa-dashboard"></i>Home
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>Dashboard</strong>
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
</section>
|
||||
<!-- Main content -->
|
||||
<section class="content">
|
||||
|
||||
|
||||
<div class="row">
|
||||
<div class="col-lg-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Workspaces</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<p>
|
||||
<button type="button" class="btn btn-primary btn-sm"><i class="fa fa-plus"></i> Add Workspace</button>
|
||||
</p>
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ID</th>
|
||||
<th>Name</th>
|
||||
<th>Status</th>
|
||||
<th>Operation</th>
|
||||
<th>WorkSpace</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>1</td>
|
||||
<td>guest-1-0</td>
|
||||
<td><div class="text-success"><i class="fa fa-play"></i> Running</div></td>
|
||||
<td>
|
||||
<button type="button" class="btn btn-xs btn-warning"> Stop </button>
|
||||
<button type="button" class="btn btn-xs btn-default"> Delete </button>
|
||||
</td>
|
||||
<td>
|
||||
<a href="/go/guest/guestspace" target="_blank"><button type="button" class="btn btn-xs btn-success"> Go </button></a>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</section>
|
||||
<!-- /.content -->
|
||||
</div>
|
||||
<!-- /.content-wrapper -->
|
||||
|
||||
<!-- Main Footer -->
|
||||
<footer class="main-footer">
|
||||
<!-- To the right -->
|
||||
<div class="pull-right hidden-xs">
|
||||
<i>Docklet 0.2.6</i>
|
||||
</div>
|
||||
<!-- Default to the left -->
|
||||
<strong>Copyright</strong>© 2016 <a href="http://docklet.unias.org">UniAS</a>@<a href="http://www.sei.pku.edu.cn"> SEI, PKU</a>
|
||||
|
||||
</footer>
|
||||
|
||||
</div>
|
||||
<!-- ./wrapper -->
|
||||
|
||||
<!-- REQUIRED JS SCRIPTS -->
|
||||
|
||||
<!-- jQuery 2.2.1 -->
|
||||
<script src="//cdn.bootcss.com/jquery/2.2.1/jquery.min.js"></script>
|
||||
<!-- Bootstrap 3.3.5 -->
|
||||
<script src="//cdn.bootcss.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
<!-- AdminLTE App -->
|
||||
<script src="/static/dist/js/app.min.js"></script>
|
||||
|
||||
<script src="//cdn.bootcss.com/fastclick/1.0.6/fastclick.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/jQuery-slimScroll/1.3.7/jquery.slimscroll.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/toastr.js/latest/js/toastr.min.js"></script>
|
||||
|
||||
|
||||
|
||||
<script type="text/javascript">
|
||||
var pathname = window.location.pathname;
|
||||
pathname = pathname.split(/\//);
|
||||
if(pathname[1] != 'dashboard')
|
||||
$("#nav_Dashboard").removeClass("active");
|
||||
if(pathname[1] == 'vclusters')
|
||||
$("#nav_Status").addClass("active");
|
||||
else if(pathname[1] == 'hosts')
|
||||
$("#nav_Hosts").addClass("active");
|
||||
else if(pathname[1] == 'config')
|
||||
$("#nav_Config").addClass("active");
|
||||
else if(pathname[1] == 'user')
|
||||
{
|
||||
if (pathname[2] == 'list')
|
||||
$("#user_List").addClass("active");
|
||||
}
|
||||
|
||||
</script>
|
||||
|
||||
|
||||
{% if mysession['status'] == 'init' %}
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function() {
|
||||
toastr.options = {
|
||||
"closeButton": false,
|
||||
"debug": true,
|
||||
"progressBar": false,
|
||||
"preventDuplicates": false,
|
||||
"positionClass": "toast-top-left",
|
||||
"onclick": function(){
|
||||
window.location.href="/activate/";
|
||||
},
|
||||
"showDuration": "0",
|
||||
"hideDuration": "0",
|
||||
"timeOut": "0",
|
||||
"extendedTimeOut": "0",
|
||||
"showEasing": "swing",
|
||||
"hideEasing": "linear",
|
||||
"showMethod": "fadeIn",
|
||||
"hideMethod": "fadeOut"
|
||||
};
|
||||
toastr.error("You are not activated. Click this notification to activate your account.");
|
||||
});
|
||||
</script>
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% if mysession['status'] == 'applying' %}
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function() {
|
||||
toastr.options = {
|
||||
"closeButton": false,
|
||||
"debug": true,
|
||||
"progressBar": false,
|
||||
"preventDuplicates": false,
|
||||
"positionClass": "toast-top-left",
|
||||
"onclick": function(){
|
||||
},
|
||||
"showDuration": "0",
|
||||
"hideDuration": "0",
|
||||
"timeOut": "0",
|
||||
"extendedTimeOut": "0",
|
||||
"showEasing": "swing",
|
||||
"hideEasing": "linear",
|
||||
"showMethod": "fadeIn",
|
||||
"hideMethod": "fadeOut"
|
||||
};
|
||||
toastr.warning("You applying is being checked.");
|
||||
});
|
||||
</script>
|
||||
|
||||
{% endif %}
|
||||
|
||||
|
||||
</body>
|
||||
|
||||
</html>
|
|
@ -0,0 +1,10 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
{% block title %}Docklet | Error{% endblock %}
|
||||
|
||||
{% block panel_title %}Error{% endblock %}
|
||||
|
||||
{% block panel_list %}{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<pre>{{message}}</pre>
|
||||
{% endblock %}
|
|
@ -0,0 +1,30 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
|
||||
|
||||
{% block title %}Docklet | Error{% endblock %}
|
||||
|
||||
{% block panel_title %}401 Error Page{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
|
||||
<div class="error-page">
|
||||
<h2 class="headline text-red">401</h2>
|
||||
|
||||
<div class="error-content">
|
||||
<h3><br/><i class="fa fa-warning text-red"></i> Unauthorized Action</h3>
|
||||
|
||||
<p>
|
||||
Sorry, but you do not have authorization for that action. You can go back to the
|
||||
<a href="/dashboard/">dashboard</a> or <a href="/logout">log out</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
|
@ -0,0 +1,30 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
|
||||
|
||||
{% block title %}Docklet | Error{% endblock %}
|
||||
|
||||
{% block panel_title %}500 Error Page{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
|
||||
<div class="error-page">
|
||||
<h2 class="headline text-red">500</h2>
|
||||
|
||||
<div class="error-content">
|
||||
<h3><br/><i class="fa fa-warning text-red"></i> Internal Server Error</h3>
|
||||
|
||||
<p>
|
||||
The server encountered something unexpected and could not complete the request. We apologize. You can go back to the
|
||||
<a href="/dashboard/">dashboard</a> or <a href="/logout">log out</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
|
@ -0,0 +1,113 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
|
||||
<head>
|
||||
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
|
||||
<title>Docklet | Home</title>
|
||||
<link rel="shortcut icon" href="/static/img/favicon.ico">
|
||||
|
||||
<link href="http://cdn.bootcss.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet">
|
||||
<link href="http://cdn.bootcss.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet">
|
||||
<link href="http://cdn.bootcss.com/animate.css/2.0/animate.min.css" rel="stylesheet">
|
||||
<link href="/static/css/docklet.css" rel="stylesheet">
|
||||
<style type="text/css">
|
||||
h1 { font-weight: 400 }
|
||||
h2 { font-weight: 300 }
|
||||
.underline { border-bottom : 1px solid #FFFFFF }
|
||||
a:link {text-decoration: none }
|
||||
</style>
|
||||
|
||||
</head>
|
||||
|
||||
<body style="background-color:#FFFFFF">
|
||||
|
||||
<div class="navbar navbar-fixed-top" role="navigation" style="border:none !important; background: #F3F3F3; opacity:0.9">
|
||||
<div class="container">
|
||||
<div class="row" style="padding-top:8px; padding-bottom:5px;">
|
||||
<div class="col-lg-2 col-md-2 col-sm-4 col-xs-4">
|
||||
<img src="/static/img/logoname.png" style="width:100%">
|
||||
</div>
|
||||
|
||||
<div class="col-lg-1 col-lg-offset-8 col-md-1 col-md-offset-8 col-sm-2 col-sm-offset-4 col-xs-4 col-xs-offset-0" style="padding-left:0px">
|
||||
<a href="/login/"><button type="button" class="btn btn-outline-success btn-block">Login</button></a>
|
||||
</div>
|
||||
<div class="col-lg-1 col-md-1 col-sm-2 col-xs-4" style="padding-left:0px">
|
||||
<a href="/dashboard_guest/"><button type="button" class="btn btn-outline-success btn-block">Try</button></a>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<!-- <div class="container" style="width:100%"> -->
|
||||
<div class="container">
|
||||
<div class="row" style="margin-top: 100px; margin-bottom:50px; margin-left:0px; margin-right:0px; background: #FFFFFF">
|
||||
<img src="/static/img/home.png" style="width:100%">
|
||||
</div>
|
||||
|
||||
<div class="row docklet-red-block" style="padding-top:100px; padding-bottom:100px">
|
||||
<div class="col-lg-3 col-lg-offset-1 col-md-3 col-md-offset-1 col-sm-4 col-sm-offset-0 col-xs-8 col-xs-offset-2">
|
||||
<img src="/static/img/workspace.png" style="width:100%">
|
||||
</div>
|
||||
<div class="col-lg-6 col-lg-offset-1 col-md-6 col-md-offset-1 col-sm-8 col-sm-offset-0 col-xs-12 col-xs-offset-0">
|
||||
<h1>Workspace = Cluster+Service+Data</h1>
|
||||
<h2>Package services and data on top of a virtual cluster as the virtual compute environment for your work. This is your Workspace!</h2>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row docklet-green-block" style="padding-top:100px; padding-bottom:100px">
|
||||
<div class="col-lg-6 col-lg-offset-1 col-md-6 col-md-offset-1 col-sm-8 col-sm-offset-0 col-xs-12 col-xs-offset-0">
|
||||
<h1>Click and Go</h1>
|
||||
<h2>Distributed or single node? Never mind!
|
||||
Click it just like starting an app on your smartphone, and your workspace is
|
||||
ready for you.</h2>
|
||||
</div>
|
||||
<div class="col-lg-3 col-lg-offset-1 col-md-3 col-md-offset-1 col-sm-4 col-sm-offset-0 col-xs-8 col-xs-offset-2">
|
||||
<img src="/static/img/app.png" style="width:100%">
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="row docklet-yellow-block" style="padding-top:100px; padding-bottom:100px">
|
||||
<div class="col-lg-3 col-lg-offset-1 col-md-3 col-md-offset-1 col-sm-4 col-sm-offset-0 col-xs-8 col-xs-offset-2">
|
||||
<img src="/static/img/web.png" style="width:100%">
|
||||
</div>
|
||||
<div class="col-lg-6 col-lg-offset-1 col-md-6 col-md-offset-1 col-sm-8 col-sm-offset-0 col-xs-12 col-xs-offset-0">
|
||||
<h1>All in Web</h1>
|
||||
<h2>All you need is a web browser.
|
||||
Compute on the web, code on the web, plot on the web, anything on the web!
|
||||
You can get to work anytime, anywhere over the Internet.</h2>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="row docklet-blue-block" style="padding-top:100px; padding-bottom:100px">
|
||||
<div class="col-lg-8 col-lg-offset-2 col-md-10 col-md-offset-1 col-sm-10 col-sm-offset-1 col-xs-12 col-xs-offset-0">
|
||||
<h2>Now,   jupyter / python3 / matplotlib / sklearn / scipy / numpy / pandas / latex are ready for you</h2>
|
||||
<h2>And,   more workspaces are coming for your <span class="underline">data processing</span> / <span class="underline">data mining</span> / <span class="underline">machine learning</span> work</h2>
|
||||
<br/>
|
||||
<a href="/login/"><button type="button" class="btn btn-lg btn-outline-warning">Get to Started</button></a>
|
||||
<span>   </span>
|
||||
<a href="/dashboard_guest/"><button type="button" class="btn btn-lg btn-outline-warning">Have a Try</button></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
<div class="row">
|
||||
<p class="m-t"> <small>Copyright© 2016 <a href="http://docklet.unias.org">UniAS</a>@<a href="http://www.sei.pku.edu.cn"> SEI, PKU</a></small> </p>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Mainly scripts -->
|
||||
<script src="http://cdn.bootcss.com/jquery/2.2.1/jquery.min.js"></script>
|
||||
<script src="http://cdn.bootcss.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
|
||||
</body>
|
||||
|
||||
</html>
|
|
@ -0,0 +1,10 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
{% block title %}Docklet | Description{% endblock %}
|
||||
|
||||
{% block panel_title %}Description{% endblock %}
|
||||
|
||||
{% block panel_list %}{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<pre>{{description}}</pre>
|
||||
{% endblock %}
|
|
@ -0,0 +1,102 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Container{% endblock %}
|
||||
|
||||
{% block panel_title %}ContainerInfo{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/index/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>ContainerInfo</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Cluster Name: {{ clustername }}</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<p>
|
||||
<a href="/cluster/scaleout/{{ clustername }}"><button type="button" class="btn btn-primary btn-sm"><i class="fa fa-plus"></i>Add Container</button></a>
|
||||
</p>
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Node ID</th>
|
||||
<th>Node Name</th>
|
||||
<th>IP Address</th>
|
||||
<th>Status</th>
|
||||
<th>Last Save</th>
|
||||
<th>Image</th>
|
||||
<th>Detail</th>
|
||||
<th>Flush</th>
|
||||
<th>Save</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for container in containers %}
|
||||
<tr>
|
||||
<td>{{ loop.index }}</td>
|
||||
<td>{{ container['containername'] }}</td>
|
||||
<td>{{ container['ip'] }}</td>
|
||||
|
||||
{% if status == 'stopped' %}
|
||||
<td><div class="label label-danger">Stopped</div></td>
|
||||
{% else %}
|
||||
<td><div class="label label-primary">Running</div></td>
|
||||
{% endif %}
|
||||
|
||||
<td>{{ container['lastsave'] }}</td>
|
||||
<td>{{ container['image'] }}</td>
|
||||
<td><a class="btn btn-info" href='/monitor/Node/{{ container['containername'] }}/detail/'>Detail</a></td>
|
||||
<td><a class="btn btn-warning" href="/cluster/flush/{{ clustername }}/{{ container['containername'] }}/">Flush</a></td>
|
||||
<td><button type="button" class="btn btn-success btn-sm btn-block" data-toggle="modal" data-target="#DelModal_{{ container['containername'] }}"> save</button></td>
|
||||
<div class="modal inmodal" id="DelModal_{{ container['containername'] }}" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-trash modal-icon"></i>
|
||||
<h4 class="modal-title">Save Image</h4>
|
||||
<small class="font-bold">Save Your Environment As a Image</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<div class="form-group">
|
||||
<form action="/cluster/save/{{ clustername }}/{{ container['containername'] }}/" method="GET" id="saveImage">
|
||||
<label>Image Name</label>
|
||||
<input type="text" placeholder="Enter image name" class="form-control" name="ImageName" id="ImageName"/>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="submit" class="btn btn-success">Save</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
|
@ -0,0 +1,71 @@
|
|||
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<title>Docklet | Login</title>
|
||||
<!-- Tell the browser to be responsive to screen width -->
|
||||
<meta content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" name="viewport">
|
||||
|
||||
<!-- Bootstrap 3.3.5 -->
|
||||
<link href="//cdn.bootcss.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet">
|
||||
<!-- Font Awesome -->
|
||||
<link href="//cdn.bootcss.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet">
|
||||
<!-- Ionicons -->
|
||||
<link href="//cdn.bootcss.com/ionicons/2.0.1/css/ionicons.min.css" rel="stylesheet">
|
||||
<!-- Theme style -->
|
||||
<link rel="stylesheet" href="/static/dist/css/AdminLTE.min.css">
|
||||
|
||||
<link rel="shortcut icon" href="/static/img/favicon.ico">
|
||||
|
||||
|
||||
</head>
|
||||
<body class="hold-transition login-page">
|
||||
<div class="login-box">
|
||||
<div class="login-logo">
|
||||
<img src="/static/img/logo.png" class="logo-name" height="50%" width="50%">
|
||||
<!--a href="/"><b>Docklet</b></a-->
|
||||
</div>
|
||||
<!-- /.login-logo -->
|
||||
<div class="login-box-body">
|
||||
<p class="login-box-msg">An easy and quick way to launch your DISTRIBUTED applications!</p>
|
||||
|
||||
<form action="" method="POST">
|
||||
<div class="form-group has-feedback">
|
||||
<input type="text" class="form-control" placeholder="Username" name="username">
|
||||
<span class="glyphicon glyphicon-user form-control-feedback"></span>
|
||||
</div>
|
||||
<div class="form-group has-feedback">
|
||||
<input type="password" class="form-control" placeholder="Password" name="password">
|
||||
<span class="glyphicon glyphicon-lock form-control-feedback"></span>
|
||||
</div>
|
||||
<div class="row">
|
||||
|
||||
<div class="col-xs-12">
|
||||
<button type="submit" class="btn btn-primary btn-block btn-flat">Sign In</button>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
<div class="social-auth-links text-center">
|
||||
<!--p>- OR -</p>
|
||||
<a href="#" class="btn btn-block btn-social btn-facebook btn-flat"><i class="fa fa-facebook"></i> Sign in using
|
||||
Facebook</a>
|
||||
<a href="#" class="btn btn-block btn-social btn-google btn-flat"><i class="fa fa-google-plus"></i> Sign in using
|
||||
Google+</a-->
|
||||
{{ link }}
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<!-- /.login-box-body -->
|
||||
</div>
|
||||
<!-- /.login-box -->
|
||||
|
||||
<!-- jQuery 2.2.1 -->
|
||||
<script src="//cdn.bootcss.com/jquery/2.2.1/jquery.min.js"></script>
|
||||
<!-- Bootstrap 3.3.5 -->
|
||||
<script src="//cdn.bootcss.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,145 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Hosts{% endblock %}
|
||||
|
||||
{% block panel_title %}Hosts Info{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>HostsInfo</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">All Hosts Info</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>NO</th>
|
||||
<th>IP Address</th>
|
||||
<th>Status</th>
|
||||
<th>Nodes running</th>
|
||||
<th>CPU used</th>
|
||||
<th>Mem used</th>
|
||||
<th>Disk used</th>
|
||||
<th>Summary</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for phym in machines %}
|
||||
<tr>
|
||||
<td>{{ loop.index }}</td>
|
||||
<td>{{ phym['ip'] }}</td>
|
||||
{% if phym['status'] == 'STOPPED' %}
|
||||
<td><div id='{{ loop.index }}_status' class="label label-danger">Stopped</div></td>
|
||||
{% else %}
|
||||
<td><div id='{{ loop.index }}_status' class="label label-primary">Running</div></td>
|
||||
{% endif %}
|
||||
<td>
|
||||
<label id='{{ loop.index }}_conrunning'>{{ phym['containers']['running'] }}</label> /
|
||||
<a href='/hosts/{{ phym['ip'] }}/containers/' id='{{ loop.index }}_contotal' >{{ phym['containers']['total'] }}</a>
|
||||
</td>
|
||||
<td id='{{ loop.index }}_cpu'>--</td>
|
||||
<td id='{{ loop.index }}_mem'>--</td>
|
||||
<td id='{{ loop.index }}_disk'>--</td>
|
||||
<td><a class="btn btn-info btn-xs" href='/hosts/{{ phym['ip'] }}/'>Realtime</a></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<script type='text/javascript'>
|
||||
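// Poll this host's monitor API (status, containers, cpuinfo, meminfo, diskinfo) and refresh the matching table row.
|
||||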
function update(url,index)
|
||||
{
|
||||
|
||||
var MB = 1024;
|
||||
$.post(url+"/status",{},function(data){
|
||||
var status = data.monitor.status;
|
||||
if(status == 'RUNNING')
|
||||
{
|
||||
var tmp = $("#"+index+"_status");
|
||||
tmp.removeClass();
|
||||
tmp.addClass("label label-primary");
|
||||
tmp.html("Running");
|
||||
}
|
||||
else if(status == 'STOPPED')
|
||||
{
|
||||
var tmp = $("#"+index+"_status");
|
||||
tmp.removeClass();
|
||||
tmp.addClass("label label-danger");
|
||||
tmp.html("Stopped");
|
||||
}
|
||||
|
||||
$.post(url+"/containers",{},function(data){
|
||||
var containers = data.monitor.containers;
|
||||
$("#"+index+"_contotal").html(containers.total);
|
||||
$("#"+index+"_conrunning").html(containers.running);
|
||||
},"json");
|
||||
|
||||
if(status == 'STOPPED')
|
||||
{
|
||||
$("#"+index+"_cpu").html('--');
|
||||
$("#"+index+"_mem").html('--');
|
||||
$("#"+index+"_disk").html('--');
|
||||
return;
|
||||
}
|
||||
|
||||
$.post(url+"/cpuinfo",{},function(data){
|
||||
var idle = data.monitor.cpuinfo.idle;
|
||||
var usedp = (100 - idle).toFixed(2);
|
||||
$("#"+index+"_cpu").html(String(usedp)+"%");
|
||||
},"json");
|
||||
|
||||
$.post(url+"/meminfo",{},function(data){
|
||||
var used = data.monitor.meminfo.used;
|
||||
var total = data.monitor.meminfo.total;
|
||||
var usedp = String(((used/total)*100).toFixed(2))+"%";
|
||||
$("#"+index+"_mem").html(usedp);
|
||||
},"json");
|
||||
|
||||
$.post(url+"/diskinfo",{},function(data){
|
||||
var val = data.monitor.diskinfo;
|
||||
var usedp = val[0].percent;
|
||||
$("#"+index+"_disk").html(String(usedp)+"%");
|
||||
},"json");
|
||||
},"json");
|
||||
}
|
||||
|
||||
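// Refresh every host row rendered by the Jinja loop; the setInterval call below re-runs this every 5 seconds.
|
||||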
function updateAll()
|
||||
{
|
||||
var host = window.location.host;
|
||||
var url0 = "http://" + host + "/monitor/hosts/";
|
||||
{% for phym in machines %}
|
||||
url = url0 + '{{ phym['ip'] }}';
|
||||
update(url,'{{ loop.index }}');
|
||||
{% endfor %}
|
||||
}
|
||||
setInterval(updateAll,5000);
|
||||
|
||||
</script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,136 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Hosts{% endblock %}
|
||||
|
||||
{% block panel_title %}Node list for {{ com_ip }}{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href='/hosts/'>Hosts</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>Node List</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Total Nodes</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>NO</th>
|
||||
<th>Name</th>
|
||||
<th>State</th>
|
||||
<th>PID</th>
|
||||
<th>IP Address</th>
|
||||
<th>CPU used</th>
|
||||
<th>Mem used</th>
|
||||
<th>Summary</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for container in containerslist %}
|
||||
<tr>
|
||||
<td>{{ loop.index }}</td>
|
||||
<td>{{ container['Name'] }}</td>
|
||||
{% if container['State'] == 'STOPPED' %}
|
||||
<td><div id='{{ loop.index }}_state' class="label label-danger">Stopped</div></td>
|
||||
<td id='{{ loop.index }}_pid'>--</td>
|
||||
<td id='{{ loop.index }}_ip'>--</td>
|
||||
{% else %}
|
||||
<td><div id='{{ loop.index }}_state' class="label label-primary">Running</div></td>
|
||||
<td id='{{ loop.index }}_pid'>{{ container['PID'] }}</td>
|
||||
<td id='{{ loop.index }}_ip'>{{ container['IP'] }}</td>
|
||||
{% endif %}
|
||||
<td id='{{ loop.index }}_cpu'>--</td>
|
||||
<td id='{{ loop.index }}_mem'>--</td>
|
||||
<td><a class="btn btn-info btn-xs"
|
||||
href='/vclusters/root/{{
|
||||
container['Name'] }}/'>Realtime</a></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<script type='text/javascript'>
|
||||
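// Poll /monitor/vnodes/<name>/basic_info, then cpu_use and mem_use, and update this container's row.
|
||||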
function update(url,index)
|
||||
{
|
||||
|
||||
$.post(url+"/basic_info",{},function(data){
|
||||
var state = data.monitor.basic_info.State;
|
||||
if(state == 'RUNNING')
|
||||
{
|
||||
var tmp = $("#"+index+"_state");
|
||||
tmp.removeClass();
|
||||
tmp.addClass("label label-primary");
|
||||
tmp.html("Running");
|
||||
$("#"+index+"_pid").html(data.monitor.basic_info.PID);
|
||||
$("#"+index+"_ip").html(data.monitor.basic_info.IP);
|
||||
}
|
||||
else if(state == 'STOPPED')
|
||||
{
|
||||
var tmp = $("#"+index+"_state");
|
||||
tmp.removeClass();
|
||||
tmp.addClass("label label-danger");
|
||||
tmp.html("Stopped");
|
||||
$("#"+index+"_pid").html('--');
|
||||
$("#"+index+"_ip").html('--');
|
||||
$("#"+index+"_cpu").html('--');
|
||||
$("#"+index+"_mem").html('--');
|
||||
return;
|
||||
}
|
||||
|
||||
$.post(url+"/cpu_use",{},function(data){
|
||||
var val = data.monitor.cpu_use.val;
|
||||
var unit = data.monitor.cpu_use.unit;
|
||||
$("#"+index+"_cpu").html(val +" "+ unit);
|
||||
},"json");
|
||||
|
||||
$.post(url+"/mem_use",{},function(data){
|
||||
var val = data.monitor.mem_use.val;
|
||||
var unit = data.monitor.mem_use.unit;
|
||||
$("#"+index+"_mem").html(val+" "+unit);
|
||||
},"json");
|
||||
|
||||
},"json");
|
||||
}
|
||||
|
||||
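// Build one monitor URL per container rendered by Jinja and refresh all rows; re-run every 5 seconds via setInterval.
|
||||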
function updateAll()
|
||||
{
|
||||
var host = window.location.host;
|
||||
var url0 = "http://" + host + "/monitor/vnodes/";
|
||||
|
||||
{% for container in containerslist %}
|
||||
url = url0 + '{{ container['Name'] }}';
|
||||
update(url,'{{ loop.index }}');
|
||||
{% endfor %}
|
||||
}
|
||||
setInterval(updateAll,5000);
|
||||
|
||||
</script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,289 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Hosts{% endblock %}
|
||||
|
||||
{% block panel_title %}Summary for <div id='com_ip'>{{ com_ip }}</div>{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href='/hosts/'>Hosts</a>
|
||||
</li>
|
||||
<li class='active'>
|
||||
<strong>Summary</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
<link href="/static/dist/css/flotconfig.css" rel="stylesheet">
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">CPU info</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Processor ID</th>
|
||||
<th>Model name</th>
|
||||
<th>physical id</th>
|
||||
<th>core id</th>
|
||||
<th>cpu MHz</th>
|
||||
<th>cache size</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for processor in processors %}
|
||||
<tr>
|
||||
<th>{{ processor['processor'] }}</th>
|
||||
<td>{{ processor['model name']}}</td>
|
||||
<td>{{ processor['physical id']}}</td>
|
||||
<td>{{ processor['core id']}}</td>
|
||||
<td>{{ processor['cpu MHz']}}</td>
|
||||
<td>{{ processor['cache size']}}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">OS info</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th>OS name</th>
|
||||
<td>{{ OSinfo['platform']}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>OS node name</th>
|
||||
<td>{{ OSinfo['node']}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>OS kernel release</th>
|
||||
<td>{{ OSinfo['release']}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>OS kernel version</th>
|
||||
<td>{{ OSinfo['version']}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>OS machine architecture</th>
|
||||
<td>{{ OSinfo['machine']}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Cpu and Memory Status</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th colspan='4'>CPU (%)</th>
|
||||
<th colspan='3'>Memory (MB)</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>user</th>
|
||||
<th>system</th>
|
||||
<th>iowait</th>
|
||||
<th>idle</th>
|
||||
<th>used</th>
|
||||
<th>free</th>
|
||||
<th>total</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td id='cpu_user'>--</td>
|
||||
<td id='cpu_system'>--</td>
|
||||
<td id='cpu_iowait'>--</td>
|
||||
<td id='cpu_idle'>--</td>
|
||||
<td id='mem_used'>--</td>
|
||||
<td id='mem_free'>--</td>
|
||||
<td id='mem_total'>--</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Disk Status</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th colspan='5'>Disk info</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>device</th>
|
||||
<th>used(MB)</th>
|
||||
<th>free(MB)</th>
|
||||
<th>total(MB)</th>
|
||||
<th>used percent(%)</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for diskinfo in diskinfos %}
|
||||
<tr>
|
||||
<td id='disk_{{ loop.index }}_device'>--</td>
|
||||
<td id='disk_{{ loop.index }}_used'>--</td>
|
||||
<td id='disk_{{ loop.index }}_free'>--</td>
|
||||
<td id='disk_{{ loop.index }}_total'>--</td>
|
||||
<td id='disk_{{ loop.index }}_usedp'>--</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-lg-6">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Memory Used(%):</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<div class="flot-chart">
|
||||
<div class="flot-chart-content" id="mem-chart"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-lg-6">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">CPU Used(%):</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<div class="flot-chart">
|
||||
<div class="flot-chart-content" id="cpu-chart"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<!--<div class="row">
|
||||
<div class="col-lg-6">
|
||||
<div class="ibox float-e-margins">
|
||||
<div class="ibox-title">
|
||||
<h5>Disk Used(%):</h5>
|
||||
<div class="ibox-tools">
|
||||
<a class="collapse-link">
|
||||
<i class="fa fa-chevron-up"></i>
|
||||
</a>
|
||||
<a class="dropdown-toggle" data-toggle="dropdown" href="#">
|
||||
<i class="fa fa-wrench"></i>
|
||||
</a>
|
||||
<ul class="dropdown-menu dropdown-user">
|
||||
<li><a href="#">Config option 1</a>
|
||||
</li>
|
||||
<li><a href="#">Config option 2</a>
|
||||
</li>
|
||||
</ul>
|
||||
<a class="close-link">
|
||||
<i class="fa fa-times"></i>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ibox-content">
|
||||
|
||||
<div class="flot-chart">
|
||||
<div class="flot-chart-content" id="disk-chart"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>-->
|
||||
|
||||
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<!-- Flot -->
|
||||
<script src="//cdn.bootcss.com/flot.tooltip/0.8.6/jquery.flot.tooltip.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.resize.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.pie.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.time.min.js"></script>
|
||||
|
||||
<!-- Real-time monitor plots (Flot) -->
|
||||
<script src="/static/js/plot_monitorReal.js"></script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,65 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | MonitorUser{% endblock %}
|
||||
|
||||
{% block panel_title %}Users Info{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/index/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>UsersInfo</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">All Users Info</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>NO</th>
|
||||
<th>Name</th>
|
||||
<th>Running/Total Clusters</th>
|
||||
<th>Running/Total Containers</th>
|
||||
<th>Register Time</th>
|
||||
<th>Last Login</th>
|
||||
<th>Frequency</th>
|
||||
<th>Detail</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for user in userslist %}
|
||||
<tr>
|
||||
<td>{{ loop.index }}</td>
|
||||
<td>{{ user['name'] }}</td>
|
||||
<td>{{ user['clustercnt']['clurun'] }}/{{ user['clustercnt']['clutotal'] }}</td>
|
||||
<td>{{ user['clustercnt']['conrun'] }}/{{ user['clustercnt']['contotal'] }}</td>
|
||||
<td>--</td>
|
||||
<td>--</td>
|
||||
<td>--</td>
|
||||
<td><a class="btn btn-info" href='/monitor/User/{{ user['name'] }}/clusters/'>Clusters</a></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
|
@ -0,0 +1,74 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Monitor{% endblock %}
|
||||
|
||||
{% block panel_title %}NodeInfo for {{ muser }}{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href='/monitor/User/'>UsersInfo</a>
|
||||
</li>
|
||||
<li class='active'>
|
||||
<strong>Clusters</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
{% for cluster in clusters %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Cluster Name: {{ cluster }}</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Node ID</th>
|
||||
<th>Node Name</th>
|
||||
<th>IP Address</th>
|
||||
<th>Status</th>
|
||||
<th>Create Time</th>
|
||||
<th>detail</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for container in containers[cluster]['containers'] %}
|
||||
<tr>
|
||||
<td>{{ loop.index }}</td>
|
||||
<td>{{ container['containername'] }}</td>
|
||||
<td>{{ container['ip'] }}</td>
|
||||
|
||||
{% if containers[cluster]['status'] == 'stopped' %}
|
||||
<td><div class="label label-danger">Stopped</div></td>
|
||||
{% else %}
|
||||
<td><div class="label label-primary">Running</div></td>
|
||||
{% endif %}
|
||||
|
||||
<td>xxxxx</td>
|
||||
<td><a class="btn btn-info" href='/monitor/Node/{{ container['containername'] }}/detail/'>Detail</a></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endfor %}
|
||||
{% endblock %}
|
|
@ -0,0 +1,136 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Status{% endblock %}
|
||||
|
||||
{% block panel_title %}Workspace VCluster Status{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>VClusterStatus</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
{% for cluster in clusters %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">VCluster Name: {{ cluster }}</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body table-responsive">
|
||||
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Node ID</th>
|
||||
<th>Node Name</th>
|
||||
<th>IP Address</th>
|
||||
<th>Status</th>
|
||||
<th>CPU used</th>
|
||||
<th>Mem used</th>
|
||||
<th>Summary</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for container in containers[cluster]['containers'] %}
|
||||
<tr>
|
||||
<td>{{ loop.index }}</td>
|
||||
<td>{{ container['containername'] }}</td>
|
||||
<td>{{ container['ip'] }}</td>
|
||||
|
||||
{% if containers[cluster]['status'] == 'stopped' %}
|
||||
<td><div id='{{cluster}}_{{ loop.index }}_state' class="label label-danger">Stopped</div></td>
|
||||
{% else %}
|
||||
<td><div id='{{cluster}}_{{ loop.index }}_state' class="label label-primary">Running</div></td>
|
||||
{% endif %}
|
||||
<td id='{{cluster}}_{{ loop.index }}_cpu'>--</td>
|
||||
<td id='{{cluster}}_{{ loop.index }}_mem'>--</td>
|
||||
|
||||
<td><a class="btn btn-info btn-xs" href='/vclusters/{{ cluster }}/{{ container['containername'] }}/'>Realtime</a></td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endfor %}
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<script type='text/javascript'>
|
||||
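// Poll a container's basic_info, cpu_use and mem_use and update its row; usedp is a fraction and is shown as a percentage.
|
||||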
function update(url,index)
|
||||
{
|
||||
|
||||
$.post(url+"/basic_info",{},function(data){
|
||||
var state = data.monitor.basic_info.State;
|
||||
if(state == 'RUNNING')
|
||||
{
|
||||
var tmp = $("#"+index+"_state");
|
||||
tmp.removeClass();
|
||||
tmp.addClass("label label-primary");
|
||||
tmp.html("Running");
|
||||
$("#"+index+"_pid").html(data.monitor.basic_info.PID);
|
||||
$("#"+index+"_ip").html(data.monitor.basic_info.IP);
|
||||
}
|
||||
else if(state == 'STOPPED')
|
||||
{
|
||||
var tmp = $("#"+index+"_state");
|
||||
tmp.removeClass();
|
||||
tmp.addClass("label label-danger");
|
||||
tmp.html("Stopped");
|
||||
$("#"+index+"_pid").html('--');
|
||||
$("#"+index+"_ip").html('--');
|
||||
$("#"+index+"_cpu").html('--');
|
||||
$("#"+index+"_mem").html('--');
|
||||
return;
|
||||
}
|
||||
|
||||
$.post(url+"/cpu_use",{},function(data){
|
||||
var usedp = data.monitor.cpu_use.usedp;
|
||||
$("#"+index+"_cpu").html((usedp/0.01).toFixed(2)+"%");
|
||||
},"json");
|
||||
|
||||
$.post(url+"/mem_use",{},function(data){
|
||||
var usedp = data.monitor.mem_use.usedp;
|
||||
var unit = data.monitor.mem_use.unit;
|
||||
var quota = data.monitor.mem_use.quota;
|
||||
var val = data.monitor.mem_use.val;
|
||||
var out = "("+val+unit+"/"+quota+unit+")";
|
||||
$("#"+index+"_mem").html((usedp/0.01).toFixed(2)+"%<br/>"+out);
|
||||
},"json");
|
||||
|
||||
},"json");
|
||||
}
|
||||
|
||||
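// Refresh every container of every vcluster rendered above; the setInterval call below repeats this every 5 seconds.
|
||||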
function updateAll()
|
||||
{
|
||||
var host = window.location.host;
|
||||
var url0 = "http://" + host + "/monitor/vnodes/";
|
||||
|
||||
{% for cluster in clusters %}
|
||||
{% for container in containers[cluster]['containers'] %}
|
||||
url = url0 + '{{ container['containername'] }}';
|
||||
update(url,'{{cluster}}_{{ loop.index }}');
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
}
|
||||
setInterval(updateAll,5000);
|
||||
|
||||
</script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,122 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Node Summary{% endblock %}
|
||||
|
||||
{% block panel_title %}Summary for <div id='node_name'>{{ node_name }}</div>{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href='/vclusters/'>VClusterStatus</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>Summary</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
<link href="/static/dist/css/flotconfig.css" rel="stylesheet">
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Current Status</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ibox-body table-responsive">
|
||||
<table class="table table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Name</th>
|
||||
<th>State</th>
|
||||
<th>IP Address</th>
|
||||
<th>CPU Use</th>
|
||||
<th>Mem Use</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>{{ container['Name'] }}</td>
|
||||
{% if container['State'] == 'STOPPED' %}
|
||||
<td><div id='con_state' class="label label-danger">Stopped</div></td>
|
||||
<td id='con_ip'>--</td>
|
||||
{% else %}
|
||||
<td><div id='con_state' class="label label-primary">Running</div></td>
|
||||
<td id='con_ip'>{{ container['IP'] }}</td>
|
||||
{% endif %}
|
||||
<td id='con_cpu'>--</td>
|
||||
<td id='con_mem'>--</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-lg-6">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">Memory Used(%):</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<div class="flot-chart">
|
||||
<div class="flot-chart-content" id="mem-chart"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-lg-6">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">CPU Used(%):</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
|
||||
<div class="flot-chart">
|
||||
<div class="flot-chart-content" id="cpu-chart"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
|
||||
<!-- Flot -->
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot.tooltip/0.8.6/jquery.flot.tooltip.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.resize.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.pie.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/flot/0.8.3/jquery.flot.time.min.js"></script>
|
||||
|
||||
<script src="/static/js/plot_monitor.js"></script>
|
||||
|
||||
{% endblock %}
|
|
@ -0,0 +1,24 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Failed{% endblock %}
|
||||
|
||||
{% block panel_title %}Failed{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<strong><i class="fa fa-dashboard"></i>Home</strong>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>Failed</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
{% block content %}
|
||||
<div class="ibox-content text-center p-md">
|
||||
<h1>Failed</h1>
|
||||
<br/>
|
||||
<br/>
|
||||
<a href="/dashboard/"><span class="btn btn-w-m btn-success">Click Here Back To The Dashboard</span></a>
|
||||
</div>
|
||||
{% endblock %}
|
|
@ -0,0 +1,24 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Success{% endblock %}
|
||||
|
||||
{% block panel_title %}Success{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<strong><i class="fa fa-dashboard"></i>Home</strong>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>Success</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
{% block content %}
|
||||
<div class="ibox-body text-center p-md">
|
||||
<h1>SUCCESS</h1>
|
||||
<br/>
|
||||
<br/>
|
||||
<a href="/dashboard/"><span class="btn btn-w-m btn-success">Click Here Back To The Dashboard</span></a>
|
||||
</div>
|
||||
{% endblock %}
|
|
@ -0,0 +1,35 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Confirm{% endblock %}
|
||||
|
||||
{% block panel_title %}Confirm{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
<style>.hide { display:none; }</style>
|
||||
{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<strong><i class="fa fa-dashboard"></i>Home</strong>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>Confirm</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content %}
|
||||
<div class="box-body text-center p-md">
|
||||
<form action="/workspace/save/{{ clustername }}/{{ containername }}/force/" method="POST">
|
||||
<label>Image:</label>
|
||||
<input type="text" name="ImageName" id="ImageName" readonly="true" value="{{ image }}"/>
|
||||
<label> already exists. Are you sure you want to overwrite it?</label>
|
||||
<div class="hide"><input type="text" name="description" id="description" readonly="true" value="{{ description }}"/></div>
|
||||
<br/>
|
||||
<button type="submit" class="btn btn-warning">Yes</button>
|
||||
<a href="/config/"><button type="button" class="btn btn-success">No</button></a>
|
||||
</form>
|
||||
</div>
|
||||
{% endblock %}
|
|
@ -0,0 +1,73 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<title>Docklet | Login</title>
|
||||
<!-- Tell the browser to be responsive to screen width -->
|
||||
<meta content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" name="viewport">
|
||||
<!-- Bootstrap 3.3.5 -->
|
||||
<link href="//cdn.bootcss.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet">
|
||||
<!-- Font Awesome -->
|
||||
<link href="//cdn.bootcss.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet">
|
||||
<!-- Ionicons -->
|
||||
<link href="//cdn.bootcss.com/ionicons/2.0.1/css/ionicons.min.css" rel="stylesheet">
|
||||
<!-- Theme style -->
|
||||
<link rel="stylesheet" href="/static/dist/css/AdminLTE.min.css">
|
||||
|
||||
<link rel="shortcut icon" href="/static/img/favicon.ico">
|
||||
|
||||
|
||||
</head>
|
||||
<body class="hold-transition login-page">
|
||||
<div class="login-box">
|
||||
<div class="login-logo">
|
||||
<img src="/static/img/logo.png" class="logo-name" height="50%" width="50%">
|
||||
<!--a href="/"><b>Docklet</b></a-->
|
||||
</div>
|
||||
<!-- /.login-logo -->
|
||||
<div class="login-box-body">
|
||||
<p class="login-box-msg">An easy and quick way to launch your DISTRIBUTED applications!</p>
|
||||
<form class="m-t" role="form" action="" id="activateForm" method="POST">
|
||||
<div class="form-group">
|
||||
<input type="email" class="form-control" placeholder="E-mail" required="" name="email" value="{{ info['e_mail'] }}">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<input type="text" class="form-control" placeholder="Student number or Staff number" required="" name="studentnumber" value="{{ info['student_number'] }}" >
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<input type="text" class="form-control" placeholder="Department e.g. SEI, EECS" required="" name="department" value="{{ info['department'] }}">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<input type="text" class="form-control" placeholder="True Name e.g. Zhang San" required="" name="truename" value="{{ info['truename'] }}">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<input type="text" class="form-control" placeholder="Telephone Number" required="" name="tel" value="{{ info['tel'] }}">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<textarea class="form-control" name="description" form="activateForm" id="mDescription">
|
||||
{{ info['description'] }}
|
||||
</textarea>
|
||||
</div>
|
||||
<input type="hidden" name="activate" value="true">
|
||||
<div class="row">
|
||||
|
||||
<div class="col-xs-12">
|
||||
<button type="submit" class="btn btn-primary btn-block btn-flat">Activate</button>
|
||||
</div>
|
||||
</div>
|
||||
<!--p class="text-muted text-center"><small>Do not have an account?</small></p-->
|
||||
<!--a class="btn btn-sm btn-white btn-block" href="register.html">Create an account</a-->
|
||||
</form>
|
||||
</div>
|
||||
<!-- /.login-box-body -->
|
||||
</div>
|
||||
<!-- /.login-box -->
|
||||
|
||||
<!-- jQuery 2.2.1 -->
|
||||
<script src="//cdn.bootcss.com/jquery/2.2.1/jquery.min.js"></script>
|
||||
<!-- Bootstrap 3.3.5 -->
|
||||
<script src="//cdn.bootcss.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,256 @@
|
|||
{% extends 'base_AdminLTE.html' %}
|
||||
|
||||
{% block title %}Docklet | Information Modify{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
<link href="//cdn.bootcss.com/x-editable/1.5.1/bootstrap3-editable/css/bootstrap-editable.css" rel="stylesheet">
|
||||
{% endblock %}
|
||||
|
||||
{% block panel_title %}Detail for User Information{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/">Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<strong>User Info</strong>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-5">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">User Info</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<table class="table table-bordered">
|
||||
<tr>
|
||||
<td>User Name</td>
|
||||
<td>{{ info['username'] }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Nickname</td>
|
||||
<td><a href="#" id="nickname" data-type="text" data-pk="1" data-url="/user/info/" data-title="Enter nickname">{{ info['nickname'] }}</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Description</td>
|
||||
<td><a href="#" id="description" data-type="text" data-pk="1" data-url="/user/info/" data-title="Enter description">{{ info['description'] }}</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Truename</td>
|
||||
<td>{{ info['truename'] }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Status</td>
|
||||
<td>{{ info['status'] }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>E-mail</td>
|
||||
<td><a href="#" id="e_mail" data-type="text" data-pk="1" data-url="/user/info/" data-title="Enter e-mail">{{ info['e_mail'] }}</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Department</td>
|
||||
<td><a href="#" id="department" data-type="text" data-pk="1" data-url="/user/info/" data-title="Enter department">{{ info['department'] }}</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>ID Number</td>
|
||||
<td>{{ info['student_number'] }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Telephone</td>
|
||||
<td><a href="#" id="tel" data-type="text" data-pk="1" data-url="/user/info/" data-title="Enter telephone number">{{ info['tel'] }}</a></td>
|
||||
</tr>
|
||||
<!--
|
||||
<tr>
|
||||
<td>password</td>
|
||||
<td>
|
||||
<div class="col-md-12" >
|
||||
<button type="button" class="btn btn-white btn-xs btn-block" data-toggle="modal" data-target="#ChpasswordModal"> Change Password</button>
|
||||
</div>
|
||||
<div class="modal inmodal" id="ChpasswordModal" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-laptop modal-icon"></i>
|
||||
<h4 class="modal-title">Change Password</h4>
|
||||
<small class="font-bold">modify your password</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
|
||||
<form action="/group/add/" method="POST" id="ChpasswordForm">
|
||||
<div class="form-group">
|
||||
<label>Old password</label>
|
||||
<input type = "password" placeholder="Enter old password" class="form-control" name="o_password">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>New password</label>
|
||||
<input type = "password" placeholder="Enter new password" class="form-control" name="n_password">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Verify</label>
|
||||
<input type = "password" placeholder="Enter new password again" class="form-control" name="v_password">
|
||||
</div>
|
||||
</form>
|
||||
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="button" class="btn btn-primary" onClick="javascript:sendAddGroup();">Submit</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</td>
|
||||
|
||||
</tr>
|
||||
-->
|
||||
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<!--
|
||||
<div class="col-lg-7">
|
||||
<div class="ibox float-e-margins">
|
||||
<div class="ibox-title back-change">
|
||||
<h5>Avatar <small>Upload your avatar</small></h5>
|
||||
<div class="ibox-tools">
|
||||
<a class="collapse-link">
|
||||
<i class="fa fa-chevron-up"></i>
|
||||
</a>
|
||||
<a class="dropdown-toggle" data-toggle="dropdown" href="#">
|
||||
<i class="fa fa-wrench"></i>
|
||||
</a>
|
||||
<a class="close-link">
|
||||
<i class="fa fa-times"></i>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
<div class="ibox-content">
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<div class="image-crop">
|
||||
<img src="/static/img/profile.png">
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<h4>Preview image</h4>
|
||||
<div class="img-preview img-preview-sm"></div>
|
||||
<h4>Common method</h4>
|
||||
<p>
|
||||
You can upload new image to crop container and easy upload as your avatar
|
||||
</p>
|
||||
<div class="btn-group">
|
||||
<label title="Upload image file" for="inputImage" class="btn btn-primary">
|
||||
<input type="file" accept="image/*" name="file" id="inputImage" class="hide">
|
||||
Upload new image
|
||||
</label>
|
||||
<label title="setavatar" id="download" class="btn btn-primary">Set as avatar</label>
|
||||
</div>
|
||||
<h4>Other method</h4>
|
||||
<p>
|
||||
Some methods to process the image
|
||||
</p>
|
||||
<div class="btn-group">
|
||||
<button class="btn btn-white" id="zoomIn" type="button">Zoom In</button>
|
||||
<button class="btn btn-white" id="zoomOut" type="button">Zoom Out</button>
|
||||
<button class="btn btn-white" id="rotateLeft" type="button">Rotate Left</button>
|
||||
<button class="btn btn-white" id="rotateRight" type="button">Rotate Right</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
-->
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<script src="//cdn.bootcss.com/x-editable/1.5.1/bootstrap3-editable/js/bootstrap-editable.min.js"></script>
|
||||
<script type="text/javascript">
|
||||
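// Enable in-place editing (X-editable popups) for the profile fields below; each change is POSTed to /user/info/.
|
||||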
$(document).ready(function(){
|
||||
$.fn.editable.defaults.mode = 'popup';
|
||||
$('#nickname').editable();
|
||||
$('#description').editable();
|
||||
$('#department').editable();
|
||||
$('#e_mail').editable();
|
||||
$('#tel').editable();
|
||||
//var $image = $(".image-crop > img")
|
||||
//$($image).cropper({
|
||||
// aspectRatio: 1,
|
||||
// preview: ".img-preview",
|
||||
// done: function(data) {
|
||||
// Output the result data for cropping image.
|
||||
// }
|
||||
//});
|
||||
/*
|
||||
var $inputImage = $("#inputImage");
|
||||
if (window.FileReader) {
|
||||
$inputImage.change(function() {
|
||||
var fileReader = new FileReader(),
|
||||
files = this.files,
|
||||
file;
|
||||
|
||||
if (!files.length) {
|
||||
return;
|
||||
}
|
||||
|
||||
file = files[0];
|
||||
|
||||
if (/^image\/\w+$/.test(file.type)) {
|
||||
fileReader.readAsDataURL(file);
|
||||
fileReader.onload = function () {
|
||||
$inputImage.val("");
|
||||
$image.cropper("reset", true).cropper("replace", this.result);
|
||||
};
|
||||
} else {
|
||||
showMessage("Please choose an image file.");
|
||||
}
|
||||
});
|
||||
} else {
|
||||
$inputImage.addClass("hide");
|
||||
}
|
||||
|
||||
$("#download").click(function() {
|
||||
window.open($image.cropper("getDataURL"));
|
||||
});
|
||||
|
||||
$("#zoomIn").click(function() {
|
||||
$image.cropper("zoom", 0.1);
|
||||
});
|
||||
|
||||
$("#zoomOut").click(function() {
|
||||
$image.cropper("zoom", -0.1);
|
||||
});
|
||||
|
||||
$("#rotateLeft").click(function() {
|
||||
$image.cropper("rotate", 45);
|
||||
});
|
||||
|
||||
$("#rotateRight").click(function() {
|
||||
$image.cropper("rotate", -45);
|
||||
});
|
||||
*/
|
||||
|
||||
});
|
||||
|
||||
|
||||
</script>
|
||||
|
||||
|
||||
{% endblock %}
|
|
@ -0,0 +1,30 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
|
||||
|
||||
{% block title %}Docklet | Error{% endblock %}
|
||||
|
||||
{% block panel_title %}500 Error Page{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
|
||||
<div class="error-page">
|
||||
<h2 class="headline text-red">500</h2>
|
||||
|
||||
<div class="error-content">
|
||||
<h3><br/><i class="fa fa-warning text-red"></i> Internal Server Error</h3>
|
||||
|
||||
<p>
|
||||
Please examine your mail server configuration (currently exim4). You can go back to the
|
||||
<a href="/dashboard/">dashboard</a> or <a href="/logout">log out</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
|
@ -0,0 +1,273 @@
|
|||
{% extends "base_AdminLTE.html"%}
|
||||
{% block title %}Docklet | UserList{% endblock %}
|
||||
|
||||
{% block panel_title %}UserList{% endblock %}
|
||||
|
||||
{% block panel_list %}
|
||||
<ol class="breadcrumb">
|
||||
<li>
|
||||
<a href="/dashboard/"><i class="fa fa-dashboard"></i>Home</a>
|
||||
</li>
|
||||
<li class="active">
|
||||
<a href='/user/list/'>UserList</a>
|
||||
</li>
|
||||
</ol>
|
||||
{% endblock %}
|
||||
|
||||
{% block css_src %}
|
||||
|
||||
<link href="//cdn.bootcss.com/datatables/1.10.11/css/dataTables.bootstrap.min.css" rel="stylesheet">
|
||||
<link href="//cdn.bootcss.com/datatables/1.10.11/css/jquery.dataTables_themeroller.css" rel="stylesheet">
|
||||
<link href="/static/dist/css/modalconfig.css" rel="stylesheet">
|
||||
|
||||
|
||||
{% endblock %}
|
||||
|
||||
|
||||
{% block content %}
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="box box-info">
|
||||
<div class="box-header with-border">
|
||||
<h3 class="box-title">User List</h3>
|
||||
|
||||
<div class="box-tools pull-right">
|
||||
<button type="button" class="btn btn-box-tool" data-widget="collapse"><i class="fa fa-minus"></i>
|
||||
</button>
|
||||
<button type="button" class="btn btn-box-tool" data-widget="remove"><i class="fa fa-times"></i></button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="box-body">
|
||||
<button type="button" class="btn btn-primary btn-sm" data-toggle="modal" data-target="#AddUserModal"><i class="fa fa-plus"></i> Add User</button>
|
||||
<div class="modal inmodal" id="AddUserModal" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-laptop modal-icon"></i>
|
||||
<h4 class="modal-title">Add User</h4>
|
||||
<small class="font-bold">Add a user in Docklet</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<form action="/user/add/" method="POST" id="addUserForm">
|
||||
<div class="form-group">
|
||||
<label>User Name</label>
|
||||
<input type = "text" placeholder="Enter Username" class="form-control" name="username">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>PASSWORD</label>
|
||||
<input type = "password" placeholder="Enter Password" class="form-control" name="password">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>E-mail</label>
|
||||
<input type="email" placeholder="Enter E-mail Address" class="form-control" name="e_mail">
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="button" class="btn btn-primary" onClick="javascript:sendAddUser();">Submit</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<table id="myDataTable" class="table table-striped table-bordered">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ID</th>
|
||||
<th>User</th>
|
||||
<th>Name</th>
|
||||
<th>E_mail</th>
|
||||
<th>Tel</th>
|
||||
<th>RegisterDate</th>
|
||||
<th>Status</th>
|
||||
<th>Group</th>
|
||||
<th>Command</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
</tbody>
|
||||
<div class="modal inmodal" id="ModifyUserModal" tabindex="-1" role="dialog" aria-hidden="true">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content animated fadeIn">
|
||||
<div class="modal-header">
|
||||
<button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
|
||||
<i class="fa fa-laptop modal-icon"></i>
|
||||
<h4 class="modal-title">Modify User</h4>
|
||||
<small class="font-bold">Modify a user in Docklet</small>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<form action="/user/modify/" method="POST" id="modifyUserForm">
|
||||
<div class="form-group">
|
||||
<label>User Name</label>
|
||||
<input type = "text" placeholder="Enter Username" class="form-control" name="username" id="mUsername" readonly="readonly">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Status</label>
|
||||
<select class="form-control" name="status" id="mStatus">
|
||||
<option>normal</option>
|
||||
<option>applying</option>
|
||||
<option>init</option>
|
||||
<option>locked</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>True Name</label>
|
||||
<input type = "text" placeholder="Enter Truename" class="form-control" name="truename" id="mTruename">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>E-mail</label>
|
||||
<input type="email" placeholder="Enter E-mail Address" class="form-control" name="e_mail" id="mE_mail">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Department</label>
|
||||
<input type = "text" placeholder="Enter Department" class="form-control" name="department" id="mDepartment">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Student Number</label>
|
||||
<input type = "text" placeholder="Enter Student Number" class="form-control" name="student_number" id="mStudentNumber">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Telephone Number</label>
|
||||
<input type = "text" placeholder="Enter Telephone Number" class="form-control" name="tel" id="mTel">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Change Password?</label>
|
||||
<select class="form-control" name="Chpassword" id="mChpassword">
|
||||
<option>Yes</option>
|
||||
<option>No</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Password</label>
|
||||
<input type = "text" placeholder="Enter Password" class="form-control" name="password" id="mPassword">
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<label>User Group</label>
|
||||
<select class="form-control" name="group" id="mUserGroup">
|
||||
{% for group in groups %}
|
||||
<option>{{ group }}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label>Auth Method</label>
|
||||
<select class="form-control" name="auth_method" id="mAuthMethod">
|
||||
<option>local</option>
|
||||
<option>pam</option>
|
||||
<option>iaaa</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<label>Description</label>
|
||||
<textarea class="form-control" name="description" id="mDescription" readonly="readonly">
|
||||
|
||||
</textarea>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-white" data-dismiss="modal">Close</button>
|
||||
<button type="button" class="btn btn-primary" onClick="javascript:sendModifyUser();">Submit</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
{% block script_src %}
|
||||
<script src="//cdn.bootcss.com/datatables/1.10.11/js/jquery.dataTables.min.js"></script>
|
||||
<script src="//cdn.bootcss.com/datatables/1.10.11/js/dataTables.bootstrap.min.js"></script>
|
||||
|
||||
|
||||
<script type="text/javascript">
|
||||
$(document).ready(function() {
|
||||
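// User table: rows are loaded with a POST to /user/list/ (a JSON object of the
// form {"data": [...]}); column 8 (Command) is rendered as an Edit button that
// opens the Modify User modal for that row.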
var oTable = $('#myDataTable').dataTable({
|
||||
"ajax": {
|
||||
"url": "/user/list/",
|
||||
"type": "POST"
|
||||
},
|
||||
//"scrollX": true,
|
||||
"columnDefs": [
|
||||
{
|
||||
"render": function ( data, type, row ) {
|
||||
return '<a class="btn btn-info btn-sm" data-toggle="modal" data-target="#ModifyUserModal" onClick="javascript:setFormUser(' + row[0] + ');">' + 'Edit' + '</a>';
|
||||
},
|
||||
"targets": 8
|
||||
},
|
||||
]
|
||||
|
||||
});
|
||||
var gTable = $('#myGroupTable').dataTable({
|
||||
"ajax": {
|
||||
"url": "/group/detail/",
|
||||
"type": "POST"
|
||||
},
|
||||
//"scrollX": true,
|
||||
"columnDefs": [
|
||||
{
|
||||
"render": function ( data, type, row ) {
|
||||
return '<a class="btn btn-info btn-sm" data-toggle="modal" data-target="#ModifyGroupModal" onClick="javascript:setFormGroup(' + row[0] + ');">' + 'Edit' + '</a>';
|
||||
},
|
||||
"targets": 6
|
||||
},
|
||||
]
|
||||
|
||||
});
|
||||
});
|
||||
function sendAddUser(){
|
||||
document.getElementById("addUserForm").submit();
|
||||
}
|
||||
function sendAddGroup(){
|
||||
document.getElementById("addGroupForm").submit();
|
||||
}
|
||||
function sendModifyUser(){
|
||||
document.getElementById("modifyUserForm").submit();
|
||||
}
|
||||
function sendModifyGroup(){
|
||||
document.getElementById("modifyGroupForm").submit();
|
||||
}
|
||||
function setFormUser(arg){
|
||||
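// Fetch a single user record by ID and fill in the Modify User form fields.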
$.post("/user/query/",
|
||||
{
|
||||
ID: arg,
|
||||
},
|
||||
function(data,status){
|
||||
var result = JSON.parse(data).data;
|
||||
$("#mUsername").val(result.username);
|
||||
$("#mTruename").val(result.truename);
|
||||
$("#mE_mail").val(result.e_mail);
|
||||
$("#mDepartment").val(result.department);
|
||||
$("#mStudentNumber").val(result.student_number);
|
||||
$("#mTel").val(result.tel);
|
||||
$("#mChpassword").val('No');
|
||||
$("#mPassword").val(result.password);
|
||||
$("#mStatus").val(result.status);
|
||||
$("#mUserGroup").val(result.group);
|
||||
$("#mAuthMethod").val(result.auth_method);
|
||||
$("#mDescription").val(result.description);
|
||||
});
|
||||
}
|
||||
function setFormGroup(arg){
|
||||
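// Shared with the group management page: fills the Modify Group modal, which is not part of this template.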
$.post("/group/query/",
|
||||
{
|
||||
ID: arg,
|
||||
},
|
||||
function(data,status){
|
||||
var result = JSON.parse(data).data;
|
||||
$("#mGroupname").val(result.name);
|
||||
$("#mCpu").val(result.cpu);
|
||||
$("#mMemory").val(result.memory);
|
||||
$("#mImage").val(result.imageQuantity);
|
||||
$("#mLifecycle").val(result.lifeCycle);
|
||||
});
|
||||
}
|
||||
</script>
|
||||
{% endblock %}
|
|
@ -0,0 +1,448 @@
|
|||
#!/usr/bin/python3
|
||||
import json
|
||||
import os
|
||||
import getopt
|
||||
|
||||
import sys, inspect
|
||||
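# Put the repository's src/ directory (one level above this script) on sys.path
# so that tools, env and the webViews package can be imported below.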
this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
|
||||
src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"..", "src")))
|
||||
if src_folder not in sys.path:
|
||||
sys.path.insert(0, src_folder)
|
||||
|
||||
# must first init loadenv
|
||||
import tools, env
|
||||
config = env.getenv("CONFIG")
|
||||
tools.loadenv(config)
|
||||
|
||||
from webViews.log import initlogging
|
||||
initlogging("docklet-web")
|
||||
from webViews.log import logger
|
||||
|
||||
from flask import Flask, request, session, render_template, redirect, send_from_directory, make_response, url_for, abort
|
||||
from webViews.dashboard import dashboardView
|
||||
from webViews.user.userlist import userlistView, useraddView, usermodifyView, groupaddView, userdataView, userqueryView
|
||||
from webViews.user.userinfo import userinfoView
|
||||
from webViews.user.userActivate import userActivateView
|
||||
from webViews.user.grouplist import grouplistView, groupqueryView, groupdetailView, groupmodifyView
|
||||
from functools import wraps
|
||||
from webViews.dockletrequest import dockletRequest
|
||||
from webViews.cluster import *
|
||||
from webViews.admin import *
|
||||
from webViews.monitor import *
|
||||
from webViews.authenticate.auth import login_required, administration_required,activated_required
|
||||
from webViews.authenticate.register import registerView
|
||||
from webViews.authenticate.login import loginView, logoutView
|
||||
import webViews.dockletrequest
|
||||
from webViews import cookie_tool
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
external_login = env.getenv('EXTERNAL_LOGIN')
|
||||
# Default external-auth URLs; overridden below by the external_generate plugin when EXTERNAL_LOGIN is 'True'.
|
||||
external_login_url = '/external_auth/'
|
||||
external_login_callback_url = '/external_auth_callback/'
|
||||
if (external_login == 'True'):
|
||||
sys.path.insert(0, os.path.realpath(os.path.abspath(os.path.join(this_folder,"../src", "plugin"))))
|
||||
import external_generate
|
||||
from webViews.authenticate.login import external_loginView, external_login_callbackView
|
||||
external_login_url = external_generate.external_login_url
|
||||
external_login_callback_url = external_generate.external_login_callback_url
|
||||
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
|
||||
|
||||
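# Note: the webViews classes used below are not Flask MethodViews; their as_view()
# is called once per request and is expected to handle it and return a response directly.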
@app.route("/", methods=['GET'])
|
||||
def home():
|
||||
return render_template('home.html')
|
||||
|
||||
@app.route("/login/", methods=['GET', 'POST'])
|
||||
def login():
|
||||
return loginView.as_view()
|
||||
|
||||
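# External-auth routes: only meaningful when EXTERNAL_LOGIN is enabled; otherwise
# external_loginView is undefined and the bare except falls back to a 404.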
@app.route(external_login_url, methods=['GET'])
|
||||
def external_login_func():
|
||||
try:
|
||||
return external_loginView.as_view()
|
||||
except:
|
||||
abort(404)
|
||||
|
||||
@app.route(external_login_callback_url, methods=['GET'])
|
||||
def external_login_callback():
|
||||
try:
|
||||
return external_login_callbackView.as_view()
|
||||
except:
|
||||
abort(404)
|
||||
|
||||
@app.route("/logout/", methods=["GET"])
|
||||
@login_required
|
||||
def logout():
|
||||
return logoutView.as_view()
|
||||
|
||||
@app.route("/register/", methods=['GET', 'POST'])
|
||||
@administration_required
|
||||
# Currently forbidden; only intended for SEI & PKU staff and students.
|
||||
# Admins can still use it for testing.
|
||||
def register():
|
||||
return registerView.as_view()
|
||||
|
||||
|
||||
|
||||
@app.route("/activate/", methods=['GET', 'POST'])
|
||||
@login_required
|
||||
def activate():
|
||||
return userActivateView.as_view()
|
||||
|
||||
@app.route("/dashboard/", methods=['GET'])
|
||||
@login_required
|
||||
def dashboard():
|
||||
return dashboardView.as_view()
|
||||
|
||||
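# Guest dashboard: no login required; a 'guest-cookie' signed with the app secret key is attached to the response.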
@app.route("/dashboard_guest/", methods=['GET'])
|
||||
def dashboard_guest():
|
||||
resp = make_response(dashboard_guestView.as_view())
|
||||
resp.set_cookie('guest-cookie', cookie_tool.generate_cookie('guest', app.secret_key))
|
||||
return resp
|
||||
|
||||
@app.route("/document/", methods=['GET'])
|
||||
def redirect_dochome():
|
||||
return redirect("http://docklet.unias.org/userguide")
|
||||
|
||||
@app.route("/config/", methods=['GET'])
|
||||
@login_required
|
||||
def config():
|
||||
return configView.as_view()
|
||||
|
||||
|
||||
@app.route("/workspace/create/", methods=['GET'])
|
||||
@activated_required
|
||||
def addCluster():
|
||||
return addClusterView.as_view()
|
||||
|
||||
@app.route("/workspace/list/", methods=['GET'])
|
||||
@login_required
|
||||
def listCluster():
|
||||
return listClusterView.as_view()
|
||||
|
||||
@app.route("/workspace/add/", methods=['POST'])
|
||||
@login_required
|
||||
def createCluster():
|
||||
createClusterView.clustername = request.form["clusterName"]
|
||||
createClusterView.image = request.form["image"]
|
||||
return createClusterView.as_view()
|
||||
|
||||
@app.route("/workspace/scaleout/<clustername>/", methods=['POST'])
|
||||
@login_required
|
||||
def scaleout(clustername):
|
||||
scaleoutView.image = request.form["image"]
|
||||
scaleoutView.clustername = clustername
|
||||
return scaleoutView.as_view()
|
||||
|
||||
@app.route("/workspace/scalein/<clustername>/<containername>/", methods=['GET'])
|
||||
@login_required
|
||||
def scalein(clustername,containername):
|
||||
scaleinView.clustername = clustername
|
||||
scaleinView.containername = containername
|
||||
return scaleinView.as_view()
|
||||
|
||||
@app.route("/workspace/start/<clustername>/", methods=['GET'])
|
||||
@login_required
|
||||
def startCluster(clustername):
|
||||
startClusterView.clustername = clustername
|
||||
return startClusterView.as_view()
|
||||
|
||||
@app.route("/workspace/stop/<clustername>/", methods=['GET'])
|
||||
@login_required
|
||||
def stopCluster(clustername):
|
||||
stopClusterView.clustername = clustername
|
||||
return stopClusterView.as_view()
|
||||
|
||||
@app.route("/workspace/delete/<clustername>/", methods=['GET'])
|
||||
@login_required
|
||||
def deleteCluster(clustername):
|
||||
deleteClusterView.clustername = clustername
|
||||
return deleteClusterView.as_view()
|
||||
|
||||
@app.route("/workspace/detail/<clustername>/", methods=['GET'])
|
||||
@login_required
|
||||
def detailCluster(clustername):
|
||||
detailClusterView.clustername = clustername
|
||||
return detailClusterView.as_view()
|
||||
|
||||
@app.route("/workspace/flush/<clustername>/<containername>/", methods=['GET'])
|
||||
@login_required
|
||||
def flushCluster(clustername,containername):
|
||||
flushClusterView.clustername = clustername
|
||||
flushClusterView.containername = containername
|
||||
return flushClusterView.as_view()
|
||||
|
||||
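# Save a container as an image; the /force/ variant below sets isforce to "true"
# (presumably to overwrite an existing image with the same name on the master).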
@app.route("/workspace/save/<clustername>/<containername>/", methods=['POST'])
|
||||
@login_required
|
||||
def saveImage(clustername,containername):
|
||||
saveImageView.clustername = clustername
|
||||
saveImageView.containername = containername
|
||||
saveImageView.isforce = "false"
|
||||
saveImageView.imagename = request.form['ImageName']
|
||||
saveImageView.description = request.form['description']
|
||||
return saveImageView.as_view()
|
||||
|
||||
@app.route("/workspace/save/<clustername>/<containername>/force/", methods=['POST'])
|
||||
@login_required
|
||||
def saveImage_force(clustername,containername):
|
||||
saveImageView.clustername = clustername
|
||||
saveImageView.containername = containername
|
||||
saveImageView.isforce = "true"
|
||||
saveImageView.imagename = request.form['ImageName']
|
||||
saveImageView.description = request.form['description']
|
||||
return saveImageView.as_view()
|
||||
|
||||
@app.route("/addproxy/<clustername>/", methods=['POST'])
|
||||
@login_required
|
||||
def addproxy(clustername):
|
||||
addproxyView.clustername = clustername
|
||||
addproxyView.ip = request.form['proxy_ip']
|
||||
addproxyView.port = request.form['proxy_port']
|
||||
return addproxyView.as_view()
|
||||
|
||||
@app.route("/deleteproxy/<clustername>/", methods=['GET'])
|
||||
@login_required
|
||||
def deleteproxy(clustername):
|
||||
deleteproxyView.clustername = clustername
|
||||
return deleteproxyView.as_view()
|
||||
|
||||
@app.route("/image/description/<image>/", methods=['GET'])
|
||||
@login_required
|
||||
def descriptionImage(image):
|
||||
descriptionImageView.image = image
|
||||
return descriptionImageView.as_view()
|
||||
|
||||
@app.route("/image/share/<image>/", methods=['GET'])
|
||||
@login_required
|
||||
def shareImage(image):
|
||||
shareImageView.image = image
|
||||
return shareImageView.as_view()
|
||||
|
||||
@app.route("/image/unshare/<image>/", methods=['GET'])
|
||||
@login_required
|
||||
def unshareImage(image):
|
||||
unshareImageView.image = image
|
||||
return unshareImageView.as_view()
|
||||
|
||||
@app.route("/image/delete/<image>/", methods=['GET'])
|
||||
@login_required
|
||||
def deleteImage(image):
|
||||
deleteImageView.image = image
|
||||
return deleteImageView.as_view()
|
||||
|
||||
@app.route("/hosts/", methods=['GET'])
|
||||
@administration_required
|
||||
def hosts():
|
||||
return hostsView.as_view()
|
||||
|
||||
@app.route("/hosts/<com_ip>/", methods=['GET'])
|
||||
@administration_required
|
||||
def hostsRealtime(com_ip):
|
||||
hostsRealtimeView.com_ip = com_ip
|
||||
return hostsRealtimeView.as_view()
|
||||
|
||||
@app.route("/hosts/<com_ip>/containers/", methods=['GET'])
|
||||
@administration_required
|
||||
def hostsConAll(com_ip):
|
||||
hostsConAllView.com_ip = com_ip
|
||||
return hostsConAllView.as_view()
|
||||
|
||||
@app.route("/vclusters/", methods=['GET'])
|
||||
@login_required
|
||||
def status():
|
||||
return statusView.as_view()
|
||||
|
||||
@app.route("/vclusters/<vcluster_name>/<node_name>/", methods=['GET'])
|
||||
@login_required
|
||||
def statusRealtime(vcluster_name,node_name):
|
||||
statusRealtimeView.node_name = node_name
|
||||
return statusRealtimeView.as_view()
|
||||
|
||||
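# Forward monitor queries to the master unchanged: the request path is re-posted
# together with the session's user name and the master's reply is returned as JSON.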
@app.route("/monitor/hosts/<comid>/<infotype>", methods=['POST'])
|
||||
@app.route("/monitor/vnodes/<comid>/<infotype>", methods=['POST'])
|
||||
@login_required
|
||||
def monitor_request(comid,infotype):
|
||||
data = {
|
||||
"user": session['username']
|
||||
}
|
||||
result = dockletRequest.post(request.path, data)
|
||||
return json.dumps(result)
|
||||
|
||||
@app.route("/monitor/User/", methods=['GET'])
|
||||
@administration_required
|
||||
def monitorUserAll():
|
||||
return monitorUserAllView.as_view()
|
||||
|
||||
|
||||
|
||||
|
||||
@app.route("/user/list/", methods=['GET', 'POST'])
|
||||
@administration_required
|
||||
def userlist():
|
||||
return userlistView.as_view()
|
||||
|
||||
@app.route("/group/list/", methods=['POST'])
|
||||
@administration_required
|
||||
def grouplist():
|
||||
return grouplistView.as_view()
|
||||
|
||||
@app.route("/group/detail/", methods=['POST'])
|
||||
@administration_required
|
||||
def groupdetail():
|
||||
return groupdetailView.as_view()
|
||||
|
||||
@app.route("/group/query/", methods=['POST'])
|
||||
@administration_required
|
||||
def groupquery():
|
||||
return groupqueryView.as_view()
|
||||
|
||||
@app.route("/group/modify/", methods=['POST'])
|
||||
@administration_required
|
||||
def groupmodify():
|
||||
return groupmodifyView.as_view()
|
||||
|
||||
@app.route("/user/data/", methods=['GET', 'POST'])
|
||||
@administration_required
|
||||
def userdata():
|
||||
return userdataView.as_view()
|
||||
|
||||
@app.route("/user/add/", methods=['POST'])
|
||||
@administration_required
|
||||
def useradd():
|
||||
return useraddView.as_view()
|
||||
|
||||
@app.route("/user/modify/", methods=['POST'])
|
||||
@administration_required
|
||||
def usermodify():
|
||||
return usermodifyView.as_view()
|
||||
|
||||
@app.route("/group/add/", methods=['POST'])
|
||||
@administration_required
|
||||
def groupadd():
|
||||
return groupaddView.as_view()
|
||||
|
||||
@app.route("/user/info/", methods=['GET', 'POST'])
|
||||
@login_required
|
||||
def userinfo():
|
||||
return userinfoView.as_view()
|
||||
|
||||
@app.route("/user/query/", methods=['GET', 'POST'])
|
||||
@administration_required
|
||||
def userquery():
|
||||
return userqueryView.as_view()
|
||||
|
||||
|
||||
@app.route("/admin/", methods=['GET', 'POST'])
|
||||
@administration_required
|
||||
def adminpage():
|
||||
return adminView.as_view()
|
||||
|
||||
@app.route('/index/', methods=['GET'])
|
||||
def jupyter_control():
|
||||
return redirect('/dashboard/')
|
||||
|
||||
# for downloading basefs.tar.bz
|
||||
# removed: distributing basefs is not docklet's job
|
||||
# the file should be downloaded from an HTTP server instead
|
||||
#@app.route('/download/basefs', methods=['GET'])
|
||||
#def download():
|
||||
#fsdir = env.getenv("FS_PREFIX")
|
||||
#return send_from_directory(fsdir+'/local', 'basefs.tar.bz', as_attachment=True)
|
||||
|
||||
# jupyter auth APIs
|
||||
@app.route('/jupyter/', methods=['GET'])
|
||||
def jupyter_prefix():
|
||||
path = request.args.get('next')
|
||||
if path is None:
|
||||
return redirect('/login/')
|
||||
return redirect('/login/'+'?next='+path)
|
||||
|
||||
@app.route('/jupyter/home/', methods=['GET'])
|
||||
def jupyter_home():
|
||||
return redirect('/dashboard/')
|
||||
|
||||
@app.route('/jupyter/login/', methods=['GET', 'POST'])
|
||||
def jupyter_login():
|
||||
return redirect('/login/')
|
||||
|
||||
@app.route('/jupyter/logout/', methods=['GET'])
|
||||
def jupyter_logout():
|
||||
return redirect('/logout/')
|
||||
|
||||
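# Cookie check used by the jupyter integration (presumably its authenticator):
# the cookie content is verified against the web secret key and the matching
# user name is returned as JSON, or a 404 is returned if it cannot be parsed.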
@app.route('/jupyter/authorizations/cookie/<cookie_name>/<cookie_content>/', methods=['GET'])
|
||||
def jupyter_auth(cookie_name, cookie_content):
|
||||
username = cookie_tool.parse_cookie(cookie_content, app.secret_key)
|
||||
if username is None:
|
||||
resp = make_response('cookie auth failed')
|
||||
resp.status_code = 404
|
||||
return resp
|
||||
return json.dumps({'name': username})
|
||||
|
||||
@app.errorhandler(401)
|
||||
def not_authorized(error):
|
||||
if "username" in session:
|
||||
return render_template('error/401.html', mysession = session)
|
||||
else:
|
||||
return redirect('/login/')
|
||||
|
||||
@app.errorhandler(500)
|
||||
def internal_server_error(error):
|
||||
if "username" in session:
|
||||
return render_template('error/500.html', mysession = session)
|
||||
else:
|
||||
return redirect('/login/')
|
||||
if __name__ == '__main__':
|
||||
'''
|
||||
to generate a secret_key
|
||||
|
||||
from base64 import b64encode
|
||||
from os import urandom
|
||||
|
||||
secret_key = urandom(24)
|
||||
secret_key = b64encode(secret_key).decode('utf-8')
|
||||
|
||||
'''
|
||||
logger.info('Starting Flask ...')
|
||||
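# Reuse the secret key persisted under FS_PREFIX/local/web_secret_key.txt if it
# exists; otherwise generate a random key and write it there for later runs.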
try:
|
||||
secret_key_file = open(env.getenv('FS_PREFIX') + '/local/web_secret_key.txt')
|
||||
app.secret_key = secret_key_file.read()
|
||||
secret_key_file.close()
|
||||
except:
|
||||
from base64 import b64encode
|
||||
from os import urandom
|
||||
secret_key = urandom(24)
|
||||
secret_key = b64encode(secret_key).decode('utf-8')
|
||||
app.secret_key = secret_key
|
||||
secret_key_file = open(env.getenv('FS_PREFIX') + '/local/web_secret_key.txt', 'w')
|
||||
secret_key_file.write(secret_key)
|
||||
secret_key_file.close()
|
||||
|
||||
os.environ['APP_KEY'] = app.secret_key
|
||||
runcmd = sys.argv[0]
|
||||
app.runpath = runcmd.rsplit('/', 1)[0]
|
||||
|
||||
webip = "0.0.0.0"
|
||||
webport = env.getenv("WEB_PORT")
|
||||
|
||||
webViews.dockletrequest.endpoint = 'http://%s:%d' % (env.getenv('MASTER_IP'), env.getenv('MASTER_PORT'))
|
||||
|
||||
|
||||
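# The listen address and port default to 0.0.0.0 and WEB_PORT, and can be
# overridden on the command line with -i/--ip and -p/--port.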
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "i:p:", ["ip=", "port="])
|
||||
except getopt.GetoptError:
|
||||
print ("%s -i ip -p port" % sys.argv[0])
|
||||
sys.exit(2)
|
||||
for opt, arg in opts:
|
||||
if opt in ("-i", "--ip"):
|
||||
webip = arg
|
||||
elif opt in ("-p", "--port"):
|
||||
webport = int(arg)
|
||||
|
||||
app.run(host=webip, port=webport, debug=True, threaded=True)
|