docklet/bin/docklet-worker

#!/bin/sh
[ "$(id -u)" != "0" ] && echo "this script must be run as root" && exit 1
# resolve docklet installation paths
bindir=${0%/*}
# $bindir may contain relative components, e.g. /opt/docklet/src/../bin;
# normalize it into a clean absolute path
DOCKLET_BIN=$(cd "$bindir"; pwd)
DOCKLET_HOME=${DOCKLET_BIN%/*}
DOCKLET_CONF=$DOCKLET_HOME/conf
LXC_SCRIPT=$DOCKLET_CONF/lxc-script
DOCKLET_SRC=$DOCKLET_HOME/src
DOCKLET_LIB=$DOCKLET_SRC
DOCKLET_WEB=$DOCKLET_HOME/web
# working directory, default to /opt/docklet
FS_PREFIX=/opt/docklet
# cluster net ip range, default is 172.16.0.1/16
CLUSTER_NET="172.16.0.1/16"
# ip addresses range of containers for batch job, default is 10.16.0.0/16
BATCH_NET="10.16.0.0/16"
# configurable-http-proxy public port, default is 8000
PROXY_PORT=8000
# configurable-http-proxy API port, default is 8001
PROXY_API_PORT=8001
DISTRIBUTED_GATEWAY=False
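# load the site configuration; any of the defaults above can be overridden there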
. $DOCKLET_CONF/docklet.conf
export FS_PREFIX
RUN_DIR=$FS_PREFIX/local/run
LOG_DIR=$FS_PREFIX/local/log
# the user the daemon processes run as
DAEMON_USER=root
# settings for docklet worker
DAEMON=$DOCKLET_LIB/worker/worker.py
DAEMON_NAME=docklet-worker
DAEMON_OPTS=
# the PID of the worker daemon is stored here:
PIDFILE=$RUN_DIR/$DAEMON_NAME.pid
# settings for the docklet batch worker, required by the batch job processing system
BATCH_ON=True
DAEMON_BATCH=$DOCKLET_LIB/worker/taskworker.py
DAEMON_NAME_BATCH=docklet-taskworker
PIDFILE_BATCH=$RUN_DIR/batch.pid
DAEMON_OPTS_BATCH=
# settings for docklet proxy, which is required for web access
DAEMON_PROXY=$(which configurable-http-proxy)
DAEMON_NAME_PROXY=docklet-proxy
PIDFILE_PROXY=$RUN_DIR/proxy.pid
DAEMON_OPTS_PROXY=
DOCKMETER_NAME=$DAEMON_NAME-metering
DOCKMETER_PIDFILE=$RUN_DIR/$DOCKMETER_NAME.pid
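# LSB helpers (log_daemon_msg, log_end_msg, status_of_proc) on Debian/Ubuntu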
. /lib/lsb/init-functions
###########
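# pre_start: create the runtime directories under $FS_PREFIX, write the merged
# running configuration, install NAT rules for the container networks, and
# abort if basefs has not been prepared yet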
pre_start () {
    [ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global
    [ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local
    [ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users
    [ ! -d $FS_PREFIX/local/volume ] && mkdir -p $FS_PREFIX/local/volume
    [ ! -d $FS_PREFIX/local/temp ] && mkdir -p $FS_PREFIX/local/temp
    [ ! -d $FS_PREFIX/local/run ] && mkdir -p $FS_PREFIX/local/run
    [ ! -d $FS_PREFIX/local/log ] && mkdir -p $FS_PREFIX/local/log
    tempdir=$FS_PREFIX/local/temp
    RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf
    grep -P "^[\s]*[a-zA-Z]" $DOCKLET_CONF/docklet.conf > $RUNNING_CONFIG
    echo "DOCKLET_HOME=$DOCKLET_HOME" >> $RUNNING_CONFIG
    echo "DOCKLET_BIN=$DOCKLET_BIN" >> $RUNNING_CONFIG
    echo "DOCKLET_CONF=$DOCKLET_CONF" >> $RUNNING_CONFIG
    echo "LXC_SCRIPT=$LXC_SCRIPT" >> $RUNNING_CONFIG
    echo "DOCKLET_SRC=$DOCKLET_SRC" >> $RUNNING_CONFIG
    echo "DOCKLET_LIB=$DOCKLET_LIB" >> $RUNNING_CONFIG
    export CONFIG=$RUNNING_CONFIG
    # iptables NAT (MASQUERADE) rules so containers can reach the external network
    iptables -t nat -F
    iptables -t nat -A POSTROUTING -s $CLUSTER_NET -j MASQUERADE
    iptables -t nat -A POSTROUTING -s $BATCH_NET -j MASQUERADE
    if [ ! -d $FS_PREFIX/local/basefs ]; then
        log_daemon_msg "basefs does not exist, run prepare.sh first"
        exit 1
    fi
    if [ ! -d $FS_PREFIX/local/packagefs ]; then
        mkdir -p $FS_PREFIX/local/packagefs
    fi
}
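# do_start: launch worker.py as a background daemon; $1 is passed through as
# the worker mode ("normal-worker" or "batch-worker")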
do_start() {
    pre_start
    DAEMON_OPTS=$1
    log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
    #python3 $DAEMON
    start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
    log_end_msg $?
}
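# do_start_batch: launch the batch task worker (taskworker.py); skipped when
# BATCH_ON is False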
do_start_batch () {
if [ "$BATCH_ON" = "False" ]
then
return 1
fi
log_daemon_msg "Starting $DAEMON_NAME_BATCH in $FS_PREFIX"
DAEMON_OPTS_BATCH=""
start-stop-daemon --start --background --pidfile $PIDFILE_BATCH --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_BATCH -- $DAEMON_OPTS_BATCH
log_end_msg $?
}
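# do_start_proxy: launch configurable-http-proxy on $PROXY_PORT (API on
# $PROXY_API_PORT); skipped when DISTRIBUTED_GATEWAY is False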
do_start_proxy () {
if [ "$DISTRIBUTED_GATEWAY" = "False" ]
then
return 1
fi
log_daemon_msg "Starting $DAEMON_NAME_PROXY daemon in $FS_PREFIX"
DAEMON_OPTS_PROXY="--port $PROXY_PORT --api-port $PROXY_API_PORT --default-target=http://localhost:8888"
start-stop-daemon --start --background --pidfile $PIDFILE_PROXY --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_PROXY -- $DAEMON_OPTS_PROXY
log_end_msg $?
}
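# do_stop: stop the worker daemon, retrying the stop for up to 10 seconds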
do_stop () {
log_daemon_msg "Stopping $DAEMON_NAME daemon"
start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE --retry 10
log_end_msg $?
}
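# do_stop_batch: stop the batch task worker; skipped when BATCH_ON is False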
do_stop_batch () {
if [ "$BATCH_ON" = "False" ]
then
return 1
fi
log_daemon_msg "Stopping $DAEMON_NAME_BATCH daemon"
start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_BATCH --retry 10
log_end_msg $?
}
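# do_stop_proxy: stop the proxy daemon; skipped when DISTRIBUTED_GATEWAY is False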
do_stop_proxy () {
if [ "$DISTRIBUTED_GATEWAY" = "False" ]
then
return 1
fi
log_daemon_msg "Stopping $DAEMON_NAME_PROXY daemon"
start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_PROXY --retry 10
log_end_msg $?
}
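# do_start_meter: launch the metering daemon ($DOCKLET_HOME/meter/main.py)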
do_start_meter() {
log_daemon_msg "Starting $DOCKMETER_NAME in $FS_PREFIX"
start-stop-daemon --start --background --pidfile $DOCKMETER_PIDFILE --make-pidfile --exec $DOCKLET_HOME/meter/main.py
log_end_msg $?
}
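# do_stop_meter: stop the metering daemon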
do_stop_meter() {
log_daemon_msg "Stopping $DOCKMETER_NAME daemon"
start-stop-daemon --stop --pidfile $DOCKMETER_PIDFILE --remove-pidfile
log_end_msg $?
}
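# command dispatcher, e.g.
#   docklet-worker start      # start the worker (plus batch worker / proxy if enabled)
#   docklet-worker status     # report daemon status
#   docklet-worker restart    # stop and start everything again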
case "$1" in
    start)
        do_start "normal-worker"
        do_start_batch
        do_start_proxy
        ;;
    stop)
        do_stop
        do_stop_batch
        do_stop_proxy
        ;;
    start-meter)
        do_start_meter
        ;;
    stop-meter)
        do_stop_meter
        ;;
    start_batch)
        do_start "batch-worker"
        do_start_batch
        ;;
    stop_batch)
        do_stop
        do_stop_batch
        ;;
    start_proxy)
        do_start_proxy
        ;;
    stop_proxy)
        do_stop_proxy
        ;;
    console)
        pre_start
        cprofilev $DAEMON $DAEMON_OPTS
        ;;
    restart)
        do_stop
        do_stop_batch
        do_stop_proxy
        do_start "normal-worker"
        do_start_batch
        do_start_proxy
        ;;
    status)
        # check all three daemons and exit non-zero if any check fails
        status=0
        status_of_proc -p $PIDFILE "$DAEMON" "$DAEMON_NAME" || status=$?
        status_of_proc -p $PIDFILE_BATCH "$DAEMON_BATCH" "$DAEMON_NAME_BATCH" || status=$?
        status_of_proc -p $PIDFILE_PROXY "$DAEMON_PROXY" "$DAEMON_NAME_PROXY" || status=$?
        exit $status
        ;;
    *)
        echo "Usage: $DAEMON_NAME {start|stop|restart|status|start_batch|stop_batch|start_proxy|stop_proxy|start-meter|stop-meter|console}"
        exit 1
        ;;
esac
exit 0