Compare commits

...

2 Commits

Author SHA1 Message Date
wql 70bf202a79 chore: config batch_run 2024-08-25 19:09:19 +08:00
wql aeb04d75c2 chore: change nproc_per_node to 6 2024-08-25 18:16:05 +08:00
13 changed files with 653 additions and 5 deletions

57
9g_lora_1.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=1
MAX_STEP=1000
GPU_NUM=multi
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD
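
For reference, with the values set at the top of this script (NO=1, MAX_STEP=1000, GPU_NUM=multi) the derived output locations expand as shown below; this is only the expansion of variables already defined above, not additional configuration.

# EXP_PATH -> /workspace/repo/CPM-9G-8B/results/lora/multi/1000/1/models/
# TB_PATH  -> /workspace/repo/CPM-9G-8B/results/lora/multi/1000/1/logs/
# Checkpoints go to ${EXP_PATH}/checkpoints under the save-name "9g-sft";
# torchrun starts 7 local workers on one node (--nnodes=1 --nproc_per_node=7).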

57
9g_lora_1_single.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=1
MAX_STEP=1000
GPU_NUM=single
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

View File

@@ -4,15 +4,20 @@ export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/test/models_0824
NO=1
MAX_STEP=500
GPU_NUM=multi
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters 695"
OPTS+=" --inspect-iters 2000"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
@@ -34,7 +39,7 @@ OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard /workspace/repo/CPM-9G-8B/logs/tensorboard/${MODEL_NAME}/${CUR_DATE}/"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
@@ -46,7 +51,7 @@ OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=2 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=1
MAX_STEP=500
GPU_NUM=single
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

57
9g_lora_2.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=2
MAX_STEP=1000
GPU_NUM=multi
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

57
9g_lora_2_single.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=2
MAX_STEP=1000
GPU_NUM=single
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

57
9g_lora_2_step500.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=2
MAX_STEP=500
GPU_NUM=multi
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=2
MAX_STEP=500
GPU_NUM=single
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

57
9g_lora_3.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=3
MAX_STEP=1000
GPU_NUM=multi
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

57
9g_lora_3_single.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=3
MAX_STEP=1000
GPU_NUM=single
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

57
9g_lora_3_step500.sh Normal file
View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=3
MAX_STEP=500
GPU_NUM=multi
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

View File

@@ -0,0 +1,57 @@
#! /bin/bash
export MASTER_ADDR="localhost"
export MASTER_PORT=12348
CPM_PATH="/workspace/repo/CPM-9G-8B/9G-Train"
NO=3
MAX_STEP=500
GPU_NUM=single
EXP_PATH=/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/models/
MODEL_NAME="9g-sft"
TB_PATH="/workspace/repo/CPM-9G-8B/results/lora/${GPU_NUM}/${MAX_STEP}/${NO}/logs/"
OPTS=""
OPTS+=" --vocab /v2/sft_8b_v2/vocab.txt"
OPTS+=" --model-config /v2/sft_8b_v2/config.json"
OPTS+=" --train-iters ${MAX_STEP}"
OPTS+=" --inspect-iters 500"
OPTS+=" --warmup-iters 20"
OPTS+=" --lr-decay-style cosine"
OPTS+=" --weight-decay 0.01"
OPTS+=" --clip-grad 1.0"
OPTS+=" --loss-scale 1048576"
OPTS+=" --max-loss-scale 33554432"
OPTS+=" --min-loss-scale 1"
OPTS+=" --loss-scale-steps 32"
OPTS+=" --offload"
OPTS+=" --batch-size 2"
OPTS+=" --max-length 4096"
OPTS+=" --lr 3e-4"
OPTS+=" --start-step 0"
OPTS+=" --epoch 4"
OPTS+=" --load /v2/sft_8b_v2/cpm_live_8b-1500-float16.pt"
OPTS+=" --dataset /workspace/repo/CPM-9G-8B/dataset_bin"
# TODO: these /data paths need to be changed to paths under /home on the Qiyuan machines
OPTS+=" --save ${EXP_PATH}/checkpoints"
OPTS+=" --save-name ${MODEL_NAME}"
OPTS+=" --tensorboard ${TB_PATH}"
OPTS+=" --delta-tuning"
OPTS+=" --delta-type lora"
OPTS+=" --lora-r 64" # 常用的lora 参数
OPTS+=" --lora-dropout 0.05"
OPTS+=" --lora-alpha 64" # 常用的lora alpha 参数
OPTS+=" --lora-layer project_q project_v project_k w_0 w_1 w_out"
OPTS+=" --save-origin-model"
OPTS+=" $@"
CMD="torchrun --nnodes=1 --nproc_per_node=7 --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${MASTER_ADDR}:${MASTER_PORT} ${CPM_PATH}/apps/cpm9g/sft_cpm9g_delta.py ${OPTS}"
echo "${CMD}"
$CMD

16
batch_run.sh Normal file
View File

@@ -0,0 +1,16 @@
export CUDA_VISIBLE_DEVICES=1,2,3,4,5,6
bash 9g_lora_1.sh | tee /results/screen_log/9g_lora_1_log.txt
bash 9g_lora_1_step500.sh | tee /results/screen_log/9g_lora_1_step500_log.txt
bash 9g_lora_2.sh | tee /results/screen_log/9g_lora_2_log.txt
bash 9g_lora_2_step500.sh | tee /results/screen_log/9g_lora_2_step500_log.txt
bash 9g_lora_3.sh | tee /results/screen_log/9g_lora_3_log.txt
bash 9g_lora_3_step500.sh | tee /results/screen_log/9g_lora_3_step500_log.txt
export CUDA_VISIBLE_DEVICES=6
bash 9g_lora_1_single.sh | tee /results/screen_log/9g_lora_1_single_log.txt
bash 9g_lora_1_step500_single.sh | tee /results/screen_log/9g_lora_1_step500_single_log.txt
bash 9g_lora_2_single.sh | tee /results/screen_log/9g_lora_2_single_log.txt
bash 9g_lora_2_step500_single.sh | tee /results/screen_log/9g_lora_2_step500_single_log.txt
bash 9g_lora_3_single.sh | tee /results/screen_log/9g_lora_3_single_log.txt
bash 9g_lora_3_step500_single.sh | tee /results/screen_log/9g_lora_3_step500_single_log.txt
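
The twelve wrapper scripts above differ only in NO, MAX_STEP, and GPU_NUM, so the sequence that batch_run.sh spells out line by line can also be written as a loop. A minimal sketch, assuming the same script names and that /results/screen_log already exists; it is not part of this commit:

#!/bin/bash
# Sketch: loop-based equivalent of batch_run.sh.
# Multi-GPU runs on devices 1-6, then single-GPU runs on device 6.
export CUDA_VISIBLE_DEVICES=1,2,3,4,5,6
for script in 9g_lora_1 9g_lora_1_step500 9g_lora_2 9g_lora_2_step500 9g_lora_3 9g_lora_3_step500; do
    bash ${script}.sh | tee /results/screen_log/${script}_log.txt
done
export CUDA_VISIBLE_DEVICES=6
for script in 9g_lora_1_single 9g_lora_1_step500_single 9g_lora_2_single 9g_lora_2_step500_single 9g_lora_3_single 9g_lora_3_step500_single; do
    bash ${script}.sh | tee /results/screen_log/${script}_log.txt
done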