Compare commits

...

2 Commits

Author SHA1 Message Date
wql d7d54df525 change: change batch run 2024-08-19 10:48:31 +08:00
wql 7c5d56ca26 change: change yaml 2024-08-19 10:41:43 +08:00
2 changed files with 50 additions and 49 deletions

View File

@@ -1,56 +1,56 @@
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_1.log
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_2.log
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_3.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.log
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_1.txt
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_2.txt
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_3.txt
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.txt
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.txt
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.txt
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1.yaml > results/inference/Llama2-7B/llama2_predict_1.log
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2.yaml > results/inference/Llama2-7B/llama2_predict_2.log
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3.yaml > results/inference/Llama2-7B/llama2_predict_3.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1_single.yaml > results/inference/Llama2-7B/llama2_predict_1_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2_single.yaml > results/inference/Llama2-7B/llama2_predict_2_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3_single.yaml > results/inference/Llama2-7B/llama2_predict_3_single.log
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1.yaml | tee results/inference/Llama2-7B/llama2_predict_1.txt
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2.yaml | tee results/inference/Llama2-7B/llama2_predict_2.txt
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3.yaml | tee results/inference/Llama2-7B/llama2_predict_3.txt
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1_single.yaml | tee results/inference/Llama2-7B/llama2_predict_1_single.txt
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2_single.yaml | tee results/inference/Llama2-7B/llama2_predict_2_single.txt
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3_single.yaml | tee results/inference/Llama2-7B/llama2_predict_3_single.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_2.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_1.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_2.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_2.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_3.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1.yaml > results/inference/Qwen-7B/Qwen_predict_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2.yaml > results/inference/Qwen-7B/Qwen_predict_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3.yaml > results/inference/Qwen-7B/Qwen_predict_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1_single.yaml > results/inference/Qwen-7B/Qwen_predict_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2_single.yaml > results/inference/Qwen-7B/Qwen_predict_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3_single.yaml > results/inference/Qwen-7B/Qwen_predict_3_single.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1.yaml | tee results/inference/Qwen-7B/Qwen_predict_1.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2.yaml | tee results/inference/Qwen-7B/Qwen_predict_2.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3.yaml | tee results/inference/Qwen-7B/Qwen_predict_3.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_1_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_2_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_3_single.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1_single.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2_single.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3_single.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_3_single.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2_single.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3_single.txt

View File

@@ -16,7 +16,7 @@ overwrite_cache: true
preprocessing_num_workers: 16
### output
-output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1
+output_dir: ./results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_ms1000_bf
logging_steps: 3
save_steps: 100
plot_loss: true
@@ -31,6 +31,7 @@ lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
+max_steps: 1000
### eval
val_size: 0.1