diff --git a/batch_run.sh b/batch_run.sh
index 1b6bd9db..b9363a0c 100644
--- a/batch_run.sh
+++ b/batch_run.sh
@@ -1,56 +1,56 @@
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.txt
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.txt
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.txt
+FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_log.txt
+FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_log.txt
+FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_log.txt
+CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single_log.txt
+CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single_log.txt
+CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single_log.txt

-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1.yaml | tee results/inference/Llama2-7B/llama2_predict_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2.yaml | tee results/inference/Llama2-7B/llama2_predict_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3.yaml | tee results/inference/Llama2-7B/llama2_predict_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1_single.yaml | tee results/inference/Llama2-7B/llama2_predict_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2_single.yaml | tee results/inference/Llama2-7B/llama2_predict_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3_single.yaml | tee results/inference/Llama2-7B/llama2_predict_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1.yaml | tee results/inference/Llama2-7B/llama2_predict_1_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2.yaml | tee results/inference/Llama2-7B/llama2_predict_2_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3.yaml | tee results/inference/Llama2-7B/llama2_predict_3_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1_single.yaml | tee results/inference/Llama2-7B/llama2_predict_1_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2_single.yaml | tee results/inference/Llama2-7B/llama2_predict_2_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3_single.yaml | tee results/inference/Llama2-7B/llama2_predict_3_single_log.txt

-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single_log.txt

-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single_log.txt

-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single_log.txt

-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1.yaml | tee results/inference/Qwen-7B/Qwen_predict_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2.yaml | tee results/inference/Qwen-7B/Qwen_predict_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3.yaml | tee results/inference/Qwen-7B/Qwen_predict_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1.yaml | tee results/inference/Qwen-7B/Qwen_predict_1_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2.yaml | tee results/inference/Qwen-7B/Qwen_predict_2_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3.yaml | tee results/inference/Qwen-7B/Qwen_predict_3_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_1_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_2_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_3_single_log.txt

-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single_log.txt

-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2_log.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2_single_log.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3_single_log.txt
diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml
index 2d72254d..bb89b1f7 100644
--- a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml
+++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml
index a05c246b..70b697f1 100644
--- a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml
+++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml
index 6d2c1f49..af14b4df 100644
--- a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml
+++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml
index c0e80f6a..de7b4a11 100644
--- a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml
+++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml
index 246ec950..6704b2bc 100644
--- a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml
+++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml
index 8d000a4e..785bd604 100644
--- a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml
+++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml
index 87095852..cd08d3a6 100644
--- a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml
+++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml
index a424f350..331c4aaf 100644
--- a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml
+++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml
index 57584c9f..f86be996 100644
--- a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml
+++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml
index 8c24eec0..376ea958 100644
--- a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml
+++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml
index b5f5c279..f6d13652 100644
--- a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml
+++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml
index ad41d47d..4ed25d4d 100644
--- a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml
+++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml
index af9ae3c0..62bfe478 100644
--- a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml
+++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ fp16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml
index f3cd2d6c..4977b9d7 100644
--- a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml
+++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ fp16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml
index 3f19b20e..fe46a60c 100644
--- a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml
+++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_2
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ fp16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml
index f649e9ea..ce394af8 100644
--- a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml
+++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_2_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ fp16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml
index 08791e89..7fce6daf 100644
--- a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml
+++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_3
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ fp16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml
index 2d3aaea1..ab0695ea 100644
--- a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml
+++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_3_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ fp16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml
index cbb067f7..b9991fc6 100644
--- a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml
+++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_1
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml
index 5a0cdb40..359d1fad 100644
--- a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml
+++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml
index 15100500..b0b0b3d9 100644
--- a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml
+++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_2
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml
index 86bb1001..7004b44a 100644
--- a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml
+++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml
index 8ff184cf..6586b9e0 100644
--- a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml
+++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_3
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1
diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml
index 1c761a8e..469971eb 100644
--- a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml
+++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml
@@ -18,7 +18,7 @@ preprocessing_num_workers: 16

 ### output
 output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single
 logging_steps: 3
-save_steps: 100
+save_steps: 500
 plot_loss: true
 overwrite_output_dir: true
@@ -33,6 +33,7 @@ bf16: true
 ddp_timeout: 180000000
 max_steps: 1000
 include_num_input_tokens_seen: true
+include_tokens_per_second: true

 ### eval
 val_size: 0.1