From bfa2e166d7e2e52e3505d8a18469d04ec41a935a Mon Sep 17 00:00:00 2001
From: wql
Date: Thu, 22 Aug 2024 09:35:51 +0800
Subject: [PATCH] change: new train yaml

---
 batch_run.sh | 48 +++++++++----------
 .../Baichuan2-7B/Baichuan2_lora_sft_1.yaml | 41 ++++++++++++++++
 .../Baichuan2_lora_sft_1_single.yaml | 41 ++++++++++++++++
 .../Baichuan2-7B/Baichuan2_lora_sft_2.yaml | 41 ++++++++++++++++
 .../Baichuan2_lora_sft_2_single.yaml | 41 ++++++++++++++++
 .../Baichuan2-7B/Baichuan2_lora_sft_3.yaml | 41 ++++++++++++++++
 .../Baichuan2_lora_sft_3_single.yaml | 41 ++++++++++++++++
 .../ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml | 41 ++++++++++++++++
 .../ChatGLM2_lora_sft_1_single.yaml | 41 ++++++++++++++++
 .../ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml | 41 ++++++++++++++++
 .../ChatGLM2_lora_sft_2_single.yaml | 41 ++++++++++++++++
 .../ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml | 41 ++++++++++++++++
 .../ChatGLM2_lora_sft_3_single.yaml | 41 ++++++++++++++++
 .../Llama2-7B/llama2_lora_sft_1.yaml | 41 ++++++++++++++++
 .../Llama2-7B/llama2_lora_sft_1_single.yaml | 41 ++++++++++++++++
 .../Llama2-7B/llama2_lora_sft_2.yaml | 41 ++++++++++++++++
 .../Llama2-7B/llama2_lora_sft_2_single.yaml | 41 ++++++++++++++++
 .../Llama2-7B/llama2_lora_sft_3.yaml | 41 ++++++++++++++++
 .../Llama2-7B/llama2_lora_sft_3_single.yaml | 41 ++++++++++++++++
 .../lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml | 41 ++++++++++++++++
 .../Qwen-7B/Qwen_lora_sft_1_single.yaml | 41 ++++++++++++++++
 .../lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml | 41 ++++++++++++++++
 .../Qwen-7B/Qwen_lora_sft_2_single.yaml | 41 ++++++++++++++++
 .../lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml | 41 ++++++++++++++++
 .../Qwen-7B/Qwen_lora_sft_3_single.yaml | 41 ++++++++++++++++
 25 files changed, 1008 insertions(+), 24 deletions(-)
 create mode 100644 results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml
 create mode 100644 results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml
 create mode 100644 results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml
 create mode 100644 results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml
 create mode 100644 results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml
 create mode 100644 results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml
 create mode 100644 results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml
 create mode 100644 results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml
 create mode 100644 results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml
 create mode 100644 results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml
 create mode 100644 results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml
 create mode 100644 results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml
 create mode 100644 results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml
 create mode 100644 results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml
 create mode 100644 results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml
 create mode 100644 results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml
 create mode 100644 results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml
 create mode 100644 results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml
 create mode 100644 results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml
 create mode 100644 results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml
 create mode 100644 results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml
 create mode 100644 results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml
 create mode 100644 results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml
 create mode 100644 results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml

diff --git a/batch_run.sh b/batch_run.sh
index 56b379a9..1b6bd9db 100644
--- a/batch_run.sh
+++ b/batch_run.sh
@@ -1,9 +1,9 @@
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_1.txt
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_2.txt
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_3.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.yaml | tee results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.txt
+FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.txt
+FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.txt
+FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.txt
+CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.txt
+CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.txt
+CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.txt
 
 # FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1.yaml | tee results/inference/Llama2-7B/llama2_predict_1.txt
 # FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2.yaml | tee results/inference/Llama2-7B/llama2_predict_2.txt
@@ -12,26 +12,26 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_
 # CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2_single.yaml | tee results/inference/Llama2-7B/llama2_predict_2_single.txt
 # CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3_single.yaml | tee results/inference/Llama2-7B/llama2_predict_3_single.txt
 
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.txt
-# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml | tee results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.txt
 
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.txt
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.txt
-FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.txt
-CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml | tee results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.txt
+# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.txt
+# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.txt
 
-# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_1.txt
-# FORCE_TORCHRUN=1 llamafactory-cli train
results/lora_sft/Qwen-7B/Qwen_lora_sft_2.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_2.txt -# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_3.txt -# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.txt -# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.txt -# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.yaml | tee results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.txt +# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.txt +# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.txt +# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.txt +# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.txt +# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.txt +# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.txt # FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1.yaml | tee results/inference/Qwen-7B/Qwen_predict_1.txt # FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2.yaml | tee results/inference/Qwen-7B/Qwen_predict_2.txt diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml new file mode 100644 index 00000000..2d72254d --- /dev/null +++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/baichuan + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: baichuan +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml new file mode 100644 index 00000000..a05c246b --- /dev/null +++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/baichuan + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: baichuan +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: 
true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml new file mode 100644 index 00000000..6d2c1f49 --- /dev/null +++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/baichuan + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: baichuan +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml new file mode 100644 index 00000000..c0e80f6a --- /dev/null +++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/baichuan + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: baichuan +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml new file mode 100644 index 00000000..246ec950 --- /dev/null +++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/baichuan + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: baichuan +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train 
+per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml new file mode 100644 index 00000000..8d000a4e --- /dev/null +++ b/results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/baichuan + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: baichuan +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml new file mode 100644 index 00000000..87095852 --- /dev/null +++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/chatglm/data + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: chatglm2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml new file mode 100644 index 00000000..a424f350 --- /dev/null +++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/chatglm/data + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: chatglm2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 
+include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml new file mode 100644 index 00000000..57584c9f --- /dev/null +++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/chatglm/data + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: chatglm2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml new file mode 100644 index 00000000..8c24eec0 --- /dev/null +++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/chatglm/data + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: chatglm2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml new file mode 100644 index 00000000..b5f5c279 --- /dev/null +++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/chatglm/data + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: chatglm2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml 
b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml new file mode 100644 index 00000000..ad41d47d --- /dev/null +++ b/results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/chatglm/data + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: chatglm2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml new file mode 100644 index 00000000..af9ae3c0 --- /dev/null +++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: modelscope/Llama-2-7b-ms + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: llama2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +fp16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml new file mode 100644 index 00000000..f3cd2d6c --- /dev/null +++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: modelscope/Llama-2-7b-ms + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: llama2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +fp16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml new file mode 100644 index 00000000..3f19b20e --- /dev/null +++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: modelscope/Llama-2-7b-ms + +### method 
+stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: llama2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_2 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +fp16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml new file mode 100644 index 00000000..f649e9ea --- /dev/null +++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: modelscope/Llama-2-7b-ms + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: llama2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_2_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +fp16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml new file mode 100644 index 00000000..08791e89 --- /dev/null +++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: modelscope/Llama-2-7b-ms + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: llama2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_3 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +fp16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml new file mode 100644 index 00000000..2d3aaea1 --- /dev/null +++ b/results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: modelscope/Llama-2-7b-ms + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: llama2 +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: 
./results/lora_sft/Llama2-7B/llama2_lora_sft_3_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +fp16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml new file mode 100644 index 00000000..cbb067f7 --- /dev/null +++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/qwen + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: qwen +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_1 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml new file mode 100644 index 00000000..5a0cdb40 --- /dev/null +++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/qwen + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: qwen +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml new file mode 100644 index 00000000..15100500 --- /dev/null +++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/qwen + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: qwen +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_2 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 
+include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml new file mode 100644 index 00000000..86bb1001 --- /dev/null +++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/qwen + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: qwen +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml new file mode 100644 index 00000000..8ff184cf --- /dev/null +++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/qwen + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: qwen +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_3 +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file diff --git a/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml new file mode 100644 index 00000000..1c761a8e --- /dev/null +++ b/results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml @@ -0,0 +1,41 @@ +### model +model_name_or_path: ../../llm/qwen + +### method +stage: sft +do_train: true +finetuning_type: lora +lora_target: all + +### dataset +dataset: belle_1m +template: qwen +cutoff_len: 1024 +max_samples: 10000 +overwrite_cache: true +preprocessing_num_workers: 16 + +### output +output_dir: ./results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single +logging_steps: 3 +save_steps: 100 +plot_loss: true +overwrite_output_dir: true + +### train +per_device_train_batch_size: 2 +gradient_accumulation_steps: 8 +learning_rate: 1.0e-4 +num_train_epochs: 10.0 +lr_scheduler_type: cosine +warmup_ratio: 0.1 +bf16: true +ddp_timeout: 180000000 +max_steps: 1000 +include_num_input_tokens_seen: true + +### eval +val_size: 0.1 +per_device_eval_batch_size: 2 +eval_strategy: steps +eval_steps: 500 \ No newline at end of file
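Note on running the new configs: batch_run.sh drives every YAML added above with one repeated pattern (multi-GPU runs via FORCE_TORCHRUN=1, the *_single runs pinned to one GPU via CUDA_VISIBLE_DEVICES=0, with output tee'd to a .txt next to the config). Each config trains with per_device_train_batch_size 2 and gradient_accumulation_steps 8, i.e. an effective batch of 16 samples per device per optimizer step, capped at max_steps 1000. The snippet below is only an illustrative sketch of that same pattern written as a loop over results/lora_sft_2/; the glob and the derived log name are assumptions, not part of this patch or of batch_run.sh.

#!/bin/bash
# Illustrative sketch (not part of the patch): run every lora_sft_2 config,
# mirroring the per-model commands listed in batch_run.sh.
for cfg in results/lora_sft_2/*/*.yaml; do
    log="${cfg%.yaml}.txt"   # e.g. ..._lora_sft_1.yaml -> ..._lora_sft_1.txt
    if [[ "$cfg" == *_single.yaml ]]; then
        # *_single configs are run on a single GPU in batch_run.sh
        CUDA_VISIBLE_DEVICES=0 llamafactory-cli train "$cfg" | tee "$log"
    else
        # the remaining configs are launched through torchrun
        FORCE_TORCHRUN=1 llamafactory-cli train "$cfg" | tee "$log"
    fi
done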