From 746ceac74a71e501da074581376c99b32892154f Mon Sep 17 00:00:00 2001
From: wql
Date: Mon, 19 Aug 2024 09:57:13 +0800
Subject: [PATCH] train:test train

---
 .../llama2_lora_sft_1_max_step1000.yaml      | 40 +++++++++++++++++++
 .../llama2_lora_sft_1_no_ms_bf.yaml          | 39 ++++++++++++++++++
 2 files changed, 79 insertions(+)
 create mode 100644 results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_max_step1000.yaml
 create mode 100644 results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_no_ms_bf.yaml

diff --git a/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_max_step1000.yaml b/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_max_step1000.yaml
new file mode 100644
index 00000000..318219c8
--- /dev/null
+++ b/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_max_step1000.yaml
@@ -0,0 +1,40 @@
+### model
+model_name_or_path: modelscope/Llama-2-7b-ms
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: belle_1m
+template: llama2
+cutoff_len: 1024
+max_samples: 10000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1
+logging_steps: 3
+save_steps: 100
+plot_loss: true
+overwrite_output_dir: true
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 10.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+fp16: true
+ddp_timeout: 180000000
+max_steps: 1000
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+eval_strategy: steps
+eval_steps: 500
\ No newline at end of file
diff --git a/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_no_ms_bf.yaml b/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_no_ms_bf.yaml
new file mode 100644
index 00000000..a43c5e01
--- /dev/null
+++ b/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_no_ms_bf.yaml
@@ -0,0 +1,39 @@
+### model
+model_name_or_path: modelscope/Llama-2-7b-ms
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: belle_1m
+template: llama2
+cutoff_len: 1024
+max_samples: 10000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1
+logging_steps: 3
+save_steps: 100
+plot_loss: true
+overwrite_output_dir: true
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 10.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+eval_strategy: steps
+eval_steps: 500
\ No newline at end of file
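
Note: the two YAML files added above follow the LLaMA-Factory config layout (stage/finetuning_type/lora_target plus standard HF Trainer arguments), so a run would normally be launched by pointing the project's training CLI at one of them. A minimal sketch, assuming LLaMA-Factory is installed and the belle_1m dataset is registered in its dataset registry:

    # assumed invocation: launch the 1000-step fp16 variant added by this patch
    llamafactory-cli train results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_max_step1000.yaml

The two configs differ only in that the _max_step1000 variant trains in fp16 and caps training at max_steps: 1000, while the _no_ms_bf variant drops max_steps and trains in bf16.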