From 29a4e49dfee30e9a072b3c8035d2365add6f5f7c Mon Sep 17 00:00:00 2001
From: wql
Date: Thu, 22 Aug 2024 10:46:22 +0800
Subject: [PATCH] add: add test yaml

---
 results/lora_sft_2/test/test1.yaml | 42 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 results/lora_sft_2/test/test1.yaml

diff --git a/results/lora_sft_2/test/test1.yaml b/results/lora_sft_2/test/test1.yaml
new file mode 100644
index 00000000..33701533
--- /dev/null
+++ b/results/lora_sft_2/test/test1.yaml
@@ -0,0 +1,42 @@
+### model
+model_name_or_path: modelscope/Llama-2-7b-ms
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: belle_1m
+template: llama2
+cutoff_len: 1024
+max_samples: 10000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1
+logging_steps: 3
+save_steps: 500
+plot_loss: true
+overwrite_output_dir: true
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 10.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+fp16: true
+ddp_timeout: 180000000
+max_steps: 100
+include_num_input_tokens_seen: true
+include_tokens_per_second: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+eval_strategy: steps
+eval_steps: 500