From c93a5b8b8f54b63d92577bf75ba1e1c0122ada1a Mon Sep 17 00:00:00 2001
From: wql
Date: Tue, 20 Aug 2024 09:42:58 +0800
Subject: [PATCH] add: add include_num_input_tokens_seen

---
 .../llama2_lora_sft_1_test_token.yaml         | 41 +++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token.yaml

diff --git a/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token.yaml b/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token.yaml
new file mode 100644
index 00000000..dd8f4233
--- /dev/null
+++ b/results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token.yaml
@@ -0,0 +1,41 @@
+### model
+model_name_or_path: modelscope/Llama-2-7b-ms
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: identity,alpaca_en_demo
+template: llama2
+cutoff_len: 1024
+max_samples: 10000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: ./results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token
+logging_steps: 3
+save_steps: 100
+plot_loss: true
+overwrite_output_dir: true
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 10.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+max_steps: 100
+include_num_input_tokens_seen: true
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+eval_strategy: steps
+eval_steps: 500
\ No newline at end of file
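
Reference note (not part of the patch): include_num_input_tokens_seen is a standard Hugging Face transformers.TrainingArguments option in recent releases; when enabled, the Trainer tracks the cumulative number of input tokens and reports it as num_input_tokens_seen in the logs and trainer state. The sketch below is illustrative only, assuming the YAML is ultimately translated into a TrainingArguments object by LLaMA-Factory; it is not LLaMA-Factory internals, and only mirrors a subset of the values from this config.

    from transformers import TrainingArguments

    # Illustrative sketch: values copied from the YAML above; the real
    # arguments object is built by LLaMA-Factory from the config file.
    args = TrainingArguments(
        output_dir="./results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token",
        per_device_train_batch_size=2,
        gradient_accumulation_steps=8,
        learning_rate=1e-4,
        lr_scheduler_type="cosine",
        warmup_ratio=0.1,
        bf16=True,
        logging_steps=3,
        save_steps=100,
        max_steps=100,
        # The option this patch enables: track and log the cumulative number
        # of input tokens seen during training (num_input_tokens_seen).
        include_num_input_tokens_seen=True,
    )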