diff --git a/results/lora_sft/Llama2-7B/llama2_lora_sft.yaml b/results/lora_sft/Llama2-7B/llama2_lora_sft.yaml
new file mode 100644
index 00000000..a83daada
--- /dev/null
+++ b/results/lora_sft/Llama2-7B/llama2_lora_sft.yaml
@@ -0,0 +1,39 @@
+### model
+model_name_or_path: modelscope/Llama-2-7b-ms
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+
+### dataset
+dataset: alpaca_gpt4_zh
+template: llama2
+cutoff_len: 1024
+max_samples: 10000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: ./saves/LLaMA2-7B/lora/train_24_8_13_13_16
+logging_steps: 3
+save_steps: 100
+plot_loss: true
+overwrite_output_dir: true
+
+### train
+per_device_train_batch_size: 2
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 10.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 2
+eval_strategy: steps
+eval_steps: 500
\ No newline at end of file
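
The keys above follow the LLaMA-Factory-style config schema (stage/finetuning_type/lora_target/template). As a quick sanity check on the hyperparameters, here is a minimal Python sketch, assuming the file is saved as llama2_lora_sft.yaml in the working directory, PyYAML is installed, and training runs on a single GPU; it derives the effective batch size and approximate step counts implied by the config:

    # Minimal sketch: load the config and derive the effective batch size and
    # rough optimizer-step counts. Single-GPU assumption; actual step counts
    # may differ slightly depending on how the trainer performs the split.
    import math
    import yaml

    with open("llama2_lora_sft.yaml") as f:
        cfg = yaml.safe_load(f)

    # Effective batch size = per-device batch * gradient accumulation steps.
    effective_bs = cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"]

    # Training examples after the 10% validation split (max_samples caps the dataset first).
    train_examples = int(cfg["max_samples"] * (1 - cfg["val_size"]))

    steps_per_epoch = math.ceil(train_examples / effective_bs)
    total_steps = int(steps_per_epoch * cfg["num_train_epochs"])
    warmup_steps = int(total_steps * cfg["warmup_ratio"])

    print(f"effective batch size : {effective_bs}")      # 2 * 8 = 16
    print(f"steps per epoch      : {steps_per_epoch}")    # ceil(9000 / 16) = 563
    print(f"total optimizer steps: {total_steps}")        # 563 * 10 = 5630
    print(f"warmup steps         : {warmup_steps}")       # 10% of total = 563

With save_steps: 100 and eval_steps: 500, this setup saves a checkpoint roughly every fifth of an epoch and evaluates on the held-out 10% split about once per epoch.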