forked from p04798526/LLaMA-Factory-Mirror
change: change llama2_lora_sft.yaml
parent 1ee249021b
commit 41c42f67a2
@@ -5,31 +5,31 @@ model_name_or_path: modelscope/Llama-2-7b-ms
 stage: sft
 do_train: true
 finetuning_type: lora
-lora_target: all
+lora_target: q_proj,v_proj

 ### dataset
 dataset: alpaca_zh
 template: llama2
 cutoff_len: 1024
-max_samples: 1000
+max_samples: 100000
 overwrite_cache: true
 preprocessing_num_workers: 16

 ### output
-output_dir: ./saves/LLaMA2-7B/lora/train_24_8_12_15_46
-logging_steps: 10
-save_steps: 500
+output_dir: ./saves/LLaMA2-7B/lora/train_24_8_12_16_46
+logging_steps: 5
+save_steps: 100
 plot_loss: true
 overwrite_output_dir: true

 ### train
 per_device_train_batch_size: 2
 gradient_accumulation_steps: 8
-learning_rate: 1.0e-4
-num_train_epochs: 3.0
+learning_rate: 5e-5
+num_train_epochs: 5.0
 lr_scheduler_type: cosine
 warmup_ratio: 0.1
-bf16: true
+fp16: true
 ddp_timeout: 180000000

 ### eval
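In short, the new values restrict the LoRA adapters to the attention query and value projections (q_proj, v_proj) instead of all linear layers, raise the sample cap from 1000 to 100000, halve the learning rate (1.0e-4 to 5e-5) while training for 5 epochs instead of 3, and switch mixed precision from bf16 to fp16. The effective batch size is unchanged: per_device_train_batch_size 2 x gradient_accumulation_steps 8 = 16 per device. A config like this is normally launched through LLaMA-Factory's CLI; a minimal sketch, assuming the file sits at the usual examples path (the diff does not show the full path):

    llamafactory-cli train examples/train_lora/llama2_lora_sft.yaml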