train: change lr

This commit is contained in:
wql 2024-08-13 07:36:30 +08:00
parent 9fce0acb9b
commit dc953dd514
1 changed file with 3 additions and 3 deletions

View File

@@ -8,7 +8,7 @@ finetuning_type: lora
lora_target: q_proj,v_proj
### dataset
-dataset: alpaca_en
+dataset: alpaca_gpt4_zh
template: llama2
cutoff_len: 1024
max_samples: 100000
@@ -16,7 +16,7 @@ overwrite_cache: true
preprocessing_num_workers: 16
### output
-output_dir: ./saves/LLaMA2-7B/lora/train_24_8_12_23_21
+output_dir: ./saves/LLaMA2-7B/lora/train_24_8_13_07_26
logging_steps: 3
save_steps: 100
plot_loss: true
@@ -25,7 +25,7 @@ overwrite_output_dir: true
### train
per_device_train_batch_size: 2
gradient_accumulation_steps: 8
-learning_rate: 5.0e-5
+learning_rate: 1.5e-4
num_train_epochs: 10
lr_scheduler_type: cosine
warmup_ratio: 0.1