forked from p04798526/LLaMA-Factory-Mirror

train: train_24_8_12_23_21

commit 4b0b73c570
parent 4e88b01cd1
@@ -8,7 +8,7 @@ finetuning_type: lora
 lora_target: q_proj,v_proj
 
 ### dataset
-dataset: alpaca_en
+dataset: alpaca_zh
 template: llama2
 cutoff_len: 1024
 max_samples: 100000
@@ -16,7 +16,7 @@ overwrite_cache: true
 preprocessing_num_workers: 16
 
 ### output
-output_dir: ./saves/LLaMA2-7B/lora/train_24_8_12_16_46
+output_dir: ./saves/LLaMA2-7B/lora/train_24_8_12_23_21
 logging_steps: 3
 save_steps: 100
 plot_loss: true
@@ -34,7 +34,7 @@ ddp_timeout: 180000000
 
 ### eval
 val_size: 0.1
-per_device_eval_batch_size: 1
+per_device_eval_batch_size: 1  # 1 is the maximum
 eval_strategy: steps
 eval_steps: 500
 
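For reference, below is a minimal sketch of how the changed sections of the training config read after this commit. It only restates the keys visible in the hunks above; the rest of the file (model, method, and train sections) is unchanged and not shown here.

```yaml
### dataset
dataset: alpaca_zh            # changed from alpaca_en
template: llama2
cutoff_len: 1024
max_samples: 100000

### output
output_dir: ./saves/LLaMA2-7B/lora/train_24_8_12_23_21   # new run directory
logging_steps: 3
save_steps: 100
plot_loss: true

### eval
val_size: 0.1
per_device_eval_batch_size: 1  # 1 is the maximum
eval_strategy: steps
eval_steps: 500
```

Assuming this is one of the standard LLaMA-Factory example configs, a run like this is typically launched with `llamafactory-cli train <path-to-this-yaml>`; the exact file path is not shown in the diff.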