forked from p04798526/LLaMA-Factory-Mirror
add: include_num_input_tokens_seen
This commit is contained in:
parent d3f91c8e2f
commit c93a5b8b8f
@@ -0,0 +1,41 @@
### model
model_name_or_path: modelscope/Llama-2-7b-ms

### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: identity,alpaca_en_demo
template: llama2
cutoff_len: 1024
max_samples: 10000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: ./results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token
logging_steps: 3
save_steps: 100
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 10.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
max_steps: 100
include_num_input_tokens_seen: true

### eval
val_size: 0.1
per_device_eval_batch_size: 2
eval_strategy: steps
eval_steps: 500
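The option this commit adds, include_num_input_tokens_seen, is a standard transformers TrainingArguments flag: when enabled, the Trainer accumulates the number of input tokens processed and reports it as num_input_tokens_seen in each training log entry, which also lands in trainer_state.json under output_dir. A run with this config is typically launched via LLaMA-Factory's CLI, e.g. llamafactory-cli train <this-file>.yaml.

Below is a minimal sketch of reading the logged counter back after a run. It assumes the run completed and that transformers wrote trainer_state.json into the output_dir above; the script is illustrative and not part of this commit.

import json
import os

# Path taken from output_dir in the config above; trainer_state.json is
# written by transformers.Trainer during/after training.
state_path = os.path.join(
    "./results/lora_sft/Llama2-7B_2/llama2_lora_sft_1_test_token",
    "trainer_state.json",
)

with open(state_path) as f:
    state = json.load(f)

# With include_num_input_tokens_seen: true, training log entries carry a
# cumulative "num_input_tokens_seen" field alongside "loss" and "step".
for entry in state["log_history"]:
    if "loss" in entry and "num_input_tokens_seen" in entry:
        print(
            f"step {entry['step']:>4}  "
            f"tokens seen {entry['num_input_tokens_seen']:>12}  "
            f"loss {entry['loss']:.4f}"
        )

Tracking loss against tokens seen rather than optimizer steps makes runs with different batch sizes, gradient accumulation, or sequence lengths directly comparable, which is presumably the point of enabling the flag in this test config.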