chore: add LoRA SFT and predict template YAML files
parent a61372ee0f · commit fa9a9007f9
@@ -0,0 +1,42 @@
### model
model_name_or_path: ../../llm/baichuan

### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: belle_1m
template: baichuan
cutoff_len: 1024
max_samples: 10000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: ./results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single_step500
logging_steps: 3
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 10.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
max_steps: 500
include_num_input_tokens_seen: true
include_tokens_per_second: true

### eval
val_size: 0.1
per_device_eval_batch_size: 2
eval_strategy: steps
eval_steps: 500
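These keys follow the LLaMA-Factory / HF TrainingArguments schema, so the training budget can be read straight off the file: with a per-device batch of 2 and 8 gradient-accumulation steps, each optimizer step consumes 16 samples per GPU, and max_steps: 500 ends the run well before the configured 10 epochs. A minimal sanity-check sketch (the file name lora_sft.yaml is a placeholder for wherever this template is saved, and single-GPU execution is assumed):

import yaml  # requires PyYAML

# Placeholder path: wherever the SFT template above is saved.
with open("lora_sft.yaml") as f:
    cfg = yaml.safe_load(f)

world_size = 1  # single-GPU assumption; multiply by GPU count under DDP
effective_batch = (cfg["per_device_train_batch_size"]
                   * cfg["gradient_accumulation_steps"]
                   * world_size)                        # 2 * 8 * 1 = 16
samples_consumed = effective_batch * cfg["max_steps"]   # 16 * 500 = 8000
train_pool = round(cfg["max_samples"] * (1 - cfg["val_size"]))  # 9000 after val split

print(f"effective batch size: {effective_batch}")
print(f"samples consumed in 500 steps: {samples_consumed}")
print(f"training pool after val split: {train_pool}")
# 8000 < 9000, so the run stops inside the first pass over the data.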
@@ -0,0 +1,23 @@
### model
model_name_or_path: ../../llm/baichuan

### method
do_predict: true

### dataset
eval_dataset: alpaca_gpt4_zh
template: baichuan
cutoff_len: 1024
max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 16
include_tokens_per_second: true

### output
output_dir: ./results/inference/Baichuan2-7B/Baichuan2_predict_1
overwrite_output_dir: true

### eval
per_device_eval_batch_size: 2
predict_with_generate: true
ddp_timeout: 180000000
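The predict template keeps only the data and generation settings: do_predict plus predict_with_generate runs generation over the first 50 samples of alpaca_gpt4_zh instead of an optimizer loop. A quick sketch (file names are placeholders for wherever the two templates are saved) shows exactly which keys flip a run from training to inference:

import yaml  # requires PyYAML

# Placeholder names for the two templates added in this commit.
with open("lora_sft.yaml") as f:
    train_cfg = yaml.safe_load(f)
with open("predict.yaml") as f:
    predict_cfg = yaml.safe_load(f)

train_only = sorted(set(train_cfg) - set(predict_cfg))
predict_only = sorted(set(predict_cfg) - set(train_cfg))
print("train-only keys:  ", train_only)    # stage, do_train, lora_target, optimizer settings, ...
print("predict-only keys:", predict_only)  # do_predict, eval_dataset, predict_with_generate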