train:test train

wql 2024-08-19 09:57:13 +08:00
parent 539d4d08f1
commit 746ceac74a
2 changed files with 79 additions and 0 deletions

@@ -0,0 +1,40 @@
### model
model_name_or_path: modelscope/Llama-2-7b-ms

### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: belle_1m
template: llama2
cutoff_len: 1024
max_samples: 10000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1
logging_steps: 3
save_steps: 100
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 10.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
ddp_timeout: 180000000
max_steps: 1000

### eval
val_size: 0.1
per_device_eval_batch_size: 2
eval_strategy: steps
eval_steps: 500
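
The batch and schedule keys in this config together determine how long the run actually is. Below is a rough sanity-check sketch in plain Python; the single-GPU assumption and the treatment of val_size as a held-out fraction of max_samples are mine, not stated in the config.

# Back-of-the-envelope schedule implied by the config above.
# Assumption: a single GPU; with N devices the effective batch scales by N.
# Assumption: val_size is a fraction of max_samples held out for evaluation.
per_device_train_batch_size = 2
gradient_accumulation_steps = 8
max_samples = 10000
val_size = 0.1
num_train_epochs = 10.0
max_steps = 1000

effective_batch = per_device_train_batch_size * gradient_accumulation_steps  # 16
train_samples = int(max_samples * (1 - val_size))                            # 9000
steps_per_epoch = train_samples // effective_batch                           # 562
steps_for_all_epochs = int(steps_per_epoch * num_train_epochs)               # 5620

# In HF-Trainer-style arguments, a positive max_steps overrides num_train_epochs,
# so this run stops after 1000 optimizer steps rather than the full 10 epochs.
print("effective batch size:", effective_batch)
print("optimizer steps run :", min(steps_for_all_epochs, max_steps))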

@@ -0,0 +1,39 @@
### model
model_name_or_path: modelscope/Llama-2-7b-ms

### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: belle_1m
template: llama2
cutoff_len: 1024
max_samples: 10000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: ./results/lora_sft/Llama2-7B/llama2_lora_sft_1
logging_steps: 3
save_steps: 100
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 10.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
val_size: 0.1
per_device_eval_batch_size: 2
eval_strategy: steps
eval_steps: 500
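
The second config is the same recipe except that it switches mixed precision from fp16 to bf16 and drops max_steps, so training runs for the full 10 epochs. Below is a small Python sketch for diffing two such flat YAML files before launching; the file names are placeholders, since the committed paths are not shown here.

# Report every key whose value differs between two flat YAML configs.
# The paths below are hypothetical stand-ins for the two files in this commit.
import yaml

def load(path):
    with open(path) as f:
        return yaml.safe_load(f)

a = load("llama2_lora_sft_fp16.yaml")  # first file: fp16 with max_steps
b = load("llama2_lora_sft_bf16.yaml")  # second file: bf16, no max_steps

for key in sorted(set(a) | set(b)):
    if a.get(key) != b.get(key):
        print(f"{key}: {a.get(key)!r} -> {b.get(key)!r}")

# Expected output for these two configs:
#   bf16: None -> True
#   fp16: True -> None
#   max_steps: 1000 -> None

If these are LLaMA-Factory-style configs, as the key names and the llama2 template suggest, each file is typically launched with llamafactory-cli train <config>.yaml.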