# model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct

# method
stage: sft
do_train: true
finetuning_type: lora
lora_target: q_proj,v_proj

# dataset
dataset: identity,alpaca_gpt4_en
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
tokenized_path: saves/llama3-8b/dataset/sft

# output
output_dir: saves/llama3-8b/lora/sft
overwrite_output_dir: true