#!/bin/bash
# DPO training of meta-llama/Llama-2-7b-hf with LoRA on a single GPU (device 0).
# Starts from the SFT LoRA adapter in ../../saves/LLaMA2-7B/lora/sft and,
# via --create_new_adapter, trains a fresh adapter for the DPO stage,
# saved to ../../saves/LLaMA2-7B/lora/dpo.

CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
    --stage dpo \
    --do_train \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \
    --create_new_adapter \
    --dataset comparison_gpt4_en \
    --dataset_dir ../../data \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --output_dir ../../saves/LLaMA2-7B/lora/dpo \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 1024 \
    --preprocessing_num_workers 16 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --lr_scheduler_type cosine \
    --logging_steps 10 \
    --warmup_steps 20 \
    --save_steps 100 \
    --eval_steps 100 \
    --evaluation_strategy steps \
    --load_best_model_at_end \
    --learning_rate 1e-5 \
    --num_train_epochs 1.0 \
    --max_samples 1000 \
    --val_size 0.1 \
    --dpo_ftx 1.0 \
    --plot_loss \
    --fp16
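
# Note: with per_device_train_batch_size=1 and gradient_accumulation_steps=8,
# the effective batch size is 1 * 8 = 8 samples per optimizer step on the
# single visible GPU.
# Note: as I understand LLaMA-Factory's dpo_ftx argument, --dpo_ftx 1.0 mixes
# a supervised fine-tuning loss into the DPO objective with coefficient 1.0;
# leaving it at its default of 0 should recover plain DPO.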