LLaMA-Factory-Mirror/examples/lora_single_gpu/pretrain.sh

#!/bin/bash
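# Continued pre-training (--stage pt) of Llama-2-7B with LoRA on a single GPU,
# using the bundled c4_demo corpus. LoRA adapters are attached to the q_proj
# and v_proj attention projections; sequences are truncated to 1024 tokens and
# the effective batch size is 1 x 8 gradient-accumulation steps = 8.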
CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \
--stage pt \
--do_train \
--model_name_or_path meta-llama/Llama-2-7b-hf \
--dataset c4_demo \
--dataset_dir ../../data \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--output_dir ../../saves/LLaMA2-7B/lora/pretrain \
--overwrite_cache \
--overwrite_output_dir \
--cutoff_len 1024 \
--preprocessing_num_workers 16 \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 8 \
--lr_scheduler_type cosine \
--logging_steps 10 \
--warmup_steps 20 \
--save_steps 100 \
--eval_steps 100 \
--evaluation_strategy steps \
--load_best_model_at_end \
--learning_rate 5e-5 \
--num_train_epochs 3.0 \
--max_samples 10000 \
--val_size 0.1 \
--plot_loss \
--fp16
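# Note: checkpoints and the loss plot (--plot_loss) are written to the
# --output_dir above; with --load_best_model_at_end, the checkpoint with the
# lowest eval loss (the HF Trainer default metric) is restored after training.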