From 982a1cdd24dfa51535af3e49c7ea80fddc95b0ee Mon Sep 17 00:00:00 2001
From: codingma
Date: Sat, 13 Jul 2024 13:16:22 +0800
Subject: [PATCH 1/2] 1. fix output_dir in llama3_lora_pretrain.yaml 2. add
 llava1_5.yaml for inference

---
 examples/inference/llava1_5.yaml              | 3 +++
 examples/train_lora/llama3_lora_pretrain.yaml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
 create mode 100644 examples/inference/llava1_5.yaml

diff --git a/examples/inference/llava1_5.yaml b/examples/inference/llava1_5.yaml
new file mode 100644
index 00000000..75035141
--- /dev/null
+++ b/examples/inference/llava1_5.yaml
@@ -0,0 +1,3 @@
+model_name_or_path: llava-hf/llava-1.5-7b-hf
+template: vicuna
+visual_inputs: true
\ No newline at end of file
diff --git a/examples/train_lora/llama3_lora_pretrain.yaml b/examples/train_lora/llama3_lora_pretrain.yaml
index 5e8aaaef..7e3ea06c 100644
--- a/examples/train_lora/llama3_lora_pretrain.yaml
+++ b/examples/train_lora/llama3_lora_pretrain.yaml
@@ -15,7 +15,7 @@ overwrite_cache: true
 preprocessing_num_workers: 16
 
 ### output
-output_dir: saves/llama3-8b/lora/sft
+output_dir: saves/llama3-8b/lora/pretrain
 logging_steps: 10
 save_steps: 500
 plot_loss: true

From f618b80fa2173003b7e960dd20cecbede3db41c9 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Sat, 13 Jul 2024 20:30:06 +0800
Subject: [PATCH 2/2] Update llava1_5.yaml

---
 examples/inference/llava1_5.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/inference/llava1_5.yaml b/examples/inference/llava1_5.yaml
index 75035141..d613be68 100644
--- a/examples/inference/llava1_5.yaml
+++ b/examples/inference/llava1_5.yaml
@@ -1,3 +1,3 @@
 model_name_or_path: llava-hf/llava-1.5-7b-hf
 template: vicuna
-visual_inputs: true
\ No newline at end of file
+visual_inputs: true