diff --git a/README.md b/README.md index 3eebf355..f3ced20e 100644 --- a/README.md +++ b/README.md @@ -149,34 +149,34 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ ## Supported Models -| Model | Model size | Default module | Template | -| -------------------------------------------------------- | -------------------------------- | ----------------- | --------- | -| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | W_pack | baichuan2 | -| [BLOOM](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - | -| [BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - | -| [ChatGLM3](https://huggingface.co/THUDM) | 6B | query_key_value | chatglm3 | -| [Command-R](https://huggingface.co/CohereForAI) | 35B/104B | q_proj,v_proj | cohere | -| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | q_proj,v_proj | deepseek | -| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | query_key_value | falcon | -| [Gemma/CodeGemma](https://huggingface.co/google) | 2B/7B | q_proj,v_proj | gemma | -| [GLM4](https://huggingface.co/THUDM) | 9B | query_key_value | glm4 | -| [InternLM2](https://huggingface.co/internlm) | 7B/20B | wqkv | intern2 | -| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - | -| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 | -| [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | q_proj,v_proj | llama3 | -| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | q_proj,v_proj | vicuna | -| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | q_proj,v_proj | mistral | -| [OLMo](https://huggingface.co/allenai) | 1B/7B | q_proj,v_proj | - | -| [PaliGemma](https://huggingface.co/google) | 3B | q_proj,v_proj | gemma | -| [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | q_proj,v_proj | - | -| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | qkv_proj | phi | -| [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | c_attn | qwen | -| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | q_proj,v_proj | qwen | -| [StarCoder2](https://huggingface.co/bigcode) | 3B/7B/15B | q_proj,v_proj | - | -| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | q_proj,v_proj | xverse | -| [Yi (1/1.5)](https://huggingface.co/01-ai) | 6B/9B/34B | q_proj,v_proj | yi | -| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | q_proj,v_proj | yi_vl | -| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | q_proj,v_proj | yuan | +| Model | Model size | Template | +| -------------------------------------------------------- | -------------------------------- | --------- | +| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 | +| [BLOOM](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - | +| [BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - | +| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 | +| [Command-R](https://huggingface.co/CohereForAI) | 35B/104B | cohere | +| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek | +| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon | +| [Gemma/CodeGemma](https://huggingface.co/google) | 2B/7B | gemma | +| [GLM4](https://huggingface.co/THUDM) | 9B | glm4 | +| [InternLM2](https://huggingface.co/internlm) | 7B/20B | intern2 | +| 
[LLaMA](https://github.com/facebookresearch/llama)        | 7B/13B/33B/65B                   | -         |
+| [LLaMA-2](https://huggingface.co/meta-llama)             | 7B/13B/70B                       | llama2    |
+| [LLaMA-3](https://huggingface.co/meta-llama)             | 8B/70B                           | llama3    |
+| [LLaVA-1.5](https://huggingface.co/llava-hf)             | 7B/13B                           | vicuna    |
+| [Mistral/Mixtral](https://huggingface.co/mistralai)      | 7B/8x7B/8x22B                    | mistral   |
+| [OLMo](https://huggingface.co/allenai)                   | 1B/7B                            | -         |
+| [PaliGemma](https://huggingface.co/google)               | 3B                               | gemma     |
+| [Phi-1.5/2](https://huggingface.co/microsoft)            | 1.3B/2.7B                        | -         |
+| [Phi-3](https://huggingface.co/microsoft)                | 4B/7B/14B                        | phi       |
+| [Qwen](https://huggingface.co/Qwen)                      | 1.8B/7B/14B/72B                  | qwen      |
+| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen)        | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | qwen      |
+| [StarCoder2](https://huggingface.co/bigcode)             | 3B/7B/15B                        | -         |
+| [XVERSE](https://huggingface.co/xverse)                  | 7B/13B/65B                       | xverse    |
+| [Yi (1/1.5)](https://huggingface.co/01-ai)               | 6B/9B/34B                        | yi        |
+| [Yi-VL](https://huggingface.co/01-ai)                    | 6B/34B                           | yi_vl     |
+| [Yuan](https://huggingface.co/IEITYuan)                  | 2B/51B/102B                      | yuan      |

> [!NOTE]
-> **Default module** is used for the `lora_target` argument, you can use `lora_target: all` to specify all the available modules for better convergence.
+> Use `lora_target: all` to specify all the available linear modules for better convergence.

diff --git a/README_zh.md b/README_zh.md
index 09a7f330..982c0123 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -149,41 +149,39 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd

## 模型

-| 模型名                                                    | 模型大小                         | 默认模块          | Template  |
-| -------------------------------------------------------- | -------------------------------- | ----------------- | --------- |
-| [Baichuan2](https://huggingface.co/baichuan-inc)          | 7B/13B                           | W_pack            | baichuan2 |
-| [BLOOM](https://huggingface.co/bigscience)                | 560M/1.1B/1.7B/3B/7.1B/176B      | query_key_value   | -         |
-| [BLOOMZ](https://huggingface.co/bigscience)               | 560M/1.1B/1.7B/3B/7.1B/176B      | query_key_value   | -         |
-| [ChatGLM3](https://huggingface.co/THUDM)                  | 6B                               | query_key_value   | chatglm3  |
-| [Command-R](https://huggingface.co/CohereForAI)           | 35B/104B                         | q_proj,v_proj     | cohere    |
-| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai)      | 7B/16B/67B/236B                  | q_proj,v_proj     | deepseek  |
-| [Falcon](https://huggingface.co/tiiuae)                   | 7B/11B/40B/180B                  | query_key_value   | falcon    |
-| [Gemma/CodeGemma](https://huggingface.co/google)          | 2B/7B                            | q_proj,v_proj     | gemma     |
-| [GLM4](https://huggingface.co/THUDM)                      | 9B                               | query_key_value   | glm4      |
-| [InternLM2](https://huggingface.co/internlm)              | 7B/20B                           | wqkv              | intern2   |
-| [LLaMA](https://github.com/facebookresearch/llama)        | 7B/13B/33B/65B                   | q_proj,v_proj     | -         |
-| [LLaMA-2](https://huggingface.co/meta-llama)              | 7B/13B/70B                       | q_proj,v_proj     | llama2    |
-| [LLaMA-3](https://huggingface.co/meta-llama)              | 8B/70B                           | q_proj,v_proj     | llama3    |
-| [LLaVA-1.5](https://huggingface.co/llava-hf)              | 7B/13B                           | q_proj,v_proj     | vicuna    |
-| [Mistral/Mixtral](https://huggingface.co/mistralai)       | 7B/8x7B/8x22B                    | q_proj,v_proj     | mistral   |
-| [OLMo](https://huggingface.co/allenai)                    | 1B/7B                            | q_proj,v_proj     | -         |
-| [PaliGemma](https://huggingface.co/google)                | 3B                               | q_proj,v_proj     | gemma     |
-| [Phi-1.5/2](https://huggingface.co/microsoft)             | 1.3B/2.7B                        | q_proj,v_proj     | -         |
-| [Phi-3](https://huggingface.co/microsoft)                 | 4B/7B/14B                        | qkv_proj          | phi       |
-| [Qwen](https://huggingface.co/Qwen)                       | 1.8B/7B/14B/72B                  | c_attn            | qwen      |
-| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen)         | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | q_proj,v_proj     | qwen      |
-| [StarCoder2](https://huggingface.co/bigcode)              | 3B/7B/15B                        | q_proj,v_proj     | -         |
-| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | q_proj,v_proj | xverse | -| [Yi (1/1.5)](https://huggingface.co/01-ai) | 6B/9B/34B | q_proj,v_proj | yi | -| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | q_proj,v_proj | yi_vl | -| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | q_proj,v_proj | yuan | +| 模型名 | 模型大小 | Template | +| -------------------------------------------------------- | -------------------------------- | --------- | +| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 | +| [BLOOM](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - | +| [BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - | +| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 | +| [Command-R](https://huggingface.co/CohereForAI) | 35B/104B | cohere | +| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek | +| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon | +| [Gemma/CodeGemma](https://huggingface.co/google) | 2B/7B | gemma | +| [GLM4](https://huggingface.co/THUDM) | 9B | glm4 | +| [InternLM2](https://huggingface.co/internlm) | 7B/20B | intern2 | +| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - | +| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 | +| [LLaMA-3](https://huggingface.co/meta-llama) | 8B/70B | llama3 | +| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | vicuna | +| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral | +| [OLMo](https://huggingface.co/allenai) | 1B/7B | - | +| [PaliGemma](https://huggingface.co/google) | 3B | gemma | +| [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | - | +| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi | +| [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | qwen | +| [Qwen1.5 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.8B/4B/7B/14B/32B/72B/110B | qwen | +| [StarCoder2](https://huggingface.co/bigcode) | 3B/7B/15B | - | +| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse | +| [Yi (1/1.5)](https://huggingface.co/01-ai) | 6B/9B/34B | yi | +| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl | +| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan | > [!NOTE] -> **默认模块**应作为 `lora_target` 参数的默认值,可使用 `lora_target: all` 参数指定全部模块以取得更好的效果。 -> > 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。 > -> 请务必在训练和推理时使用**完全一致**的模板。 +> 请务必在训练和推理时采用**完全一致**的模板。 项目所支持模型的完整列表请参阅 [constants.py](src/llamafactory/extras/constants.py)。 diff --git a/examples/extras/fsdp_qlora/llama3_lora_sft.yaml b/examples/extras/fsdp_qlora/llama3_lora_sft.yaml index 348459b8..084269ef 100644 --- a/examples/extras/fsdp_qlora/llama3_lora_sft.yaml +++ b/examples/extras/fsdp_qlora/llama3_lora_sft.yaml @@ -6,7 +6,7 @@ quantization_bit: 4 stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### ddp ddp_timeout: 180000000 diff --git a/examples/extras/loraplus/llama3_lora_sft.yaml b/examples/extras/loraplus/llama3_lora_sft.yaml index 960f613e..1ba654ec 100644 --- a/examples/extras/loraplus/llama3_lora_sft.yaml +++ b/examples/extras/loraplus/llama3_lora_sft.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all loraplus_lr_ratio: 16.0 ### dataset diff --git a/examples/lora_multi_gpu/llama3_lora_sft.yaml 
b/examples/lora_multi_gpu/llama3_lora_sft.yaml index 9be3c780..348e53b9 100644 --- a/examples/lora_multi_gpu/llama3_lora_sft.yaml +++ b/examples/lora_multi_gpu/llama3_lora_sft.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### ddp ddp_timeout: 180000000 diff --git a/examples/lora_multi_gpu/llama3_lora_sft_ds.yaml b/examples/lora_multi_gpu/llama3_lora_sft_ds.yaml index 41152243..1c432fa7 100644 --- a/examples/lora_multi_gpu/llama3_lora_sft_ds.yaml +++ b/examples/lora_multi_gpu/llama3_lora_sft_ds.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### ddp ddp_timeout: 180000000 diff --git a/examples/lora_multi_npu/llama3_lora_sft_ds.yaml b/examples/lora_multi_npu/llama3_lora_sft_ds.yaml index 1ed24d04..a0ec8aa1 100644 --- a/examples/lora_multi_npu/llama3_lora_sft_ds.yaml +++ b/examples/lora_multi_npu/llama3_lora_sft_ds.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### ddp ddp_timeout: 180000000 diff --git a/examples/lora_single_gpu/llama3_lora_dpo.yaml b/examples/lora_single_gpu/llama3_lora_dpo.yaml index 158c9e04..78344330 100644 --- a/examples/lora_single_gpu/llama3_lora_dpo.yaml +++ b/examples/lora_single_gpu/llama3_lora_dpo.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: dpo do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all pref_beta: 0.1 pref_loss: sigmoid # [sigmoid (dpo), orpo, simpo] diff --git a/examples/lora_single_gpu/llama3_lora_kto.yaml b/examples/lora_single_gpu/llama3_lora_kto.yaml index ead221e9..d5234c0a 100644 --- a/examples/lora_single_gpu/llama3_lora_kto.yaml +++ b/examples/lora_single_gpu/llama3_lora_kto.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: kto do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: kto_en_demo diff --git a/examples/lora_single_gpu/llama3_lora_ppo.yaml b/examples/lora_single_gpu/llama3_lora_ppo.yaml index 19e7ccb3..98c842f9 100644 --- a/examples/lora_single_gpu/llama3_lora_ppo.yaml +++ b/examples/lora_single_gpu/llama3_lora_ppo.yaml @@ -6,7 +6,7 @@ reward_model: saves/llama3-8b/lora/reward stage: ppo do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: identity,alpaca_en_demo diff --git a/examples/lora_single_gpu/llama3_lora_pretrain.yaml b/examples/lora_single_gpu/llama3_lora_pretrain.yaml index 9167a893..db435ca9 100644 --- a/examples/lora_single_gpu/llama3_lora_pretrain.yaml +++ b/examples/lora_single_gpu/llama3_lora_pretrain.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: pt do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: c4_demo diff --git a/examples/lora_single_gpu/llama3_lora_reward.yaml b/examples/lora_single_gpu/llama3_lora_reward.yaml index 91663057..1ce42ea4 100644 --- a/examples/lora_single_gpu/llama3_lora_reward.yaml +++ b/examples/lora_single_gpu/llama3_lora_reward.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: rm do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: dpo_en_demo diff --git 
a/examples/lora_single_gpu/llama3_lora_sft.yaml b/examples/lora_single_gpu/llama3_lora_sft.yaml index cc93d05a..651b636f 100644 --- a/examples/lora_single_gpu/llama3_lora_sft.yaml +++ b/examples/lora_single_gpu/llama3_lora_sft.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: identity,alpaca_en_demo diff --git a/examples/lora_single_gpu/llama3_preprocess.yaml b/examples/lora_single_gpu/llama3_preprocess.yaml index 86dad37b..34bb9efc 100644 --- a/examples/lora_single_gpu/llama3_preprocess.yaml +++ b/examples/lora_single_gpu/llama3_preprocess.yaml @@ -5,7 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: identity,alpaca_en_demo diff --git a/examples/lora_single_gpu/llava1_5_lora_sft.yaml b/examples/lora_single_gpu/llava1_5_lora_sft.yaml index 95c1d40d..df510a93 100644 --- a/examples/lora_single_gpu/llava1_5_lora_sft.yaml +++ b/examples/lora_single_gpu/llava1_5_lora_sft.yaml @@ -6,7 +6,7 @@ visual_inputs: true stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: mllm_demo diff --git a/examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml b/examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml index 23301de5..d54d6af6 100644 --- a/examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml +++ b/examples/qlora_single_gpu/llama3_lora_sft_aqlm.yaml @@ -5,7 +5,7 @@ model_name_or_path: ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16 stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: identity,alpaca_en_demo diff --git a/examples/qlora_single_gpu/llama3_lora_sft_awq.yaml b/examples/qlora_single_gpu/llama3_lora_sft_awq.yaml index 40a290a3..5cef178a 100644 --- a/examples/qlora_single_gpu/llama3_lora_sft_awq.yaml +++ b/examples/qlora_single_gpu/llama3_lora_sft_awq.yaml @@ -5,7 +5,7 @@ model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: identity,alpaca_en_demo diff --git a/examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml b/examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml index 6652d8cf..b308dcab 100644 --- a/examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml +++ b/examples/qlora_single_gpu/llama3_lora_sft_bitsandbytes.yaml @@ -6,7 +6,7 @@ quantization_bit: 4 stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: identity,alpaca_en_demo diff --git a/examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml b/examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml index 323ea7c6..b950042e 100644 --- a/examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml +++ b/examples/qlora_single_gpu/llama3_lora_sft_gptq.yaml @@ -5,7 +5,7 @@ model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-GPTQ stage: sft do_train: true finetuning_type: lora -lora_target: q_proj,v_proj +lora_target: all ### dataset dataset: identity,alpaca_en_demo diff --git a/src/llamafactory/extras/constants.py b/src/llamafactory/extras/constants.py index 687e16cc..4d9cb26d 100644 --- a/src/llamafactory/extras/constants.py +++ b/src/llamafactory/extras/constants.py @@ -20,8 +20,6 @@ CHOICES = ["A", "B", "C", "D"] DATA_CONFIG = "dataset_info.json" -DEFAULT_MODULE = 
defaultdict(str) - DEFAULT_TEMPLATE = defaultdict(str) FILEEXT2TYPE = { @@ -80,7 +78,6 @@ class DownloadSource(str, Enum): def register_model_group( models: Dict[str, Dict[DownloadSource, str]], - module: Optional[str] = None, template: Optional[str] = None, vision: bool = False, ) -> None: @@ -91,8 +88,6 @@ def register_model_group( else: assert prefix == name.split("-")[0], "prefix should be identical." SUPPORTED_MODELS[name] = path - if module is not None: - DEFAULT_MODULE[prefix] = module if template is not None: DEFAULT_TEMPLATE[prefix] = template if vision: @@ -127,7 +122,6 @@ register_model_group( DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan-13B-Chat", }, }, - module="W_pack", template="baichuan", ) @@ -151,7 +145,6 @@ register_model_group( DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-13B-Chat", }, }, - module="W_pack", template="baichuan2", ) @@ -171,7 +164,6 @@ register_model_group( DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-7b1", }, }, - module="query_key_value", ) @@ -190,7 +182,6 @@ register_model_group( DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-7b1-mt", }, }, - module="query_key_value", ) @@ -229,7 +220,6 @@ register_model_group( DownloadSource.MODELSCOPE: "ZhipuAI/chatglm2-6b", } }, - module="query_key_value", template="chatglm2", ) @@ -245,7 +235,6 @@ register_model_group( DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b", }, }, - module="query_key_value", template="chatglm3", ) @@ -344,7 +333,6 @@ register_model_group( DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-instruct", }, }, - module="Wqkv", template="dbrx", ) @@ -463,7 +451,6 @@ register_model_group( DownloadSource.MODELSCOPE: "modelscope/falcon-180B-chat", }, }, - module="query_key_value", template="falcon", ) @@ -512,7 +499,6 @@ register_model_group( DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat-1m", }, }, - module="query_key_value", template="glm4", ) @@ -559,7 +545,6 @@ register_model_group( DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2-chat-20b", }, }, - module="wqkv", template="intern2", ) @@ -581,7 +566,6 @@ register_model_group( DownloadSource.MODELSCOPE: "DeepLang/LingoWhale-8B", } }, - module="qkv_proj", ) @@ -868,7 +852,6 @@ register_model_group( DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-128k-instruct", }, }, - module="qkv_proj", template="phi", ) @@ -940,7 +923,6 @@ register_model_group( DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat-Int4", }, }, - module="c_attn", template="qwen", ) @@ -1153,7 +1135,6 @@ register_model_group( DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B-v2", }, }, - module="query,key_value", template="telechat", ) diff --git a/src/llamafactory/hparams/finetuning_args.py b/src/llamafactory/hparams/finetuning_args.py index b9322f18..08af31e4 100644 --- a/src/llamafactory/hparams/finetuning_args.py +++ b/src/llamafactory/hparams/finetuning_args.py @@ -24,12 +24,7 @@ class FreezeArguments: "help": ( "Name(s) of trainable modules for freeze (partial-parameter) fine-tuning. " "Use commas to separate multiple modules. " - "Use `all` to specify all the available modules. " - "LLaMA choices: [`mlp`, `self_attn`], " - "BLOOM & Falcon & ChatGLM choices: [`mlp`, `self_attention`], " - "Qwen choices: [`mlp`, `attn`], " - "InternLM2 choices: [`feed_forward`, `attention`], " - "Others choices: the same as LLaMA." + "Use `all` to specify all the available modules." ) }, ) @@ -79,13 +74,7 @@ class LoraArguments: "help": ( "Name(s) of target modules to apply LoRA. " "Use commas to separate multiple modules. 
" - "Use `all` to specify all the linear modules. " - "LLaMA choices: [`q_proj`, `k_proj`, `v_proj`, `o_proj`, `gate_proj`, `up_proj`, `down_proj`], " - "BLOOM & Falcon & ChatGLM choices: [`query_key_value`, `dense`, `dense_h_to_4h`, `dense_4h_to_h`], " - "Baichuan choices: [`W_pack`, `o_proj`, `gate_proj`, `up_proj`, `down_proj`], " - "Qwen choices: [`c_attn`, `attn.c_proj`, `w1`, `w2`, `mlp.c_proj`], " - "InternLM2 choices: [`wqkv`, `wo`, `w1`, `w2`, `w3`], " - "Others choices: the same as LLaMA." + "Use `all` to specify all the linear modules." ) }, ) diff --git a/src/llamafactory/webui/common.py b/src/llamafactory/webui/common.py index 62004bce..304b56a5 100644 --- a/src/llamafactory/webui/common.py +++ b/src/llamafactory/webui/common.py @@ -8,7 +8,6 @@ from yaml import safe_dump, safe_load from ..extras.constants import ( CHECKPOINT_NAMES, DATA_CONFIG, - DEFAULT_MODULE, DEFAULT_TEMPLATE, PEFT_METHODS, STAGES_USE_PAIR_DATA, @@ -118,13 +117,6 @@ def get_model_info(model_name: str) -> Tuple[str, str, bool]: return get_model_path(model_name), get_template(model_name), get_visual(model_name) -def get_module(model_name: str) -> str: - r""" - Gets the LoRA modules of this model. - """ - return DEFAULT_MODULE.get(get_prefix(model_name), "all") - - def get_template(model_name: str) -> str: r""" Gets the template name if the model is a chat model. diff --git a/src/llamafactory/webui/runner.py b/src/llamafactory/webui/runner.py index 4ec1531a..e8fdd129 100644 --- a/src/llamafactory/webui/runner.py +++ b/src/llamafactory/webui/runner.py @@ -8,7 +8,7 @@ from transformers.trainer import TRAINING_ARGS_NAME from ..extras.constants import PEFT_METHODS, TRAINING_STAGES from ..extras.misc import is_gpu_or_npu_available, torch_gc from ..extras.packages import is_gradio_available -from .common import DEFAULT_CACHE_DIR, get_module, get_save_dir, load_config +from .common import DEFAULT_CACHE_DIR, get_save_dir, load_config from .locales import ALERTS from .utils import abort_leaf_process, gen_cmd, get_eval_results, get_trainer_info, load_args, save_args, save_cmd @@ -159,7 +159,7 @@ class Runner: args["create_new_adapter"] = get("train.create_new_adapter") args["use_rslora"] = get("train.use_rslora") args["use_dora"] = get("train.use_dora") - args["lora_target"] = get("train.lora_target") or get_module(model_name) + args["lora_target"] = get("train.lora_target") or "all" args["additional_target"] = get("train.additional_target") or None if args["use_llama_pro"]: