diff --git a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py
index 1a7d5860..277602ae 100644
--- a/src/llmtuner/hparams/finetuning_args.py
+++ b/src/llmtuner/hparams/finetuning_args.py
@@ -20,7 +20,8 @@ class FinetuningArguments:
                   BLOOM choices: [\"24\", \"30\", \"70\"], \
                   Falcon choices: [\"32\", \"60\"], \
                   Baichuan choices: [\"32\", \"40\"] \
-                  Qwen choices: [\"32\"]"}
+                  Qwen choices: [\"32\"], \
+                  XVERSE choices: [\"40\"]"}
     )
     num_layer_trainable: Optional[int] = field(
         default=3,
@@ -32,7 +33,8 @@ class FinetuningArguments:
                   LLaMA & LLaMA-2 choices: [\"mlp\", \"self_attn\"], \
                   BLOOM & Falcon choices: [\"mlp\", \"self_attention\"], \
                   Baichuan choices: [\"mlp\", \"self_attn\"], \
-                  Qwen choices: [\"attn\", \"mlp\"]"}
+                  Qwen choices: [\"mlp\", \"attn\"], \
+                  InternLM, XVERSE choices: the same as LLaMA."}
     )
     lora_rank: Optional[int] = field(
         default=8,
@@ -49,10 +51,11 @@ class FinetuningArguments:
     lora_target: Optional[str] = field(
         default="q_proj,v_proj",
         metadata={"help": "Name(s) of target modules to apply LoRA. Use commas to separate multiple modules. \
-                  LLaMA & LLaMA-2 & InternLM choices: [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
+                  LLaMA & LLaMA-2 choices: [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
                   BLOOM & Falcon choices: [\"query_key_value\", \"self_attention.dense\", \"mlp.dense\"], \
                   Baichuan choices: [\"W_pack\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
-                  Qwen choices: [\"c_attn\", \"c_proj\", \"w1\", \"w2\"]"}
+                  Qwen choices: [\"c_attn\", \"attn.c_proj\", \"w1\", \"w2\", \"mlp.c_proj\"], \
+                  InternLM, XVERSE choices: the same as LLaMA."}
     )
 
     def __post_init__(self):
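
Note: the help strings above document the per-model module names accepted by lora_target. A minimal sketch of how such a comma-separated string is typically split and handed to peft's LoraConfig follows; this is illustrative and not part of the patch, and the hyperparameter values are placeholders, not the project's defaults (XVERSE and InternLM would use the LLaMA module names per the "same as LLaMA" note above).

    # Sketch: turning the lora_target help-string format into a LoraConfig.
    # Assumes the peft library; values here are illustrative only.
    from peft import LoraConfig

    lora_target = "q_proj,v_proj"  # e.g. passed on the CLI as --lora_target q_proj,v_proj

    config = LoraConfig(
        r=8,  # mirrors the lora_rank default above
        target_modules=[name.strip() for name in lora_target.split(",")],
        task_type="CAUSAL_LM",
    )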