From 8f7819fcaa00f6ddcf59552c2866d8fc5659d0e9 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Wed, 19 Jul 2023 17:07:33 +0800
Subject: [PATCH] fix #194

---
 src/llmtuner/hparams/finetuning_args.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py
index 23bb324b..f43e5786 100644
--- a/src/llmtuner/hparams/finetuning_args.py
+++ b/src/llmtuner/hparams/finetuning_args.py
@@ -16,9 +16,10 @@ class FinetuningArguments:
         default=32,
         metadata={"help": "Number of decoder blocks in the model. \
                   LLaMA choices: [\"32\", \"40\", \"60\", \"80\"], \
+                  LLaMA-2 choices: [\"32\", \"40\", \"80\"], \
                   BLOOM choices: [\"24\", \"30\", \"70\"], \
                   Falcon choices: [\"32\", \"60\"], \
-                  Baichuan choices: [\"32\"]"}
+                  Baichuan choices: [\"32\", \"40\"]"}
     )
     num_layer_trainable: Optional[int] = field(
         default=3,
@@ -27,7 +28,7 @@ class FinetuningArguments:
     name_module_trainable: Optional[Literal["mlp", "self_attn", "self_attention"]] = field(
         default="mlp",
         metadata={"help": "Name of trainable modules for Freeze fine-tuning. \
-                  LLaMA choices: [\"mlp\", \"self_attn\"], \
+                  LLaMA & LLaMA-2 choices: [\"mlp\", \"self_attn\"], \
                   BLOOM & Falcon choices: [\"mlp\", \"self_attention\"], \
                   Baichuan choices: [\"mlp\", \"self_attn\"]"}
     )
@@ -46,7 +47,7 @@ class FinetuningArguments:
     lora_target: Optional[str] = field(
         default="q_proj,v_proj",
         metadata={"help": "Name(s) of target modules to apply LoRA. Use commas to separate multiple modules. \
-                  LLaMA choices: [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
+                  LLaMA & LLaMA-2 choices: [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
                   BLOOM & Falcon choices: [\"query_key_value\", \"self_attention.dense\", \"mlp.dense\"], \
                   Baichuan choices: [\"W_pack\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"]"}
     )
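
A minimal usage sketch (not part of the patch): it assumes the dataclass above is
importable as llmtuner.hparams.finetuning_args.FinetuningArguments and is parsed
with transformers.HfArgumentParser, the usual convention for fields declared with
metadata={"help": ...}; the project's real entry point may wire this up differently.

    # Sketch: parse the Freeze / LoRA options documented in the help strings above.
    from transformers import HfArgumentParser
    from llmtuner.hparams.finetuning_args import FinetuningArguments  # assumed import path

    parser = HfArgumentParser(FinetuningArguments)
    # e.g. LoRA fine-tuning of a Baichuan model, targeting its fused W_pack projection
    (finetuning_args,) = parser.parse_args_into_dataclasses(
        args=["--lora_target", "W_pack", "--name_module_trainable", "self_attn"]
    )
    print(finetuning_args.lora_target)            # "W_pack"
    print(finetuning_args.name_module_trainable)  # "self_attn"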