From 860549b99b520ae9509559b18646f83fd95933b8 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Fri, 26 Apr 2024 02:49:39 +0800
Subject: [PATCH] update hparam name

---
 src/llmtuner/hparams/model_args.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py
index 97b908e4..be65cd27 100644
--- a/src/llmtuner/hparams/model_args.py
+++ b/src/llmtuner/hparams/model_args.py
@@ -81,6 +81,10 @@ class ModelArguments:
         default=False,
         metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."},
     )
+    visual_inputs: bool = field(
+        default=False,
+        metadata={"help": "Whether or not to use multimodal LLM that accepts visual inputs."},
+    )
     moe_aux_loss_coef: Optional[float] = field(
         default=None,
         metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},
@@ -169,10 +173,6 @@ class ModelArguments:
         default=False,
         metadata={"help": "For debugging purposes, print the status of the parameters in the model."},
     )
-    use_mllm: bool = field(
-        default=False,
-        metadata={"help": "Whether use Multimodal LLM."},
-    )
 
     def __post_init__(self):
         self.compute_dtype = None
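
For reference, here is a minimal, hypothetical sketch of how the renamed option surfaces to users, assuming the dataclass is parsed with transformers' HfArgumentParser (the usual pattern for HF-style training scripts): each dataclass field becomes a CLI flag, so this patch effectively replaces the user-facing `--use_mllm` flag with `--visual_inputs`. The standalone script below is illustrative and not part of the patch.

# Illustrative sketch only (not part of this patch). Assumes transformers'
# HfArgumentParser, which maps dataclass fields to command-line flags.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ModelArguments:
    # Mirrors the field added by this patch; all other fields omitted.
    visual_inputs: bool = field(
        default=False,
        metadata={"help": "Whether or not to use multimodal LLM that accepts visual inputs."},
    )


if __name__ == "__main__":
    # HfArgumentParser turns each dataclass field into a CLI flag, so the
    # rename changes the flag from --use_mllm to --visual_inputs.
    parser = HfArgumentParser(ModelArguments)
    (model_args,) = parser.parse_args_into_dataclasses()
    print("visual_inputs =", model_args.visual_inputs)

Under this assumption, a training invocation would pass the new name, e.g. `--visual_inputs true`, while any script still passing `--use_mllm` would fail to parse after this change.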