diff --git a/src/llamafactory/chat/vllm_engine.py b/src/llamafactory/chat/vllm_engine.py
index 8d602655..ba0cc1b3 100644
--- a/src/llamafactory/chat/vllm_engine.py
+++ b/src/llamafactory/chat/vllm_engine.py
@@ -59,6 +59,7 @@ class VllmEngine(BaseEngine):
             "disable_log_requests": True,
             "enforce_eager": model_args.vllm_enforce_eager,
             "enable_lora": model_args.adapter_name_or_path is not None,
+            "max_lora_rank": model_args.vllm_max_lora_rank,
         }
 
         if model_args.visual_inputs:
diff --git a/src/llamafactory/hparams/model_args.py b/src/llamafactory/hparams/model_args.py
index ac70bb3c..5885bb09 100644
--- a/src/llamafactory/hparams/model_args.py
+++ b/src/llamafactory/hparams/model_args.py
@@ -117,6 +117,10 @@ class ModelArguments:
         default=False,
         metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."},
     )
+    vllm_max_lora_rank: int = field(
+        default=8,
+        metadata={"help": "Maximum rank of all LoRAs in the vLLM engine."},
+    )
     offload_folder: str = field(
         default="offload",
         metadata={"help": "Path to offload model weights."},
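Below is a minimal sketch (not part of this PR) illustrating why `max_lora_rank` matters when serving adapters through vLLM: the engine pre-allocates LoRA buffers up to this rank and rejects adapters whose rank exceeds it. It uses vLLM's offline `LLM` API directly rather than the `VllmEngine` wrapper touched here; the model name, adapter path, adapter id, and the rank value 16 are placeholders.

```python
# Sketch: serving a LoRA adapter with vLLM when the adapter's rank exceeds
# the default of 8 (the same default this PR exposes as vllm_max_lora_rank).
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

# If the adapter was trained with lora_rank=16, the engine must be configured
# with max_lora_rank >= 16, otherwise loading the adapter fails.
llm = LLM(
    model="meta-llama/Llama-2-7b-hf",   # placeholder base model
    enable_lora=True,
    max_lora_rank=16,                   # mirrors the new vllm_max_lora_rank argument
)

outputs = llm.generate(
    ["Hello, my name is"],
    SamplingParams(max_tokens=32),
    # LoRARequest(name, adapter id, local adapter path) -- all placeholders
    lora_request=LoRARequest("my_adapter", 1, "path/to/lora_adapter"),
)
print(outputs[0].outputs[0].text)
```

With this change, the same effect is achieved through LLaMA-Factory's `ModelArguments` by setting `vllm_max_lora_rank` to at least the rank the adapter was trained with.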