From 8239907f578dc4b185cfb1789944db23d112fa2c Mon Sep 17 00:00:00 2001
From: BUAADreamer <1428195643@qq.com>
Date: Thu, 25 Apr 2024 01:01:59 +0800
Subject: [PATCH] remove error

Remove the unsloth fast path from load_mm_model so that the multimodal
loader always falls through to the generic Transformers loading path.

---
 src/llmtuner/model/loader.py | 23 -----------------------
 1 file changed, 23 deletions(-)

diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py
index f3856da7..a6c37922 100644
--- a/src/llmtuner/model/loader.py
+++ b/src/llmtuner/model/loader.py
@@ -202,29 +202,6 @@ def load_mm_model(
     patch_config(config, tokenizer, model_args, init_kwargs, is_trainable)
 
     model = None
-    if is_trainable and model_args.use_unsloth:
-        from unsloth import FastLanguageModel  # type: ignore
-
-        unsloth_kwargs = {
-            "model_name": model_args.model_name_or_path,
-            "max_seq_length": model_args.model_max_length,
-            "dtype": model_args.compute_dtype,
-            "load_in_4bit": model_args.quantization_bit == 4,
-            "token": model_args.hf_hub_token,
-            "device_map": {"": get_current_device()},
-            "rope_scaling": getattr(config, "rope_scaling", None),
-            "fix_tokenizer": False,
-            "trust_remote_code": True,
-        }
-        try:
-            model, _ = FastLanguageModel.from_pretrained(**unsloth_kwargs)
-        except NotImplementedError:
-            logger.warning("Unsloth does not support model type {}.".format(getattr(config, "model_type", None)))
-            model_args.use_unsloth = False
-
-        if model_args.adapter_name_or_path:
-            model_args.adapter_name_or_path = None
-            logger.warning("Unsloth does not support loading adapters.")
     if model is None:
         init_kwargs["config"] = config
         init_kwargs["pretrained_model_name_or_path"] = model_args.model_name_or_path
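
Note for reviewers: with the unsloth branch gone, `model` is still None
when control reaches the `if model is None:` check, so load_mm_model
always takes the generic Transformers path. The sketch below condenses
that remaining path for reference. It is a hypothetical, standalone
approximation: the function name `load_mm_model_sketch` is invented, and
the closing AutoModelForVision2Seq.from_pretrained call is an assumption
about code below this hunk, not something shown in the diff.

    # Condensed, hypothetical sketch of the load path that remains
    # after this patch. Not the actual loader implementation.
    from transformers import AutoConfig, AutoModelForVision2Seq

    def load_mm_model_sketch(model_name_or_path: str, **init_kwargs):
        config = AutoConfig.from_pretrained(model_name_or_path)
        init_kwargs["config"] = config
        init_kwargs["pretrained_model_name_or_path"] = model_name_or_path
        model = None  # the deleted unsloth branch used to sit here
        if model is None:
            # Always reached now that the unsloth fast path is removed;
            # the exact Auto class used downstream is an assumption.
            model = AutoModelForVision2Seq.from_pretrained(**init_kwargs)
        return model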