From b9f87cdc11b3fe712574b91455dc722b69c60c66 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Wed, 13 Mar 2024 12:33:45 +0800
Subject: [PATCH] fix #2802

---
 src/llmtuner/model/adapter.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py
index 2f203b1d..81f0b7f6 100644
--- a/src/llmtuner/model/adapter.py
+++ b/src/llmtuner/model/adapter.py
@@ -107,14 +107,18 @@ def init_adapter(
                 adapter_to_merge = model_args.adapter_name_or_path

             for adapter in adapter_to_merge:
-                model: "LoraModel" = PeftModel.from_pretrained(model, adapter)
+                model: "LoraModel" = PeftModel.from_pretrained(
+                    model, adapter, offload_folder=model_args.offload_folder
+                )
                 model = model.merge_and_unload()

             if len(adapter_to_merge) > 0:
                 logger.info("Merged {} adapter(s).".format(len(adapter_to_merge)))

             if adapter_to_resume is not None:  # resume lora training
-                model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable)
+                model = PeftModel.from_pretrained(
+                    model, adapter_to_resume, is_trainable=is_trainable, offload_folder=model_args.offload_folder
+                )

         if is_trainable and adapter_to_resume is None:  # create new lora weights while training
             if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
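
Note on the change: the patch forwards model_args.offload_folder into PeftModel.from_pretrained so that LoRA adapters can be attached to a base model whose weights are partly offloaded to disk (e.g. when loaded with device_map="auto" and an offload folder). A minimal sketch of the same call pattern outside llmtuner follows; the model name, adapter path, and "offload" directory are placeholders, not values taken from the patch.

    # Sketch: attach a LoRA adapter to a disk-offloaded base model.
    # "base-model", "path/to/adapter", and "offload" are hypothetical placeholders.
    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    base = AutoModelForCausalLM.from_pretrained(
        "base-model", device_map="auto", offload_folder="offload"
    )
    # Without offload_folder here, PEFT/Accelerate may raise a ValueError asking
    # for an offload directory when some base weights are mapped to "disk";
    # passing it mirrors what the patch does with model_args.offload_folder.
    model = PeftModel.from_pretrained(base, "path/to/adapter", offload_folder="offload")
    model = model.merge_and_unload()  # fold the LoRA weights into the base model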