diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py
index 2f203b1d..81f0b7f6 100644
--- a/src/llmtuner/model/adapter.py
+++ b/src/llmtuner/model/adapter.py
@@ -107,14 +107,18 @@ def init_adapter(
                 adapter_to_merge = model_args.adapter_name_or_path

             for adapter in adapter_to_merge:
-                model: "LoraModel" = PeftModel.from_pretrained(model, adapter)
+                model: "LoraModel" = PeftModel.from_pretrained(
+                    model, adapter, offload_folder=model_args.offload_folder
+                )
                 model = model.merge_and_unload()

             if len(adapter_to_merge) > 0:
                 logger.info("Merged {} adapter(s).".format(len(adapter_to_merge)))

             if adapter_to_resume is not None:  # resume lora training
-                model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable)
+                model = PeftModel.from_pretrained(
+                    model, adapter_to_resume, is_trainable=is_trainable, offload_folder=model_args.offload_folder
+                )

     if is_trainable and adapter_to_resume is None:  # create new lora weights while training
         if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
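For reference, `PeftModel.from_pretrained` accepts an `offload_folder` keyword that is used when parts of the model have been offloaded to disk, which is why the patch threads `model_args.offload_folder` through both call sites. Below is a minimal sketch of the same pattern in isolation; the `OffloadArguments` dataclass and `merge_adapters` helper are illustrative stand-ins, not code from this repository.

```python
from dataclasses import dataclass, field
from typing import List, Optional

from peft import PeftModel
from transformers import PreTrainedModel


@dataclass
class OffloadArguments:
    """Illustrative stand-in for the offload-related field on ModelArguments."""

    offload_folder: str = field(
        default="offload",
        metadata={"help": "Directory used when model weights are offloaded to disk."},
    )


def merge_adapters(
    model: PreTrainedModel,
    adapter_paths: List[str],
    offload_folder: Optional[str] = None,
) -> PreTrainedModel:
    """Merge each LoRA adapter into the base model, forwarding `offload_folder`
    so adapters can still be loaded when some weights live on disk."""
    for adapter in adapter_paths:
        # Same pattern as the patched call sites: pass offload_folder explicitly.
        peft_model = PeftModel.from_pretrained(model, adapter, offload_folder=offload_folder)
        model = peft_model.merge_and_unload()
    return model
```

A caller would pass the value from its own argument object, e.g. `merge_adapters(model, paths, offload_folder=OffloadArguments().offload_folder)`, mirroring how the patch forwards `model_args.offload_folder`.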