diff --git a/scripts/loftq_init.py b/scripts/loftq_init.py
index 4d2c01b9..ad976673 100644
--- a/scripts/loftq_init.py
+++ b/scripts/loftq_init.py
@@ -67,7 +67,7 @@ def quantize_loftq(
     loftq_dir = os.path.join(output_dir, "loftq_init")
 
     # Save LoftQ model
-    setattr(peft_model.peft_config["default"], "base_model_name_or_path", output_dir)
+    setattr(peft_model.peft_config["default"], "base_model_name_or_path", os.path.abspath(output_dir))
     setattr(peft_model.peft_config["default"], "init_lora_weights", True)  # don't apply loftq again
     peft_model.save_pretrained(loftq_dir, safe_serialization=save_safetensors)
     print("Adapter weights saved in {}".format(loftq_dir))
diff --git a/scripts/pissa_init.py b/scripts/pissa_init.py
index ad9d161c..78b3fde0 100644
--- a/scripts/pissa_init.py
+++ b/scripts/pissa_init.py
@@ -62,6 +62,7 @@ def quantize_pissa(
     pissa_dir = os.path.join(output_dir, "pissa_init")
 
     # Save PiSSA model
+    setattr(peft_model.peft_config["default"], "base_model_name_or_path", os.path.abspath(output_dir))
     setattr(peft_model.peft_config["default"], "init_lora_weights", True)  # don't apply pissa again
     peft_model.save_pretrained(pissa_dir, safe_serialization=save_safetensors)
     print("Adapter weights saved in {}".format(pissa_dir))
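
The change matters because PEFT records `base_model_name_or_path` in the saved `adapter_config.json` and resolves it again at load time; a relative `output_dir` only works if the adapter is later loaded from the same working directory. Below is a minimal sketch of the downstream load path that depends on this field, assuming an adapter saved by the scripts above; the local path `output/loftq_init` is hypothetical.

```python
# Sketch: loading an adapter saved by loftq_init.py / pissa_init.py.
# Hypothetical paths; adjust to wherever output_dir actually points.
import os

from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM

adapter_dir = os.path.join("output", "loftq_init")  # hypothetical output_dir

# PeftConfig reads base_model_name_or_path from adapter_config.json.
# If that value is a relative path, this lookup breaks whenever the
# current working directory differs from the one used at save time;
# os.path.abspath() in the patch makes the reference location-independent.
config = PeftConfig.from_pretrained(adapter_dir)
base_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, adapter_dir)
```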