modify export model

This commit is contained in:
BUAADreamer 2024-05-08 10:36:36 +08:00
parent b3a9ae4085
commit 0ca1d1967d
1 changed file with 6 additions and 3 deletions

View File

@@ -15,11 +15,9 @@ from .pt import run_pt
from .rm import run_rm
from .sft import run_sft
if TYPE_CHECKING:
from transformers import TrainerCallback
logger = get_logger(__name__)
@@ -52,7 +50,9 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None:
if model_args.adapter_name_or_path is not None and model_args.export_quantization_bit is not None:
raise ValueError("Please merge adapters before quantizing the model.")
tokenizer = load_tokenizer(model_args)["tokenizer"]
tokenizer_module = load_tokenizer(model_args)["tokenizer"]
tokenizer = tokenizer_module['tokenizer']
processor = tokenizer_module['processor']
get_template_and_fix_tokenizer(tokenizer, data_args.template)
model = load_model(tokenizer, model_args, finetuning_args) # must after fixing tokenizer to resize vocab
@@ -88,3 +88,6 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None:
tokenizer.push_to_hub(model_args.export_hub_model_id, token=model_args.hf_hub_token)
except Exception:
logger.warning("Cannot save tokenizer, please copy the files manually.")
if model_args.visual_inputs:
processor.image_processor.save_pretrained(model_args.export_dir)