diff --git a/src/llmtuner/model/utils.py b/src/llmtuner/model/utils.py
index d3a5e1ed..0eabae25 100644
--- a/src/llmtuner/model/utils.py
+++ b/src/llmtuner/model/utils.py
@@ -181,8 +181,8 @@ def resize_embedding_layer(model: "PreTrainedModel", tokenizer: "PreTrainedToken
         logger.warning("Current model does not support resizing token embeddings.")
         return

-    old_vocab_size = model.get_input_embeddings().weight.size(0)
-    if len(tokenizer) > old_vocab_size:
+    current_embedding_size = model.get_input_embeddings().weight.size(0)
+    if len(tokenizer) > current_embedding_size:
         model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)
-        new_vocab_size = model.get_input_embeddings().weight.size(0)
-        logger.info("Resized token embeddings from {} to {}.".format(old_vocab_size, new_vocab_size))
+        new_embedding_size = model.get_input_embeddings().weight.size(0)
+        logger.info("Resized token embeddings from {} to {}.".format(current_embedding_size, new_embedding_size))
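The rename matters because `resize_token_embeddings` is called with `pad_to_multiple_of=64`, so the embedding matrix can end up larger than the tokenizer's vocabulary; `current_embedding_size` describes what `weight.size(0)` actually measures, where `old_vocab_size` did not. A minimal sketch of the effect (not part of this patch; it assumes a local `transformers` install, and the `gpt2` checkpoint is chosen arbitrarily for illustration):

```python
# Sketch: with pad_to_multiple_of, the embedding row count and the tokenizer's
# vocabulary size can legitimately differ, so calling the former a "vocab size"
# was misleading. Model choice (gpt2) is illustrative only.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

tokenizer.add_tokens(["<extra_0>"])  # vocab: 50257 -> 50258

# Same call the patched function makes: rounds the embedding matrix up to the
# next multiple of 64 (per the transformers docs, this helps tensor-core usage).
model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)

embedding_size = model.get_input_embeddings().weight.size(0)
print(len(tokenizer), embedding_size)  # 50258 50304 -- the sizes diverge
```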