From 9dc6a296e327c5ff27cbd1697437d9d3145e3d9a Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Sat, 12 Aug 2023 22:02:43 +0800
Subject: [PATCH] tiny fix

---
 src/llmtuner/tuner/core/loader.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/llmtuner/tuner/core/loader.py b/src/llmtuner/tuner/core/loader.py
index 47c72de1..8de98387 100644
--- a/src/llmtuner/tuner/core/loader.py
+++ b/src/llmtuner/tuner/core/loader.py
@@ -83,17 +83,17 @@ def load_model_and_tokenizer(
 
     # Set RoPE scaling
     if model_args.rope_scaling is not None:
-        require_version("transformers>=4.31.0", "RoPE scaling requires transformers>=4.31.0")
-
         if hasattr(config, "use_dynamic_ntk"): # for Qwen models
             if is_trainable:
-                logger.warning("Qwen model does not support rope scaling in training.")
+                logger.warning("Qwen model does not support RoPE scaling in training.")
             else:
                 setattr(config, "use_dynamic_ntk", True)
                 setattr(config, "use_logn_attn", True)
                 logger.info("Using dynamic NTK scaling.")
 
         elif hasattr(config, "rope_scaling"): # for LLaMA models
+            require_version("transformers>=4.31.0", "RoPE scaling requires transformers>=4.31.0")
+
             if is_trainable:
                 if model_args.rope_scaling == "dynamic":
                     logger.warning(
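
Note (not part of the patch itself): below is a minimal standalone sketch of how the RoPE-scaling block reads after this change, with the `config`, `model_args`, `is_trainable`, and `logger` names assumed from loader.py. Moving `require_version` into the LLaMA branch means the transformers>=4.31.0 requirement is only enforced when `config.rope_scaling` is actually used, while Qwen's built-in dynamic NTK path stays ungated.

    # Sketch only; identifiers mirror loader.py, the wrapping function name is illustrative.
    import logging

    from transformers.utils.versions import require_version

    logger = logging.getLogger(__name__)


    def set_rope_scaling(config, model_args, is_trainable: bool) -> None:
        """Reconstruction of the RoPE-scaling block after this patch."""
        if model_args.rope_scaling is None:
            return

        if hasattr(config, "use_dynamic_ntk"):  # for Qwen models: built-in dynamic NTK, no version gate
            if is_trainable:
                logger.warning("Qwen model does not support RoPE scaling in training.")
            else:
                setattr(config, "use_dynamic_ntk", True)
                setattr(config, "use_logn_attn", True)
                logger.info("Using dynamic NTK scaling.")
        elif hasattr(config, "rope_scaling"):  # for LLaMA models: rely on transformers' rope_scaling
            # The version check now lives only in this branch, where it is actually needed.
            require_version("transformers>=4.31.0", "RoPE scaling requires transformers>=4.31.0")
            # ... the rest of this branch (training-time warning, rope_scaling config) is
            # unchanged by the patch and omitted here.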