From 555ca8d780a1fbaf42e73450f5eb33048329d921 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Tue, 25 Jun 2024 02:55:50 +0800
Subject: [PATCH] lint

---
 src/llamafactory/hparams/parser.py | 5 +----
 tests/data/test_formatter.py       | 4 +---
 2 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/src/llamafactory/hparams/parser.py b/src/llamafactory/hparams/parser.py
index a4b7f7a5..d4bcfbc6 100644
--- a/src/llamafactory/hparams/parser.py
+++ b/src/llamafactory/hparams/parser.py
@@ -212,10 +212,7 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
     ):
         raise ValueError("Distributed training does not support layer-wise GaLore.")
 
-    if (
-        finetuning_args.use_badam
-        and training_args.parallel_mode == ParallelMode.DISTRIBUTED
-    ):
+    if finetuning_args.use_badam and training_args.parallel_mode == ParallelMode.DISTRIBUTED:
         if finetuning_args.badam_mode == "ratio":
             raise ValueError("Ratio-based BAdam does not yet support distributed training, use layer-wise BAdam.")
         elif not is_deepspeed_zero3_enabled():
diff --git a/tests/data/test_formatter.py b/tests/data/test_formatter.py
index a01e8a7e..37b21dc5 100644
--- a/tests/data/test_formatter.py
+++ b/tests/data/test_formatter.py
@@ -113,9 +113,7 @@ def test_glm4_tool_formatter():
     assert formatter.apply(content=json.dumps(tools)) == [
         "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,"
         "你的任务是针对用户的问题和要求提供适当的答复和支持。# 可用工具\n\n"
-        "## test_tool\n\n{}\n在调用上述函数时,请使用 Json 格式表示调用的参数。".format(
-            json.dumps(tools[0], indent=4)
-        )
+        "## test_tool\n\n{}\n在调用上述函数时,请使用 Json 格式表示调用的参数。".format(json.dumps(tools[0], indent=4))
     ]
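
Note for reviewers: both hunks are pure formatting changes that join statements the formatter previously wrapped; behavior is unchanged. For context on what the first hunk collapses onto one line, the guard rejects ratio-based BAdam under distributed training and allows only the layer-wise mode there. Below is a minimal runnable sketch of that logic; FinetuningArgs, TrainingArgs, and check_badam are hypothetical stand-ins for illustration only, not llamafactory's actual hparams classes or API:

    from dataclasses import dataclass

    @dataclass
    class FinetuningArgs:          # hypothetical stand-in, not the real hparams class
        use_badam: bool = False
        badam_mode: str = "layer"  # "ratio" or "layer"

    @dataclass
    class TrainingArgs:            # hypothetical stand-in, not the real hparams class
        parallel_mode: str = "NOT_DISTRIBUTED"

    def check_badam(finetuning_args: FinetuningArgs, training_args: TrainingArgs) -> None:
        # Mirrors the reformatted guard: ratio-based BAdam is rejected under
        # distributed training; layer-wise BAdam is the supported mode there.
        if finetuning_args.use_badam and training_args.parallel_mode == "DISTRIBUTED":
            if finetuning_args.badam_mode == "ratio":
                raise ValueError("Ratio-based BAdam does not yet support distributed training, use layer-wise BAdam.")

    try:
        check_badam(FinetuningArgs(use_badam=True, badam_mode="ratio"),
                    TrainingArgs(parallel_mode="DISTRIBUTED"))
    except ValueError as err:
        print(err)  # the guard fires for ratio mode under distributed training

The second hunk is the same kind of change: the .format(json.dumps(tools[0], indent=4)) call fits within the linter's line-length limit, so the wrapped form is joined onto one line; the test's expected string is unchanged.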