From 99ab7a8c1c966232faa11b6a42b9740d9a20ace3 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Mon, 15 Jul 2024 01:16:26 +0800
Subject: [PATCH] allow computing rouge in training

---
 src/llamafactory/hparams/parser.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/llamafactory/hparams/parser.py b/src/llamafactory/hparams/parser.py
index cca8c505..00901f7d 100644
--- a/src/llamafactory/hparams/parser.py
+++ b/src/llamafactory/hparams/parser.py
@@ -200,9 +200,6 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
     if training_args.max_steps == -1 and data_args.streaming:
         raise ValueError("Please specify `max_steps` in streaming mode.")
 
-    if training_args.do_train and training_args.predict_with_generate:
-        raise ValueError("`predict_with_generate` cannot be set as True while training.")
-
     if training_args.do_train and data_args.dataset is None:
         raise ValueError("Please specify dataset for training.")
 
@@ -211,6 +208,9 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
     ):
         raise ValueError("Please specify dataset for evaluation.")
 
+    if training_args.predict_with_generate and data_args.eval_dataset is None:
+        raise ValueError("Cannot use `predict_with_generate` if `eval_dataset` is None.")
+
     if training_args.do_train and model_args.quantization_device_map == "auto":
         raise ValueError("Cannot use device map for quantized models in training.")