diff --git a/src/llmtuner/__init__.py b/src/llmtuner/__init__.py
index bbc1420b..3b5665f8 100644
--- a/src/llmtuner/__init__.py
+++ b/src/llmtuner/__init__.py
@@ -6,4 +6,4 @@
 from llmtuner.tuner import export_model, run_exp
 from llmtuner.webui import create_ui, create_web_demo
 
-__version__ = "0.1.6"
+__version__ = "0.1.7"
diff --git a/src/llmtuner/tuner/sft/trainer.py b/src/llmtuner/tuner/sft/trainer.py
index 1ddaec1f..17cb3949 100644
--- a/src/llmtuner/tuner/sft/trainer.py
+++ b/src/llmtuner/tuner/sft/trainer.py
@@ -55,7 +55,7 @@ class Seq2SeqPeftTrainer(PeftTrainer):
                 self.tokenizer.pad_token_id * torch.ones_like(generated_tokens[:, :max(prompt_len, label_len)])
             )
 
-        return (loss, generated_tokens, labels)
+        return loss, generated_tokens, labels
 
     def _pad_tensors_to_target_len(
         self,
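The change to the `return` statement is purely cosmetic: in Python, `return (loss, generated_tokens, labels)` and `return loss, generated_tokens, labels` produce the same tuple, so dropping the parentheses does not alter behavior. The context lines above it show the interesting logic: the prompt portion of each generated sequence is overwritten with pad tokens so that only the newly generated continuation remains. Below is a minimal standalone sketch of that masking step; the pad id, sequence lengths, and tensor contents are invented for illustration and are not taken from the trainer itself.

```python
import torch

# Hypothetical values for illustration only; the real trainer reads
# pad_token_id from the tokenizer and the lengths from the batch.
pad_token_id = 0
prompt_len, label_len = 4, 3

# A fake batch of 2 generated sequences, 8 tokens each (prompt + continuation).
generated_tokens = torch.arange(1, 17).view(2, 8)

# Mask everything up to max(prompt_len, label_len) with the pad token,
# mirroring the expression in the diff's context lines.
cutoff = max(prompt_len, label_len)
generated_tokens[:, :cutoff] = pad_token_id * torch.ones_like(
    generated_tokens[:, :cutoff]
)

print(generated_tokens)
# tensor([[ 0,  0,  0,  0,  5,  6,  7,  8],
#         [ 0,  0,  0,  0, 13, 14, 15, 16]])
```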