From d2ebd225dbb922adec99c1eb774c16f5cb973d2c Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 28 Sep 2023 01:02:11 +0800
Subject: [PATCH] tiny fix

---
 src/llmtuner/dsets/preprocess.py           | 2 +-
 src/llmtuner/extras/patches/llama_patch.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llmtuner/dsets/preprocess.py b/src/llmtuner/dsets/preprocess.py
index 6ee2ee1c..a062076d 100644
--- a/src/llmtuner/dsets/preprocess.py
+++ b/src/llmtuner/dsets/preprocess.py
@@ -100,7 +100,7 @@ def preprocess_dataset(
         return model_inputs
 
     def preprocess_packed_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
-        # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
+        # build inputs with format `<bos> X Y <eos>` and labels with format `<bos> X Y <eos>`
         # we do not mask the inputs in packed training.
         model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
         input_ids, labels = [], []
diff --git a/src/llmtuner/extras/patches/llama_patch.py b/src/llmtuner/extras/patches/llama_patch.py
index 930d3a25..cc22041d 100644
--- a/src/llmtuner/extras/patches/llama_patch.py
+++ b/src/llmtuner/extras/patches/llama_patch.py
@@ -173,7 +173,7 @@ class LlamaFlashAttention2(LlamaAttention):
             state = state.reshape(bsz * num_group, group_size, self.num_heads, self.head_dim)
 
         if attention_mask is not None:
-            logger.warning_once("Padded sequences are less efficient.")
+            logger.warning_once("Padded sequences are less efficient in FlashAttention.")
             batch_size = query_states.shape[0]
             # -q_len: assumes left padding
             unpadded_q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(query_states, attention_mask[:, -q_len:])
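
Note on the first hunk: it only corrects the comment. In packed supervised training the labels are an exact copy of the input ids, so prompt tokens are not replaced with the ignore index as they are in the non-packed path. Below is a minimal sketch of that behavior, assuming already-tokenized (prompt_ids, response_ids) pairs and a hypothetical pack_examples helper; it is not the repository's implementation.

    # Sketch of packed supervised preprocessing (illustrative names, not the repo's code).
    from typing import Dict, List, Tuple

    IGNORE_INDEX = -100  # the non-packed path masks prompt tokens with this value


    def pack_examples(
        pairs: List[Tuple[List[int], List[int]]], eos_token_id: int, cutoff_len: int
    ) -> Dict[str, List[List[int]]]:
        model_inputs: Dict[str, List[List[int]]] = {"input_ids": [], "attention_mask": [], "labels": []}
        input_ids: List[int] = []
        for prompt_ids, response_ids in pairs:
            # inputs: `<bos> X Y <eos>` blocks concatenated back to back
            input_ids += prompt_ids + response_ids + [eos_token_id]
        # labels: an exact copy of input_ids -- prompt tokens are NOT masked with
        # IGNORE_INDEX in packed training, which is what the corrected comment states
        labels = list(input_ids)
        for i in range(0, len(input_ids) // cutoff_len * cutoff_len, cutoff_len):
            model_inputs["input_ids"].append(input_ids[i: i + cutoff_len])
            model_inputs["attention_mask"].append([1] * cutoff_len)
            model_inputs["labels"].append(labels[i: i + cutoff_len])
        return model_inputs

For example, pack_examples([([1, 10, 11], [12, 2])], eos_token_id=2, cutoff_len=4) yields identical input_ids and labels chunks. The second hunk only makes the existing warning string more specific; the surrounding unpadding code and its left-padding assumption are unchanged.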