hiyouga 2023-09-28 01:02:11 +08:00
parent c902236397
commit d2ebd225db
2 changed files with 2 additions and 2 deletions


@@ -100,7 +100,7 @@ def preprocess_dataset(
         return model_inputs

     def preprocess_packed_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
-        # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
+        # build inputs with format `<bos> X Y <eos>` and labels with format `<bos> X Y <eos>`
         # we do not mask the inputs in packed training.
         model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
         input_ids, labels = [], []
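
The corrected comment describes sequence packing: multiple tokenized examples are concatenated into one token stream and cut into fixed-length blocks, and because block boundaries no longer line up with prompt/response boundaries, the labels simply mirror the input ids instead of masking the prompt with `<ignore>`. A minimal sketch of that idea, not the repository's implementation; the function name, `block_size`, and `eos_token_id` are illustrative:

```python
from typing import Dict, List

def pack_supervised_examples(
    tokenized: List[List[int]], block_size: int, eos_token_id: int
) -> Dict[str, List[List[int]]]:
    # Concatenate every example's token ids into one stream,
    # closing each example with <eos>.
    stream: List[int] = []
    for ids in tokenized:
        stream.extend(ids + [eos_token_id])
    # Cut the stream into fixed-size blocks. Labels are a plain copy of
    # the inputs: no <ignore> masking of the prompt part in packed training.
    model_inputs: Dict[str, List[List[int]]] = {
        "input_ids": [], "attention_mask": [], "labels": []
    }
    for i in range(0, len(stream) - block_size + 1, block_size):
        block = stream[i : i + block_size]
        model_inputs["input_ids"].append(block)
        model_inputs["attention_mask"].append([1] * block_size)
        model_inputs["labels"].append(list(block))
    return model_inputs
```

Training then computes the loss on every token in the block, which is why the packed path needs no `<ignore>` labels.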


@@ -173,7 +173,7 @@ class LlamaFlashAttention2(LlamaAttention):
             state = state.reshape(bsz * num_group, group_size, self.num_heads, self.head_dim)

         if attention_mask is not None:
-            logger.warning_once("Padded sequences are less efficient.")
+            logger.warning_once("Padded sequences are less efficient in FlashAttention.")
             batch_size = query_states.shape[0]
             # -q_len: assumes left padding
             unpadded_q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(query_states, attention_mask[:, -q_len:])
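
For context, the reworded warning fires on the un-padding path: FlashAttention's variable-length kernels consume a flattened tensor of real tokens plus cumulative sequence lengths, so a padded batch pays for an extra unpad/re-pad round trip, and the `attention_mask[:, -q_len:]` slice only lines up with the queries when padding sits on the left. A rough pure-PyTorch sketch of the four values `unpad_input` returns, illustrative only, not the `flash_attn` implementation:

```python
import torch

def unpad_input_sketch(hidden_states: torch.Tensor, attention_mask: torch.Tensor):
    # hidden_states: (batch, seqlen, ...) padded tensor
    # attention_mask: (batch, seqlen), 1 for real tokens, 0 for padding
    seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)
    # Flat positions of the real tokens across the whole batch.
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen = int(seqlens.max())
    # cu_seqlens = [0, len0, len0 + len1, ...]: the boundaries the varlen
    # kernels use to locate each sequence inside the flattened tensor.
    cu_seqlens = torch.nn.functional.pad(
        torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0)
    )
    flat = hidden_states.reshape(-1, *hidden_states.shape[2:])
    return flat[indices], indices, cu_seqlens, max_seqlen
```

The packed training path in the first file sidesteps this cost entirely: every block is full, so the attention mask is all ones and no unpadding is needed.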