From 4135e69406c09f3892276e4fee7e5757a52a42d6 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Sun, 21 Jul 2024 17:07:45 +0800
Subject: [PATCH] fix flashattn + packing

---
 src/llamafactory/data/processors/feedback.py |  8 +++----
 src/llamafactory/data/processors/pairwise.py |  9 ++++----
 .../data/processors/supervised.py            | 22 ++++++++++++-------
 .../data/processors/unsupervised.py          |  6 ++---
 4 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/src/llamafactory/data/processors/feedback.py b/src/llamafactory/data/processors/feedback.py
index 8eadeda0..bed0c33c 100644
--- a/src/llamafactory/data/processors/feedback.py
+++ b/src/llamafactory/data/processors/feedback.py
@@ -38,7 +38,7 @@ def _encode_feedback_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
 ) -> Tuple[List[int], List[int], List[int], List[int], bool]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -67,10 +67,10 @@ def _encode_feedback_example(
         prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + prompt_ids
         kl_prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + kl_prompt_ids

-    source_len, target_len = infer_seqlen(len(prompt_ids), len(response_ids), data_args.cutoff_len)
+    source_len, target_len = infer_seqlen(len(prompt_ids), len(response_ids), cutoff_len)
     prompt_ids = prompt_ids[:source_len]
     response_ids = response_ids[:target_len]
-    kl_source_len, kl_target_len = infer_seqlen(len(kl_prompt_ids), len(kl_response_ids), data_args.cutoff_len)
+    kl_source_len, kl_target_len = infer_seqlen(len(kl_prompt_ids), len(kl_response_ids), cutoff_len)
     kl_prompt_ids = kl_prompt_ids[:kl_source_len]
     kl_response_ids = kl_response_ids[:kl_target_len]

@@ -120,7 +120,7 @@ def preprocess_feedback_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
         )
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))
diff --git a/src/llamafactory/data/processors/pairwise.py b/src/llamafactory/data/processors/pairwise.py
index 9084c683..ddf885b5 100644
--- a/src/llamafactory/data/processors/pairwise.py
+++ b/src/llamafactory/data/processors/pairwise.py
@@ -37,7 +37,7 @@ def _encode_pairwise_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
 ) -> Tuple[List[int], List[int], List[int], List[int]]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -55,9 +55,8 @@ def _encode_pairwise_example(
         image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
         prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + prompt_ids

-    source_len, target_len = infer_seqlen(
-        len(prompt_ids), max(len(chosen_ids), len(rejected_ids)), data_args.cutoff_len
-    )  # consider the response is more important
+    # consider the response is more important
+    source_len, target_len = infer_seqlen(len(prompt_ids), max(len(chosen_ids), len(rejected_ids)), cutoff_len)
     prompt_ids = prompt_ids[:source_len]
     chosen_ids = chosen_ids[:target_len]
     rejected_ids = rejected_ids[:target_len]
@@ -105,7 +104,7 @@ def preprocess_pairwise_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
         )
         model_inputs["chosen_input_ids"].append(chosen_input_ids)
         model_inputs["chosen_attention_mask"].append([1] * len(chosen_input_ids))
diff --git a/src/llamafactory/data/processors/supervised.py b/src/llamafactory/data/processors/supervised.py
index 22039920..d4583b98 100644
--- a/src/llamafactory/data/processors/supervised.py
+++ b/src/llamafactory/data/processors/supervised.py
@@ -38,7 +38,9 @@ def _encode_supervised_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
+    train_on_prompt: bool,
+    mask_history: bool,
 ) -> Tuple[List[int], List[int]]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -54,22 +56,22 @@ def _encode_supervised_example(
     encoded_pairs = template.encode_multiturn(tokenizer, messages, system, tools)
     total_length = 1 if template.efficient_eos else 0
     for turn_idx, (source_ids, target_ids) in enumerate(encoded_pairs):
-        if total_length >= data_args.cutoff_len:
+        if total_length >= cutoff_len:
             break

-        source_len, target_len = infer_seqlen(len(source_ids), len(target_ids), data_args.cutoff_len - total_length)
+        source_len, target_len = infer_seqlen(len(source_ids), len(target_ids), cutoff_len - total_length)
         source_ids = source_ids[:source_len]
         target_ids = target_ids[:target_len]
         total_length += source_len + target_len

-        if data_args.train_on_prompt:
+        if train_on_prompt:
             source_label = source_ids
         elif turn_idx != 0 and template.efficient_eos:
             source_label = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (source_len - 1)
         else:
             source_label = [IGNORE_INDEX] * source_len

-        if data_args.mask_history and turn_idx != len(encoded_pairs) - 1:
+        if mask_history and turn_idx != len(encoded_pairs) - 1:
             target_label = [IGNORE_INDEX] * target_len
         else:
             target_label = target_ids
@@ -112,7 +114,9 @@ def preprocess_supervised_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
+            train_on_prompt=data_args.train_on_prompt,
+            mask_history=data_args.mask_history,
         )
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))
@@ -150,7 +154,9 @@ def preprocess_packed_supervised_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=None,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len - 1,  # reserved for the padding token
+            train_on_prompt=data_args.train_on_prompt,
+            mask_history=data_args.mask_history,
         )
         length = len(input_ids)
         if length > data_args.cutoff_len:
@@ -163,7 +169,7 @@ def preprocess_packed_supervised_dataset(
            valid_num += 1

     model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
-    knapsacks = greedy_knapsack(lengths, data_args.cutoff_len)
+    knapsacks = greedy_knapsack(lengths, data_args.cutoff_len - 1)  # reserved for the padding token
     for knapsack in knapsacks:
         packed_input_ids, packed_attention_masks, packed_labels = [], [], []
         for i, length in enumerate(knapsack):
diff --git a/src/llamafactory/data/processors/unsupervised.py b/src/llamafactory/data/processors/unsupervised.py
index b3fc85c9..7bd1904b 100644
--- a/src/llamafactory/data/processors/unsupervised.py
+++ b/src/llamafactory/data/processors/unsupervised.py
@@ -37,7 +37,7 @@ def _encode_unsupervised_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
 ) -> Tuple[List[int], List[int]]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -55,7 +55,7 @@ def _encode_unsupervised_example(
         image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
         input_ids = [image_token_id] * getattr(processor, "image_seq_length") + input_ids

-    source_len, target_len = infer_seqlen(len(input_ids), len(labels), data_args.cutoff_len)
+    source_len, target_len = infer_seqlen(len(input_ids), len(labels), cutoff_len)
     input_ids = input_ids[:source_len]
     labels = labels[:target_len]
     return input_ids, labels
@@ -88,7 +88,7 @@ def preprocess_unsupervised_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
         )
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))
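
Note for reviewers: the encoding helpers above now take a plain cutoff_len integer instead of the whole DataArguments object, and they hand that budget to infer_seqlen to decide how much of the prompt and the response to keep. infer_seqlen itself is not touched by this patch; the snippet below is only a rough sketch, under the assumption that it keeps the side that already fits and splits the budget proportionally when both sides are too long. It is not the actual implementation in processor_utils.

from typing import Tuple


def infer_seqlen_sketch(source_len: int, target_len: int, cutoff_len: int) -> Tuple[int, int]:
    """Illustrative only: split `cutoff_len` between source and target lengths."""
    if source_len + target_len <= cutoff_len:  # nothing to truncate
        return source_len, target_len

    if target_len * 2 < cutoff_len:  # short response: truncate the prompt only
        new_target_len = target_len
    elif source_len * 2 < cutoff_len:  # short prompt: truncate the response only
        new_target_len = cutoff_len - source_len
    else:  # both long: split the budget proportionally
        new_target_len = int(cutoff_len * target_len / (source_len + target_len))

    new_source_len = max(cutoff_len - new_target_len, 0)
    return min(new_source_len, source_len), min(new_target_len, target_len)


if __name__ == "__main__":
    # e.g. a 900-token prompt and a 300-token response under a 1024-token budget
    print(infer_seqlen_sketch(900, 300, 1024))  # -> (724, 300)

Passing the scalar rather than DataArguments is what lets the packed path above request a reduced budget (data_args.cutoff_len - 1) without changing the dataclass or the other call sites.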
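The packed supervised path reserves one token of the budget in two places, when encoding each example and when grouping lengths with greedy_knapsack, so that the padding token appended to every packed sequence still fits inside cutoff_len. greedy_knapsack is likewise outside this patch; the sketch below is a first-fit, longest-first packer written only to illustrate the assumed behavior of grouping example lengths into bins of capacity cutoff_len - 1, not the repository's implementation.

from typing import List


def greedy_knapsack_sketch(lengths: List[int], capacity: int) -> List[List[int]]:
    """Illustrative only: greedily pack example lengths into bins of `capacity`."""
    knapsacks: List[List[int]] = []
    remaining: List[int] = []
    for length in sorted(lengths, reverse=True):  # place longest examples first
        for idx, knapsack in enumerate(knapsacks):
            if remaining[idx] >= length:  # fits into an already open knapsack
                knapsack.append(length)
                remaining[idx] -= length
                break
        else:  # no open knapsack has room: start a new one
            knapsacks.append([length])
            remaining.append(capacity - length)
    return knapsacks


if __name__ == "__main__":
    cutoff_len = 16
    print(greedy_knapsack_sketch([10, 7, 5, 3, 2], cutoff_len - 1))
    # -> [[10, 5], [7, 3, 2]], each bin sums to at most 15

With cutoff_len = 16, for example, a bin whose lengths sum to 15 still leaves room for the single padding token that the packing loop appends, which is exactly why both the per-example encoding and the knapsack capacity use data_args.cutoff_len - 1.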