forked from p04798526/LLaMA-Factory-Mirror

fix flashattn + packing

commit 4135e69406, parent ad71296a7c
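In short: the _encode_*_example helpers now take the concrete fields they use (cutoff_len everywhere, plus train_on_prompt and mask_history for supervised data) instead of the whole DataArguments object, and packed supervised preprocessing budgets against cutoff_len - 1 so one slot stays reserved for the padding token.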
@@ -38,7 +38,7 @@ def _encode_feedback_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
 ) -> Tuple[List[int], List[int], List[int], List[int], bool]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -67,10 +67,10 @@ def _encode_feedback_example(
         prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + prompt_ids
         kl_prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + kl_prompt_ids

-    source_len, target_len = infer_seqlen(len(prompt_ids), len(response_ids), data_args.cutoff_len)
+    source_len, target_len = infer_seqlen(len(prompt_ids), len(response_ids), cutoff_len)
     prompt_ids = prompt_ids[:source_len]
     response_ids = response_ids[:target_len]
-    kl_source_len, kl_target_len = infer_seqlen(len(kl_prompt_ids), len(kl_response_ids), data_args.cutoff_len)
+    kl_source_len, kl_target_len = infer_seqlen(len(kl_prompt_ids), len(kl_response_ids), cutoff_len)
     kl_prompt_ids = kl_prompt_ids[:kl_source_len]
     kl_response_ids = kl_response_ids[:kl_target_len]

@@ -120,7 +120,7 @@ def preprocess_feedback_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
         )
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))
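All four processors split the cutoff budget between prompt and response with infer_seqlen; the diff only changes where that budget comes from. For context, a minimal sketch of the proportional truncation infer_seqlen performs, reconstructed from how the helpers above use it — treat the details as an assumption, the real implementation lives in the project's processor utilities:

from typing import Tuple

def infer_seqlen(source_len: int, target_len: int, cutoff_len: int) -> Tuple[int, int]:
    # Split a budget of cutoff_len tokens between source and target.
    if target_len * 2 < cutoff_len:      # short target: let the source take the rest
        max_target_len = cutoff_len
    elif source_len * 2 < cutoff_len:    # short source: let the target take the rest
        max_target_len = cutoff_len - source_len
    else:                                # both long: split proportionally
        max_target_len = int(cutoff_len * (target_len / (source_len + target_len)))

    new_target_len = min(max_target_len, target_len)
    new_source_len = max(cutoff_len - new_target_len, 0)
    return new_source_len, new_target_len  # upper bounds, applied via slicing

For example, infer_seqlen(100, 300, 200) keeps int(200 * 300 / 400) = 150 response tokens and the remaining 50 for the prompt.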
@@ -37,7 +37,7 @@ def _encode_pairwise_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
 ) -> Tuple[List[int], List[int], List[int], List[int]]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -55,9 +55,8 @@ def _encode_pairwise_example(
         image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
         prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + prompt_ids

-    source_len, target_len = infer_seqlen(
-        len(prompt_ids), max(len(chosen_ids), len(rejected_ids)), data_args.cutoff_len
-    )  # consider the response is more important
+    # consider the response is more important
+    source_len, target_len = infer_seqlen(len(prompt_ids), max(len(chosen_ids), len(rejected_ids)), cutoff_len)
     prompt_ids = prompt_ids[:source_len]
     chosen_ids = chosen_ids[:target_len]
     rejected_ids = rejected_ids[:target_len]
@@ -105,7 +104,7 @@ def preprocess_pairwise_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
         )
         model_inputs["chosen_input_ids"].append(chosen_input_ids)
         model_inputs["chosen_attention_mask"].append([1] * len(chosen_input_ids))
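Note on the pairwise budget: infer_seqlen is fed max(len(chosen_ids), len(rejected_ids)), and both responses are then sliced to the same target_len, so the chosen and rejected sequences stay aligned and the longer response, rather than the prompt, drives the split — the "response is more important" heuristic named in the comment.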
@@ -38,7 +38,9 @@ def _encode_supervised_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
+    train_on_prompt: bool,
+    mask_history: bool,
 ) -> Tuple[List[int], List[int]]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -54,22 +56,22 @@ def _encode_supervised_example(
     encoded_pairs = template.encode_multiturn(tokenizer, messages, system, tools)
     total_length = 1 if template.efficient_eos else 0
     for turn_idx, (source_ids, target_ids) in enumerate(encoded_pairs):
-        if total_length >= data_args.cutoff_len:
+        if total_length >= cutoff_len:
             break

-        source_len, target_len = infer_seqlen(len(source_ids), len(target_ids), data_args.cutoff_len - total_length)
+        source_len, target_len = infer_seqlen(len(source_ids), len(target_ids), cutoff_len - total_length)
         source_ids = source_ids[:source_len]
         target_ids = target_ids[:target_len]
         total_length += source_len + target_len

-        if data_args.train_on_prompt:
+        if train_on_prompt:
             source_label = source_ids
         elif turn_idx != 0 and template.efficient_eos:
             source_label = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (source_len - 1)
         else:
             source_label = [IGNORE_INDEX] * source_len

-        if data_args.mask_history and turn_idx != len(encoded_pairs) - 1:
+        if mask_history and turn_idx != len(encoded_pairs) - 1:
             target_label = [IGNORE_INDEX] * target_len
         else:
             target_label = target_ids
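The two new flags drive per-turn label construction. A condensed sketch of those rules — an illustrative helper, not part of the codebase, with the efficient_eos branch from the hunk above omitted:

IGNORE_INDEX = -100  # positions labeled with this value are excluded from the loss

def build_turn_labels(
    source_ids: list, target_ids: list,
    train_on_prompt: bool, mask_history: bool, is_last_turn: bool,
) -> list:
    # Prompt tokens: supervised only when train_on_prompt is set.
    source_label = source_ids if train_on_prompt else [IGNORE_INDEX] * len(source_ids)
    # History turns: masked entirely when mask_history is set,
    # so only the final response is supervised.
    if mask_history and not is_last_turn:
        target_label = [IGNORE_INDEX] * len(target_ids)
    else:
        target_label = target_ids
    return source_label + target_label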
@@ -112,7 +114,9 @@ def preprocess_supervised_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
+            train_on_prompt=data_args.train_on_prompt,
+            mask_history=data_args.mask_history,
         )
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))
@@ -150,7 +154,9 @@ def preprocess_packed_supervised_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=None,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len - 1,  # reserved for the padding token
+            train_on_prompt=data_args.train_on_prompt,
+            mask_history=data_args.mask_history,
         )
         length = len(input_ids)
         if length > data_args.cutoff_len:
@@ -163,7 +169,7 @@ def preprocess_packed_supervised_dataset(
             valid_num += 1

     model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
-    knapsacks = greedy_knapsack(lengths, data_args.cutoff_len)
+    knapsacks = greedy_knapsack(lengths, data_args.cutoff_len - 1)  # reserved for the padding token
     for knapsack in knapsacks:
         packed_input_ids, packed_attention_masks, packed_labels = [], [], []
         for i, length in enumerate(knapsack):
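This is the packing half of the fix: both the per-example cutoff and the knapsack capacity drop to cutoff_len - 1, so every packed sequence can be padded back up to exactly cutoff_len with one padding token. A minimal first-fit-decreasing sketch of what greedy_knapsack is assumed to do — the project's implementation may differ in details:

from typing import List

def greedy_knapsack(lengths: List[int], capacity: int) -> List[List[int]]:
    # Pack example lengths into bins of at most `capacity` tokens,
    # largest-first; each bin becomes one packed training sequence.
    bins: List[List[int]] = []
    remaining: List[int] = []
    for length in sorted(lengths, reverse=True):
        for i, room in enumerate(remaining):
            if length <= room:  # first bin with enough room
                bins[i].append(length)
                remaining[i] -= length
                break
        else:  # no bin fits: open a new one
            bins.append([length])
            remaining.append(capacity - length)
    return bins

# With cutoff_len = 8, packing against capacity 8 - 1 = 7 leaves one slot
# per bin for the padding token: prints [[5, 2], [4, 3]]
print(greedy_knapsack([3, 4, 2, 5], capacity=7))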
@@ -37,7 +37,7 @@ def _encode_unsupervised_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
 ) -> Tuple[List[int], List[int]]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -55,7 +55,7 @@ def _encode_unsupervised_example(
         image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
         input_ids = [image_token_id] * getattr(processor, "image_seq_length") + input_ids

-    source_len, target_len = infer_seqlen(len(input_ids), len(labels), data_args.cutoff_len)
+    source_len, target_len = infer_seqlen(len(input_ids), len(labels), cutoff_len)
     input_ids = input_ids[:source_len]
     labels = labels[:target_len]
     return input_ids, labels
@@ -88,7 +88,7 @@ def preprocess_unsupervised_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
         )
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))