Merge pull request #3748 from BUAADreamer/main
Add Yi-VL MLLM and save processor config during training
commit 75f405ec30
@@ -856,6 +856,21 @@ _register_template(
 )
 
 
+_register_template(
+    name="yi_vl",
+    format_user=StringFormatter(slots=["### Human: {{content}}\n### Assistant:"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    default_system=(
+        "This is a chat between an inquisitive human and an AI assistant. "
+        "Assume the role of the AI assistant. Read all the images carefully, "
+        "and respond to the human's questions with informative, helpful, detailed and polite answers. "
+        "这是一个好奇的人类和一个人工智能助手之间的对话。假设你扮演这个AI助手的角色。"
+        "仔细阅读所有的图像,并对人类的问题做出信息丰富、有帮助、详细的和礼貌的回答。\n"
+    ),
+    stop_words=["###"],
+)
+
+
 _register_template(
     name="yuan",
     format_user=StringFormatter(slots=["{{content}}", {"token": "<sep>"}]),
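
Note: the new "yi_vl" template pairs an English and a Chinese system prompt and stops generation at "###". A minimal sketch of the prompt this produces for one user turn, assuming a hypothetical render_turn helper rather than LLaMA-Factory's actual template engine:

    # Hypothetical sketch of the "yi_vl" prompt layout; render_turn is not
    # part of LLaMA-Factory, it just mirrors the formatters registered above.
    def render_turn(system: str, content: str) -> str:
        # format_user: "### Human: {{content}}\n### Assistant:"
        return system + "### Human: {}\n### Assistant:".format(content)

    prompt = render_turn(
        "This is a chat between an inquisitive human and an AI assistant. ...",
        "Describe the image.",
    )
    # Decoding stops on "###" (stop_words=["###"]), which the model emits
    # when it starts a new turn.
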
@@ -18,7 +18,7 @@ from .utils.moe import add_z3_leaf_module, configure_moe
 from .utils.quantization import configure_quantization
 from .utils.rope import configure_rope
 from .utils.valuehead import prepare_valuehead_model
-from .utils.visual import autocast_projector_dtype, configure_hidden_size
+from .utils.visual import autocast_projector_dtype, configure_visual_model
 
 
 if TYPE_CHECKING:
@@ -55,7 +55,7 @@ def patch_config(
     configure_longlora(config, model_args, is_trainable)
     configure_quantization(config, tokenizer, model_args, init_kwargs)
     configure_moe(config, model_args, is_trainable)
-    configure_hidden_size(config)
+    configure_visual_model(config)
 
     if model_args.use_cache and not is_trainable:
         setattr(config, "use_cache", True)
@@ -1,12 +1,14 @@
 from typing import TYPE_CHECKING, Tuple
 
 import torch
+import transformers.models
+from transformers.activations import ACT2FN
 
 from ...extras.logging import get_logger
 
 
 if TYPE_CHECKING:
-    from transformers import PretrainedConfig, PreTrainedModel
+    from transformers import LlavaConfig, PretrainedConfig, PreTrainedModel
 
     from ...hparams import ModelArguments
 
@@ -14,9 +16,23 @@ if TYPE_CHECKING:
 logger = get_logger(__name__)
 
 
-def configure_hidden_size(config: "PretrainedConfig") -> None:
-    if getattr(config, "model_type", None) == "llava":
-        setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))
+class LlavaMultiModalProjector(torch.nn.Module):
+    def __init__(self, config: "LlavaConfig"):
+        super().__init__()
+
+        self.linear_1 = torch.nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
+        self.linear_2 = torch.nn.LayerNorm(config.text_config.hidden_size, bias=True)
+        self.linear_3 = torch.nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
+        self.linear_4 = torch.nn.LayerNorm(config.text_config.hidden_size, bias=True)
+        self.act = ACT2FN[config.projector_hidden_act]
+
+    def forward(self, image_features):
+        hidden_states = self.linear_1(image_features)
+        hidden_states = self.linear_2(hidden_states)
+        hidden_states = self.act(hidden_states)
+        hidden_states = self.linear_3(hidden_states)
+        hidden_states = self.linear_4(hidden_states)
+        return hidden_states
 
 
 def autocast_projector_dtype(
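
Note: unlike the stock LLaVA projector in transformers (linear, activation, linear), this Yi-VL variant adds a LayerNorm after each linear layer. A quick shape check under an assumed dummy config; the SimpleNamespace stands in for a real LlavaConfig:

    # Sanity check of the projector's shapes; field values are assumptions
    # chosen for illustration (576 patches matches a 336px CLIP ViT-L/14).
    from types import SimpleNamespace

    import torch

    cfg = SimpleNamespace(
        vision_config=SimpleNamespace(hidden_size=1024),
        text_config=SimpleNamespace(hidden_size=4096),
        projector_hidden_act="gelu",
    )
    proj = LlavaMultiModalProjector(cfg)
    feats = torch.randn(2, 576, 1024)  # (batch, image patches, vision hidden)
    assert proj(feats).shape == (2, 576, 4096)  # mapped into the LLM hidden size
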
@@ -31,3 +47,11 @@ def autocast_projector_dtype(
     logger.info("Casting multimodal projector outputs in {}.".format(model_args.compute_dtype))
     mm_projector: "torch.nn.Module" = getattr(model, mm_projector_name)
     mm_projector.register_forward_hook(_mm_projector_forward_post_hook)
+
+
+def configure_visual_model(config: "PretrainedConfig") -> None:
+    if getattr(config, "model_type", None) == "llava":
+        setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))
+
+    if getattr(config, "is_yi_vl_derived_model", None):
+        transformers.models.llava.modeling_llava.LlavaMultiModalProjector = LlavaMultiModalProjector
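
Note: the monkey patch works because modeling_llava resolves LlavaMultiModalProjector by name when the model is constructed, so swapping the module attribute before from_pretrained is sufficient. A hedged usage sketch (load_yi_vl is a hypothetical helper, not part of this PR):

    # Patch ordering matters: configure_visual_model must run before the
    # model is instantiated, or the stock projector class would be used.
    from transformers import AutoConfig, LlavaForConditionalGeneration

    def load_yi_vl(model_path: str) -> "LlavaForConditionalGeneration":
        config = AutoConfig.from_pretrained(model_path)
        configure_visual_model(config)  # swaps the projector if the config is flagged
        return LlavaForConditionalGeneration.from_pretrained(model_path, config=config)
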
@@ -13,6 +13,7 @@ from ..utils import create_custom_optimzer, create_custom_scheduler
 
 
 if TYPE_CHECKING:
+    from transformers import ProcessorMixin
     from transformers.trainer import PredictionOutput
 
     from ...hparams import FinetuningArguments
@@ -26,9 +27,12 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
     Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE.
     """
 
-    def __init__(self, finetuning_args: "FinetuningArguments", **kwargs) -> None:
+    def __init__(
+        self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
+    ) -> None:
         super().__init__(**kwargs)
         self.finetuning_args = finetuning_args
+        self.processor = processor
         if finetuning_args.use_badam:
             from badam import clip_grad_norm_for_sparse_tensor
 
@@ -45,6 +49,12 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
         create_custom_scheduler(self.args, num_training_steps, optimizer)
         return super().create_scheduler(num_training_steps, optimizer)
 
+    def _save(self, output_dir: Optional[str] = None, state_dict: Optional[Dict[str, "torch.Tensor"]] = None) -> None:
+        super()._save(output_dir, state_dict)
+        if self.processor is not None:
+            output_dir = output_dir if output_dir is not None else self.args.output_dir
+            getattr(self.processor, "image_processor").save_pretrained(output_dir)
+
     def prediction_step(
         self,
         model: "torch.nn.Module",
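
Note: with this override, the image processor config is written next to the model weights, which is the "save processor config during training" half of the PR title; a fine-tuned checkpoint can then be reloaded without the base model's preprocessing files. A sketch with an illustrative output path:

    # Restoring the artifacts that _save wrote; the directory is illustrative.
    from transformers import AutoImageProcessor, AutoTokenizer

    output_dir = "saves/yi-vl/sft"  # whatever output_dir the run used
    image_processor = AutoImageProcessor.from_pretrained(output_dir)
    tokenizer = AutoTokenizer.from_pretrained(output_dir)
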
@@ -55,10 +55,10 @@ def run_sft(
         model=model,
         args=training_args,
         finetuning_args=finetuning_args,
-        tokenizer=tokenizer,
         data_collator=data_collator,
         callbacks=callbacks,
         compute_metrics=ComputeMetrics(tokenizer) if training_args.predict_with_generate else None,
+        **tokenizer_module,
         **split_dataset(dataset, data_args, training_args),
     )
 
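
Note: the explicit tokenizer kwarg gives way to unpacking tokenizer_module, which presumably bundles the tokenizer with the processor that CustomSeq2SeqTrainer now expects. A sketch of the assumed shape; the real dict is built by the tokenizer loader, which this diff does not show:

    # Assumed contents of tokenizer_module (not confirmed by this diff):
    tokenizer_module = {"tokenizer": tokenizer, "processor": processor}
    # so **tokenizer_module fills both the Trainer's tokenizer= kwarg and the
    # new processor= parameter in a single unpacking.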