add full-parameter finetuning of MLLM

BUAADreamer 2024-05-11 13:11:00 +08:00
parent 8b997e32fb
commit 7be7972f28
3 changed files with 7 additions and 2 deletions


@@ -85,6 +85,10 @@ class ModelArguments:
         default=False,
         metadata={"help": "Whether or not to use multimodal LLM that accepts visual inputs."},
     )
+    autocast_projector: bool = field(
+        default=True,
+        metadata={"help": "Whether or not to autocast the projector."},
+    )
     moe_aux_loss_coef: Optional[float] = field(
         default=None,
         metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},


@@ -155,7 +155,8 @@ def load_model(
         model.eval()
     else:
         model.train()
+        if model_args.visual_inputs:
+            model.vision_tower.requires_grad_(False)
     trainable_params, all_param = count_parameters(model)
     if is_trainable:
         param_stats = "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format(


@@ -101,7 +101,7 @@ def patch_model(
     if model_args.resize_vocab:
         resize_embedding_layer(model, tokenizer)

-    if model_args.visual_inputs:
+    if model_args.visual_inputs and model_args.autocast_projector:
         autocast_projector_dtype(model, model_args)

     if is_trainable:
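The diff only gates this call; the body of autocast_projector_dtype is not part of the commit. The sketch below is an illustrative guess at the kind of dtype fix-up such a helper performs, assuming a multi_modal_projector attribute and a model_args.compute_dtype field, neither of which is confirmed by this diff:

# Hypothetical stand-in for autocast_projector_dtype; the repository's real
# implementation may differ. Assumed names: model.multi_modal_projector,
# model_args.compute_dtype.
def autocast_projector_dtype(model, model_args):
    def _cast_output(module, inputs, output):
        # Cast the projector output to the training compute dtype so it
        # matches the language model weights (e.g. bfloat16).
        return output.to(model_args.compute_dtype)

    if hasattr(model, "multi_modal_projector"):
        model.multi_modal_projector.register_forward_hook(_cast_output)

With --autocast_projector false, a full-parameter run skips this cast entirely, keeping the projector's native dtype end to end while its weights are being updated.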