forked from p04798526/LLaMA-Factory-Mirror
fix: mixtral output_router_logits
This commit is contained in:
parent d1fb6c72b5 · commit 9f4fe62386
@@ -316,7 +316,7 @@ def patch_config(
     if getattr(config, "model_type", None) == "qwen2" and is_trainable and model_args.flash_attn:
         setattr(config, "use_cache", False)  # qwen2 does not support use_cache when using flashattn
 
-    if getattr(config, "model_type", None) == "qwen2_moe" and is_trainable:
+    if getattr(config, "model_type", None) in ["mixtral", "qwen2_moe"] and is_trainable:
         setattr(config, "output_router_logits", True)
 
     init_kwargs["torch_dtype"] = model_args.compute_dtype
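For context: in Hugging Face transformers, setting output_router_logits=True on an MoE config makes the forward pass return per-layer router logits and, when labels are supplied, fold the load-balancing auxiliary loss (scaled by config.router_aux_loss_coef) into the training loss. Before this change, patch_config only enabled the flag for qwen2_moe, so Mixtral fine-tuning ran without its auxiliary loss. A minimal sketch of the effect, assuming a transformers version with Mixtral support; the tiny config sizes are illustrative only, not the real 8x7B model:

import torch
from transformers import MixtralConfig, MixtralForCausalLM

# Tiny config so the sketch runs on CPU; sizes are illustrative only.
config = MixtralConfig(
    vocab_size=1000,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=2,
    num_local_experts=4,
    num_experts_per_tok=2,
    output_router_logits=True,  # the flag this commit enables for training
)
model = MixtralForCausalLM(config)

input_ids = torch.randint(0, config.vocab_size, (1, 16))
out = model(input_ids=input_ids, labels=input_ids)

# out.loss now includes the load-balancing auxiliary loss, weighted by
# config.router_aux_loss_coef; out.router_logits holds one tensor per layer.
print(out.loss, out.aux_loss, len(out.router_logits))

The patch itself only flips the config flag; the loss wiring above is handled inside the model's forward pass.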