hiyouga 2024-07-01 03:55:20 +08:00
parent 1856a08e87
commit 8c41a0aa6d
1 changed file with 1 addition and 1 deletion


@@ -35,7 +35,7 @@ def configure_attn_implementation(
         if model_args.flash_attn == "auto":
             logger.warning("Gemma-2 models should use eager attention in training, change `flash_attn` to disabled.")
             model_args.flash_attn = "disabled"
-        else:
+        elif model_args.flash_attn != "disabled":
             logger.warning(
                 "Gemma-2 models should use eager attention in training, but you set `flash_attn: {}`. "
                 "Will proceed at your own risk.".format(model_args.flash_attn)