tiny fix
commit 8c41a0aa6d
parent 1856a08e87
@@ -35,7 +35,7 @@ def configure_attn_implementation(
         if model_args.flash_attn == "auto":
             logger.warning("Gemma-2 models should use eager attention in training, change `flash_attn` to disabled.")
             model_args.flash_attn = "disabled"
-        else:
+        elif model_args.flash_attn != "disabled":
             logger.warning(
                 "Gemma-2 models should use eager attention in training, but you set `flash_attn: {}`. "
                 "Will proceed at your own risk.".format(model_args.flash_attn)
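For context, a minimal sketch of the branch logic after this commit. Only the if/elif chain comes from the hunk above; the function signature, the `gemma2` model-type guard, and the logger setup are assumptions for illustration, since the diff shows just these seven lines. The point of the fix: a user who already set `flash_attn: disabled` previously fell into the `else` branch and got the "proceed at your own risk" warning; with the `elif` guard, only non-disabled settings trigger it.

```python
import logging

logger = logging.getLogger(__name__)


def configure_attn_implementation(config, model_args) -> None:
    # Assumed guard: the hunk sits inside a Gemma-2-specific check.
    if getattr(config, "model_type", None) == "gemma2":
        if model_args.flash_attn == "auto":
            logger.warning("Gemma-2 models should use eager attention in training, change `flash_attn` to disabled.")
            model_args.flash_attn = "disabled"
        elif model_args.flash_attn != "disabled":
            # After this commit, an explicit `flash_attn: disabled` passes
            # through silently; only other settings reach this warning.
            logger.warning(
                "Gemma-2 models should use eager attention in training, but you set `flash_attn: {}`. "
                "Will proceed at your own risk.".format(model_args.flash_attn)
            )
```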