forked from p04798526/LLaMA-Factory-Mirror
fix #2090
This commit is contained in:
parent
368b31f6b7
commit
cc275abe09
|
@@ -4,7 +4,6 @@ import torch
|
|||
from typing import TYPE_CHECKING, Tuple
|
||||
from transformers import InfNanRemoveLogitsProcessor, LogitsProcessorList
|
||||
from transformers.utils import (
|
||||
is_torch_bf16_cpu_available,
|
||||
is_torch_bf16_gpu_available,
|
||||
is_torch_cuda_available,
|
||||
is_torch_npu_available,
|
||||
|
@@ -13,7 +12,7 @@ from transformers.utils import (
|
|||
|
||||
_is_fp16_available = is_torch_npu_available() or is_torch_cuda_available()
|
||||
try:
|
||||
_is_bf16_available = is_torch_bf16_gpu_available() or is_torch_bf16_cpu_available()
|
||||
_is_bf16_available = is_torch_bf16_gpu_available()
|
||||
except:
|
||||
_is_bf16_available = False
|
||||
|
||||
|
|
Loading…
Reference in New Issue