forked from p04798526/LLaMA-Factory-Mirror
fix #4820
This commit is contained in:
parent b0aa321a4a
commit fd8cc49008
@@ -55,7 +55,15 @@ def compute_accuracy(eval_preds: "EvalPrediction") -> Dict[str, float]:

 def eval_logit_processor(logits: "torch.Tensor", labels: "torch.Tensor") -> "torch.Tensor":
-    logits = logits[0] if isinstance(logits, (list, tuple)) else logits
+    if isinstance(logits, (list, tuple)):
+        if logits[0].dim() == 3:  # (batch_size, seq_len, vocab_size)
+            logits = logits[0]
+        else:  # moe models have aux loss
+            logits = logits[1]
+
+    if logits.dim() != 3:
+        raise ValueError("Cannot process the logits.")
+
     return torch.argmax(logits, dim=-1)