diff --git a/src/llmtuner/train/dpo/trainer.py b/src/llmtuner/train/dpo/trainer.py
index 35dcd8db..b144d561 100644
--- a/src/llmtuner/train/dpo/trainer.py
+++ b/src/llmtuner/train/dpo/trainer.py
@@ -165,13 +165,13 @@ class CustomDPOTrainer(DPOTrainer):
         reward_accuracies = (chosen_rewards > rejected_rewards).float()
 
         prefix = "eval_" if train_eval == "eval" else ""
-        metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.cpu().mean()
-        metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.cpu().mean()
-        metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.cpu().mean()
-        metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).cpu().mean()
-        metrics["{}logps/rejected".format(prefix)] = policy_rejected_logps.detach().cpu().mean()
-        metrics["{}logps/chosen".format(prefix)] = policy_chosen_logps.detach().cpu().mean()
-        metrics["{}logits/rejected".format(prefix)] = policy_rejected_logits.detach().cpu().mean()
-        metrics["{}logits/chosen".format(prefix)] = policy_chosen_logits.detach().cpu().mean()
+        metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.mean().cpu()
+        metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.mean().cpu()
+        metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.mean().cpu()
+        metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).mean().cpu()
+        metrics["{}logps/rejected".format(prefix)] = policy_rejected_logps.detach().mean().cpu()
+        metrics["{}logps/chosen".format(prefix)] = policy_chosen_logps.detach().mean().cpu()
+        metrics["{}logits/rejected".format(prefix)] = policy_rejected_logits.detach().mean().cpu()
+        metrics["{}logits/chosen".format(prefix)] = policy_chosen_logits.detach().mean().cpu()
 
         return losses.mean(), metrics
diff --git a/src/llmtuner/train/orpo/trainer.py b/src/llmtuner/train/orpo/trainer.py
index 5e0d70d9..88090a9e 100644
--- a/src/llmtuner/train/orpo/trainer.py
+++ b/src/llmtuner/train/orpo/trainer.py
@@ -113,15 +113,15 @@ class CustomORPOTrainer(DPOTrainer):
         reward_accuracies = (chosen_rewards > rejected_rewards).float()
 
         prefix = "eval_" if train_eval == "eval" else ""
-        metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.cpu().mean()
-        metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.cpu().mean()
-        metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.cpu().mean()
-        metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).cpu().mean()
-        metrics["{}logps/rejected".format(prefix)] = rejected_logps.detach().cpu().mean()
-        metrics["{}logps/chosen".format(prefix)] = chosen_logps.detach().cpu().mean()
-        metrics["{}logits/rejected".format(prefix)] = rejected_logits.detach().cpu().mean()
-        metrics["{}logits/chosen".format(prefix)] = chosen_logits.detach().cpu().mean()
-        metrics["{}sft_loss".format(prefix)] = sft_loss.detach().cpu().mean()
-        metrics["{}odds_ratio_loss".format(prefix)] = odds_ratio_loss.detach().cpu().mean()
+        metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.mean().cpu()
+        metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.mean().cpu()
+        metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.mean().cpu()
+        metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).mean().cpu()
+        metrics["{}logps/rejected".format(prefix)] = rejected_logps.detach().mean().cpu()
+        metrics["{}logps/chosen".format(prefix)] = chosen_logps.detach().mean().cpu()
+        metrics["{}logits/rejected".format(prefix)] = rejected_logits.detach().mean().cpu()
+        metrics["{}logits/chosen".format(prefix)] = chosen_logits.detach().mean().cpu()
+        metrics["{}sft_loss".format(prefix)] = sft_loss.detach().mean().cpu()
+        metrics["{}odds_ratio_loss".format(prefix)] = odds_ratio_loss.detach().mean().cpu()
 
         return batch_loss, metrics
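
For reference, a minimal standalone sketch of the pattern this diff adopts (the tensor name below is illustrative, not from the patch): calling .mean() before .cpu() reduces the tensor on its original device, so only a 0-dim scalar is copied to host memory instead of the full batch-sized tensor, while the resulting value is unchanged.

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
chosen_rewards = torch.randn(8, device=device)  # stand-in for a batch of per-example rewards

# Previous pattern: copy the whole tensor to host, then reduce on the CPU.
old_value = chosen_rewards.cpu().mean()

# New pattern: reduce on the original device, then move only a scalar to host.
new_value = chosen_rewards.mean().cpu()

assert torch.allclose(old_value, new_value)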