fix ppo save model

hiyouga 2023-09-12 16:25:29 +08:00
parent d4be857e23
commit 7ba57d5b14
1 changed file with 2 additions and 1 deletion


@@ -147,7 +147,8 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
                 dataiter = iter(self.dataloader)
                 steps_trained = 0
 
-        self.log_callback.on_train_end(
+        self.log_callback.on_train_end(self.args, self.state, self.control)
+        self.save_callback.on_train_end(
             self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model)
         )
 
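
The fix splits what was previously a single on_train_end call: the log callback is now notified with only (self.args, self.state, self.control), while a dedicated save callback receives the model unwrapped from the accelerator so the final weights can be written out. Below is a minimal sketch of the idea against the transformers callback API; the class names and bodies are illustrative assumptions, not the repository's actual implementation.

from transformers import TrainerCallback


class LogCallback(TrainerCallback):
    # Hypothetical logging callback: it only finalizes logging and never
    # touches the model, so passing model=... to it is at best useless and,
    # if its signature does not accept that keyword, raises a TypeError.
    def on_train_end(self, args, state, control, **kwargs):
        print(f"Training finished after {state.global_step} steps.")


class SaveModelCallback(TrainerCallback):
    # Hypothetical save callback: it needs the model object to persist the
    # trained weights at the end of PPO training.
    def on_train_end(self, args, state, control, model=None, **kwargs):
        if model is not None:
            model.save_pretrained(args.output_dir)

Passing the unwrapped model matters because under distributed training accelerate wraps the model (for example in DistributedDataParallel), and the wrapper does not expose save_pretrained; self.accelerator.unwrap_model(self.model) recovers the underlying model before it is handed to the save callback.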