unwrap_model_for_generation(reward_model) is necessary for ZeRO-3 training
This commit is contained in:
hiyouga 2024-07-03 19:45:51 +08:00
parent 87346c0946
commit 8845e94f91
1 changed file with 3 additions and 2 deletions

View File

@ -393,7 +393,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
else:
reward_model = self.reward_model
with self.amp_context: # support bf16
with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context: # support bf16
_, _, values = reward_model(**batch, return_dict=True, use_cache=False)
if self.finetuning_args.reward_model_type == "lora":
@ -496,4 +496,5 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
self.model.save_checkpoint(output_dir)
elif self.args.should_save:
self._save(output_dir)
unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model)
self._save(output_dir, state_dict=unwrapped_model.state_dict())