diff --git a/examples/lora_single_gpu/llama3_lora_dpo.yaml b/examples/lora_single_gpu/llama3_lora_dpo.yaml
index 958be1b5..f68244b7 100644
--- a/examples/lora_single_gpu/llama3_lora_dpo.yaml
+++ b/examples/lora_single_gpu/llama3_lora_dpo.yaml
@@ -6,6 +6,7 @@ stage: dpo
 do_train: true
 finetuning_type: lora
 lora_target: q_proj,v_proj
+pref_beta: 0.1
 pref_loss: sigmoid # [sigmoid (dpo), orpo, simpo]
 
 ### dataset
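
For context on the new `pref_beta` option: in the sigmoid (DPO) preference loss, beta scales the policy-vs-reference log-ratio margin before it enters the log-sigmoid, so a smaller value like 0.1 keeps the policy closer to the reference model. The snippet below is a minimal sketch of that loss under standard DPO definitions, not LLaMA-Factory's internal implementation; the function name and argument names are illustrative.

```python
import torch
import torch.nn.functional as F

def dpo_sigmoid_loss(policy_chosen_logps: torch.Tensor,
                     policy_rejected_logps: torch.Tensor,
                     ref_chosen_logps: torch.Tensor,
                     ref_rejected_logps: torch.Tensor,
                     beta: float = 0.1) -> torch.Tensor:
    """Sketch of the sigmoid (DPO) preference loss; `beta` plays the role of pref_beta."""
    # Log-ratios of policy vs. frozen reference model for each response
    chosen_logratios = policy_chosen_logps - ref_chosen_logps
    rejected_logratios = policy_rejected_logps - ref_rejected_logps
    # beta scales the preference margin before the log-sigmoid
    logits = beta * (chosen_logratios - rejected_logratios)
    return -F.logsigmoid(logits).mean()
```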