update examples

hiyouga 2024-06-27 00:53:33 +08:00
parent f17c9dfd84
commit d417e63f92
4 changed files with 6 additions and 5 deletions

@@ -94,10 +94,10 @@ FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.
### QLoRA Fine-Tuning
-#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes Quantization (Recommended)
+#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended)
```bash
-llamafactory-cli train examples/train_qlora/llama3_lora_sft_bitsandbytes.yaml
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
```
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization
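
The renamed `llama3_lora_sft_otfq.yaml` example is not reproduced in full in this diff. As a minimal sketch of how one of the newly supported on-the-fly backends might be selected in such a config (only the `quantization_bit` and `quantization_method` keys are confirmed by the config diff further down; everything else here is an assumption):

```yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
# hypothetical variant: use the HQQ backend instead of the default bitsandbytes
quantization_bit: 4           # HQQ is listed as supporting 2/3/4/5/6/8 bits
quantization_method: hqq      # choices: [bitsandbytes (4/8), hqq (2/3/4/5/6/8), eetq (8)]
```

Training would then be launched with the same `llamafactory-cli train` command shown above, pointed at the edited file.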

@@ -94,10 +94,10 @@ FORCE_TORCHRUN=1 llamafactory-cli train examples/train_lora/llama3_lora_sft_ds3.
### QLoRA Fine-Tuning
-#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes Quantization (Recommended)
+#### Supervised Fine-Tuning with 4/8-bit Bitsandbytes/HQQ/EETQ Quantization (Recommended)
```bash
-llamafactory-cli train examples/train_qlora/llama3_lora_sft_bitsandbytes.yaml
+llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml
```
#### Supervised Fine-Tuning with 4/8-bit GPTQ Quantization

@@ -7,7 +7,7 @@ do_train: true
finetuning_type: lora
lora_target: all
pref_beta: 0.1
-pref_loss: sigmoid # [sigmoid (dpo), orpo, simpo]
+pref_loss: sigmoid # choices: [sigmoid (dpo), orpo, simpo]
### dataset
dataset: dpo_en_demo
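
The clarified `pref_loss` comment already lists the valid values. A minimal sketch of switching the same example to another documented loss, reusing only the fields visible in this hunk (ORPO is picked purely for illustration; whether other fields need adjusting is not covered by this diff):

```yaml
finetuning_type: lora
lora_target: all
pref_beta: 0.1
pref_loss: orpo   # choices: [sigmoid (dpo), orpo, simpo]
### dataset
dataset: dpo_en_demo
```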

@@ -1,6 +1,7 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 4
+quantization_method: bitsandbytes # choices: [bitsandbytes (4/8), hqq (2/3/4/5/6/8), eetq (8)]
### method
stage: sft
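
For completeness, a hedged sketch of selecting another of the backends named in the new `quantization_method` comment, using only the keys visible in this file (EETQ is listed as 8-bit only; the rest of the file is assumed unchanged):

```yaml
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 8               # EETQ is listed as supporting 8-bit only
quantization_method: eetq         # choices: [bitsandbytes (4/8), hqq (2/3/4/5/6/8), eetq (8)]
### method
stage: sft
```

Either variant would presumably be run with the README command above, e.g. `llamafactory-cli train examples/train_qlora/llama3_lora_sft_otfq.yaml`.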