forked from p04798526/LLaMA-Factory-Mirror
update examples
This commit is contained in:
parent e2a28f51c6
commit 0a690ada6f
README.md
@@ -192,7 +192,7 @@ llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
 #### Full-Parameter Fine-Tuning using Adam-mini
 
 ```bash
-llamafactory-cli train examples/extras/adam_mini/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
 ```
 
 #### LoRA+ Fine-Tuning
README_zh.md
@@ -192,7 +192,7 @@ llamafactory-cli train examples/extras/badam/llama3_full_sft.yaml
 #### 使用 Adam-mini 进行全参数训练
 
 ```bash
-llamafactory-cli train examples/extras/adam_mini/llama3_full_sft.yaml
+llamafactory-cli train examples/extras/adam_mini/qwen2_full_sft.yaml
 ```
 
 #### LoRA+ 微调
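The example renamed here relies on LLaMA-Factory's `use_adam_mini` switch rather than on anything model-specific. Below is a minimal sketch of the method block such a config carries: `use_adam_mini: true` is taken from the hunk context in the config diff that follows, while `do_train` and `finetuning_type` are assumed typical full-SFT settings and are not shown in this commit.

```yaml
### method
stage: sft
do_train: true          # assumed; not part of this diff
finetuning_type: full   # assumed; full-parameter fine-tuning
use_adam_mini: true     # enables the Adam-mini optimizer (hunk context below)
```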
examples/extras/adam_mini/llama3_full_sft.yaml → examples/extras/adam_mini/qwen2_full_sft.yaml
@@ -1,5 +1,5 @@
 ### model
-model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
+model_name_or_path: Qwen/Qwen2-1.5B-Instruct
 
 ### method
 stage: sft
@@ -9,14 +9,14 @@ use_adam_mini: true
 
 ### dataset
 dataset: identity,alpaca_en_demo
-template: llama3
+template: qwen
 cutoff_len: 1024
 max_samples: 1000
 overwrite_cache: true
 preprocessing_num_workers: 16
 
 ### output
-output_dir: saves/llama3-8b/full/sft
+output_dir: saves/qwen2-1_5b/full/sft
 logging_steps: 10
 save_steps: 500
 plot_loss: true
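For reference, assembling the visible hunks gives roughly the following post-commit view of examples/extras/adam_mini/qwen2_full_sft.yaml. Only lines that actually appear in the diff are reproduced; the regions the diff does not show are marked as gaps rather than guessed at.

```yaml
### model
model_name_or_path: Qwen/Qwen2-1.5B-Instruct

### method
stage: sft
# lines 6-8 are not part of this diff; the second hunk header
# indicates use_adam_mini: true sits among them
use_adam_mini: true

### dataset
dataset: identity,alpaca_en_demo
template: qwen
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/qwen2-1_5b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
# any training hyperparameters after this point are not shown in the diff
```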