forked from p04798526/LLaMA-Factory-Mirror

update readme

commit 3ba1054593
parent 4f18a310e9
@@ -22,7 +22,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install black ruff
+          python -m pip install ruff
       - name: Check quality
         run: |
Makefile (+2 −2)
@@ -3,9 +3,9 @@
 check_dirs := src tests

 quality:
-	black --check $(check_dirs)
 	ruff $(check_dirs)
+	ruff format --check $(check_dirs)

 style:
-	black $(check_dirs)
 	ruff $(check_dirs) --fix
+	ruff format $(check_dirs)
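Together with the workflow hunk above, this drops Black in favor of Ruff's built-in formatter: `make quality` now runs the Ruff linter followed by `ruff format --check`, and `make style` auto-fixes lint findings and reformats in place. A minimal sketch of what the new `quality` target executes, reproduced in Python purely for illustration (the `src`/`tests` paths come from the Makefile's `check_dirs`; this is not project code):

```python
# Sketch: the two commands the updated `make quality` target runs.
import subprocess

check_dirs = ["src", "tests"]  # mirrors the Makefile's check_dirs variable
subprocess.run(["ruff", *check_dirs], check=True)  # lint; older ruff accepts paths directly
subprocess.run(["ruff", "format", "--check", *check_dirs], check=True)  # fail on unformatted files
```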
README.md

@@ -398,6 +398,9 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
     --fp16
 ```

+> [!TIP]
+> Use `--adapter_name_or_path path_to_sft_checkpoint,path_to_ppo_checkpoint` to infer the fine-tuned model.
+
 > [!WARNING]
 > Use `--per_device_train_batch_size=1` for LLaMA-2 models in fp16 PPO training.

@@ -426,6 +429,9 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
     --fp16
 ```

+> [!TIP]
+> Use `--adapter_name_or_path path_to_sft_checkpoint,path_to_dpo_checkpoint` to infer the fine-tuned model.
+
 ### Distributed Training

 #### Use Huggingface Accelerate
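The added tips document multi-adapter inference: `--adapter_name_or_path` takes a comma-separated list of checkpoints applied in order (the SFT adapter first, then the PPO or DPO one). A hedged sketch of that general stacking pattern using PEFT directly; this is not LLaMA-Factory's actual loader, and the model name and paths are placeholders:

```python
# Illustration: apply two LoRA checkpoints to a base model, in order.
from peft import PeftModel
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder base model
for adapter in "path_to_sft_checkpoint,path_to_ppo_checkpoint".split(","):  # placeholder paths
    model = PeftModel.from_pretrained(model, adapter)
    model = model.merge_and_unload()  # merge weights so the next adapter stacks cleanly
```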
README_zh.md (the same two tips, in the Chinese README)

@@ -398,6 +398,9 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
     --fp16
 ```

+> [!TIP]
+> 使用 `--adapter_name_or_path path_to_sft_checkpoint,path_to_ppo_checkpoint` 来进行微调模型的推理。
+
 > [!WARNING]
 > 如果使用 fp16 精度进行 LLaMA-2 模型的 PPO 训练，请使用 `--per_device_train_batch_size=1`。

@@ -426,6 +429,9 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
     --fp16
 ```

+> [!TIP]
+> 使用 `--adapter_name_or_path path_to_sft_checkpoint,path_to_dpo_checkpoint` 来进行微调模型的推理。
+
 ### 多 GPU 分布式训练

 #### 使用 Huggingface Accelerate
pyproject.toml

@@ -2,11 +2,8 @@
 requires = ["setuptools>=61.0"]
 build-backend = "setuptools.build_meta"

-[tool.black]
-line-length = 119
-target-version = ["py38"]
-
 [tool.ruff]
+target-version = "py38"
 line-length = 119
 indent-width = 4

@@ -17,17 +14,7 @@ select = ["C", "E", "F", "I", "W"]
 [tool.ruff.lint.isort]
 lines-after-imports = 2
 known-first-party = ["llmtuner"]
-
-[tool.ruff.format]
-quote-style = "double"
-indent-style = "space"
-skip-magic-trailing-comma = false
-line-ending = "auto"
-
-[isort]
-default_section = "FIRSTPARTY"
-known_first_party = "llmtuner"
-known_third_party = [
+known-third-party = [
     "accelerate",
     "datasets",
     "gradio",
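This hunk folds the old standalone `[isort]` section into Ruff's linter configuration. A short sketch of the import layout those settings enforce (stand-in imports; `run_exp` is the entry point that `src/train_bash.py` imports):

```python
import os  # standard library section comes first

from transformers import set_seed  # third-party: listed in known-third-party

from llmtuner import run_exp  # first-party: known-first-party = ["llmtuner"]


def main() -> None:  # exactly two blank lines above, per lines-after-imports = 2
    set_seed(int(os.environ.get("SEED", "42")))
    run_exp()
```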
@@ -37,10 +24,9 @@ known_third_party = [
     "transformers",
     "trl"
 ]
-line_length = 119
-lines_after_imports = 2
-multi_line_output = 3
-include_trailing_comma = true
-force_grid_wrap = 0
-use_parentheses = true
-ensure_newline_before_comments = true
+
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"
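And this hunk moves the formatter settings that used to live under `[tool.black]` into `[tool.ruff.format]`, keeping the same style. A tiny before/after illustration (invented snippet, not repository code):

```python
# Before formatting: def greet(name): return 'Hello, ' + name
# After `ruff format` with quote-style = "double" and indent-style = "space":


def greet(name: str) -> str:
    return "Hello, " + name  # double quotes, four-space indentation


print(greet("llmtuner"))
```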
@@ -75,7 +75,8 @@ class Formatter(ABC):
     tool_format: Literal["default"] = "default"

     @abstractmethod
-    def apply(self, **kwargs) -> SLOTS: ...
+    def apply(self, **kwargs) -> SLOTS:
+        ...

     def extract(self, content: str) -> Union[str, Tuple[str, str]]:
         raise NotImplementedError
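This hunk (apparently the `llmtuner` data formatter module) only reflows the abstract `apply` stub onto two lines. For orientation, a trimmed sketch of how a concrete subclass fills it in, with `SLOTS` simplified to match the slot lists used by the template registrations below; this is not the project's real `StringFormatter`:

```python
from abc import ABC, abstractmethod
from typing import Dict, List, Set, Union

# Simplified: a slot is literal text, a named special token ({"eos_token"}),
# or an explicit token mapping ({"token": "<reserved_102>"}).
SLOTS = List[Union[str, Set[str], Dict[str, str]]]


class Formatter(ABC):
    def __init__(self, slots: SLOTS) -> None:
        self.slots = slots

    @abstractmethod
    def apply(self, **kwargs) -> SLOTS:
        ...


class SimpleStringFormatter(Formatter):
    def apply(self, **kwargs) -> SLOTS:
        # Substitute {{content}} in string slots; pass token slots through untouched.
        return [
            slot.replace("{{content}}", kwargs.get("content", "")) if isinstance(slot, str) else slot
            for slot in self.slots
        ]


print(SimpleStringFormatter(["Human: {{content}}\n"]).apply(content="Hi"))
```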
@@ -308,6 +308,15 @@ _register_template(
 )


+_register_template(
+    name="atom",
+    format_user=StringFormatter(
+        slots=[{"bos_token"}, "Human: {{content}}\n", {"eos_token"}, {"bos_token"}, "Assistant:"]
+    ),
+    format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]),
+)
+
+
 _register_template(
     name="baichuan",
     format_user=StringFormatter(slots=[{"token": "<reserved_102>"}, "{{content}}", {"token": "<reserved_103>"}]),
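The hunk above registers the new `atom` template (used by the Atom-7B models registered further down). Substituting LLaMA-style special tokens for the `bos_token`/`eos_token` slots (an assumption about Atom's tokenizer), one rendered turn looks roughly like:

```python
bos, eos = "<s>", "</s>"  # assumed special tokens
prompt = f"{bos}Human: How are you?\n{eos}{bos}Assistant:"  # from format_user
response = "Fine, thank you.\n" + eos  # format_assistant appends eos
print(prompt + response)
```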
@@ -515,7 +524,7 @@ _register_template(
 _register_template(
     name="openchat",
     format_user=StringFormatter(slots=["GPT4 Correct User: {{content}}", {"eos_token"}, "GPT4 Correct Assistant:"]),
-    format_assistant=StringFormatter(slots=["{{content}}"]),
+    format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}]),
     format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
     force_system=True,
 )
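The `openchat` change is a behavior fix rather than formatting: the old `format_assistant` emitted the reply without a terminating `eos_token`, so training targets never taught the model when to stop. With OpenChat's end-of-turn token substituted in (an assumption), the fixed rendering is roughly:

```python
eos = "<|end_of_turn|>"  # assumed OpenChat eos token
prompt = f"GPT4 Correct User: Hi{eos}GPT4 Correct Assistant:"  # from format_user
old_target = " Hello!"  # before the fix: no terminator
new_target = " Hello!" + eos  # after the fix: reply ends with eos
print(prompt + new_target)
```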
@@ -644,9 +653,3 @@ _register_template(
     format_user=StringFormatter(slots=[{"token": "<human>"}, ":{{content}}\n", {"token": "<bot>"}, ":"]),
     format_separator=EmptyFormatter(slots=["\n"]),
 )
-
-_register_template(
-    name="atom",
-    format_user=StringFormatter(slots=[{"bos_token"}, "Human: {{content}}\n", {"eos_token"}, {"bos_token"}, "Assistant:"]),
-    format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]),
-)
@@ -884,6 +884,7 @@ register_model_group(
     template="zephyr",
 )

+
 register_model_group(
     models={
         "Atom-7B": {
@@ -34,9 +34,7 @@ def create_top() -> Dict[str, "Component"]:

     model_name.change(list_adapters, [model_name, finetuning_type], [adapter_path], queue=False).then(
         get_model_path, [model_name], [model_path], queue=False
-    ).then(
-        get_template, [model_name], [template], queue=False
-    )  # do not save config since the below line will save
+    ).then(get_template, [model_name], [template], queue=False)  # do not save config since the below line will save

     model_path.change(save_config, inputs=[lang, model_name, model_path], queue=False)

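The final hunk (apparently the web UI's top component) merges the last `.then()` call onto one line; behavior is unchanged. A self-contained sketch of the same Gradio chaining pattern, with stand-in callbacks in place of the project's helpers:

```python
import gradio as gr


def get_model_path(name: str) -> str:
    return f"models/{name}"  # stand-in lookup


def get_template(name: str) -> str:
    return "default"  # stand-in


with gr.Blocks() as demo:
    model_name = gr.Textbox(label="model_name")
    model_path = gr.Textbox(label="model_path")
    template = gr.Textbox(label="template")
    # .change() fires when the value changes; .then() runs the next callback
    # only after the previous one finishes.
    model_name.change(get_model_path, [model_name], [model_path], queue=False).then(
        get_template, [model_name], [template], queue=False
    )

demo.launch()
```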