update readme

hiyouga 2024-05-29 18:39:11 +08:00
parent 880b4a9acf
commit 89ca832740
7 changed files with 76 additions and 62 deletions

.readthedocs.yaml (new file, +19)

@@ -0,0 +1,19 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

version: 2

build:
  os: ubuntu-22.04
  tools:
    python: "3.8"

sphinx:
  configuration: docs/source/conf.py

formats:
  - pdf

python:
  install:
    - requirements: docs/requirements-docs.txt
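As a side note, the build that Read the Docs runs from this config can be reproduced locally; a minimal sketch, assuming the `docs/source` layout and requirements file referenced above exist in the repo:

```bash
# Reproduce the Read the Docs build locally
# (assumes the docs/ layout referenced in .readthedocs.yaml)
pip install -r docs/requirements-docs.txt
sphinx-build -b html docs/source docs/build/html
```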

README.md

@@ -330,7 +330,7 @@ cd LLaMA-Factory
 pip install -e .[torch,metrics]
 ```
-Extra dependencies available: torch, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality
+Extra dependencies available: torch, torch_npu, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality
 > [!TIP]
 > Use `pip install --no-deps -e .` to resolve package conflicts.
@@ -351,31 +351,27 @@ To enable FlashAttention-2 on the Windows platform, you need to install the prec
 Join [NPU user group](assets/wechat_npu.jpg).
-Use `pip install -e .[torch_npu]` to install LLaMA-Factory with **[torch-npu](https://gitee.com/ascend/pytorch)** library.
-To utilize Ascend NPU devices for (distributed) training and inference, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. You can follow chapter **[install CANN](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)** in the installation tutorial to install CANN Toolkit and the kernels, or use the fast installation as follows:
+To install LLaMA Factory on Ascend NPU devices, please specify extra dependencies: `pip install -e .[torch_npu,metrics]`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
 ```bash
-# replace the url according to your choice
+# replace the url according to your CANN version and devices
 # install CANN Toolkit
 wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
-chmod +x Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
-./Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
+bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
 # install CANN Kernels
 wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
-chmod +x Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
-./Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
+bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
 # set env variables
 source /usr/local/Ascend/ascend-toolkit/set_env.sh
 ```
 | Requirement | Minimum | Recommend |
-| ----------- | ------- | --------- |
+| ----------- | ------- | ----------- |
 | CANN        | 8.0.RC1 | 8.0.RC1 |
-| torch       | 2.2.0   | 2.2.0 |
-| torch-npu   | 2.2.0   | 2.2.0 |
+| torch       | 2.1.0   | 2.1.0 |
+| torch-npu   | 2.1.0   | 2.1.0.post3 |
 | deepspeed   | 0.13.2  | 0.13.2 |
 Docker image:

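Once CANN and the `torch_npu` extra are installed, a quick sanity check can confirm the NPU is visible; a minimal sketch (importing `torch_npu` is what registers the `torch.npu` namespace):

```bash
# Check NPU visibility after sourcing set_env.sh
# (torch.npu is provided by the torch-npu plugin)
python -c "import torch; import torch_npu; print(torch.npu.is_available())"
```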
README_zh.md

@@ -330,7 +330,7 @@ cd LLaMA-Factory
 pip install -e .[torch,metrics]
 ```
-Optional extra dependencies: torch, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality
+Optional extra dependencies: torch, torch_npu, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality
 > [!TIP]
 > If you run into package conflicts, use `pip install --no-deps -e .` to resolve them.
@@ -351,31 +351,27 @@ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/downl
 Join the [NPU user group](assets/wechat_npu.jpg).
-Use `pip install -e .[torch_npu]` to install LLaMA-Factory with **[torch-npu](https://gitee.com/ascend/pytorch)**.
-To use Ascend NPU devices for (distributed) training or inference, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Refer to the **[Install CANN](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)** chapter of the installation tutorial, or use the following quick-install commands:
+When installing LLaMA Factory on Ascend NPU devices, specify the extra dependencies with `pip install -e .[torch_npu,metrics]`. You also need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**; refer to the [installation tutorial](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html) or use the following commands:
 ```bash
-# replace the URL with the one for the CANN version you need
+# replace the URL according to your CANN version and devices
 # install CANN Toolkit
 wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
-chmod +x Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
-./Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
+bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
 # install CANN Kernels
 wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
-chmod +x Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
-./Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
+bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
 # set env variables
 source /usr/local/Ascend/ascend-toolkit/set_env.sh
 ```
 | Requirement | Minimum | Recommended |
-| ----------- | ------- | --------- |
+| ----------- | ------- | ----------- |
 | CANN        | 8.0.RC1 | 8.0.RC1 |
-| torch       | 2.2.0   | 2.2.0 |
-| torch-npu   | 2.2.0   | 2.2.0 |
+| torch       | 2.1.0   | 2.1.0 |
+| torch-npu   | 2.1.0   | 2.1.0.post3 |
 | deepspeed   | 0.13.2  | 0.13.2 |
 Docker image:
@@ -383,7 +379,7 @@ Docker image:
 - 32GB: [download link](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html)
 - 64GB: [download link](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
-Remember to use `ASCEND_RT_VISIBLE_DEVICES` rather than `CUDA_VISIBLE_DEVICES` to specify the devices to use.
+Please use `ASCEND_RT_VISIBLE_DEVICES` rather than `CUDA_VISIBLE_DEVICES` to specify compute devices.
 If inference fails to work properly, try setting `do_sample: false`.

examples/accelerate/fsdp_config.yaml

@@ -5,16 +5,16 @@ downcast_bf16: 'no'
 fsdp_config:
   fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
   fsdp_backward_prefetch: BACKWARD_PRE
-  fsdp_cpu_ram_efficient_loading: true
   fsdp_forward_prefetch: false
-  fsdp_offload_params: true
+  fsdp_cpu_ram_efficient_loading: true
+  fsdp_offload_params: true # offload may affect training speed
   fsdp_sharding_strategy: FULL_SHARD
   fsdp_state_dict_type: FULL_STATE_DICT
   fsdp_sync_module_states: true
-  fsdp_use_orig_params: false
+  fsdp_use_orig_params: true
 machine_rank: 0
 main_training_function: main
-mixed_precision: fp16
+mixed_precision: fp16 # or bf16
 num_machines: 1 # the number of nodes
 num_processes: 2 # the number of GPUs in all nodes
 rdzv_backend: static

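This accelerate config is passed to the launcher via `--config_file`; a hypothetical launch sketch (the training script and YAML paths below are illustrative placeholders, not part of this commit):

```bash
# Launch a 2-GPU FSDP fine-tuning run with the config above
# (script and YAML paths are illustrative placeholders)
accelerate launch --config_file examples/accelerate/fsdp_config.yaml \
    src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml
```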
setup.py

@@ -21,6 +21,7 @@ def get_requires():
 extra_require = {
     "torch": ["torch>=1.13.1"],
+    "torch_npu": ["torch==2.1.0", "torch-npu==2.1.0.post3", "decorator"],
     "metrics": ["nltk", "jieba", "rouge-chinese"],
     "deepspeed": ["deepspeed>=0.10.0,<=0.14.0"],
     "bitsandbytes": ["bitsandbytes>=0.39.0"],
@@ -33,7 +34,6 @@ extra_require = {
     "qwen": ["tiktoken", "transformers_stream_generator"],
     "modelscope": ["modelscope"],
     "quality": ["ruff"],
-    "torch_npu": ["torch==2.1.0", "torch_npu==2.1.0.post3", "decorator"]
 }

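Each key of `extra_require` becomes a pip extra, so the relocated `torch_npu` group is selected at install time exactly as the updated READMEs describe:

```bash
# Install from source with the NPU extra
# (quotes protect the brackets in shells like zsh)
pip install -e ".[torch_npu,metrics]"
```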
src/llamafactory/data/template.py

@@ -822,6 +822,15 @@ _register_template(
 )
+
+_register_template(
+    name="telechat",
+    format_user=StringFormatter(slots=["<_user>{{content}}<_bot>"]),
+    format_system=StringFormatter(slots=["<_system>{{content}}<_end>"]),
+    stop_words=["<_end>"],
+    replace_eos=True,
+)
+
 _register_template(
     name="vicuna",
     format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
@@ -918,13 +927,3 @@ _register_template(
     format_user=StringFormatter(slots=["<human>:{{content}}\n<bot>:"]),
     format_separator=EmptyFormatter(slots=["\n"]),
 )
-
-
-_register_template(
-    name="telechat",
-    format_user=StringFormatter(slots=["<_user>{{content}}<_bot>"]),
-    format_system=StringFormatter(slots=["<_system>{{content}}<_end>"]),
-    default_system="",
-    stop_words=["<_end>"],
-    replace_eos=True,
-)

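For reference, the relocated `telechat` template renders a single-turn prompt roughly as follows (a sketch built from the slots above; the system and user text are made up):

```bash
# Approximate prompt produced by the telechat template for one turn;
# generation stops at <_end>, which replaces the EOS token
printf '<_system>%s<_end><_user>%s<_bot>\n' \
  "You are a helpful assistant." "How do I fine-tune TeleChat?"
```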
src/llamafactory/extras/constants.py

@@ -1095,6 +1095,26 @@ register_model_group(
 )
+
+register_model_group(
+    models={
+        "TeleChat-7B-Chat": {
+            DownloadSource.DEFAULT: "Tele-AI/telechat-7B",
+            DownloadSource.MODELSCOPE: "TeleAI/telechat-7B",
+        },
+        "TeleChat-12B-Chat": {
+            DownloadSource.DEFAULT: "Tele-AI/TeleChat-12B",
+            DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B",
+        },
+        "TeleChat-12B-v2-Chat": {
+            DownloadSource.DEFAULT: "Tele-AI/TeleChat-12B-v2",
+            DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B-v2",
+        },
+    },
+    module="query,key_value",
+    template="telechat",
+)
+
 register_model_group(
     models={
         "Vicuna1.5-7B-Chat": {
@@ -1355,19 +1375,3 @@ register_model_group(
     },
     template="zephyr",
 )
-
-
-register_model_group(
-    models={
-        "TeleChat-12B-Chat": {
-            DownloadSource.DEFAULT: "Tele-AI/TeleChat-12B",
-            DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B",
-        },
-        "TeleChat-12B-v2-Chat": {
-            DownloadSource.DEFAULT: "Tele-AI/TeleChat-12B-v2",
-            DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B-v2",
-        },
-    },
-    module='query,key_value',
-    template="telechat",
-)
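With the model group and template both registered, the new TeleChat entries should be usable by name; a hedged sketch with the project CLI (exact flags assumed from current LLaMA-Factory usage, not from this commit):

```bash
# Chat with a registered TeleChat checkpoint using its matching template
llamafactory-cli chat \
  --model_name_or_path Tele-AI/TeleChat-12B-v2 \
  --template telechat
```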