rename package

parent b2fc7aeb03
commit 308edbc426
@@ -6,7 +6,7 @@ COPY requirements.txt /app/
 RUN pip install -r requirements.txt

 COPY . /app/
-RUN pip install -e .[deepspeed,metrics,bitsandbytes,qwen]
+RUN pip install -e .[metrics,bitsandbytes,qwen]

 VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
 EXPOSE 7860
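Note: the bracketed extras in pip install -e .[metrics,bitsandbytes,qwen] select optional dependency groups from setup.py's extra_require mapping (touched later in this diff), so dropping deepspeed means the image no longer installs that group. A minimal sketch of such a mapping; the package lists below are illustrative assumptions, not the project's actual pins:

# Sketch of an extras_require mapping consistent with the extras named in the
# Dockerfile line above (group contents are assumed for illustration).
extra_require = {
    "deepspeed": ["deepspeed"],
    "metrics": ["nltk", "jieba", "rouge-chinese"],
    "bitsandbytes": ["bitsandbytes"],
    "qwen": ["transformers_stream_generator"],
}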
@@ -3,8 +3,7 @@
 [![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
 [![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
 [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
-[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
-[![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
+[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/)
 [![Citation](https://img.shields.io/badge/citation-44-green)](#projects-using-llama-factory)
 [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
 [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
@@ -176,9 +175,9 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
 >
 > Remember to use the **SAME** template in training and inference.

-Please refer to [constants.py](src/llmtuner/extras/constants.py) for a full list of models we supported.
+Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of models we supported.

-You also can add a custom chat template to [template.py](src/llmtuner/data/template.py).
+You also can add a custom chat template to [template.py](src/llamafactory/data/template.py).

 ## Supported Training Approaches

@@ -3,8 +3,7 @@
 [![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
 [![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
 [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
-[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
-[![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
+[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/)
 [![Citation](https://img.shields.io/badge/citation-44-green)](#使用了-llama-factory-的项目)
 [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
 [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
@@ -176,9 +175,9 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
 >
 > Be sure to use the **EXACT SAME** template in training and inference.

-Please refer to [constants.py](src/llmtuner/extras/constants.py) for the full list of supported models.
+Please refer to [constants.py](src/llamafactory/extras/constants.py) for the full list of supported models.

-You can also add your own chat template in [template.py](src/llmtuner/data/template.py).
+You can also add your own chat template in [template.py](src/llamafactory/data/template.py).

 ## Training Approaches

@@ -13,7 +13,7 @@ select = ["C", "E", "F", "I", "W"]

 [tool.ruff.lint.isort]
 lines-after-imports = 2
-known-first-party = ["llmtuner"]
+known-first-party = ["llamafactory"]
 known-third-party = [
     "accelerate",
     "datasets",
@@ -8,7 +8,7 @@ import torch
 from deepspeed.accelerator import get_accelerator  # type: ignore
 from deepspeed.profiling.flops_profiler import get_model_profile  # type: ignore

-from llmtuner.chat import ChatModel
+from llamafactory.chat import ChatModel


 def calculate_flops(
@@ -12,10 +12,10 @@ from torch.utils.data import DataLoader
 from tqdm import tqdm
 from transformers import DataCollatorForLanguageModeling, DataCollatorForSeq2Seq

-from llmtuner.data import get_dataset
-from llmtuner.extras.constants import IGNORE_INDEX
-from llmtuner.hparams import get_train_args
-from llmtuner.model import load_tokenizer
+from llamafactory.data import get_dataset
+from llamafactory.extras.constants import IGNORE_INDEX
+from llamafactory.hparams import get_train_args
+from llamafactory.model import load_tokenizer


 BASE_LR = 3e-4  # 1.5e-4 for 30B-70B models
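The BASE_LR comment suggests this script scales a base learning rate by model size and batch size. One common rule it may implement is square-root batch-size scaling; whether cal_lr.py uses exactly this rule, and the reference batch size, are assumptions:

import math

# Hedged sketch of square-root LR scaling (the rule and BASE_BS are assumed,
# not confirmed by this diff).
BASE_LR = 3e-4       # 1.5e-4 for 30B-70B models
BASE_BS = 4_000_000  # reference batch size in tokens (assumed)


def scaled_lr(tokens_per_step: float) -> float:
    # LR grows with the square root of the effective batch size.
    return BASE_LR * math.sqrt(tokens_per_step / BASE_BS)


print(f"{scaled_lr(1_000_000):.2e}")  # -> 1.50e-04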
@@ -12,10 +12,10 @@ from torch.utils.data import DataLoader
 from tqdm import tqdm
 from transformers import DataCollatorForLanguageModeling, DataCollatorForSeq2Seq

-from llmtuner.data import get_dataset
-from llmtuner.extras.constants import IGNORE_INDEX
-from llmtuner.hparams import get_train_args
-from llmtuner.model import load_model, load_tokenizer
+from llamafactory.data import get_dataset
+from llamafactory.extras.constants import IGNORE_INDEX
+from llamafactory.hparams import get_train_args
+from llamafactory.model import load_model, load_tokenizer


 @dataclass
@@ -7,9 +7,9 @@ from collections import defaultdict
 import fire
 from tqdm import tqdm

-from llmtuner.data import get_dataset
-from llmtuner.hparams import get_train_args
-from llmtuner.model import load_tokenizer
+from llamafactory.data import get_dataset
+from llamafactory.hparams import get_train_args
+from llamafactory.model import load_tokenizer


 def length_cdf(
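From its name and imports, length_cdf presumably tabulates the cumulative distribution of tokenized sample lengths, which helps pick a sensible cutoff length. A self-contained sketch of that computation on made-up lengths (the real script reads lengths from the dataset):

from collections import defaultdict

# Illustrative lengths; bucket each into the next 1000-token boundary,
# then print the running cumulative fraction.
lengths = [120, 480, 510, 990, 1500, 3000]
counter = defaultdict(int)
for n in lengths:
    counter[(n // 1000 + 1) * 1000] += 1

total, seen = len(lengths), 0
for bucket in sorted(counter):
    seen += counter[bucket]
    print(f"<= {bucket:>5} tokens: {seen / total:.2%}")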
setup.py (6 changed lines)
@@ -5,7 +5,7 @@ from setuptools import find_packages, setup


 def get_version():
-    with open(os.path.join("src", "llmtuner", "cli.py"), "r", encoding="utf-8") as f:
+    with open(os.path.join("src", "llamafactory", "cli.py"), "r", encoding="utf-8") as f:
         file_content = f.read()
         pattern = r"{}\W*=\W*\"([^\"]+)\"".format("VERSION")
         (version,) = re.findall(pattern, file_content)
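For context, the pattern above extracts the value assigned to VERSION in cli.py. A standalone illustration; the one-line file excerpt and the version string are assumptions:

import re

# Hypothetical excerpt of src/llamafactory/cli.py (contents assumed).
file_content = 'VERSION = "0.7.0"'

# Same pattern as in setup.py: match `VERSION = "<value>"`, capture <value>.
pattern = r"{}\W*=\W*\"([^\"]+)\"".format("VERSION")
(version,) = re.findall(pattern, file_content)
print(version)  # -> 0.7.0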
@@ -38,7 +38,7 @@ extra_require = {

 def main():
     setup(
-        name="llmtuner",
+        name="llamafactory",
         version=get_version(),
         author="hiyouga",
         author_email="hiyouga" "@" "buaa.edu.cn",
@@ -53,7 +53,7 @@ def main():
         python_requires=">=3.8.0",
         install_requires=get_requires(),
         extras_require=extra_require,
-        entry_points={"console_scripts": ["llamafactory-cli = llmtuner.cli:main"]},
+        entry_points={"console_scripts": ["llamafactory-cli = llamafactory.cli:main"]},
         classifiers=[
             "Development Status :: 4 - Beta",
             "Intended Audience :: Developers",
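The console_scripts entry keeps the llamafactory-cli command name while retargeting it at the renamed package. A sketch of the thin wrapper pip generates for such an entry (installers differ in the exact file they emit, but it amounts to this):

import sys

from llamafactory.cli import main

# Generated console scripts call the named function and exit with its return
# value; `llamafactory-cli <args>` effectively runs this wrapper.
if __name__ == "__main__":
    sys.exit(main())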
@@ -2,8 +2,8 @@ import os

 import uvicorn

-from llmtuner.api.app import create_app
-from llmtuner.chat import ChatModel
+from llamafactory.api.app import create_app
+from llamafactory.chat import ChatModel


 def main():
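This script evidently wires create_app into uvicorn. A hedged sketch of the wiring; create_app's exact signature, the host, and the port variable are assumptions beyond what the imports show:

import os

import uvicorn

from llamafactory.api.app import create_app
from llamafactory.chat import ChatModel


def main():
    chat_model = ChatModel()
    app = create_app(chat_model)  # assumed signature: create_app(chat_model)
    # Host/port are assumed; API_PORT is a hypothetical environment variable.
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("API_PORT", "8000")))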
@@ -32,6 +32,8 @@ class DatasetAttr:
     prompt: Optional[str] = "instruction"
     query: Optional[str] = "input"
     response: Optional[str] = "output"
+    chosen: Optional[str] = "chosen"
+    rejected: Optional[str] = "rejected"
     history: Optional[str] = None
     """ columns for the sharegpt format """
     messages: Optional[str] = "conversations"
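The new chosen/rejected attributes presumably map preference-data columns onto the dataset schema. A sketch of a record these column defaults would pick up; the field values are made up for illustration:

# Illustrative dataset record; keys line up with DatasetAttr's defaults
# (values are fabricated, only the column names come from the diff).
example = {
    "instruction": "Summarize the text.",    # -> prompt
    "input": "LLaMA-Factory unifies ...",    # -> query
    "output": "A unified fine-tuning ...",   # -> response
    "chosen": "Preferred answer ...",        # -> chosen
    "rejected": "Dispreferred answer ...",   # -> rejected
}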
@@ -10,7 +10,7 @@ if TYPE_CHECKING:
     from datasets import Dataset, IterableDataset
     from transformers import Seq2SeqTrainingArguments

-    from llmtuner.hparams import DataArguments
+    from ..hparams import DataArguments


 logger = get_logger(__name__)
@@ -30,7 +30,7 @@ except Exception:
 if TYPE_CHECKING:
     from trl import AutoModelForCausalLMWithValueHead

-    from llmtuner.hparams import ModelArguments
+    from ..hparams import ModelArguments


 logger = get_logger(__name__)
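These two hunks switch intra-package imports from absolute (llmtuner.hparams) to relative (..hparams), so the modules no longer hard-code the package name. A runnable toy demonstration of why relative imports are rename-proof; the package layout below is fabricated purely for illustration:

import importlib
import os
import sys
import tempfile

# Build a throwaway package whose submodule uses a relative import.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "pkg"))
open(os.path.join(root, "pkg", "__init__.py"), "w").close()
with open(os.path.join(root, "pkg", "hparams.py"), "w") as f:
    f.write("class ModelArguments: ...\n")
with open(os.path.join(root, "pkg", "model.py"), "w") as f:
    f.write("from .hparams import ModelArguments\n")  # no package name baked in

sys.path.insert(0, root)
mod = importlib.import_module("pkg.model")  # works whatever "pkg" is renamed to
print(mod.ModelArguments)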
Some files were not shown because too many files have changed in this diff.