fix requirements for Windows

This commit is contained in:
hiyouga 2024-04-03 21:56:43 +08:00
parent 148bda353f
commit 7f6e412604
5 changed files with 4 additions and 4 deletions

View File

@ -305,7 +305,7 @@ cd LLaMA-Factory
pip install -e .[metrics] pip install -e .[metrics]
``` ```
Extra dependencies available: deepspeed, metrics, unsloth, vllm, bitsandbytes, gptq, awq, aqlm, qwen, quality Extra dependencies available: deepspeed, metrics, unsloth, galore, vllm, bitsandbytes, gptq, awq, aqlm, qwen, quality
<details><summary>For Windows users</summary> <details><summary>For Windows users</summary>

View File

@ -305,7 +305,7 @@ cd LLaMA-Factory
pip install -e .[metrics] pip install -e .[metrics]
``` ```
可选的额外依赖项：deepspeed、metrics、unsloth、vllm、bitsandbytes、gptq、awq、aqlm、qwen、quality 可选的额外依赖项：deepspeed、metrics、unsloth、galore、vllm、bitsandbytes、gptq、awq、aqlm、qwen、quality
<details><summary>Windows 用户指南</summary> <details><summary>Windows 用户指南</summary>

View File

@ -15,4 +15,3 @@ fastapi
sse-starlette sse-starlette
matplotlib matplotlib
fire fire
galore-torch

View File

@ -23,6 +23,7 @@ extra_require = {
"deepspeed": ["deepspeed>=0.10.0"], "deepspeed": ["deepspeed>=0.10.0"],
"metrics": ["nltk", "jieba", "rouge-chinese"], "metrics": ["nltk", "jieba", "rouge-chinese"],
"unsloth": ["torch==2.2.0", "unsloth[cu121-ampere-torch220]"], "unsloth": ["torch==2.2.0", "unsloth[cu121-ampere-torch220]"],
"galore": ["galore-torch"],
"vllm": ["vllm>=0.3.3"], "vllm": ["vllm>=0.3.3"],
"bitsandbytes": ["bitsandbytes>=0.39.0"], "bitsandbytes": ["bitsandbytes>=0.39.0"],
"gptq": ["optimum>=1.16.0", "auto-gptq>=0.5.0"], "gptq": ["optimum>=1.16.0", "auto-gptq>=0.5.0"],

View File

@ -35,7 +35,7 @@ class HuggingfaceEngine(BaseEngine):
self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template) self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template)
self.model = load_model( self.model = load_model(
self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate) self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate)
) ) # must after fixing tokenizer to resize vocab
self.generating_args = generating_args.to_dict() self.generating_args = generating_args.to_dict()
@staticmethod @staticmethod