diff --git a/README.md b/README.md index 778d90b7..edeedebd 100644 --- a/README.md +++ b/README.md @@ -305,7 +305,7 @@ cd LLaMA-Factory pip install -e .[metrics] ``` -Extra dependencies available: deepspeed, metrics, unsloth, vllm, bitsandbytes, gptq, awq, aqlm, qwen, quality +Extra dependencies available: deepspeed, metrics, unsloth, galore, vllm, bitsandbytes, gptq, awq, aqlm, qwen, quality
For Windows users diff --git a/README_zh.md b/README_zh.md index 7b02c55d..09f02922 100644 --- a/README_zh.md +++ b/README_zh.md @@ -305,7 +305,7 @@ cd LLaMA-Factory pip install -e .[metrics] ``` -可选的额外依赖项:deepspeed、metrics、unsloth、vllm、bitsandbytes、gptq、awq、aqlm、qwen、quality +可选的额外依赖项:deepspeed、metrics、unsloth、galore、vllm、bitsandbytes、gptq、awq、aqlm、qwen、quality
Windows 用户指南 diff --git a/requirements.txt b/requirements.txt index 88b88ee4..fe8ab35c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,4 +15,3 @@ fastapi sse-starlette matplotlib fire -galore-torch diff --git a/setup.py b/setup.py index 2caee7a8..67b6f98d 100644 --- a/setup.py +++ b/setup.py @@ -23,6 +23,7 @@ extra_require = { "deepspeed": ["deepspeed>=0.10.0"], "metrics": ["nltk", "jieba", "rouge-chinese"], "unsloth": ["torch==2.2.0", "unsloth[cu121-ampere-torch220]"], + "galore": ["galore-torch"], "vllm": ["vllm>=0.3.3"], "bitsandbytes": ["bitsandbytes>=0.39.0"], "gptq": ["optimum>=1.16.0", "auto-gptq>=0.5.0"], diff --git a/src/llmtuner/chat/hf_engine.py b/src/llmtuner/chat/hf_engine.py index bcdbd15a..ddb48e47 100644 --- a/src/llmtuner/chat/hf_engine.py +++ b/src/llmtuner/chat/hf_engine.py @@ -35,7 +35,7 @@ class HuggingfaceEngine(BaseEngine): self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template) self.model = load_model( self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate) - ) + ) # must be after fixing tokenizer to resize vocab self.generating_args = generating_args.to_dict() @staticmethod