release v0.7.1
commit 1c910079d8
parent 2a67ab3925

setup.py
@@ -5,9 +5,9 @@ from setuptools import find_packages, setup
 def get_version():
-    with open(os.path.join("src", "llmtuner", "__init__.py"), "r", encoding="utf-8") as f:
+    with open(os.path.join("src", "llmtuner", "cli.py"), "r", encoding="utf-8") as f:
         file_content = f.read()
-    pattern = r"{0}\W*=\W*\"([^\"]+)\"".format("__version__")
+    pattern = r"{}\W*=\W*\"([^\"]+)\"".format("VERSION")
         (version,) = re.findall(pattern, file_content)
         return version
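An aside, not part of the commit: after this change get_version() parses the release number out of src/llmtuner/cli.py instead of __init__.py, using the pattern VERSION\W*=\W*"([^"]+)". A minimal, self-contained sketch of that extraction, using an inline sample string rather than reading the file:

import re

# Sample of the line the pattern is expected to match in src/llmtuner/cli.py.
file_content = 'VERSION = "0.7.1"'

# Same pattern construction as in setup.py after this commit.
pattern = r"{}\W*=\W*\"([^\"]+)\"".format("VERSION")
(version,) = re.findall(pattern, file_content)
print(version)  # -> 0.7.1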

src/llmtuner/__init__.py
@@ -1,3 +1,6 @@
 # Level: api, webui > chat, eval, train > data, model > extras, hparams
 
-__version__ = "0.7.1.dev0"
+from .cli import VERSION
+
+
+__version__ = VERSION
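A quick illustration, not part of the diff: the version string now lives only in cli.py and is re-exported here, so the two names always agree. Assuming the package from this tree is importable as llmtuner:

# Hypothetical check; requires llmtuner 0.7.1 and its dependencies to be installed.
import llmtuner
from llmtuner.cli import VERSION

assert llmtuner.__version__ == VERSION  # both resolve to "0.7.1"
print(llmtuner.__version__)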

src/llmtuner/cli.py
@@ -1,7 +1,6 @@
 import sys
 from enum import Enum, unique
 
-from . import __version__
 from .api.app import run_api
 from .chat.chat_model import run_chat
 from .eval.evaluator import run_eval
@@ -9,17 +8,34 @@ from .train.tuner import export_model, run_exp
 from .webui.interface import run_web_demo, run_web_ui
 
 
-USAGE = """
-Usage:
-    llamafactory-cli api -h: launch an API server
-    llamafactory-cli chat -h: launch a chat interface in CLI
-    llamafactory-cli eval -h: do evaluation
-    llamafactory-cli export -h: merge LoRA adapters and export model
-    llamafactory-cli train -h: do training
-    llamafactory-cli webchat -h: launch a chat interface in Web UI
-    llamafactory-cli webui: launch LlamaBoard
-    llamafactory-cli version: show version info
-"""
+USAGE = (
+    "-" * 70
+    + "\n"
+    + "| Usage: |\n"
+    + "| llamafactory-cli api -h: launch an OpenAI-style API server |\n"
+    + "| llamafactory-cli chat -h: launch a chat interface in CLI |\n"
+    + "| llamafactory-cli eval -h: evaluate models |\n"
+    + "| llamafactory-cli export -h: merge LoRA adapters and export model |\n"
+    + "| llamafactory-cli train -h: train models |\n"
+    + "| llamafactory-cli webchat -h: launch a chat interface in Web UI |\n"
+    + "| llamafactory-cli webui: launch LlamaBoard |\n"
+    + "| llamafactory-cli version: show version info |\n"
+    + "-" * 70
+)
+
+VERSION = "0.7.1"
+
+WELCOME = (
+    "-" * 58
+    + "\n"
+    + "| Welcome to LLaMA Factory, version {}".format(VERSION)
+    + " " * (21 - len(VERSION))
+    + "|\n|"
+    + " " * 56
+    + "|\n"
+    + "| Project page: https://github.com/hiyouga/LLaMA-Factory |\n"
+    + "-" * 58
+)
 
 
 @unique
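An aside on the constants above, not part of the commit: the padding term " " * (21 - len(VERSION)) is what keeps the right border of the 58-column WELCOME box aligned, because the prefix "| Welcome to LLaMA Factory, version " is 36 characters and 36 + 21 + 1 = 58. A small sketch of that arithmetic:

# Illustrative only: why the literal 21 keeps the banner 58 columns wide.
VERSION = "0.7.1"

prefix = "| Welcome to LLaMA Factory, version {}".format(VERSION)  # 36 + len(VERSION) chars
line = prefix + " " * (21 - len(VERSION)) + "|"

assert len(line) == 58  # matches the "-" * 58 border
print(line)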
@@ -31,7 +47,7 @@ class Command(str, Enum):
     TRAIN = "train"
     WEBDEMO = "webchat"
     WEBUI = "webui"
-    VERSION = "version"
+    VER = "version"
     HELP = "help"
@@ -51,8 +67,8 @@ def main():
         run_web_demo()
     elif command == Command.WEBUI:
         run_web_ui()
-    elif command == Command.VERSION:
-        print("Welcome to LLaMA Factory, version {}".format(__version__))
+    elif command == Command.VER:
+        print(WELCOME)
     elif command == Command.HELP:
         print(USAGE)
     else:
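For illustration only, inferred from the hunks above: llamafactory-cli version now prints the boxed WELCOME banner from cli.py instead of the old one-line message. Assuming llmtuner from this tree is importable, the same text can be reproduced directly:

# Illustrative sketch; requires llmtuner 0.7.1 and its dependencies to be installed.
from llmtuner.cli import WELCOME

print(WELCOME)  # 58-column banner showing "version 0.7.1" and the project page URL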