diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py
index 77f93c5e..65d38bfb 100644
--- a/src/llmtuner/data/template.py
+++ b/src/llmtuner/data/template.py
@@ -644,3 +644,9 @@ _register_template(
     format_user=StringFormatter(slots=[{"token": "<human>"}, ":{{content}}\n", {"token": "<bot>"}, ":"]),
     format_separator=EmptyFormatter(slots=["\n"]),
 )
+
+_register_template(
+    name="atom",
+    format_user=StringFormatter(slots=[{"bos_token"}, "Human: {{content}}\n", {"eos_token"}, {"bos_token"}, "Assistant:"]),
+    format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]),
+)
diff --git a/src/llmtuner/extras/constants.py b/src/llmtuner/extras/constants.py
index 75775e17..5e4a0cbb 100644
--- a/src/llmtuner/extras/constants.py
+++ b/src/llmtuner/extras/constants.py
@@ -883,3 +883,17 @@ register_model_group(
         },
     },
     template="zephyr",
 )
+
+register_model_group(
+    models={
+        "Atom-7B": {
+            DownloadSource.DEFAULT: "FlagAlpha/Atom-7B",
+            DownloadSource.MODELSCOPE: "FlagAlpha/Atom-7B",
+        },
+        "Atom-7B-Chat": {
+            DownloadSource.DEFAULT: "FlagAlpha/Atom-7B-Chat",
+            DownloadSource.MODELSCOPE: "FlagAlpha/Atom-7B-Chat",
+        },
+    },
+    template="atom",
+)
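
For clarity, below is a minimal sketch (not part of the patch) of the prompt string the new `atom` template's slots describe: each user turn is wrapped as `{bos_token}Human: {content}\n{eos_token}{bos_token}Assistant:` and each assistant turn as `{content}\n{eos_token}`. The `render_atom_prompt` helper and the hard-coded `<s>`/`</s>` BOS/EOS values are assumptions for illustration; in practice the special tokens come from the Atom-7B tokenizer at runtime.

```python
# Illustration only: render a conversation the way the "atom" template's slots
# describe. The helper name and the literal "<s>"/"</s>" BOS/EOS strings are
# assumptions; the real values are taken from the model's tokenizer.
BOS, EOS = "<s>", "</s>"


def render_atom_prompt(turns):
    """turns: list of (user_message, assistant_message_or_None) pairs."""
    prompt = ""
    for user_msg, assistant_msg in turns:
        # format_user: {bos_token}Human: {{content}}\n{eos_token}{bos_token}Assistant:
        prompt += f"{BOS}Human: {user_msg}\n{EOS}{BOS}Assistant:"
        if assistant_msg is not None:
            # format_assistant: {{content}}\n{eos_token}
            prompt += f"{assistant_msg}\n{EOS}"
    return prompt


if __name__ == "__main__":
    print(render_atom_prompt([("Hello, who are you?", "I am Atom."), ("Tell me a joke.", None)]))
```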