From 5eaa50fa01a7172408840255d18bcc0ab43a01fb Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 21 Mar 2024 17:04:10 +0800
Subject: [PATCH] add citation

---
 .github/CITATION.cff | 37 +++++++++++++++++++++++++++++++++++++
 README.md            |  7 +++----
 README_zh.md         |  7 +++----
 3 files changed, 43 insertions(+), 8 deletions(-)
 create mode 100644 .github/CITATION.cff

diff --git a/.github/CITATION.cff b/.github/CITATION.cff
new file mode 100644
index 00000000..4caf3787
--- /dev/null
+++ b/.github/CITATION.cff
@@ -0,0 +1,37 @@
+cff-version: 1.2.0
+date-released: 2024-03
+message: "If you use this software, please cite it as below."
+authors:
+- family-names: "Zheng"
+  given-names: "Yaowei"
+- family-names: "Zhang"
+  given-names: "Richong"
+- family-names: "Zhang"
+  given-names: "Junhao"
+- family-names: "Ye"
+  given-names: "Yanhan"
+- family-names: "Luo"
+  given-names: "Zheyan"
+- family-names: "Ma"
+  given-names: "Yongqiang"
+title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
+url: "https://arxiv.org/abs/2403.13372"
+preferred-citation:
+  type: article
+  authors:
+  - family-names: "Zheng"
+    given-names: "Yaowei"
+  - family-names: "Zhang"
+    given-names: "Richong"
+  - family-names: "Zhang"
+    given-names: "Junhao"
+  - family-names: "Ye"
+    given-names: "Yanhan"
+  - family-names: "Luo"
+    given-names: "Zheyan"
+  - family-names: "Ma"
+    given-names: "Yongqiang"
+  journal: "arXiv preprint arXiv:2403.13372"
+  title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
+  url: "https://arxiv.org/abs/2403.13372"
+  year: 2024
diff --git a/README.md b/README.md
index fbe95121..7bff140c 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
 [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
 [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
-[![Citation](https://img.shields.io/badge/citation-21-green)](#projects-using-llama-factory)
+[![Citation](https://img.shields.io/badge/citation-22-green)](#projects-using-llama-factory)
 [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
 [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
 [![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
@@ -23,8 +23,6 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/9840a653-7e9c-41c8-ae89
 
 Choose your path:
 
-- **🤗 Spaces**: https://huggingface.co/spaces/hiyouga/LLaMA-Board
-- **ModelScope**: https://modelscope.cn/studios/hiyouga/LLaMA-Board
 - **Colab**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
 - **Local machine**: Please refer to [usage](#getting-started)
 
@@ -706,6 +704,7 @@ docker compose -f ./docker-compose.yml up -d
 1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
 1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
 1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
+1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
 1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
 1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
 1. **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
@@ -728,7 +727,7 @@ If this work is helpful, please kindly cite as:
 ```bibtex
 @article{zheng2024llamafactory,
   title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
-  author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo},
+  author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Yongqiang Ma},
   journal={arXiv preprint arXiv:2403.13372},
   year={2024},
   url={http://arxiv.org/abs/2403.13372}
diff --git a/README_zh.md b/README_zh.md
index 0dae2afe..c128660e 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -5,7 +5,7 @@
 [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
 [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
 [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
-[![Citation](https://img.shields.io/badge/citation-21-green)](#使用了-llama-factory-的项目)
+[![Citation](https://img.shields.io/badge/citation-22-green)](#使用了-llama-factory-的项目)
 [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
 [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
 [![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
@@ -23,8 +23,6 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd
 
 选择你的打开方式:
 
-- **Hugging Face 空间**:https://huggingface.co/spaces/hiyouga/LLaMA-Board
-- **魔搭社区**:https://modelscope.cn/studios/hiyouga/LLaMA-Board
 - **Colab**:https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
 - **本地机器**:请见[如何使用](#如何使用)
 
@@ -679,6 +677,7 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
 1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
 1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
 1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
+1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
 1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
 1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
 1. **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
@@ -701,7 +700,7 @@
 ```bibtex
 @article{zheng2024llamafactory,
   title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
-  author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo},
+  author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Yongqiang Ma},
   journal={arXiv preprint arXiv:2403.13372},
   year={2024},
   url={http://arxiv.org/abs/2403.13372}