Update quick_start.md

p18457032 2024-08-09 10:26:44 +08:00
parent 394c3f96e7
commit 994a95f94d
2 changed files with 20 additions and 102 deletions


@@ -1,98 +0,0 @@
# coding=utf-8
# Copyright 2022 The OpenBMB team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
import argparse
import os
parser = argparse.ArgumentParser(description='Load and save model weights with specified paths.')
parser.add_argument('--model_path', type=str, required=True, help='Path to the model file to convert.')
parser.add_argument('--output_path', type=str, required=True, help='Directory to save the converted weights.')
parser.add_argument('--model_type', type=str, default='fm9g', help='The model type; currently only "fm9g" (the 2B model) is supported.')
parser.add_argument('--task', type=str, default='pt2bin', help='The task; one of "pt2bin" or "bin2pt".')
# parser.add_argument('--layer_num', type=int, required=True, help='The layers of model')
args = parser.parse_args()

src_path = args.model_path
dst_path = args.output_path if args.output_path.endswith('/') else args.output_path + '/'
model_type = args.model_type
task = args.task

assert model_type in ['fm9g'], 'The "model_type" must be "fm9g"!'
assert task in ['pt2bin', 'bin2pt'], 'The "task" must be one of "pt2bin" or "bin2pt"!'

if model_type == 'fm9g':
    layer_num = 40  # the 2B model has 40 transformer layers

if not os.path.exists(dst_path):
    os.makedirs(dst_path)
def convert_hf_to_fm9g():
    # bin2pt: convert the 2B model from HF .bin format to fm9g .pt format
    ckpt = torch.load(src_path)
    new_ckpt = OrderedDict()
    new_ckpt['input_embedding.weight'] = ckpt['model.embed_tokens.weight']
    new_ckpt["encoder.output_layernorm.weight"] = ckpt['model.norm.weight']
    for i in range(layer_num):
        # attention projections and the surrounding layer norms
        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.project_q.weight"] = ckpt[f"model.layers.{i}.self_attn.q_proj.weight"]
        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.project_k.weight"] = ckpt[f"model.layers.{i}.self_attn.k_proj.weight"]
        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.project_v.weight"] = ckpt[f"model.layers.{i}.self_attn.v_proj.weight"]
        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.attention_out.weight"] = ckpt[f"model.layers.{i}.self_attn.o_proj.weight"]
        new_ckpt[f"encoder.layers.{i}.self_att.layernorm_before_attention.weight"] = ckpt[f"model.layers.{i}.input_layernorm.weight"]
        new_ckpt[f"encoder.layers.{i}.ffn.layernorm_before_ffn.weight"] = ckpt[f"model.layers.{i}.post_attention_layernorm.weight"]
        # gated MLP weights
        new_ckpt[f"encoder.layers.{i}.ffn.ffn.w_in.w_0.weight"] = ckpt[f'model.layers.{i}.mlp.gate_proj.weight']
        new_ckpt[f"encoder.layers.{i}.ffn.ffn.w_in.w_1.weight"] = ckpt[f'model.layers.{i}.mlp.up_proj.weight']
        new_ckpt[f"encoder.layers.{i}.ffn.ffn.w_out.weight"] = ckpt[f'model.layers.{i}.mlp.down_proj.weight']
    torch.save(new_ckpt, f"{dst_path}fm9g.pt")
def convert_fm9g_to_hf():
    # pt2bin: convert the 2B model from fm9g .pt format to HF .bin format
    state = torch.load(src_path)
    new_state = {}
    new_state["model.embed_tokens.weight"] = state["input_embedding.weight"]
    new_state["model.norm.weight"] = state["encoder.output_layernorm.weight"]
    for lid in range(layer_num):
        print(lid)  # progress indicator
        new_state[f"model.layers.{lid}.self_attn.q_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.project_q.weight"]
        new_state[f"model.layers.{lid}.self_attn.k_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.project_k.weight"]
        new_state[f"model.layers.{lid}.self_attn.v_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.project_v.weight"]
        new_state[f"model.layers.{lid}.self_attn.o_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.attention_out.weight"]
        new_state[f"model.layers.{lid}.mlp.gate_proj.weight"] = state[f"encoder.layers.{lid}.ffn.ffn.w_in.w_0.weight"]
        new_state[f"model.layers.{lid}.mlp.up_proj.weight"] = state[f"encoder.layers.{lid}.ffn.ffn.w_in.w_1.weight"]
        new_state[f"model.layers.{lid}.mlp.down_proj.weight"] = state[f"encoder.layers.{lid}.ffn.ffn.w_out.weight"]
        new_state[f"model.layers.{lid}.input_layernorm.weight"] = state[f"encoder.layers.{lid}.self_att.layernorm_before_attention.weight"]
        new_state[f"model.layers.{lid}.post_attention_layernorm.weight"] = state[f"encoder.layers.{lid}.ffn.layernorm_before_ffn.weight"]
    del state  # free the source state dict before saving
    torch.save(new_state, f"{dst_path}fm9g.bin")
if __name__ == "__main__":
    if model_type == 'fm9g' and task == 'bin2pt':
        convert_hf_to_fm9g()
    elif model_type == 'fm9g' and task == 'pt2bin':
        convert_fm9g_to_hf()
    else:
        raise ValueError('Please check the model type and task!')

quick_start.md

@@ -52,6 +52,7 @@
- [Multi-machine Training](#多机训练)
- [Detailed Parameter Description](#参数详细介绍)
- [Monitoring Training](#查看训练情况)
- [Model Format Conversion](#模型格式转换)
- [Model Inference](#模型推理)
- [FAQ](#常见问题)
@@ -433,6 +434,21 @@ tensorboard -logdir /apps/fm9g_2b/data/tensorboard/2b_0701 # directory storing the .events files
TypeError: MessageToJson() got an unexpected keyword argument 'including_default_value_fields'
```
## Model Format Conversion
After training, the .pt checkpoint must be converted to a .bin checkpoint before it can be used for inference.
This project provides a script for converting the 2B model between the two formats, located at ./quick_start_clean/convert_hf_fm9g.py. Usage:
```shell
python convert_hf_fm9g.py \
    --model_path /the_path_to_pt_or_bin/ \
    --output_path /the_path_to_target_directory/ \
    --model_type fm9g \
    --task pt2bin
# --model_path:  path of the model to convert
# --output_path: directory where the converted model is saved
# --model_type:  specify fm9g for the 2B model
# --task:        pt2bin converts a .pt model to .bin; bin2pt does the reverse
```
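After a conversion, you can quickly confirm that no tensors were lost. A minimal sketch (the output file names follow the script's fm9g.pt/fm9g.bin defaults; the directory paths are illustrative):
```python
import torch

# Load the source and converted checkpoints on CPU and compare
# tensor counts and total parameter counts (illustrative paths).
src = torch.load("2b_ckpt/fm9g.pt", map_location="cpu")
dst = torch.load("converted/fm9g.bin", map_location="cpu")
assert len(src) == len(dst), "tensor count changed during conversion"
assert sum(t.numel() for t in src.values()) == sum(t.numel() for t in dst.values()), \
    "total parameter count changed during conversion"
print(f"OK: {len(dst)} tensors preserved")
```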
To convert the 8B model, switch to the master branch; the conversion script is convert.py in this project's master branch.
## Model Inference
Two inference approaches are described: offline batch inference and serving an OpenAI-compatible API.
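For reference, offline batch inference with vLLM follows this pattern (a minimal sketch; the model path is illustrative and must point at a converted .bin-format checkpoint):
```python
from vllm import LLM, SamplingParams

# Offline batch inference sketch: load the model once, then generate
# for a whole list of prompts in a single call (illustrative path).
llm = LLM(model="../models/2b_sft_model/", tokenizer_mode="auto",
          dtype="auto", trust_remote_code=True)
params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=256)
for output in llm.generate(["Introduce the FM9G model."], params):
    print(output.outputs[0].text)
```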
@@ -498,7 +514,7 @@ python -m vllm.entrypoints.openai.api_server \
    --tokenizer-mode auto \
    --dtype auto \
    --trust-remote-code \
    --api-key FM9GAPI
# Note again that the model being loaded must be in .bin format
# As with offline batch inference, tokenizer-mode is "auto" for the on-device 2B model
# dtype is the model's data type; "auto" is fine
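Once the server is up, a quick connectivity check is to list the served models (a sketch; the key must match the --api-key passed above):
```python
from openai import OpenAI

# List the models served at the vLLM OpenAI-compatible endpoint.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="FM9GAPI")
for model in client.models.list():
    print(model.id)
```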
@@ -511,7 +527,7 @@
    --model ../models/8b_sft_model/ \
    --tokenizer-mode cpm \
    --dtype auto \
    --api-key FM9GAPI
# As with offline batch inference, the 8B SFT model uses tokenizer-mode "cpm"
```
@@ -530,7 +546,7 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
# client.py
from openai import OpenAI
# If an API key was specified when launching the server, set it here; otherwise use "EMPTY"
openai_api_key = "FM9GAPI"
openai_api_base = "http://localhost:8000/v1"
client = OpenAI(
    api_key=openai_api_key,
@@ -549,7 +565,7 @@ print("Completion result:", completion)
from openai import OpenAI
client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="FM9GAPI",
)
# For multi-turn dialogue, just append the previous round's question and answer to the current input
completion = client.chat.completions.create(