From 994a95f94db92fbcf850eaddc83d6ed63cb4d3e8 Mon Sep 17 00:00:00 2001
From: p18457032
Date: Fri, 9 Aug 2024 10:26:44 +0800
Subject: [PATCH] Update quick_start.md

---
 quick_start_clean/convert_hf_cpm.py      | 98 ------------------------
 quick_start_clean/readmes/quick_start.md | 24 +++++-
 2 files changed, 20 insertions(+), 102 deletions(-)
 delete mode 100644 quick_start_clean/convert_hf_cpm.py

diff --git a/quick_start_clean/convert_hf_cpm.py b/quick_start_clean/convert_hf_cpm.py
deleted file mode 100644
index e47d9bb..0000000
--- a/quick_start_clean/convert_hf_cpm.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# coding=utf-8
-# Copyright 2022 The OpenBMB team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from collections import OrderedDict
-
-import torch
-import argparse
-import os
-
-parser = argparse.ArgumentParser(description='Load and save model weights with specified paths.')
-parser.add_argument('--model_path', type=str, required=True, help='Path to the checkpoint file to convert.')
-parser.add_argument('--output_path', type=str, required=True, help='Directory in which to save the converted weights.')
-parser.add_argument('--model_type', type=str, default='fm9g', help='The model type; must be "fm9g" (the 2B model).')
-parser.add_argument('--task', type=str, default='pt2bin', help='The task; must be one of "pt2bin" or "bin2pt".')
-# parser.add_argument('--layer_num', type=int, required=True, help='The number of layers in the model.')
-
-args = parser.parse_args()
-
-src_path = args.model_path
-dst_path = args.output_path if args.output_path.endswith('/') else args.output_path + '/'
-model_type = args.model_type
-task = args.task
-
-assert model_type in ['fm9g'], 'The "model_type" must be "fm9g"!'
-assert task in ['pt2bin', 'bin2pt'], 'The "task" must be one of "pt2bin" or "bin2pt"!'
-
-if model_type == 'fm9g':
-    layer_num = 40  # the 2B fm9g model has 40 transformer layers
-
-if not os.path.exists(dst_path):
-    os.makedirs(dst_path)
-
-
-def convert_hf_to_fm9g():
-    # bin2pt: convert a 2B HF-format (.bin) checkpoint to fm9g (.pt) format
-    ckpt = torch.load(src_path)
-    new_ckpt = OrderedDict()
-
-    new_ckpt['input_embedding.weight'] = ckpt['model.embed_tokens.weight']
-    new_ckpt["encoder.output_layernorm.weight"] = ckpt['model.norm.weight']
-    for i in range(layer_num):
-        # attention projections and the two per-layer layernorms
-        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.project_q.weight"] = ckpt[f"model.layers.{i}.self_attn.q_proj.weight"]
-        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.project_k.weight"] = ckpt[f"model.layers.{i}.self_attn.k_proj.weight"]
-        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.project_v.weight"] = ckpt[f"model.layers.{i}.self_attn.v_proj.weight"]
-        new_ckpt[f"encoder.layers.{i}.self_att.self_attention.attention_out.weight"] = ckpt[f"model.layers.{i}.self_attn.o_proj.weight"]
-        new_ckpt[f"encoder.layers.{i}.self_att.layernorm_before_attention.weight"] = ckpt[f"model.layers.{i}.input_layernorm.weight"]
-        new_ckpt[f"encoder.layers.{i}.ffn.layernorm_before_ffn.weight"] = ckpt[f"model.layers.{i}.post_attention_layernorm.weight"]
-
-        # feed-forward projections
-        new_ckpt[f"encoder.layers.{i}.ffn.ffn.w_in.w_0.weight"] = ckpt[f'model.layers.{i}.mlp.gate_proj.weight']
-        new_ckpt[f"encoder.layers.{i}.ffn.ffn.w_in.w_1.weight"] = ckpt[f'model.layers.{i}.mlp.up_proj.weight']
-        new_ckpt[f"encoder.layers.{i}.ffn.ffn.w_out.weight"] = ckpt[f'model.layers.{i}.mlp.down_proj.weight']
-
-    torch.save(new_ckpt, f"{dst_path}fm9g.pt")
-
-
-def convert_fm9g_to_hf():
-    # pt2bin: convert a 2B fm9g (.pt) checkpoint to HF format (.bin)
-    state = torch.load(src_path)
-
-    new_state = {}
-    new_state["model.embed_tokens.weight"] = state["input_embedding.weight"]
-    new_state["model.norm.weight"] = state["encoder.output_layernorm.weight"]
-    for lid in range(layer_num):
-        print(lid)  # progress: current layer index
-        new_state[f"model.layers.{lid}.self_attn.q_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.project_q.weight"]
-        new_state[f"model.layers.{lid}.self_attn.k_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.project_k.weight"]
-        new_state[f"model.layers.{lid}.self_attn.v_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.project_v.weight"]
-
-        new_state[f"model.layers.{lid}.self_attn.o_proj.weight"] = state[f"encoder.layers.{lid}.self_att.self_attention.attention_out.weight"]
-        new_state[f"model.layers.{lid}.mlp.gate_proj.weight"] = state[f"encoder.layers.{lid}.ffn.ffn.w_in.w_0.weight"]
-        new_state[f"model.layers.{lid}.mlp.up_proj.weight"] = state[f"encoder.layers.{lid}.ffn.ffn.w_in.w_1.weight"]
-        new_state[f"model.layers.{lid}.mlp.down_proj.weight"] = state[f"encoder.layers.{lid}.ffn.ffn.w_out.weight"]
-
-        new_state[f"model.layers.{lid}.input_layernorm.weight"] = state[f"encoder.layers.{lid}.self_att.layernorm_before_attention.weight"]
-        new_state[f"model.layers.{lid}.post_attention_layernorm.weight"] = state[f"encoder.layers.{lid}.ffn.layernorm_before_ffn.weight"]
-    del state  # free the source state dict before saving
-    torch.save(new_state, f"{dst_path}fm9g.bin")
-
-
-if __name__ == "__main__":
-    if model_type == 'fm9g' and task == 'bin2pt':
-        convert_hf_to_fm9g()
-    elif model_type == 'fm9g' and task == 'pt2bin':
-        convert_fm9g_to_hf()
-    else:
-        raise ValueError('Please check the model type and task!')
\ No newline at end of file
diff --git a/quick_start_clean/readmes/quick_start.md b/quick_start_clean/readmes/quick_start.md
index be0399d..0b9b8b2 100644
--- a/quick_start_clean/readmes/quick_start.md
+++ b/quick_start_clean/readmes/quick_start.md
@@ -52,6 +52,7 @@
   - [Multi-node training](#多机训练)
   - [Detailed parameter description](#参数详细介绍)
   - [Monitoring training](#查看训练情况)
+  - [Model format conversion](#model-format-conversion)
   - [Model inference](#model-inference)
 - [FAQ](#常见问题)
 
@@ -433,6 +434,21 @@ tensorboard --logdir /apps/fm9g_2b/data/tensorboard/2b_0701  # directory holding the .events files
 TypeError: MessageToJson() got an unexpected keyword argument 'including_default_value_fields'
 ```
 
+## Model Format Conversion
+After training finishes, the .pt checkpoint must be converted to a .bin checkpoint before it can be used for inference.
+This project provides the script used to convert the 2B model between the two formats; it is located at ./quick_start_clean/convert_hf_fm9g.py and is used as follows:
+
+```shell
+python convert_hf_fm9g.py \
+  --model_path /the_path_to_pt_or_bin/ \
+  --output_path /the_path_to_target_directory/ \
+  --model_type fm9g \
+  --task pt2bin
+
+# --model_path:  path of the checkpoint to convert
+# --output_path: directory where the converted checkpoint is written
+# --model_type:  always fm9g for the 2B model
+# --task:        pt2bin converts .pt to .bin; bin2pt converts .bin to .pt
+```
+
+To convert the 8B model, switch to the master branch; the script is convert.py in this project's master branch.
+
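+As a quick sanity check (a minimal sketch, not part of the project's scripts; it assumes the output file name fm9g.bin written by the conversion script shown earlier in this patch), you can load the converted checkpoint and inspect a few keys:
+
+```python
+import torch
+
+# fm9g.bin is the name the pt2bin conversion writes into --output_path
+state = torch.load("/the_path_to_target_directory/fm9g.bin", map_location="cpu")
+
+print(len(state), "tensors in the converted checkpoint")
+for name, tensor in list(state.items())[:5]:
+    print(name, tuple(tensor.shape))
+
+# HF-style checkpoints store the embedding under this key
+assert "model.embed_tokens.weight" in state
+```
+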
 ## Model Inference
 
 Model inference covers two methods: offline batch inference and serving an OpenAI-compatible API.
 
@@ -498,7 +514,7 @@ python -m vllm.entrypoints.openai.api_server \
     --tokenizer-mode auto \
     --dtype auto \
     --trust-remote-code \
-    --api-key CPMAPI
+    --api-key FM9GAPI
 
 # Here too, note that the model being loaded must be in .bin format
 # As with offline batch inference, the on-device 2B model uses tokenizer-mode "auto"
 # dtype is the model's data type; setting it to "auto" is sufficient
@@ -511,7 +527,7 @@
 python -m vllm.entrypoints.openai.api_server \
     --model ../models/8b_sft_model/ \
     --tokenizer-mode cpm \
     --dtype auto \
-    --api-key CPMAPI
+    --api-key FM9GAPI
 
 # As with offline batch inference, the 8B SFT model uses tokenizer-mode "cpm"
@@ -530,7 +546,7 @@ INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
 # client.py
 from openai import OpenAI
 # If an API key was specified when the server was started, set the matching key here; otherwise use "EMPTY"
-openai_api_key = "CPMAPI"
+openai_api_key = "FM9GAPI"
 openai_api_base = "http://localhost:8000/v1"
 client = OpenAI(
     api_key=openai_api_key,
@@ -549,7 +565,7 @@ print("Completion result:", completion)
 from openai import OpenAI
 client = OpenAI(
     base_url="http://localhost:8000/v1",
-    api_key="CPMAPI",
+    api_key="FM9GAPI",
 )
 # For multi-turn chat, append the previous round's question and answer to this round's input, as sketched below
 completion = client.chat.completions.create(
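+# A minimal multi-turn sketch (illustrative only: the message contents are
+# assumptions, and the model name assumes vLLM's default of serving the model
+# under its --model path). Each request resends the full history, so round two
+# carries round one's question and answer before the new question:
+completion = client.chat.completions.create(
+    model="../models/8b_sft_model/",
+    messages=[
+        {"role": "user", "content": "first question"},
+        {"role": "assistant", "content": "first answer"},
+        {"role": "user", "content": "follow-up question"},
+    ],
+)
+print("Completion result:", completion)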