Compare commits

13 Commits

SHA1 |
---|
52dbf63007 |
9aeefe95d5 |
a214708255 |
25c3643a05 |
45d7c9b99d |
23d3844492 |
c89395164e |
8e693d5876 |
415c624322 |
4139ba5dfe |
a8d431c14f |
1857f60d1e |
a041469104 |
@ -0,0 +1,193 @@
|
|||
import gc
|
||||
from io import BytesIO
|
||||
|
||||
import requests
|
||||
import timm
|
||||
import torch
|
||||
import random
|
||||
import json
|
||||
from PIL import Image
|
||||
from timm.data import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
|
||||
from torchvision.transforms import InterpolationMode, transforms
|
||||
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
|
||||
|
||||
import os,sys
|
||||
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
|
||||
from vis_fm9g.generation.vllm_fm9g import VLLMFM9GBeamSearch
|
||||
from vis_fm9g.model.fm9g import FM9GConfig, FM9GTorch
|
||||
from vis_fm9g.model.vlu_fm9g import VLU_FM9G
|
||||
from vis_fm9g.tokenizer.fm9g_tokenizer import FM9GTokenizer
|
||||
from vis_fm9g.utils.constants import SYSTEM
|
||||
|
||||
def random_selection_device(device_num):
|
||||
device_list = [f"cuda:{i}" for i in range(device_num)]
|
||||
if len(device_list) == 1:
|
||||
return [device_list[0]] * 4
|
||||
elif len(device_list) == 2:
|
||||
a, b = device_list
|
||||
return [a, b, a, b]
|
||||
else:
|
||||
selected = random.sample(device_list, 3)
|
||||
repeated = random.choice(selected)
|
||||
return selected + [repeated]
|
||||
|
||||
def has_pt_file(directory):
|
||||
pt_files = []
|
||||
files = os.listdir(directory)
|
||||
for file in files:
|
||||
if file.endswith(".pt"):
|
||||
full_path = os.path.join(directory, file)
|
||||
pt_files.append(full_path)
|
||||
return pt_files
|
||||
|
||||
def load_checkpoint(model, pretrained_model_name_or_path, device_num=4):
|
||||
pt_files = has_pt_file(pretrained_model_name_or_path)
|
||||
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||
if pt_files:
|
||||
device_map = {}
|
||||
state_dict = model.state_dict()
|
||||
layer_names = list(state_dict.keys())
|
||||
device_list = random_selection_device(device_num=device_num)
|
||||
for i, layer_name in enumerate(layer_names):
|
||||
if "vpm" in layer_name: # 假设 vpm 参数包含 "vpm" 前缀
|
||||
device_map[layer_name] = device_list[0]
|
||||
elif "llm" in layer_name: # 假设 llm 参数包含 "llm" 前缀
|
||||
device_map[layer_name] = device_list[1]
|
||||
else:
|
||||
device_map[layer_name] = device_list[2]
|
||||
model = load_checkpoint_and_dispatch(model, pt_files[0], device_map=device_map)
|
||||
model.to(device)
|
||||
return model
|
||||
else:
|
||||
model_checkpoint = os.path.join(pretrained_model_name_or_path, "sharded")
|
||||
index_file = os.path.join(pretrained_model_name_or_path, "sharded", "model.safetensors.index.json")
|
||||
with open(index_file, "r") as f:
|
||||
index_data = json.load(f)
|
||||
weight_map = index_data["weight_map"]
|
||||
all_params = {name for name, _ in model.named_parameters()}
|
||||
mapped_params = set(weight_map.keys())
|
||||
unmapped_params = all_params - mapped_params
|
||||
# 解析 weight_map,设置 device_map
|
||||
# TODO: force the different branches onto different GPUs to avoid compute issues; this is ugly and needs further optimization
|
||||
device_list = random_selection_device(device_num=device_num)
|
||||
device_map = {}
|
||||
for param_name, weight_file in weight_map.items():
|
||||
if "vpm" in param_name: # 假设 vpm 参数包含 "vpm" 前缀
|
||||
device_map[param_name] = device_list[0]
|
||||
elif "llm" in param_name: # 假设 llm 参数包含 "llm" 前缀
|
||||
device_map[param_name] = device_list[1]
|
||||
else:
|
||||
device_map[param_name] = device_list[2]
|
||||
for param_name in unmapped_params:
|
||||
device_map[param_name] = device_list[3] # 随机选择设备
|
||||
model = load_checkpoint_and_dispatch(
|
||||
model,
|
||||
model_checkpoint,
|
||||
device_map=device_map,
|
||||
).to(device)
|
||||
return model
|
||||
|
||||
def chat(model, image, question, context, tokenizer, query_nums=64, vision_hidden_states=None, max_length=1024):
|
||||
if not context:
|
||||
question = tokenizer.im_start + tokenizer.unk_token * query_nums + tokenizer.im_end + question
|
||||
final_input = f'{SYSTEM}<用户>{question}<AI>'
|
||||
else:
|
||||
final_input = f'{context}<用户>{question}<AI>'
|
||||
|
||||
data_list = [
|
||||
{'input': final_input}
|
||||
]
|
||||
|
||||
res, vision_hidden_states = model.generate(
|
||||
data_list=data_list,
|
||||
max_inp_length=2048,
|
||||
beam_size=3,
|
||||
img_list=[[image]],
|
||||
max_length=max_length,
|
||||
repetition_penalty=1.1,
|
||||
temperature=0.7,
|
||||
length_penalty=3,
|
||||
return_vision_hidden_states=True
|
||||
)
|
||||
|
||||
answer = res[0]
|
||||
|
||||
context = final_input + answer
|
||||
return answer, context, vision_hidden_states
|
||||
|
||||
|
||||
def load_llm(llm_path):
|
||||
config = FM9GConfig.from_json_file(llm_path)
|
||||
config.use_flash_attn = False
|
||||
cpm_model = FM9GTorch(config)
|
||||
return cpm_model
|
||||
|
||||
|
||||
def load_vpm(vision_encoder, drop_vision_last_layer=False):
|
||||
model = timm.create_model(
|
||||
vision_encoder,
|
||||
pretrained=False,
|
||||
num_classes=0,
|
||||
dynamic_img_size=True,
|
||||
dynamic_img_pad=True
|
||||
)
|
||||
|
||||
if isinstance(model, timm.models.VisionTransformer):
|
||||
if model.attn_pool is not None:
|
||||
model.attn_pool = torch.nn.Identity()
|
||||
|
||||
if drop_vision_last_layer:
|
||||
model.blocks[-1] = torch.nn.Identity()
|
||||
|
||||
return model
|
||||
|
||||
|
||||
def load_vis_fm9g(llm_path, vision_encoder):
|
||||
llm = load_llm(llm_path)
|
||||
vpm = load_vpm(vision_encoder, drop_vision_last_layer=False)
|
||||
|
||||
vision_dim = vpm.embed_dim
|
||||
model = VLU_FM9G(llm, vpm, vision_dim, query_num=256)
|
||||
return model
|
||||
|
||||
|
||||
def load_tokenizer(vocabs_path):
|
||||
return FM9GTokenizer(vocabs_path)
|
||||
|
||||
|
||||
def load_transform(img_size):
|
||||
transform = transforms.Compose([
|
||||
transforms.Resize((img_size, img_size), interpolation=InterpolationMode.BICUBIC),
|
||||
transforms.ToTensor(),
|
||||
transforms.Normalize(mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD)
|
||||
])
|
||||
|
||||
return transform
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
root = "checkpoint/"
|
||||
llm_path = root + 'config.json'
|
||||
vocabs_path = root + 'vocabs.txt'
|
||||
vision_encoder = 'eva02_enormous_patch14_clip_224.laion2b_plus'
|
||||
img_size = 448
|
||||
device_num = 4
|
||||
|
||||
with init_empty_weights():
|
||||
model = load_vis_fm9g(llm_path, vision_encoder)
|
||||
model = load_checkpoint(model, root, device_num=device_num)
|
||||
model.eval()
|
||||
|
||||
tokenizer = load_tokenizer(vocabs_path)
|
||||
transform = load_transform(img_size)
|
||||
beam_search = VLLMFM9GBeamSearch(model, tokenizer, transform)
|
||||
# 图像输入
|
||||
url = 'test.jpg'
|
||||
image = Image.open(url).convert('RGB')
|
||||
# 文本输入
|
||||
prompt = '这幅图描述了什么?'
|
||||
answer, context, _ = chat(
|
||||
beam_search, image, prompt, context=None, tokenizer=tokenizer, query_nums=256
|
||||
)
|
||||
|
||||
print(answer)
|
|
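The `chat` helper above returns the updated `context` together with the answer, so a follow-up turn only needs to pass that context back in. A minimal sketch of a second turn, reusing the `beam_search`, `image`, `tokenizer`, and `context` objects from the `__main__` block above (the follow-up question string is illustrative):

```python
# Hypothetical follow-up turn; the image placeholder is only injected on the first turn,
# because chat() skips it whenever a non-empty context is supplied.
follow_up = '图中有哪些物体?'  # illustrative question
answer2, context, _ = chat(
    beam_search, image, follow_up,
    context=context,        # accumulated dialogue returned by the first call
    tokenizer=tokenizer,
    query_nums=256,
)
print(answer2)
```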
@ -0,0 +1,44 @@
|
|||
[
|
||||
{
|
||||
"id": "000000052347",
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image>\nPlease generate detailed descriptions of the given image."
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "The image depicts a tranquil rural scene. In the foreground, there are two cows. The cow on the left is white and is standing, while the cow on the right is brown and is lying down. Both cows appear to be resting or grazing. The background of the image shows a vast landscape with rolling hills and a clear sky. The overall atmosphere of the image is peaceful and serene, capturing a moment of calm in the countryside."
|
||||
}
|
||||
],
|
||||
"image": "00000/000000052347.jpg"
|
||||
},
|
||||
{
|
||||
"id": "000000382625",
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image>\nPlease generate detailed descriptions of the given image."
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "The image depicts a group of individuals, likely in a celebratory or formal setting. In the foreground, there is a table covered with a blue tablecloth, upon which sit three cakes. The cakes are decorated with various designs, including what appears to be a flag and other symbols, suggesting a theme related to a specific organization or event.\n\nBehind the table, there are six people standing, arranged in two rows. The front row consists of three individuals, two of whom are wearing chef's hats, indicating they may be chefs or cooks. The third person in the front row is wearing a military uniform, suggesting a connection to the military.\n\nThe back row features three more individuals, two of whom are also in military uniforms, while the third is wearing a tie, which could imply a formal or administrative role. All individuals are facing the camera and appear to be posing for the photograph.\n\nThe setting appears to be an indoor space, possibly a dining area or a kitchen, given the presence of the table and the attire of the individuals. The lighting is bright, and the overall atmosphere seems to be one of celebration or recognition.\n\nThere are no visible texts or logos that provide specific information about the event or the individuals. The style of the image is a standard, candid photograph capturing a moment during an event."
|
||||
}
|
||||
],
|
||||
"image": "00000/000000382625.jpg"
|
||||
},
|
||||
{
|
||||
"id": "000000559875",
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image>\nPlease generate detailed descriptions of the given image."
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "In the image, a young girl is the main subject. She is dressed in a white tennis outfit, complete with a matching white visor. In her hands, she holds a tennis racket, which is predominantly yellow and black. The racket is adorned with the word \"HEAD\" in bold orange letters.\n\nThe girl is standing on a concrete surface, which suggests an outdoor setting. Behind her, there's a white wall, on which a black and white poster is hanging. The poster features a tennis player, further emphasizing the theme of the image.\n\nThe girl is also holding a small trophy in her left hand. The trophy is gold in color, adding a touch of elegance to the scene. The girl's smile suggests she is proud of her achievement.\n\nOverall, the image captures a moment of joy and accomplishment in the world of tennis."
|
||||
}
|
||||
],
|
||||
"image": "00000/000000559875.jpg"
|
||||
}
|
||||
]
|
[Three image files added (46 KiB, 52 KiB, 30 KiB)]
|
@ -0,0 +1,44 @@
|
|||
[
|
||||
{
|
||||
"id": "000000052347",
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image>\nPlease generate detailed descriptions of the given image."
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "The image depicts a tranquil rural scene. In the foreground, there are two cows. The cow on the left is white and is standing, while the cow on the right is brown and is lying down. Both cows appear to be resting or grazing. The background of the image shows a vast landscape with rolling hills and a clear sky. The overall atmosphere of the image is peaceful and serene, capturing a moment of calm in the countryside."
|
||||
}
|
||||
],
|
||||
"image": "00000/000000052347.jpg"
|
||||
},
|
||||
{
|
||||
"id": "000000382625",
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image>\nPlease generate detailed descriptions of the given image."
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "The image depicts a group of individuals, likely in a celebratory or formal setting. In the foreground, there is a table covered with a blue tablecloth, upon which sit three cakes. The cakes are decorated with various designs, including what appears to be a flag and other symbols, suggesting a theme related to a specific organization or event.\n\nBehind the table, there are six people standing, arranged in two rows. The front row consists of three individuals, two of whom are wearing chef's hats, indicating they may be chefs or cooks. The third person in the front row is wearing a military uniform, suggesting a connection to the military.\n\nThe back row features three more individuals, two of whom are also in military uniforms, while the third is wearing a tie, which could imply a formal or administrative role. All individuals are facing the camera and appear to be posing for the photograph.\n\nThe setting appears to be an indoor space, possibly a dining area or a kitchen, given the presence of the table and the attire of the individuals. The lighting is bright, and the overall atmosphere seems to be one of celebration or recognition.\n\nThere are no visible texts or logos that provide specific information about the event or the individuals. The style of the image is a standard, candid photograph capturing a moment during an event."
|
||||
}
|
||||
],
|
||||
"image": "00000/000000382625.jpg"
|
||||
},
|
||||
{
|
||||
"id": "000000559875",
|
||||
"conversations": [
|
||||
{
|
||||
"from": "human",
|
||||
"value": "<image>\nPlease generate detailed descriptions of the given image."
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": "In the image, a young girl is the main subject. She is dressed in a white tennis outfit, complete with a matching white visor. In her hands, she holds a tennis racket, which is predominantly yellow and black. The racket is adorned with the word \"HEAD\" in bold orange letters.\n\nThe girl is standing on a concrete surface, which suggests an outdoor setting. Behind her, there's a white wall, on which a black and white poster is hanging. The poster features a tennis player, further emphasizing the theme of the image.\n\nThe girl is also holding a small trophy in her left hand. The trophy is gold in color, adding a touch of elegance to the scene. The girl's smile suggests she is proud of her achievement.\n\nOverall, the image captures a moment of joy and accomplishment in the world of tennis."
|
||||
}
|
||||
],
|
||||
"image": "00000/000000559875.jpg"
|
||||
}
|
||||
]
|
[Three image files added (46 KiB, 52 KiB, 30 KiB)]
|
@ -0,0 +1,3 @@
|
|||
0
|
||||
63557
|
||||
137410
|
|
@ -0,0 +1,3 @@
|
|||
0
|
||||
63557
|
||||
137410
|
|
@ -0,0 +1,130 @@
|
|||
import os
|
||||
import json
|
||||
import base64
|
||||
|
||||
def json_to_tsv_with_base64(json_file, output_base_path):
|
||||
"""
|
||||
将 JSON 文件转换为带 Base64 编码的 TSV 文件,并生成自定义命名的 .lineidx 文件。
|
||||
|
||||
Args:
|
||||
json_file: 输入的 JSON 文件路径。
|
||||
output_base_path: 默认的输出路径,在此路径下创建文件夹。
|
||||
data_weight: 数据权重定义。
|
||||
"""
|
||||
# 获取 JSON 文件所在的目录
|
||||
json_dir = os.path.dirname(os.path.abspath(json_file))
|
||||
image_root = os.path.join(json_dir, "images") # 根目录 + "images"
|
||||
json_filename = os.path.splitext(os.path.basename(json_file))[0]
|
||||
data_name = json_filename.split(".")[0]
|
||||
|
||||
# 创建新的文件夹,命名为 JSON 文件名
|
||||
output_folder = os.path.join(output_base_path, json_filename)
|
||||
os.makedirs(output_folder, exist_ok=True)
|
||||
|
||||
# 读取 JSON 数据
|
||||
with open(json_file, 'r') as jf:
|
||||
data = json.load(jf)
|
||||
|
||||
# 获取数据条目数
|
||||
num_entries = len(data)
|
||||
|
||||
# 构建 TSV 和 .lineidx 文件路径
|
||||
tsv_file = os.path.join(output_folder, f"{json_filename}-{num_entries}.tsv")
|
||||
lineidx_file = os.path.join(output_folder, f"{json_filename}-{num_entries}.lineidx")
|
||||
|
||||
# 写入 TSV 文件
|
||||
with open(tsv_file, 'w') as tsv_out:
|
||||
for item in data:
|
||||
# 提取信息
|
||||
item_id = item["id"]
|
||||
image_id = item["image"]
|
||||
# 图像路径调整为 JSON 文件所在目录的 "images" 子目录
|
||||
image_path = os.path.join(image_root, image_id)
|
||||
if not os.path.exists(image_path):
|
||||
raise FileNotFoundError(f"Image not found: {image_path}")
|
||||
|
||||
# 图像编码为 Base64
|
||||
with open(image_path, "rb") as img_file:
|
||||
image_base64 = base64.b64encode(img_file.read()).decode('utf-8')
|
||||
|
||||
# 保留原始 conversations 格式并编码为 Base64
|
||||
conversations_json = json.dumps(item["conversations"], ensure_ascii=False)
|
||||
conversations_base64 = base64.b64encode(conversations_json.encode('utf-8')).decode('utf-8')
|
||||
|
||||
# 写入 TSV
|
||||
line = f"{data_name}\t{image_base64}\t{conversations_base64}\t{image_root}\t{image_root}\t{item_id}\t{image_id}\n"
|
||||
tsv_out.write(line)
|
||||
|
||||
# 生成自定义命名的 .lineidx 文件
|
||||
create_lineidx(tsv_file, lineidx_file)
|
||||
|
||||
print(f"TSV 文件生成完成: {tsv_file}")
|
||||
print(f"索引文件生成完成: {lineidx_file}")
|
||||
|
||||
return output_folder
|
||||
|
||||
def create_lineidx(filein, idxout):
|
||||
"""
|
||||
根据 TSV 文件生成自定义命名的 .lineidx 索引文件。
|
||||
|
||||
Args:
|
||||
filein: 输入的 TSV 文件路径。
|
||||
idxout: 输出的索引文件路径。
|
||||
"""
|
||||
idxout_tmp = idxout + '.tmp'
|
||||
with open(filein, 'r') as tsvin, open(idxout_tmp, 'w') as tsvout:
|
||||
fsize = os.fstat(tsvin.fileno()).st_size
|
||||
fpos = 0
|
||||
while fpos != fsize:
|
||||
tsvout.write(str(fpos) + "\n")
|
||||
tsvin.readline()
|
||||
fpos = tsvin.tell()
|
||||
os.rename(idxout_tmp, idxout)
|
||||
|
||||
def process_multiple_json(json_files, output_base_path, dataset_weights, train_data=True):
|
||||
"""
|
||||
Process a list of JSON files, converting each to TSV and creating a data.json file with weights.
|
||||
|
||||
Args:
|
||||
json_files: List of paths to JSON files.
|
||||
output_base_path: Base directory to store TSV files.
|
||||
dataset_weights: Dictionary containing the weight for each dataset.
|
||||
"""
|
||||
data_sources = []
|
||||
|
||||
for json_file in json_files:
|
||||
# Get the weight from the dataset_weights dictionary
|
||||
json_filename = os.path.splitext(os.path.basename(json_file))[0]
|
||||
data_weight = dataset_weights.get(json_filename, 1) # Default weight is 1
|
||||
|
||||
# Convert JSON to TSV and get the path
|
||||
tsv_path = json_to_tsv_with_base64(json_file, output_base_path)
|
||||
|
||||
# Append to the data_sources list
|
||||
data_sources.append({
|
||||
"data_source_name": tsv_path,
|
||||
"data_source_weight": data_weight
|
||||
})
|
||||
|
||||
# 保存数据json文件,默认存储到vis_fm9g/config/data下,用于数据索引
|
||||
if train_data:
|
||||
data_json_path = os.path.join("vis_fm9g/config/data", "data.json")
|
||||
else:
|
||||
data_json_path = os.path.join("vis_fm9g/config/data", "eval_data.json")
|
||||
with open(data_json_path, 'w') as f:
|
||||
json.dump(data_sources, f, ensure_ascii=False, indent=4)
|
||||
|
||||
print(f"data.json file generated: {data_json_path}")
|
||||
|
||||
## Example usage ##
|
||||
if __name__ == "__main__":
|
||||
# Input JSON file paths and their data weights; if no weight is set, it defaults to 1
|
||||
json_files = ["dataset/data_1/data_1.json", "dataset/data_2/data_2.json"]
|
||||
# The JSON files are converted to TSV format; modify tsv_file_path to change where the TSV files are stored
|
||||
tsv_file_path = "dataset/train"
|
||||
# Set the data weights
|
||||
dataset_weights = {
|
||||
"data": 1,
|
||||
"data_2": 2
|
||||
}
|
||||
process_multiple_json(json_files, tsv_file_path, dataset_weights, train_data=True)
|
|
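Each TSV row written by `json_to_tsv_with_base64` carries seven tab-separated fields: the dataset name, the Base64 image, the Base64 conversations JSON, two copies of the image root, the item id, and the image id. A minimal sketch of decoding one row back (the file path is illustrative; the real name follows the `<json_filename>-<num_entries>.tsv` pattern):

```python
import base64
import json

# Illustrative path produced by the example in __main__ above (3 entries in data_1.json).
with open("dataset/train/data_1/data_1-3.tsv") as f:
    fields = f.readline().rstrip("\n").split("\t")

data_name, img_b64, conv_b64, img_root, _, item_id, image_id = fields
conversations = json.loads(base64.b64decode(conv_b64).decode("utf-8"))
image_bytes = base64.b64decode(img_b64)  # raw bytes of the original image file
print(data_name, item_id, image_id, conversations[0]["from"])
```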
@ -0,0 +1,12 @@
|
|||
## Environment Setup ##
|
||||
cd FM9G-V
|
||||
pip install -r requirements.txt
|
||||
|
||||
## Demo Usage ##
|
||||
python chat.py
|
||||
|
||||
## Data Preparation ##
|
||||
python json2tsv.py
|
||||
|
||||
## Training ##
|
||||
bash run_train.sh
|
|
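## Multi-node training ##
run_train.sh reads WORLD_SIZE and MASTER_PORT from the environment and takes `slave <master_ip>` as positional arguments on worker nodes; a sketch of a two-node launch (the IP address below is illustrative):
# on the master node
WORLD_SIZE=2 bash run_train.sh
# on each worker node
WORLD_SIZE=2 bash run_train.sh slave 192.168.1.10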
@ -0,0 +1,23 @@
|
|||
torch==2.0.1
|
||||
torchvision==0.15.2
|
||||
transformers==4.31.0
|
||||
tokenizers>=0.12.1,<0.14
|
||||
sentencepiece==0.1.99
|
||||
shortuuid
|
||||
peft==0.4.0
|
||||
bitsandbytes==0.41.0
|
||||
pydantic<2,>=1
|
||||
markdown2[all]
|
||||
numpy==1.23.5
|
||||
scikit-learn==1.2.2
|
||||
gradio==3.35.2
|
||||
gradio_client==0.2.9
|
||||
requests
|
||||
httpx==0.24.0
|
||||
uvicorn
|
||||
fastapi
|
||||
einops==0.6.1
|
||||
einops-exts==0.0.4
|
||||
timm==0.9.8
|
||||
deepspeed==0.11.1
|
||||
flash-attn==2.3.3
|
|
@ -0,0 +1,69 @@
|
|||
#!/bin/bash
|
||||
SELF_DIR=$(cd "$(dirname "$0")" || exit 1; pwd)
|
||||
export PYTHONPATH=$(dirname $SELF_DIR):$PYTHONPATH
|
||||
echo Working Directory at `pwd`
|
||||
echo Bash at `which bash`
|
||||
echo Python at `which python`
|
||||
|
||||
nvidia-smi
|
||||
|
||||
slave_or_master=$1
|
||||
|
||||
# 支持单机多卡和多机多卡
|
||||
GPUS_PER_NODE=8
|
||||
WORLD_SIZE=${WORLD_SIZE:-1}
|
||||
RANK=${RANK:-0}
|
||||
MASTER_ADDR=${MASTER_ADDR:-`hostname`}
|
||||
MASTER_PORT=${MASTER_PORT:-12345}
|
||||
|
||||
rdzv_endpoint=$MASTER_ADDR:$MASTER_PORT
|
||||
|
||||
if [[ $slave_or_master == "slave" ]]; then
|
||||
rdzv_endpoint=$2:$MASTER_PORT
|
||||
fi
|
||||
|
||||
TRAIN_FILE=vis_fm9g/config/data/data.json
|
||||
EVAL_FILE=vis_fm9g/config/data/eval_data.json
|
||||
LLM_PATH=vis_fm9g/config/model/fm9g-7b.json
|
||||
VOCAB_PATH=vis_fm9g/config/vocab/fm9g.txt
|
||||
MODEL_CHECKPOINT=checkpoint/sharded
|
||||
|
||||
DEEPSPEED_CONFIG=vis_fm9g/config/deepspeed/sft.json
|
||||
EXPORT_DIR=./saved
|
||||
|
||||
echo WORLD_SIZE=$WORLD_SIZE RANK=$RANK rdzv_endpoint=$rdzv_endpoint
|
||||
|
||||
# --------------- 运行参数 ---------------
|
||||
OPTS=""
|
||||
OPTS+=" --self_dir ${SELF_DIR}"
|
||||
OPTS+=" --train_file ${TRAIN_FILE}"
|
||||
OPTS+=" --eval_file ${EVAL_FILE}"
|
||||
OPTS+=" --llm_path ${LLM_PATH}"
|
||||
OPTS+=" --vocabs_path ${VOCAB_PATH}"
|
||||
OPTS+=" --model_checkpoint ${MODEL_CHECKPOINT}"
|
||||
OPTS+=" --deepspeed_config ${DEEPSPEED_CONFIG}"
|
||||
OPTS+=" --save_deepspeed"
|
||||
OPTS+=" --export_dir ${EXPORT_DIR}"
|
||||
OPTS+=" --exp_name vis-fm9g-train-eval"
|
||||
OPTS+=" --flash cuda"
|
||||
|
||||
OPTS+=" --max_len 500"
|
||||
OPTS+=" --batch_size 1"
|
||||
OPTS+=" --save_step 1000"
|
||||
OPTS+=" --epochs 1"
|
||||
OPTS+=" --query_num 256"
|
||||
OPTS+=" --vision_encoder eva02_enormous_patch14_clip_224.laion2b_plus"
|
||||
OPTS+=" --tune_resampler"
|
||||
OPTS+=" --tune_vision"
|
||||
OPTS+=" --tune_llm"
|
||||
OPTS+=" --eval"
|
||||
|
||||
RUNNER="torchrun --nnodes=${WORLD_SIZE} --nproc_per_node=${GPUS_PER_NODE} --rdzv_id=1 --rdzv_backend=c10d --rdzv_endpoint=${rdzv_endpoint}"
|
||||
|
||||
CMD="$RUNNER ./vis_fm9g/train/train_vis_fm9g.py ${OPTS}"
|
||||
echo "-------final CMD is------"
|
||||
echo "${CMD}"
|
||||
echo "-------final CMD end------"
|
||||
|
||||
# 执行 CMD
|
||||
eval "${CMD}"
|
[One image file added (128 KiB)]
|
@ -0,0 +1,115 @@
|
|||
""" Init a logger with options from env variables.
|
||||
|
||||
- set log level by ``LOG_LEVEL``, default: ``INFO``;
|
||||
- output log message to file by ``LOG_FILE``, default: output to stdout.
|
||||
|
||||
TODO:
|
||||
support setting log level and log file from config file.
|
||||
"""
|
||||
import logging
|
||||
import os
|
||||
|
||||
_LOG_FMT = "[%(asctime)s][%(levelname).1s][%(process)d-%(name)s-%(filename)s:%(lineno)s]- %(message)s"
|
||||
_DATE_FMT = "%Y-%m-%d,%H:%M:%S"
|
||||
|
||||
_logging_level = {
|
||||
"CRITICAL": logging.CRITICAL,
|
||||
"ERROR": logging.ERROR,
|
||||
"WARNING": logging.WARNING,
|
||||
"INFO": logging.INFO,
|
||||
# Distributed Level, print log in main proc only by default, set this level to print all messages.
|
||||
"DP": logging.INFO,
|
||||
"DEBUG": logging.DEBUG,
|
||||
None: logging.INFO,
|
||||
}
|
||||
|
||||
_level = os.environ.get("LOG_LEVEL", "INFO").upper()
|
||||
|
||||
|
||||
class ShortNameFormatter(logging.Formatter):
|
||||
def format(self, record: logging.LogRecord):
|
||||
raw = record.name # save and restore for other formatters if desired
|
||||
parts = raw.split(".")
|
||||
record.name = ".".join(p[:3] for p in parts) if len(parts) > 1 else raw # keep first char for module name.
|
||||
result = super().format(record)
|
||||
record.name = raw
|
||||
return result
|
||||
|
||||
|
||||
class StyleAdapter(logging.LoggerAdapter):
|
||||
def __init__(self, logger, extra=None, style="default"):
|
||||
super().__init__(logger, extra or {})
|
||||
self._style = style
|
||||
self._enable = self._enable()
|
||||
|
||||
@classmethod
|
||||
def _enable(cls):
|
||||
# Note: to make this Logger more standalone, perform basic check without extra deps, e.g. tf/torch et al.
|
||||
worker = os.getenv("WORKER")
|
||||
rank = os.getenv("RANK")
|
||||
# not in DP/DDP mode or proc_id = "0"
|
||||
is_main = (not worker and not rank) or (worker == "0" or rank == "0")
|
||||
is_jeeves_job = os.getenv("JEEVES_JOB_ID")
|
||||
return _level in ["DEBUG", "DP"] or is_jeeves_job or is_main
|
||||
|
||||
def _format(self, *msgs, color: str = None):
|
||||
if self._style == "legacy":
|
||||
if len(msgs) == 1:
|
||||
msg_str = msgs[0]
|
||||
else:
|
||||
msg_str = msgs[0] % msgs[1:]
|
||||
else:
|
||||
msg_str = ", ".join([str(msg) for msg in msgs])
|
||||
|
||||
if color:
|
||||
pass
|
||||
return msg_str
|
||||
|
||||
def log(self, level, msg, *args, **kwargs):
|
||||
color = kwargs.pop("color", None)
|
||||
if self.isEnabledFor(level) and self._enable:
|
||||
msg, kwargs = self.process(msg, kwargs)
|
||||
msg_str = self._format(msg, *args, color=color)
|
||||
# noinspection PyProtectedMember
|
||||
self.logger._log(level, msg_str, (), **kwargs)
|
||||
|
||||
|
||||
def init_logger(name="ai", filename=os.environ.get("LOG_FILE", ""), fmt=_LOG_FMT, level=_level, style="legacy"):
|
||||
"""init logger
|
||||
|
||||
Args:
|
||||
name(str): optional, default: ai.
|
||||
filename(str): optional, default: "". Output log to file if specified, by default is set by env `LOG_FILE`.
|
||||
fmt(str): optional, default: _LOG_FMT
|
||||
level(str): optional, default: INFO
|
||||
style(str): optional, choice from ["print", "legacy"]
|
||||
- legacy: take first argument as a formatter, the remaining positional arguments as message values.
|
||||
this is consistent with the constraint of `logging` pkg
|
||||
- print: all positional arguments are message values which will be concatenated with ", "
|
||||
|
||||
Returns:
|
||||
a logger instance
|
||||
|
||||
Examples:
|
||||
>>> log = init_logger("log2stdout", level="INFO")
|
||||
>>> log.error("info")
|
||||
"""
|
||||
logger = logging.getLogger(name)
|
||||
logger.setLevel(_logging_level[level])
|
||||
if fmt:
|
||||
# formatter = logging.Formatter(fmt, datefmt=_DATE_FMT)
|
||||
formatter = ShortNameFormatter(fmt, datefmt=_DATE_FMT)
|
||||
else:
|
||||
formatter = None
|
||||
|
||||
if not logger.hasHandlers():
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(formatter)
|
||||
logging.basicConfig(format=fmt, level=_logging_level[_level], handlers=[handler])
|
||||
|
||||
if filename:
|
||||
handler = logging.FileHandler(filename)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
return StyleAdapter(logger, style=style)
|
|
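As the module docstring notes, the log level and output file are read from the `LOG_LEVEL` and `LOG_FILE` environment variables at import time. A minimal usage sketch, assuming the module lives at `utils/logger.py` as the import in itembuilder.py below suggests (paths and level are illustrative):

```python
import os

# Illustrative: the env vars must be set before the module is imported,
# since _level and the default LOG_FILE are read at import time.
os.environ["LOG_LEVEL"] = "DEBUG"
os.environ["LOG_FILE"] = "/tmp/train.log"

from utils.logger import init_logger  # assumed import path

log = init_logger("demo")
log.info("loaded %d samples", 1024)  # legacy style: printf-like formatting
```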
@ -0,0 +1,125 @@
|
|||
# --------------------------------------------------------
|
||||
# BEiT v2: Masked Image Modeling with Vector-Quantized Visual Tokenizers (https://arxiv.org/abs/2208.06366)
|
||||
# Github source: https://github.com/microsoft/unilm/tree/master/beitv2
|
||||
# Copyright (c) 2022 Microsoft
|
||||
# Licensed under The MIT License [see LICENSE for details]
|
||||
# By Zhiliang Peng
|
||||
# Based on BEiT, timm, DeiT and DINO code bases
|
||||
# https://github.com/microsoft/unilm/tree/master/beit
|
||||
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
||||
# https://github.com/facebookresearch/deit/
|
||||
# https://github.com/facebookresearch/dino
|
||||
# --------------------------------------------------------'
|
||||
import pickle
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
|
||||
import torch.nn as nn
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def weights_init(m):
|
||||
classname = m.__class__.__name__
|
||||
if classname.find('Conv') != -1:
|
||||
nn.init.normal_(m.weight.data, 0.0, 0.02)
|
||||
elif classname.find('BatchNorm') != -1:
|
||||
nn.init.normal_(m.weight.data, 1.0, 0.02)
|
||||
nn.init.constant_(m.bias.data, 0)
|
||||
|
||||
|
||||
def plot_images(images: dict):
|
||||
x = images["input"]
|
||||
reconstruction = images["rec"]
|
||||
half_sample = images["half_sample"]
|
||||
new_sample = images["new_sample"]
|
||||
|
||||
fig, axarr = plt.subplots(1, 4)
|
||||
axarr[0].imshow(x.cpu().detach().numpy()[0].transpose(1, 2, 0))
|
||||
axarr[1].imshow(reconstruction.cpu().detach().numpy()[0].transpose(1, 2, 0))
|
||||
axarr[2].imshow(half_sample.cpu().detach().numpy()[0].transpose(1, 2, 0))
|
||||
axarr[3].imshow(new_sample.cpu().detach().numpy()[0].transpose(1, 2, 0))
|
||||
plt.show()
|
||||
|
||||
|
||||
def get_model(model):
|
||||
if isinstance(model, torch.nn.DataParallel) \
|
||||
or isinstance(model, torch.nn.parallel.DistributedDataParallel):
|
||||
return model.module
|
||||
else:
|
||||
return model
|
||||
|
||||
|
||||
def is_dist_avail_and_initialized():
|
||||
if not dist.is_available():
|
||||
return False
|
||||
if not dist.is_initialized():
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def get_world_size():
|
||||
if not is_dist_avail_and_initialized():
|
||||
return 1
|
||||
return dist.get_world_size()
|
||||
|
||||
|
||||
def get_rank():
|
||||
if not is_dist_avail_and_initialized():
|
||||
return 0
|
||||
return dist.get_rank()
|
||||
|
||||
|
||||
def is_main_process():
|
||||
return get_rank() == 0
|
||||
|
||||
|
||||
def save_on_master(*args, **kwargs):
|
||||
if is_main_process():
|
||||
torch.save(*args, **kwargs)
|
||||
|
||||
|
||||
def all_gather(data):
|
||||
"""
|
||||
Run all_gather on arbitrary picklable data (not necessarily tensors)
|
||||
Args:
|
||||
data: any picklable object
|
||||
Returns:
|
||||
list[data]: list of data gathered from each rank
|
||||
"""
|
||||
world_size = get_world_size()
|
||||
if world_size == 1:
|
||||
return [data]
|
||||
|
||||
# serialized to a Tensor
|
||||
buffer = pickle.dumps(data)
|
||||
storage = torch.ByteStorage.from_buffer(buffer)
|
||||
tensor = torch.ByteTensor(storage).to("cuda")
|
||||
|
||||
# obtain Tensor size of each rank
|
||||
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
|
||||
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
|
||||
dist.all_gather(size_list, local_size)
|
||||
size_list = [int(size.item()) for size in size_list]
|
||||
max_size = max(size_list)
|
||||
|
||||
# receiving Tensor from all ranks
|
||||
# we pad the tensor because torch all_gather does not support
|
||||
# gathering tensors of different shapes
|
||||
tensor_list = []
|
||||
for _ in size_list:
|
||||
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
|
||||
if local_size != max_size:
|
||||
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
|
||||
tensor = torch.cat((tensor, padding), dim=0)
|
||||
dist.all_gather(tensor_list, tensor)
|
||||
|
||||
data_list = []
|
||||
for size, tensor in zip(size_list, tensor_list):
|
||||
buffer = tensor.cpu().numpy().tobytes()[:size]
|
||||
data_list.append(pickle.loads(buffer))
|
||||
|
||||
return data_list
|
||||
|
||||
def mean(lst):
|
||||
return sum(lst) / len(lst)
|
|
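A minimal sketch of using the `all_gather` helper above to collect per-rank Python objects; it assumes a `torch.distributed` process group is already initialized and CUDA is available, since the helper stages its buffers on `"cuda"` (the metric names are illustrative):

```python
# Illustrative: gather a small per-rank statistics dict on every rank.
local_stats = {"rank": get_rank(), "num_samples": 128}
gathered = all_gather(local_stats)  # list with one dict per rank

if is_main_process():
    total = sum(s["num_samples"] for s in gathered)
    print(f"total samples across {get_world_size()} ranks: {total}")
```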
@ -0,0 +1,10 @@
|
|||
[
|
||||
{
|
||||
"data_source_name": "dataset/train/data_1",
|
||||
"data_source_weight": 1
|
||||
},
|
||||
{
|
||||
"data_source_name": "dataset/train/data_2",
|
||||
"data_source_weight": 2
|
||||
}
|
||||
]
|
|
@ -0,0 +1,3 @@
|
|||
[
|
||||
{ "data_source_name": "pretrain_eval_eval", "data_source_weight": 1 }
|
||||
]
|
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"train_micro_batch_size_per_gpu": 16,
|
||||
"gradient_accumulation_steps": 16,
|
||||
"optimizer": {
|
||||
"type": "AdamW",
|
||||
"params": {
|
||||
"lr": 5e-5,
|
||||
"betas": [
|
||||
0.9,
|
||||
0.98
|
||||
],
|
||||
"weight_decay": 0.01
|
||||
}
|
||||
},
|
||||
"scheduler": {
|
||||
"type": "WarmupLR",
|
||||
"params": {
|
||||
"warmup_min_lr": 1e-6,
|
||||
"warmup_max_lr": 1e-5,
|
||||
"warmup_num_steps": 500
|
||||
}
|
||||
},
|
||||
"fp16": {
|
||||
"enabled": true,
|
||||
"initial_scale_power": 10,
|
||||
"auto_cast": true
|
||||
},
|
||||
"zero_optimization": {
|
||||
"stage": 2
|
||||
},
|
||||
"steps_per_print": 50,
|
||||
"gradient_clipping": 1.0
|
||||
}
|
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"train_micro_batch_size_per_gpu": 1,
|
||||
"gradient_accumulation_steps": 16,
|
||||
"optimizer": {
|
||||
"type": "AdamW",
|
||||
"params": {
|
||||
"lr": 1e-5,
|
||||
"betas": [
|
||||
0.9,
|
||||
0.98
|
||||
],
|
||||
"weight_decay": 0.01
|
||||
}
|
||||
},
|
||||
"scheduler": {
|
||||
"type": "WarmupLR",
|
||||
"params": {
|
||||
"warmup_min_lr": 1e-6,
|
||||
"warmup_max_lr": 1e-5,
|
||||
"warmup_num_steps": 500
|
||||
}
|
||||
},
|
||||
"fp16": {
|
||||
"enabled": true,
|
||||
"initial_scale_power": 10,
|
||||
"auto_cast": true
|
||||
},
|
||||
"zero_optimization": {
|
||||
"stage": 2
|
||||
},
|
||||
"steps_per_print": 50,
|
||||
"gradient_clipping": 1.0
|
||||
}
|
|
@ -3,25 +3,14 @@
|
|||
"dropout_p": 0.0,
|
||||
"eps": 1e-05,
|
||||
"half": true,
|
||||
"half_type": "bf16",
|
||||
"use_flash_attn": true,
|
||||
"flash_attn_mask_shape": "2d",
|
||||
"dim_model": 4096,
|
||||
"dim_ff": 14336,
|
||||
"dim_ff": 11008,
|
||||
"dim_head": 128,
|
||||
"num_heads": 32,
|
||||
"num_kv_heads": 32,
|
||||
"num_layers": 32,
|
||||
"activate_fn": "silu",
|
||||
"init_std": 0.10,
|
||||
"scale": false,
|
||||
"scale_emb": 12,
|
||||
"scale_depth": -1,
|
||||
"model_type": "fm9g",
|
||||
"architectures": [
|
||||
"FM9GForCausalLM"
|
||||
],
|
||||
"qk_norm": false,
|
||||
"tie_lm_head": false,
|
||||
"ffn_gated": true
|
||||
}
|
||||
"scale": false
|
||||
}
|
|
@ -119687,10 +119687,10 @@
|
|||
"𠳐"
|
||||
"𥻗"
|
||||
"𬉼"
|
||||
"<|im_start|>"
|
||||
"<|im_end|>"
|
||||
"<pad_2>"
|
||||
"<pad_3>"
|
||||
"<pad_4>"
|
||||
"<pad_5>"
|
||||
"<pad_6>"
|
||||
"<image>"
|
||||
"</image>"
|
||||
"<ref>"
|
||||
"</ref>"
|
||||
"<box>"
|
||||
"</box>"
|
||||
"<quad>"
|
|
@ -0,0 +1,347 @@
|
|||
import io
|
||||
import os
|
||||
import re
|
||||
import glob
|
||||
import math
|
||||
import json
|
||||
import base64
|
||||
import random
|
||||
import copy
|
||||
|
||||
from PIL import Image
|
||||
from typing import List
|
||||
|
||||
|
||||
class Register(dict):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Register, self).__init__(*args, **kwargs)
|
||||
self._dict = {}
|
||||
|
||||
def register(self, target):
|
||||
def add_register_item(keys, value):
|
||||
if not callable(value):
|
||||
raise Exception(
|
||||
f"Register object must be callable! But received {value} is not callable!")
|
||||
|
||||
if not isinstance(keys, list):
|
||||
keys = [keys]
|
||||
|
||||
for key in keys:
|
||||
if key in self._dict:
|
||||
print(
|
||||
f"error: \033[33m{value.__name__} has been registered before, duplicate registration is not allowed\033[0m")
|
||||
exit()
|
||||
|
||||
self[key] = value
|
||||
return value
|
||||
|
||||
if callable(target):
|
||||
return add_register_item(target.__name__, target)
|
||||
else:
|
||||
return lambda x: add_register_item(target, x)
|
||||
|
||||
def __call__(self, target):
|
||||
return self.register(target)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self._dict[key] = value
|
||||
|
||||
def __getitem__(self, key):
|
||||
# 如果 key 存在于注册的处理器中,直接返回
|
||||
if key in self._dict:
|
||||
return self._dict[key]
|
||||
else:
|
||||
# 如果 key 不存在,使用默认处理器
|
||||
return self._dict['default']
|
||||
|
||||
def __contains__(self, key):
|
||||
return key in self._dict
|
||||
|
||||
def __str__(self):
|
||||
return str(self._dict)
|
||||
|
||||
def keys(self):
|
||||
return self._dict.keys()
|
||||
|
||||
def values(self):
|
||||
return self._dict.values()
|
||||
|
||||
def items(self):
|
||||
return self._dict.items()
|
||||
|
||||
|
||||
register_data_processor = Register()
|
||||
register_data_path = Register()
|
||||
|
||||
def vqa_instruction_templates(question, idx=None):
|
||||
instructions = [
|
||||
"{Question} A short answer to the question is",
|
||||
"Given the image, answer the following question with no more than three words. {Question}",
|
||||
"Based on the image, respond to this question with a short answer: {Question} Answer:",
|
||||
"Use the provided image to answer the question: {Question} Provide your answer as short as possible:",
|
||||
]
|
||||
if idx is None:
|
||||
new_question = random.choice(
|
||||
instructions).replace("{Question}", question)
|
||||
else:
|
||||
new_question = instructions[idx].replace("{Question}", question)
|
||||
|
||||
return new_question
|
||||
|
||||
|
||||
def caption_instruction_templates():
|
||||
instructions = [
|
||||
"Describe the image concisely.",
|
||||
"Provide a brief description of the given image.",
|
||||
"Offer a succinct explanation of the picture presented.",
|
||||
"Summarize the visual content of the image.",
|
||||
"Give a short and clear explanation of the subsequent image.",
|
||||
"Share a concise interpretation of the image provided.",
|
||||
"Present a compact description of the photo's key features.",
|
||||
"Relay a brief, clear account of the picture shown.",
|
||||
"Render a clear and concise summary of the photo.",
|
||||
"Write a terse but informative summary of the picture.",
|
||||
"Create a compact narrative representing the image presented."
|
||||
]
|
||||
|
||||
new_question = random.choice(instructions)
|
||||
|
||||
return new_question
|
||||
|
||||
|
||||
def ocr_instruction_templates():
|
||||
instructions = [
|
||||
"Identify the text in the image along with its position.",
|
||||
"Pinpoint and indicate the text and its location within the image.",
|
||||
"Find the text in the image and identify its position.",
|
||||
"Detect the text within the image and specify its position.",
|
||||
"Locate the text in the image and detail its position."
|
||||
]
|
||||
|
||||
new_question = random.choice(instructions)
|
||||
|
||||
return new_question
|
||||
|
||||
|
||||
def textvqa_instruction_templates(question):
|
||||
instructions = [
|
||||
"Answer the question briefly by reading the text. {Question}",
|
||||
"After reading the text in the image, {Question} A short answer to the question is",
|
||||
"Given the text in the image, answer the following question with no more than three words. {Question}"
|
||||
]
|
||||
|
||||
new_question = random.choice(instructions).replace("{Question}", question)
|
||||
|
||||
return new_question
|
||||
|
||||
|
||||
def load_multimodal_conversation(text_b64, img_b64_buffer):
|
||||
map_role = {
|
||||
'human': 'human',
|
||||
'gpt': 'gpt'
|
||||
}
|
||||
|
||||
text = base64.b64decode(text_b64).decode('utf-8')
|
||||
list_conv = json.loads(text)
|
||||
|
||||
out: List[dict] = []
|
||||
for idx, sentence in enumerate(list_conv):
|
||||
value = sentence['value']
|
||||
|
||||
if idx == 0 and '<image>' not in value:
|
||||
value = f"<image>\n{value}"
|
||||
if idx != 0 and '<image>' in value:
|
||||
value = value.replace('<image>', '')
|
||||
|
||||
out.append({
|
||||
'from': map_role[sentence['from']],
|
||||
'value': value
|
||||
})
|
||||
|
||||
img_io = io.BytesIO(base64.b64decode(img_b64_buffer))
|
||||
img_io.seek(0)
|
||||
image = Image.open(img_io).convert('RGB')
|
||||
return image, out
|
||||
|
||||
def load_fm9g_multimodal_conversation(text_b64, img_b64_buffer):
|
||||
map_role = {
|
||||
'human': 'human',
|
||||
'gpt': 'gpt'
|
||||
}
|
||||
|
||||
text = base64.b64decode(text_b64).decode('utf-8')
|
||||
list_conv = json.loads(text)
|
||||
|
||||
out: List[dict] = []
|
||||
for idx, sentence in enumerate(list_conv):
|
||||
value = sentence['value']
|
||||
value = re.sub(r'<image>.+?</image>', '<image>', value)
|
||||
|
||||
out.append({
|
||||
'from': map_role[sentence['from']],
|
||||
'value': value.strip()
|
||||
})
|
||||
|
||||
img_io = io.BytesIO(base64.b64decode(img_b64_buffer))
|
||||
img_io.seek(0)
|
||||
image = Image.open(img_io).convert('RGB')
|
||||
return image, out
|
||||
|
||||
def load_pretrain_conversation(text_b64, img_b64_buffer):
|
||||
map_role = {
|
||||
'human': 'human',
|
||||
'gpt': 'gpt'
|
||||
}
|
||||
|
||||
text = base64.b64decode(text_b64).decode('utf-8')
|
||||
list_conv = json.loads(text)
|
||||
|
||||
out: List[dict] = []
|
||||
for idx, sentence in enumerate(list_conv):
|
||||
print(sentence)
|
||||
value = sentence['value']
|
||||
value = re.sub(r'<image>.+?</image>', '<image>', value)
|
||||
|
||||
out.append({
|
||||
'from': map_role[sentence['from']],
|
||||
'value': value.strip()
|
||||
})
|
||||
|
||||
img_io = io.BytesIO(base64.b64decode(img_b64_buffer))
|
||||
img_io.seek(0)
|
||||
image = Image.open(img_io).convert('RGB')
|
||||
return image, out
|
||||
|
||||
|
||||
def b64_to_PIL_image(img_b64_buffer):
|
||||
img_io = io.BytesIO(base64.b64decode(img_b64_buffer))
|
||||
img_io.seek(0)
|
||||
image = Image.open(img_io).convert('RGB')
|
||||
return image
|
||||
|
||||
def wrap_qa_to_single_turn_multimodal_conv(answer, question):
|
||||
if '<image>' not in question:
|
||||
question = f"<image>\n{question}"
|
||||
|
||||
out = [
|
||||
{"from": "human", "value": question},
|
||||
{"from": "gpt", "value": answer}
|
||||
]
|
||||
return question, out
|
||||
|
||||
def wrap_generation_single_turn_conv(out, template_func):
|
||||
conv = [
|
||||
{
|
||||
"from": "human",
|
||||
"value": f"<image>\n{template_func()}"
|
||||
|
||||
},
|
||||
{
|
||||
"from": "gpt",
|
||||
"value": out
|
||||
}
|
||||
]
|
||||
return conv
|
||||
|
||||
def wrap_ocr_generation_single_turn_conv(out):
|
||||
return wrap_generation_single_turn_conv(out, ocr_instruction_templates)
|
||||
|
||||
|
||||
def wrap_caption_generation_single_turn_conv(out):
|
||||
return wrap_generation_single_turn_conv(out, caption_instruction_templates)
|
||||
|
||||
|
||||
def gather_data_files_by_glob(root: str, pattern='*.tsv'):
|
||||
filenames = []
|
||||
|
||||
for fullpath in glob.glob(f'{root}/{pattern}'):
|
||||
filename = fullpath.split('/')[-1]
|
||||
filenames.append(filename)
|
||||
return root, filenames
|
||||
|
||||
@register_data_path('default')
|
||||
def default_data_path(data_dir):
|
||||
return gather_data_files_by_glob(data_dir, '*.tsv')
|
||||
|
||||
@register_data_processor('default')
|
||||
def default_data_processor(img_b64_buffer, text_b64, origin_dataset, origin_split, origin_split_inner_idx, img_path,
|
||||
intent, img_transformer=None):
|
||||
if intent == 'pretrain' or intent == 'sft':
|
||||
image, out = load_multimodal_conversation(text_b64, img_b64_buffer)
|
||||
|
||||
metainfo = {
|
||||
"origin_dataset": origin_dataset, # llava folder name
|
||||
"origin_split": origin_split, # llava parquet file name
|
||||
"origin_idx": origin_split_inner_idx, # index in llava parquet file
|
||||
"image_id": img_path, # cocoid
|
||||
}
|
||||
|
||||
return {
|
||||
'image': image,
|
||||
'conversations': out,
|
||||
'idx': origin_split_inner_idx,
|
||||
'metainfo': metainfo,
|
||||
}
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
@register_data_path('llava')
|
||||
def llava_instruct_data_path():
|
||||
data_dir = "dataset/train/default"
|
||||
return gather_data_files_by_glob(data_dir, '*.tsv')
|
||||
|
||||
|
||||
@register_data_processor('llava')
|
||||
def llava_instruct_processor(img_b64_buffer, text_b64, origin_dataset, origin_split, origin_split_inner_idx, img_path,
|
||||
intent, img_transformer=None):
|
||||
if intent == 'pretrain' or intent == 'sft':
|
||||
image, out = load_multimodal_conversation(text_b64, img_b64_buffer)
|
||||
|
||||
metainfo = {
|
||||
"origin_dataset": origin_dataset, # llava folder name
|
||||
"origin_split": origin_split, # llava parquet file name
|
||||
"origin_idx": origin_split_inner_idx, # index in llava parquet file
|
||||
"image_id": img_path, # cocoid
|
||||
}
|
||||
|
||||
return {
|
||||
'image': image,
|
||||
'conversations': out,
|
||||
'idx': origin_split_inner_idx,
|
||||
'metainfo': metainfo,
|
||||
}
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
@register_data_path('pretrain_eval_eval')
|
||||
def pretrain_eval_train_data_path():
|
||||
data_dir = "dataset/eval/default"
|
||||
return gather_data_files_by_glob(data_dir, '*.tsv')
|
||||
|
||||
@register_data_processor('pretrain_eval_eval')
|
||||
def unimmchat_processor(img_b64_buffer, text_b64, origin_dataset, origin_split, origin_split_inner_idx, img_path,
|
||||
intent, img_transformer=None):
|
||||
if intent == 'pretrain' or intent == 'sft' or intent == 'eval':
|
||||
if img_b64_buffer == '<no_image>':
|
||||
image = '<no_image>'
|
||||
out = base64.b64decode(text_b64).decode('utf-8')
|
||||
out = json.loads(out)
|
||||
else:
|
||||
image, out = load_multimodal_conversation(text_b64, img_b64_buffer)
|
||||
|
||||
metainfo = {
|
||||
"origin_dataset": origin_dataset, # unimm-chat folder name
|
||||
"origin_split": origin_split, # unimm-chat parquet file name
|
||||
"origin_idx": origin_split_inner_idx, # index in unimm-chat parquet file
|
||||
"image_id": img_path, # cocoid
|
||||
}
|
||||
|
||||
return {
|
||||
'image': image,
|
||||
'conversations': out,
|
||||
'idx': origin_split_inner_idx,
|
||||
'metainfo': metainfo,
|
||||
}
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
|
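New datasets hook into the pipeline through the two registries above: `register_data_path` returns the directory and TSV file list, and `register_data_processor` turns one TSV row into an image/conversation dict. A sketch of registering a hypothetical `my_caption` dataset, modelled on the `llava` entries above (the key and directory are assumptions):

```python
@register_data_path('my_caption')
def my_caption_data_path():
    # Hypothetical directory holding the TSV files produced by json2tsv.py.
    return gather_data_files_by_glob("dataset/train/my_caption", '*.tsv')


@register_data_processor('my_caption')
def my_caption_processor(img_b64_buffer, text_b64, origin_dataset, origin_split,
                         origin_split_inner_idx, img_path, intent, img_transformer=None):
    # Reuse the shared conversation loader; only the metadata differs per dataset.
    image, out = load_multimodal_conversation(text_b64, img_b64_buffer)
    return {
        'image': image,
        'conversations': out,
        'idx': origin_split_inner_idx,
        'metainfo': {"origin_dataset": origin_dataset, "image_id": img_path},
    }
```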
@ -0,0 +1,227 @@
|
|||
import io
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
|
||||
import numpy
|
||||
import base64
|
||||
|
||||
import os.path as op
|
||||
import torch.utils.data as torch_data
|
||||
|
||||
from PIL import Image
|
||||
from typing import List, Iterator
|
||||
from vis_fm9g.dataset.tsv_file import TSVFile
|
||||
from vis_fm9g.dataset.data import register_data_processor
|
||||
from vis_fm9g.dataset.itembuilder import ItemBuilder
|
||||
|
||||
logger = logging.getLogger(__file__)
|
||||
|
||||
class MultimodalQADataset(torch_data.Dataset):
|
||||
def __init__(self, qa_file, question_process):
|
||||
'''
|
||||
qa_file: jsonl file that each line is a dict like {
|
||||
'image': b64img,
|
||||
'question': question_text
|
||||
}
|
||||
'''
|
||||
super().__init__()
|
||||
|
||||
self.qa_file = qa_file
|
||||
self.qa_data = [json.loads(line) for line in open(self.qa_file)]
|
||||
if isinstance(self.qa_data[0], list):
|
||||
self.qa_data = self.qa_data[0] # unwrap one-line json question file
|
||||
|
||||
self.question_process = question_process
|
||||
|
||||
def __getitem__(self, index):
|
||||
item = self.qa_data[index]
|
||||
|
||||
img_b64 = item['image']
|
||||
image = Image.open(io.BytesIO(base64.b64decode(img_b64))).convert('RGB')
|
||||
|
||||
raw_question = item['question']
|
||||
question_text = self.question_process(raw_question)
|
||||
return {
|
||||
'image': image,
|
||||
'raw_question': raw_question,
|
||||
'question': question_text
|
||||
}
|
||||
|
||||
|
||||
def __len__(self):
|
||||
return len(self.qa_data)
|
||||
|
||||
|
||||
class SingleDataSourceDataset(torch_data.Dataset):
|
||||
def __init__(self, ds_name, item_builder: ItemBuilder, data_dir, tsv_filenames: List[str], intent='sft') -> None:
|
||||
super().__init__()
|
||||
|
||||
self.data_dir = data_dir
|
||||
self.filenames = tsv_filenames
|
||||
self.ds_name = ds_name
|
||||
|
||||
self.sizes = []
|
||||
for filename in self.filenames:
|
||||
try:
|
||||
size = int(filename[:-4].split('-')[-1])
|
||||
except:
|
||||
raise ValueError(f'TSV Data File {filename} is not valid, the last component separated by `-` must be the number of samples in this file')
|
||||
self.sizes.append(size)
|
||||
|
||||
self.file_border_index = []
|
||||
self.prepare_border_index()
|
||||
|
||||
self.item_builder = item_builder
|
||||
self.files = self.filenames[:]
|
||||
self.intent = intent
|
||||
|
||||
|
||||
def prepare_border_index(self):
|
||||
self.file_border_index = [0]
|
||||
|
||||
temp_sum = 0
|
||||
for size in self.sizes:
|
||||
temp_sum += size
|
||||
self.file_border_index.append(temp_sum)
|
||||
|
||||
|
||||
def get_file_idx_and_row_idx(self, index):
|
||||
found = False
|
||||
file_idx = -1
|
||||
|
||||
for border_idx, border in enumerate(self.file_border_index):
|
||||
if index < border:
|
||||
file_idx = border_idx - 1
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
raise ValueError(f'Index {index} out of range for dataset of length {self.file_border_index[-1]}')
|
||||
|
||||
offset = self.file_border_index[file_idx]
|
||||
row_idx = index - offset
|
||||
return file_idx, row_idx
|
||||
|
||||
def __len__(self):
|
||||
return self.file_border_index[-1]
|
||||
|
||||
def __getitem__(self, index):
|
||||
file_idx, row_idx = self.get_file_idx_and_row_idx(index)
|
||||
try:
|
||||
sample = self.fetch_sample(file_idx, row_idx)
|
||||
item = self.item_builder.build_item(sample)
|
||||
except Exception:
|
||||
logger.warning(f"data fetch error at index {index}", exc_info=True)
|
||||
return self.__getitem__(random.randint(0, len(self) - 1))
|
||||
return item
|
||||
|
||||
def fetch_sample(self, file_idx, row_idx):
|
||||
file = self.files[file_idx]
|
||||
if isinstance(file, str):
|
||||
self.prepare_file(file_idx)
|
||||
file = self.files[file_idx]
|
||||
|
||||
assert isinstance(file, TSVFile), f'Expecting TSVFile but get {file} as {type(file)}'
|
||||
|
||||
# tsv line as tuple
|
||||
sample = file[row_idx]
|
||||
ds_name, *values = sample
|
||||
# data dict
|
||||
sample = register_data_processor[self.ds_name](*values, intent=self.intent)
|
||||
if row_idx + 1 == len(file):
|
||||
del file
|
||||
self.files[file_idx] = self.filenames[file_idx]
|
||||
|
||||
return sample
|
||||
|
||||
def prepare_file(self, idx):
|
||||
filename = self.filenames[idx]
|
||||
file = TSVFile(op.join(self.data_dir, filename))
|
||||
self.files[idx] = file
|
||||
|
||||
|
||||
class IterableSingleDataSourceDataset(torch_data.IterableDataset):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class MultiDataSourceDataset(torch_data.Dataset):
|
||||
def __init__(self, data_sources: List[SingleDataSourceDataset], data_source_weights: List[int]):
|
||||
super().__init__()
|
||||
|
||||
self.ds_list = data_sources
|
||||
|
||||
self.sum_weight = sum(data_source_weights)
|
||||
self.ds_weights = data_source_weights
|
||||
for weight in self.ds_weights:
|
||||
assert isinstance(weight, int), 'weight must be integer'
|
||||
|
||||
self.offset2ds = {}
|
||||
self.offset2wt = {}
|
||||
self.offset2pd = {}
|
||||
self.prepare_offset2ds()
|
||||
|
||||
ds_loops = []
|
||||
for ds, wt in zip(self.ds_list, self.ds_weights):
|
||||
ds_loop = len(ds) // wt
|
||||
ds_loops.append(ds_loop)
|
||||
max_loop = max(ds_loops)
|
||||
self.size = max_loop * self.sum_weight
|
||||
|
||||
def prepare_offset2ds(self):
|
||||
offset = 0
|
||||
for ds, weight in zip(self.ds_list, self.ds_weights):
|
||||
pd = offset
|
||||
for _ in range(weight):
|
||||
self.offset2ds[offset] = ds
|
||||
self.offset2wt[offset] = weight
|
||||
self.offset2pd[offset] = pd
|
||||
offset += 1
|
||||
|
||||
def __getitem__(self, index):
|
||||
n_loop = index // self.sum_weight
|
||||
offset = index % self.sum_weight
|
||||
|
||||
ds = self.offset2ds[offset]
|
||||
ds_inner_idx = n_loop * self.offset2wt[offset] + offset - self.offset2pd[offset]
|
||||
ds_inner_idx = ds_inner_idx % len(ds)
|
||||
|
||||
return ds[ds_inner_idx]
|
||||
|
||||
def __len__(self):
|
||||
return self.size
|
||||
|
||||
|
||||
class IterableMultiDataSourceDataset(torch_data.IterableDataset):
|
||||
def __init__(self, data_sources, data_source_weights):
|
||||
super().__init__()
|
||||
|
||||
self.ds_list = data_sources
|
||||
|
||||
sum_weight = sum(data_source_weights)
|
||||
self.ds_weights = [x / sum_weight for x in data_source_weights]
|
||||
|
||||
self.ds_consumption = [0] * len(self.ds_list)
|
||||
self.ds_sizes = [len(ds) for ds in self.ds_list]
|
||||
|
||||
def __next__(self):
|
||||
ds_idx = numpy.random.choice(range(len(self.ds_list)), 1, p=self.ds_weights)[0]
|
||||
data_source = self.ds_list[ds_idx]
|
||||
|
||||
self.ds_consumption[ds_idx] += 1
|
||||
if self.ds_consumption[ds_idx] % self.ds_sizes[ds_idx] == 0:
|
||||
self.report_consumption()
|
||||
|
||||
sample = next(data_source)
|
||||
return sample
|
||||
|
||||
def __iter__(self) -> Iterator:
|
||||
return self
|
||||
|
||||
def __len__(self):
|
||||
return sum(self.ds_sizes)
|
||||
|
||||
def report_consumption(self):
|
||||
for ds, consumption, size in zip(self.ds_list, self.ds_consumption, self.ds_sizes):
|
||||
print(f'Data {ds} consumption: {consumption / size:.2f} epoch', flush=True)
|
|
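`MultiDataSourceDataset` interleaves its sources by repeating each one `weight` times inside every block of `sum_weight` consecutive indices, so a weight-2 source is visited twice as often as a weight-1 source. A small illustrative check of that index mapping, with plain lists standing in for `SingleDataSourceDataset` instances (the import path is assumed from this file's location under vis_fm9g/dataset/):

```python
from vis_fm9g.dataset.dataset import MultiDataSourceDataset  # assumed module name

# Illustrative: two dummy "datasets" with weights 1 and 2.
ds_a = [f"a{i}" for i in range(4)]
ds_b = [f"b{i}" for i in range(8)]
mixed = MultiDataSourceDataset([ds_a, ds_b], [1, 2])

print(len(mixed))                    # 12: max(4 // 1, 8 // 2) loops * sum_weight 3
print([mixed[i] for i in range(6)])  # ['a0', 'b0', 'b1', 'a1', 'b2', 'b3']
```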
@ -0,0 +1,308 @@
|
|||
import io
|
||||
import json
|
||||
from typing import Dict, Tuple, List, Any
|
||||
|
||||
import torch
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from PIL import Image, PngImagePlugin
|
||||
from torch.utils.data import default_collate
|
||||
|
||||
from utils.logger import init_logger
|
||||
from vis_fm9g.dataset.utils import convert_data_to_id
|
||||
from vis_fm9g.dataset.utils import convert_conversation_data_to_id
|
||||
from vis_fm9g.dataset.utils import pad
|
||||
import random
|
||||
|
||||
from vis_fm9g.tokenizer.fm9g_tokenizer import FM9GTokenizer
|
||||
from vis_fm9g.utils.constants import usr_indicator, bot_indicator
|
||||
from vis_fm9g.dataset.prompts import caption_zh, caption_en
|
||||
|
||||
LARGE_ENOUGH_NUMBER = 100
|
||||
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
|
||||
|
||||
logger = init_logger()
|
||||
|
||||
def is_contain_chinese(check_str):
|
||||
"""
|
||||
判断字符串中是否包含中文
|
||||
:param check_str: {str} 需要检测的字符串
|
||||
:return: {bool} 包含返回True, 不包含返回False
|
||||
"""
|
||||
for ch in check_str:
|
||||
if u'\u4e00' <= ch <= u'\u9fff':
|
||||
return True
|
||||
|
||||
|
||||
def maybe_select_text(raw_text):
|
||||
candidates = raw_text.split('<cap_sep>')
|
||||
return random.choice(candidates)
|
||||
|
||||
|
||||
def maybe_parse_json(raw_text: str):
|
||||
# VG raw
|
||||
if raw_text.startswith('[{') and raw_text.endswith('}]'):
|
||||
try:
|
||||
data = json.loads(raw_text)
|
||||
text_list = [x['phrase'] for x in data if x['height'] > 160 and x['width'] > 160]
|
||||
if len(text_list) == 0:
|
||||
return max(data, key=lambda x: len(x['phrase'].split()))['phrase']
|
||||
else:
|
||||
return random.choice(text_list)
|
||||
except:
|
||||
return raw_text
|
||||
else:
|
||||
return raw_text
|
||||
|
||||
def clean_text(raw_text):
|
||||
text = raw_text.replace('<PERSON>', '')
|
||||
text = maybe_parse_json(maybe_select_text(text))
|
||||
return text
|
||||
|
||||
|
||||
def check_text_valid(raw_text):
|
||||
if pd.isna(raw_text):
|
||||
return False
|
||||
if not is_contain_chinese(raw_text) and len(raw_text.split()) <= 3:
|
||||
return False
|
||||
if '<img' in raw_text or '<a href' in raw_text:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def get_image_placeholder(tokenizer, query_len, use_im_start_end=False):
|
||||
if use_im_start_end:
|
||||
return tokenizer.im_start + tokenizer.unk_token * query_len + tokenizer.im_end
|
||||
else:
|
||||
return tokenizer.unk_token * query_len
|
||||
|
||||
|
||||
class ItemBuilder():
|
||||
def __init__(self, transform=None):
|
||||
self.transform = transform
|
||||
|
||||
def build_item(self, data):
|
||||
if self.transform is not None:
|
||||
return self.transform(data)
|
||||
return data
|
||||
|
||||
|
||||
# --------------------- FM9G ---------------------
|
||||
class FM9GBuilder(ItemBuilder):
|
||||
def __init__(self, tokenizer: FM9GTokenizer, max_len, transform=None, skip_overlength=False):
|
||||
super().__init__(transform)
|
||||
|
||||
self.tokenizer = tokenizer
|
||||
self.max_len = max_len
|
||||
self.skip_overlength = skip_overlength
|
||||
|
||||
def convert_data(self, inp_dicts: List[Dict], raw_data):
|
||||
res = []
|
||||
for inp_dict in inp_dicts:
|
||||
input_ids, context = convert_data_to_id(self.tokenizer, data=inp_dict)
|
||||
if len(input_ids) > self.max_len:
|
||||
if self.skip_overlength:
|
||||
if random.random() > 0.95:
|
||||
logger.warn(f"overlength={len(input_ids)}, raw_inp={inp_dict}, skip data")
|
||||
else:
|
||||
logger.warn(f"overlength={len(input_ids)}, skip data")
|
||||
continue
|
||||
|
||||
input_ids = input_ids[: self.max_len]
|
||||
context = context[: self.max_len]
|
||||
|
||||
res.append({
|
||||
'input_ids': torch.from_numpy(input_ids).unsqueeze(0),
|
||||
'context': torch.from_numpy(context).unsqueeze(0),
|
||||
'raw_data': raw_data,
|
||||
})
|
||||
|
||||
return res
|
||||
|
||||
def convert_conversation_data(self, conversation_list: List[List]):
|
||||
res = []
|
||||
for conversation in conversation_list:
|
||||
input_ids, context, raw = convert_conversation_data_to_id(self.tokenizer, data=conversation, predict_roles={bot_indicator})
|
||||
if len(input_ids) > self.max_len:
|
||||
if self.skip_overlength:
|
||||
if random.random() > 0.95:
|
||||
logger.warn(f"overlength={len(input_ids)}, raw_inp={conversation}, skip data")
|
||||
else:
|
||||
logger.warn(f"overlength={len(input_ids)}, skip data")
|
||||
continue
|
||||
|
||||
input_ids = input_ids[: self.max_len]
|
||||
context = context[: self.max_len]
|
||||
res.append({
|
||||
'input_ids': torch.from_numpy(input_ids).unsqueeze(0),
|
||||
'context': torch.from_numpy(context).unsqueeze(0),
|
||||
'raw_data': raw,
|
||||
})
|
||||
return res
|
||||
|
||||
def build_image_bound(self, res, images):
|
||||
return_res = []
|
||||
if isinstance(images, List) and len(images) > 0:
|
||||
images = torch.stack(images)
|
||||
for r in res:
|
||||
# r['input_ids'] (1, len)
|
||||
image_start_tokens = torch.where(r['input_ids'][0] == self.tokenizer.encoder[self.tokenizer.im_start])[0]
|
||||
# skip the im_start token itself
|
||||
image_start_tokens += 1
|
||||
image_end_tokens = torch.where(r['input_ids'][0] == self.tokenizer.encoder[self.tokenizer.im_end])[0]
|
||||
|
||||
if len(image_start_tokens) != len(image_end_tokens) or len(image_start_tokens) > len(images):
|
||||
continue
|
||||
|
||||
image_bound = torch.hstack([image_start_tokens.unsqueeze(-1), image_end_tokens.unsqueeze(-1)])
|
||||
|
||||
r['pixel_values'] = images[:len(image_start_tokens)]
|
||||
r['image_bound'] = image_bound
|
||||
return_res.append(r)
|
||||
|
||||
return return_res
|
||||
|
||||
|
||||
def build_item(self, data):
|
||||
raise NotImplementedError("build_item is not implemented.")
|
||||
|
||||
|
||||
class FM9GImageTextBuilder(FM9GBuilder):
|
||||
def __init__(self, tokenizer: FM9GTokenizer, max_len, transform=None, query_len=64, min_resolution=0, skip_overlength=False):
|
||||
super().__init__(tokenizer, max_len, transform, skip_overlength)
|
||||
self.query_len = query_len
|
||||
self.min_resolution = min_resolution
|
||||
|
||||
def build_item(self, data):
|
||||
text = data['conversations']
|
||||
image = data['image']
|
||||
source = data.get('metainfo', {}).get('origin_dataset', 'unk')
|
||||
|
||||
image = self.transform(image)
|
||||
|
||||
raw_data = {'text': text}
|
||||
image_placeholder = get_image_placeholder(self.tokenizer, self.query_len, use_im_start_end=True)
|
||||
messages = []
|
||||
for i in range(len(text)):
|
||||
role = text[i]['from']
|
||||
role = usr_indicator if role == 'human' else bot_indicator
|
||||
value = self.tokenizer.escape(text[i]['value'])
|
||||
if '<image>' in value:
|
||||
value = value.replace('<image>', image_placeholder)
|
||||
messages.append((role, value))
|
||||
res = self.convert_conversation_data([messages])
|
||||
self.build_image_bound(res, images=[image])
|
||||
for r in res:
|
||||
r['source'] = source
|
||||
|
||||
return res[0]
|
||||
|
||||
class FM9GCollater:
|
||||
def __init__(self, tokenizer: FM9GTokenizer, max_len: int, unpad: bool = False, unilm: bool = False):
|
||||
self.tokenizer = tokenizer
|
||||
self._max_length = max_len
|
||||
self._unpad = unpad
|
||||
self._unilm = unilm
|
||||
self.pad_keys = ['input_ids', 'context']
|
||||
|
||||
def __call__(self, batch):
|
||||
batch_cnt = len(batch)
|
||||
if self._unpad: # for flash_attention cuda
|
||||
max_length = self._max_length * batch_cnt
|
||||
batch_size = 1
|
||||
else:
|
||||
max_length = self._max_length
|
||||
batch_size = batch_cnt
|
||||
|
||||
inputs = np.zeros((batch_size, max_length), dtype=np.int32)
|
||||
context_origin = np.zeros((batch_size, max_length), dtype=np.int8)
|
||||
context = np.zeros((batch_size, max_length), dtype=np.int8)
|
||||
tgt = np.full((batch_size, max_length), -100, dtype=np.int32)
|
||||
|
||||
spans = np.zeros((batch_size, max_length), dtype=np.int32)
|
||||
length = np.zeros((batch_size,), dtype=np.int32)
|
||||
position_ids = np.zeros((batch_size, max_length), dtype=np.int32)
|
||||
|
||||
if self._unpad: # for flash_attention cuda, force batch_size=1
|
||||
flatten_input_ids = np.concatenate([batch[i]['input_ids'][0] for i in range(batch_cnt)], axis=0)
|
||||
flatten_context = np.concatenate([batch[i]['context'][0] for i in range(batch_cnt)], axis=0)
|
||||
instance_length = flatten_input_ids.shape[0]
|
||||
inputs[0, : instance_length] = flatten_input_ids
|
||||
context_origin[0, : instance_length] = flatten_context
|
||||
length[0] = instance_length
|
||||
if self._unilm:
|
||||
context[0, : instance_length] = flatten_context
|
||||
# flatten batch
|
||||
_spans = [list(np.cumsum([batch[i]['input_ids'][0].shape[0] for i in range(batch_cnt)]))]
|
||||
|
||||
else:
|
||||
for i in range(batch_cnt):
|
||||
instance_length = batch[i]['input_ids'][0].shape[0]
|
||||
inputs[i, :instance_length] = batch[i]['input_ids'][0]
|
||||
context_origin[i, : instance_length] = batch[i]['context'][0]
|
||||
length[i] = instance_length
|
||||
if self._unilm:
|
||||
context[i, :instance_length] = batch[i]['context'][0]
|
||||
_spans = [[batch[i]['input_ids'][0].shape[0]] for i in range(batch_cnt)]
|
||||
|
||||
|
||||
# cu_seqlens and max_seqlen are required by the flash_attention CUDA kernel
|
||||
if _spans[0][-1] != max_length:
|
||||
cu_seqlens = np.array([0] + _spans[0] + [max_length], dtype=np.int32)
|
||||
else:
|
||||
cu_seqlens = np.array([0] + _spans[0], dtype=np.int32)
|
||||
max_seqlen = int(np.max(cu_seqlens[1:] - cu_seqlens[:-1]))
|
||||
|
||||
raw_data_list: List[Any] = [batch[i]['raw_data'] for i in range(batch_cnt)]
|
||||
source_list: List[Any] = [batch[i].get('source', 'unk') for i in range(batch_cnt)]
|
||||
|
||||
for i in range(batch_size):
|
||||
instance_length = length[i]
|
||||
span_begin = 0
|
||||
for span_id, span_end in enumerate(_spans[i]):
|
||||
spans[i, span_begin: span_end] = span_id
|
||||
position_ids[i, span_begin:span_end] = np.arange(span_end - span_begin)
|
||||
span_begin = span_end
|
||||
for j in range(instance_length):
|
||||
idx = inputs[i][j]
|
||||
if j > 1:
|
||||
if context_origin[i][j] == 0:
|
||||
if idx != self.tokenizer.bos_id and inputs[i][j - 1] != self.tokenizer.eos_id:
|
||||
tgt[i, j - 1] = idx
|
||||
if context_origin[i][j] == 1 and context_origin[i][j-1] == 0:
|
||||
if idx != self.tokenizer.bos_id and inputs[i][j - 1] != self.tokenizer.eos_id:
|
||||
tgt[i, j - 1] = self.tokenizer.eos_id
|
||||
|
||||
|
||||
data = {}
|
||||
# image
|
||||
if 'pixel_values' in batch[0]:
|
||||
if self._unpad:
|
||||
data['pixel_values'] = [torch.vstack([i['pixel_values'] for i in batch])]
|
||||
else:
|
||||
data['pixel_values'] = [i['pixel_values'] for i in batch]
|
||||
|
||||
|
||||
# image_bound
|
||||
if 'image_bound' in batch[0]:
|
||||
if self._unpad:
|
||||
image_bounds = []
|
||||
for i in range(batch_cnt):
|
||||
offset = _spans[0][i-1] if i > 0 else 0
|
||||
image_bounds.append(batch[i]['image_bound'] + offset)
|
||||
data['image_bound'] = [torch.vstack(image_bounds)]
|
||||
else:
|
||||
data['image_bound'] = [i['image_bound'] for i in batch]
|
||||
|
||||
data['input_ids'] = torch.from_numpy(inputs)
|
||||
data['context'] = torch.from_numpy(context) > 0
|
||||
data['length'] = torch.from_numpy(length)
|
||||
data['spans'] = torch.from_numpy(spans)
|
||||
data['cu_seqlens'] = torch.from_numpy(cu_seqlens)
|
||||
data['max_seqlen'] = max_seqlen
|
||||
data['position_ids'] = torch.from_numpy(position_ids)
|
||||
data['target'] = torch.from_numpy(tgt)
|
||||
data['raw_data'] = raw_data_list
|
||||
data['source'] = source_list
|
||||
|
||||
return data
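
# Usage sketch (illustrative; real FM9GTokenizer / dataset / transform objects from
# this project are required, so the snippet is left commented out). `dataset` is
# assumed to yield the dicts produced by FM9GImageTextBuilder.build_item.
#
#   collater = FM9GCollater(tokenizer, max_len=2048, unpad=True, unilm=True)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, collate_fn=collater)
#   # Each batch carries input_ids / context / target plus pixel_values and
#   # image_bound for the vision branch; with unpad=True all samples are packed
#   # into a single row and cu_seqlens / max_seqlen drive the varlen flash-attention kernel.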
|
|
@ -0,0 +1,24 @@
|
|||
caption_en = [
|
||||
'Describe the image concisely',
|
||||
'Provide a brief description of the given image',
|
||||
'Offer a succinct explanation of the picture presented',
|
||||
'Summarize the visual content of the image',
|
||||
'Share a concise interpretation of the image provided',
|
||||
'Present a compact description of the photo’s key features',
|
||||
'Relay a brief and clear account of the picture shown',
|
||||
'Render a clear and concise summary of the photo',
|
||||
'Write a terse but informative summary of the picture',
|
||||
'Create a compact narrative representing the image presented',
|
||||
]
|
||||
|
||||
caption_zh = [
|
||||
'简明扼要地描述图像',
|
||||
'提供给定图像的简短描述',
|
||||
'对所示的图片进行简要的解释',
|
||||
'总结图像的视觉内容',
|
||||
'对所提供的图像进行简要的解释',
|
||||
'简明扼要并清楚地说明所示图片',
|
||||
'对这张照片作一个简明扼要的总结',
|
||||
'写一篇简洁但内容丰富的图片摘要',
|
||||
'创造一个紧凑的叙事来代表所呈现的图像',
|
||||
]
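
# Small convenience sketch (not in the original file): a training loop would typically
# draw one instruction at random per sample to diversify the caption prompts.
def _sample_caption_prompt(use_english: bool = True) -> str:
    import random
    return random.choice(caption_en if use_english else caption_zh)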
|
|
@ -0,0 +1,106 @@
|
|||
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
|
||||
import os
|
||||
import logging
|
||||
import os.path as op
|
||||
|
||||
LARGEST_TSV_SIZE = 500_000
|
||||
|
||||
# LARGEST_TSV_SIZE = 10_000
|
||||
|
||||
|
||||
def create_lineidx(filein, idxout):
|
||||
idxout_tmp = idxout + '.tmp'
|
||||
with open(filein, 'r') as tsvin, open(idxout_tmp, 'w') as tsvout:
|
||||
fsize = os.fstat(tsvin.fileno()).st_size
|
||||
fpos = 0
|
||||
while fpos != fsize:
|
||||
tsvout.write(str(fpos)+"\n")
|
||||
tsvin.readline()
|
||||
fpos = tsvin.tell()
|
||||
os.rename(idxout_tmp, idxout)
|
||||
|
||||
|
||||
def read_to_character(fp, c):
|
||||
result = []
|
||||
while True:
|
||||
s = fp.read(32)
|
||||
assert s != ''
|
||||
if c in s:
|
||||
result.append(s[: s.index(c)])
|
||||
break
|
||||
else:
|
||||
result.append(s)
|
||||
return ''.join(result)
|
||||
|
||||
|
||||
class TSVFile(object):
|
||||
def __init__(self, tsv_file, generate_lineidx=False):
|
||||
self.tsv_file = tsv_file
|
||||
self.lineidx = op.splitext(tsv_file)[0] + '.lineidx'
|
||||
self._fp = None
|
||||
self._lineidx = None
|
||||
# remember the pid of the process that opened the file;
|
||||
# if it differs from the current pid, the file will be re-opened.
|
||||
self.pid = None
|
||||
# generate lineidx if not exist
|
||||
if not op.isfile(self.lineidx) and generate_lineidx:
|
||||
create_lineidx(self.tsv_file, self.lineidx)
|
||||
|
||||
def __del__(self):
|
||||
if self._fp:
|
||||
self._fp.close()
|
||||
|
||||
def __str__(self):
|
||||
return "TSVFile(tsv_file='{}')".format(self.tsv_file)
|
||||
|
||||
def __repr__(self):
|
||||
return str(self)
|
||||
|
||||
def num_rows(self):
|
||||
self._ensure_lineidx_loaded()
|
||||
assert len(
|
||||
self._lineidx) <= LARGEST_TSV_SIZE, f'Do not support TSVFile larger than {LARGEST_TSV_SIZE} yet'
|
||||
return len(self._lineidx)
|
||||
|
||||
def seek(self, idx):
|
||||
self._ensure_tsv_opened()
|
||||
self._ensure_lineidx_loaded()
|
||||
try:
|
||||
pos = self._lineidx[idx]
|
||||
except:
|
||||
logging.info('{}-{}'.format(self.tsv_file, idx))
|
||||
raise
|
||||
self._fp.seek(pos)
|
||||
return [s.strip() for s in self._fp.readline().split('\t')]
|
||||
|
||||
def seek_first_column(self, idx):
|
||||
self._ensure_tsv_opened()
|
||||
self._ensure_lineidx_loaded()
|
||||
pos = self._lineidx[idx]
|
||||
self._fp.seek(pos)
|
||||
return read_to_character(self._fp, '\t')
|
||||
|
||||
def get_key(self, idx):
|
||||
return self.seek_first_column(idx)
|
||||
|
||||
def __getitem__(self, index):
|
||||
return self.seek(index)
|
||||
|
||||
def __len__(self):
|
||||
return self.num_rows()
|
||||
|
||||
def _ensure_lineidx_loaded(self):
|
||||
if self._lineidx is None:
|
||||
logging.debug('loading lineidx: {}'.format(self.lineidx))
|
||||
with open(self.lineidx, 'r') as fp:
|
||||
self._lineidx = [int(i.strip()) for i in fp.readlines()]
|
||||
|
||||
def _ensure_tsv_opened(self):
|
||||
if self._fp is None:
|
||||
self._fp = open(self.tsv_file, 'r')
|
||||
self.pid = os.getpid()
|
||||
|
||||
if self.pid != os.getpid():
|
||||
# logging.info('re-open {} because the process id changed'.format(self.tsv_file))
|
||||
self._fp = open(self.tsv_file, 'r')
|
||||
self.pid = os.getpid()
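

def _example_tsv_roundtrip():
    # Illustrative self-test (not part of the original module): write a tiny TSV,
    # build its .lineidx, and read rows back through TSVFile.
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    path = op.join(tmp_dir, "demo.tsv")
    with open(path, "w") as f:
        f.write("img_0\tcaption one\n")
        f.write("img_1\tcaption two\n")
    tsv = TSVFile(path, generate_lineidx=True)
    assert tsv.num_rows() == 2
    assert tsv.get_key(1) == "img_1"
    assert tsv[0] == ["img_0", "caption one"]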
|
|
@ -0,0 +1,170 @@
|
|||
import importlib.machinery
|
||||
import importlib.util
|
||||
import types
|
||||
from typing import Any, Set
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Union
|
||||
|
||||
import torch
|
||||
import numpy as np
|
||||
from numpy.typing import NDArray
|
||||
from torch.utils.data import BatchSampler
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from vis_fm9g.tokenizer.fm9g_tokenizer import FM9GTokenizer
|
||||
from vis_fm9g.utils.constants import SYSTEM
|
||||
|
||||
FM9GInputType = Union[str, Dict[str, "FM9GInputType"]]
|
||||
|
||||
class _TransformFuncDict(TypedDict):
|
||||
loader: importlib.machinery.SourceFileLoader
|
||||
module: types.ModuleType
|
||||
last_m: float
|
||||
|
||||
|
||||
class FM9GBatch(TypedDict):
|
||||
inputs: NDArray[np.int32]
|
||||
length: NDArray[np.int32]
|
||||
context: NDArray[np.bool_]
|
||||
sample_ids: NDArray[np.int32]
|
||||
spans: NDArray[np.int32]
|
||||
target: NDArray[np.int32]
|
||||
task_ids: NDArray[np.int32]
|
||||
task_names: List[str]
|
||||
raw_data: List[Any]
|
||||
|
||||
|
||||
def convert_data_to_id(tokenizer: FM9GTokenizer, data: Any):
|
||||
"""
|
||||
data: {
|
||||
'input': xxx,
|
||||
'output': xxx
|
||||
}
|
||||
"""
|
||||
input_ids = tokenizer.encode(data["input"])
|
||||
output_ids = tokenizer.encode(data["output"])
|
||||
ids = [tokenizer.bos_id] + input_ids + output_ids + [tokenizer.eos_id]
|
||||
ids = np.array(ids, dtype=np.int32)
|
||||
context = np.zeros((ids.shape[0],), dtype=np.int8)
|
||||
|
||||
# positions with context == 0 are the ones that need a target
|
||||
context[: len(input_ids) + 1] = 1
|
||||
return ids, context
|
||||
|
||||
|
||||
def convert_conversation_data_to_id(tokenizer: FM9GTokenizer, data: Any, predict_roles: Set):
|
||||
"""
|
||||
predict_roles: {'<AI>'}
|
||||
data: [
|
||||
('<用户>', xxxx),
|
||||
('<AI>', xxxx)
|
||||
]
|
||||
"""
|
||||
assert (set([i[0] for i in data]) & predict_roles)
|
||||
|
||||
|
||||
if SYSTEM:
|
||||
system = tokenizer.bos_token + SYSTEM + '\n'
|
||||
else:
|
||||
system = tokenizer.bos_token
|
||||
sys_idx = tokenizer.encode(system)
|
||||
ret = system
|
||||
|
||||
input_ids = [sys_idx] if sys_idx else []
|
||||
context = [np.ones((len(sys_idx),), dtype=np.int8)]
|
||||
|
||||
for idx, (role, message) in enumerate(data):
|
||||
prefix = role
|
||||
# append eos to the last message
|
||||
if idx == len(data)-1:
|
||||
message = message + tokenizer.eos_token
|
||||
|
||||
prefix_ids = tokenizer.encode(prefix)
|
||||
message_ids = tokenizer.encode(message)
|
||||
|
||||
input_ids.append(prefix_ids)
|
||||
|
||||
input_ids.append(message_ids)
|
||||
context.append(np.ones((len(prefix_ids),), dtype=np.int8))
|
||||
|
||||
if role in predict_roles:
|
||||
context.append(np.zeros((len(message_ids),), dtype=np.int8))
|
||||
else:
|
||||
context.append(np.ones((len(message_ids),), dtype=np.int8))
|
||||
|
||||
ret += (prefix + message)
|
||||
|
||||
ids = np.hstack(input_ids)
|
||||
context = np.hstack(context)
|
||||
|
||||
return ids, context, ret
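
# Usage sketch (illustrative; a real FM9GTokenizer instance is required, so this is
# left as a comment):
#
#   ids, ctx, text = convert_conversation_data_to_id(
#       tokenizer,
#       data=[('<用户>', 'Describe the image'), ('<AI>', 'A red bus on the street.')],
#       predict_roles={'<AI>'},
#   )
#   # ctx == 1 marks given context (system prompt, role prefixes, user turns);
#   # ctx == 0 marks the assistant messages, which become the training targets downstream.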
|
||||
|
||||
|
||||
def pad(orig_items, key, max_length=None, padding_value=0, padding_side="left"):
|
||||
items = []
|
||||
if isinstance(orig_items[0][key], list):
|
||||
assert isinstance(orig_items[0][key][0], torch.Tensor)
|
||||
for it in orig_items:
|
||||
for tr in it[key]:
|
||||
items.append({key: tr})
|
||||
else:
|
||||
assert isinstance(orig_items[0][key], torch.Tensor)
|
||||
items = orig_items
|
||||
|
||||
batch_size = len(items)
|
||||
shape = items[0][key].shape
|
||||
dim = len(shape)
|
||||
assert dim <= 3
|
||||
if max_length is None:
|
||||
max_length = 0
|
||||
max_length = max(max_length, max(item[key].shape[-1] for item in items))
|
||||
min_length = min(item[key].shape[-1] for item in items)
|
||||
dtype = items[0][key].dtype
|
||||
|
||||
if dim == 1:
|
||||
return torch.cat([item[key] for item in items], dim=0)
|
||||
elif dim == 2:
|
||||
if max_length == min_length:
|
||||
return torch.cat([item[key] for item in items], dim=0)
|
||||
tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
|
||||
else:
|
||||
tensor = torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype) + padding_value
|
||||
|
||||
for i, item in enumerate(items):
|
||||
if dim == 2:
|
||||
if padding_side == "left":
|
||||
tensor[i, -len(item[key][0]):] = item[key][0].clone()
|
||||
else:
|
||||
tensor[i, : len(item[key][0])] = item[key][0].clone()
|
||||
elif dim == 3:
|
||||
if padding_side == "left":
|
||||
tensor[i, -len(item[key][0]):, :] = item[key][0].clone()
|
||||
else:
|
||||
tensor[i, : len(item[key][0]), :] = item[key][0].clone()
|
||||
|
||||
return tensor
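
def _example_pad_usage():
    # Illustrative only (not part of the original module): left-pad two
    # variable-length id tensors into one batch.
    items = [
        {"input_ids": torch.tensor([[1, 2, 3]])},
        {"input_ids": torch.tensor([[4, 5]])},
    ]
    batch = pad(items, "input_ids", padding_value=0, padding_side="left")
    # batch -> tensor([[1, 2, 3],
    #                  [0, 4, 5]])
    return batch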
|
||||
|
||||
|
||||
class SkipBatchSampler(BatchSampler):
|
||||
"""
|
||||
A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
|
||||
"""
|
||||
|
||||
def __init__(self, batch_sampler, skip_batches=0):
|
||||
self.batch_sampler = batch_sampler
|
||||
self.skip_batches = skip_batches
|
||||
self.first_epoch = True
|
||||
|
||||
def __iter__(self):
|
||||
for index, samples in enumerate(self.batch_sampler):
|
||||
# skip only during the first pass over the data; later epochs yield every batch
if index >= self.skip_batches or not self.first_epoch:
|
||||
yield samples
|
||||
self.first_epoch = False
|
||||
|
||||
@property
|
||||
def total_length(self):
|
||||
return len(self.batch_sampler)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.batch_sampler) - self.skip_batches
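
def _example_skip_batch_sampler():
    # Illustrative only (not part of the original module): resume mid-epoch by
    # skipping the first two batches of the first pass over the data.
    from torch.utils.data import BatchSampler, SequentialSampler
    base = BatchSampler(SequentialSampler(range(10)), batch_size=2, drop_last=False)
    sampler = SkipBatchSampler(base, skip_batches=2)
    return list(sampler)  # [[4, 5], [6, 7], [8, 9]]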
|
|
@ -0,0 +1,112 @@
|
|||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float("inf")):
|
||||
# This function has been mostly taken from huggingface conversational ai code at
|
||||
# https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
|
||||
|
||||
if top_k > 0:
|
||||
# Remove all tokens with a probability less than the last token of the top-k
|
||||
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
|
||||
logits[indices_to_remove] = filter_value
|
||||
|
||||
batch_size = logits.size()[0]
|
||||
if top_p > 0.0:
|
||||
logits = logits.view(batch_size, -1).contiguous()
|
||||
for index in range(len(logits)):
|
||||
|
||||
sorted_logits, sorted_indices = torch.sort(logits[index].view(-1), descending=True)
|
||||
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
|
||||
# Remove tokens with cumulative probability above the threshold
|
||||
sorted_indices_to_remove = cumulative_probs > top_p
|
||||
# Shift the indices to the right to keep also the first token above the threshold
|
||||
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
|
||||
sorted_indices_to_remove[..., 0] = 0
|
||||
indices_to_remove = sorted_indices[sorted_indices_to_remove]
|
||||
logits[index][indices_to_remove] = filter_value
|
||||
|
||||
logits = logits.view(batch_size, -1).contiguous()
|
||||
|
||||
return logits
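
def _example_top_k_top_p():
    # Illustrative only (not part of the original module): keep the 2 most likely
    # tokens, then sample from the renormalised distribution.
    logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
    filtered = top_k_top_p_filtering(logits.clone(), top_k=2, top_p=0.0)
    probs = F.softmax(filtered, dim=-1)  # probability mass only on the first two tokens
    next_token = torch.multinomial(probs, num_samples=1)
    return next_token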
|
||||
|
||||
|
||||
def apply_repetition_penalty(
|
||||
logits,
|
||||
batch_size,
|
||||
num_beams,
|
||||
prev_output_tokens,
|
||||
repetition_penalty,
|
||||
start_idx=None,
|
||||
end_idx=None,
|
||||
window_size=None,
|
||||
):
|
||||
# only conduct repetition penalty for the output
|
||||
assert repetition_penalty >= 1, "repetition penalty coefficient should >= 1"
|
||||
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
|
||||
for i in range(batch_size * num_beams):
|
||||
if start_idx is None or end_idx is None:
|
||||
output_tokens = prev_output_tokens[i].tolist()
|
||||
else:
|
||||
if end_idx >= start_idx:
|
||||
if window_size:
|
||||
output_tokens = prev_output_tokens[i][
|
||||
max(start_idx, end_idx + 1 - window_size): end_idx + 1
|
||||
].tolist()
|
||||
else:
|
||||
output_tokens = prev_output_tokens[i][start_idx: end_idx + 1].tolist()
|
||||
else:
|
||||
output_tokens = []
|
||||
for previous_token in set(output_tokens):
|
||||
# if score < 0 then repetition penalty has to
|
||||
# be multiplied to reduce the probability of the previous token
|
||||
if logits[i, previous_token] < 0:
|
||||
logits[i, previous_token] *= repetition_penalty
|
||||
else:
|
||||
logits[i, previous_token] /= repetition_penalty
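
def _example_repetition_penalty():
    # Illustrative only (not part of the original module): tokens already generated
    # (ids 1 and 3) get their logits down-weighted in place before the next step.
    logits = torch.tensor([[1.0, 2.0, 0.5, -1.0]])
    prev = torch.tensor([[1, 3]])
    apply_repetition_penalty(logits, batch_size=1, num_beams=1,
                             prev_output_tokens=prev, repetition_penalty=1.2)
    return logits  # token 1: 2.0 / 1.2, token 3: -1.0 * 1.2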
|
||||
|
||||
|
||||
class BeamHypotheses:
|
||||
def __init__(self, n_hyp, max_len, length_penalty, early_stopping):
|
||||
"""
|
||||
Initialize n-best list of hypotheses.
|
||||
"""
|
||||
self.max_len = max_len
|
||||
self.length_penalty = length_penalty
|
||||
self.early_stopping = early_stopping
|
||||
self.n_hyp = n_hyp
|
||||
self.hyp = []
|
||||
self.worst_score = 1e9
|
||||
|
||||
def __len__(self):
|
||||
"""
|
||||
Number of hypotheses in the list.
|
||||
"""
|
||||
return len(self.hyp)
|
||||
|
||||
def add(self, hyp, sum_logprobs):
|
||||
"""
|
||||
Add a new hypothesis to the list.
|
||||
"""
|
||||
score = sum_logprobs / len(hyp) ** self.length_penalty
|
||||
|
||||
if len(self) < self.n_hyp or score > self.worst_score:
|
||||
self.hyp.append((score, hyp))
|
||||
if len(self) > self.n_hyp:
|
||||
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])
|
||||
del self.hyp[sorted_scores[0][1]]
|
||||
self.worst_score = sorted_scores[1][0]
|
||||
else:
|
||||
self.worst_score = min(score, self.worst_score)
|
||||
|
||||
def is_done(self, best_sum_logprobs, cur_len):
|
||||
"""
|
||||
If there are enough hypotheses and that none of the hypotheses being generated
|
||||
can become better than the worst one in the heap, then we are done with this sentence.
|
||||
"""
|
||||
if len(self) < self.n_hyp:
|
||||
return False
|
||||
elif self.early_stopping:
|
||||
return True
|
||||
else:
|
||||
return self.worst_score >= best_sum_logprobs / cur_len**self.length_penalty
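
def _example_beam_hypotheses():
    # Illustrative only (not part of the original module): track the 2 best finished
    # hypotheses during beam search with a length penalty of 1.0.
    hyps = BeamHypotheses(n_hyp=2, max_len=10, length_penalty=1.0, early_stopping=False)
    hyps.add([5, 6, 7], sum_logprobs=-1.5)
    hyps.add([5, 8], sum_logprobs=-0.4)
    hyps.add([5, 9, 10, 11], sum_logprobs=-6.0)  # too weak, the list keeps the best two
    best = max(hyps.hyp, key=lambda x: x[0])[1]
    return best  # [5, 8]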
|
|
@ -0,0 +1,425 @@
|
|||
from typing import Any, List, Optional, Union
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
from vis_fm9g.generation.generation_utils import BeamHypotheses, apply_repetition_penalty, \
|
||||
top_k_top_p_filtering
|
||||
from vis_fm9g.tokenizer.fm9g_tokenizer import FM9GTokenizer, LlamaTokenizerWrapper
|
||||
from vis_fm9g.model.vlu_fm9g import VLU_FM9G
|
||||
from vis_fm9g.dataset.utils import pad
|
||||
|
||||
|
||||
class VLLMFM9GGeneration:
|
||||
def __init__(self, model: VLU_FM9G, tokenizer: Union[FM9GTokenizer, LlamaTokenizerWrapper], transform):
|
||||
model.eval()
|
||||
self.model = model
|
||||
self.tokenizer = tokenizer
|
||||
self.transform = transform
|
||||
|
||||
def _convert_to_tensors(self, data: Any, max_inp_length: Optional[int] = None):
|
||||
if isinstance(self.tokenizer, LlamaTokenizerWrapper) and self.tokenizer.add_bos_token:
|
||||
input_ids = self.tokenizer.encode(data["input"])
|
||||
else:
|
||||
input_ids = [self.tokenizer.bos_id] + self.tokenizer.encode(data["input"])
|
||||
if max_inp_length is not None:
|
||||
input_ids = input_ids[: max_inp_length]
|
||||
input_ids = torch.tensor(input_ids, dtype=torch.int32)
|
||||
|
||||
image_start_tokens = torch.where(input_ids == self.tokenizer.im_start_id)[0]
|
||||
# skip the im_start token itself
|
||||
image_start_tokens += 1
|
||||
image_end_tokens = torch.where(input_ids == self.tokenizer.im_end_id)[0]
|
||||
valid_image_nums = max(len(image_start_tokens), len(image_end_tokens))
|
||||
image_bound = torch.hstack(
|
||||
[image_start_tokens[: valid_image_nums].unsqueeze(-1),
|
||||
image_end_tokens[:valid_image_nums].unsqueeze(-1)]
|
||||
)
|
||||
|
||||
model_input = {}
|
||||
model_input["input_ids"] = input_ids.unsqueeze(0)
|
||||
|
||||
model_input["context"] = torch.zeros(
|
||||
(model_input["input_ids"].shape[0], model_input["input_ids"].shape[1]), dtype=torch.int16
|
||||
)
|
||||
model_input["span"] = torch.ones((model_input["input_ids"].shape[1],), dtype=torch.int16).unsqueeze(0)
|
||||
model_input["length"] = torch.tensor([model_input["input_ids"].shape[1]], dtype=torch.int16)
|
||||
model_input["image_bound"] = image_bound
|
||||
|
||||
return model_input
|
||||
|
||||
def _process_list(self, data_list: List[Any], max_inp_length: Optional[int] = None):
|
||||
pad_keys = ['input_ids', 'context', 'span']
|
||||
input_tensors = []
|
||||
for data in data_list:
|
||||
input_tensors.append(self._convert_to_tensors(data, max_inp_length))
|
||||
padded = {}
|
||||
for key in pad_keys:
|
||||
padded[key] = pad(input_tensors, key, padding_side="left").cuda()
|
||||
padded['length'] = torch.hstack([i['length'] for i in input_tensors]).cuda()
|
||||
padded['image_bound'] = [i['image_bound'] for i in input_tensors]
|
||||
|
||||
return padded
|
||||
|
||||
def generate(
|
||||
self,
|
||||
data_list=None,
|
||||
img_list=None,
|
||||
max_inp_length: Optional[int] = None,
|
||||
vision_hidden_states=None,
|
||||
return_vision_hidden_states=False,
|
||||
use_transform=True,
|
||||
**kwargs
|
||||
):
|
||||
# img_list List[List[images]]
|
||||
assert data_list is not None
|
||||
bs = len(data_list)
|
||||
if img_list is None:
|
||||
img_list = [[] for i in range(bs)]
|
||||
assert bs == len(img_list)
|
||||
|
||||
model_inputs = self._process_list(data_list, max_inp_length)
|
||||
|
||||
if vision_hidden_states is None:
|
||||
if use_transform:
|
||||
pixel_values = []
|
||||
for i in range(bs):
|
||||
img_inps = []
|
||||
for img in img_list[i]:
|
||||
img_inps.append(self.transform(img))
|
||||
if img_inps:
|
||||
pixel_values.append(torch.stack(img_inps).cuda())
|
||||
else:
|
||||
pixel_values.append([])
|
||||
model_inputs['pixel_values'] = pixel_values
|
||||
else:
|
||||
pixel_values = img_list
|
||||
model_inputs['pixel_values'] = pixel_values
|
||||
else:
|
||||
model_inputs['vision_hidden_states'] = vision_hidden_states
|
||||
|
||||
with torch.inference_mode():
|
||||
model_inputs['hidden_states'], vision_hidden_states = self.model.get_vllm_embedding(
|
||||
model_inputs)
|
||||
result = self._decode(model_inputs, **kwargs)
|
||||
|
||||
if return_vision_hidden_states:
|
||||
return result, vision_hidden_states
|
||||
|
||||
return result
|
||||
|
||||
def _decode(self, model_inputs, **kwargs):
|
||||
raise NotImplementedError("_decode is not implemented.")
|
||||
|
||||
def _decode_text(self, result_ids):
|
||||
result_text = []
|
||||
for result in result_ids:
|
||||
if result[-1] == self.tokenizer.eos_id:
|
||||
result = result[:-1]
|
||||
result_text.append(self.tokenizer.decode(result))
|
||||
return result_text
|
||||
|
||||
|
||||
class VLLMFM9GBeamSearch(VLLMFM9GGeneration):
|
||||
def _decode(
|
||||
self,
|
||||
model_inputs,
|
||||
beam_size=3,
|
||||
max_length=100,
|
||||
min_length=0,
|
||||
repetition_penalty=1.0,
|
||||
length_penalty=1.0,
|
||||
temperature=1.0,
|
||||
repetition_window=None,
|
||||
):
|
||||
"""
|
||||
Beam search
|
||||
Args:
|
||||
model_inputs (dict): input ids.
|
||||
beam_size (int, optional, defaults to 3): beam size of beam search.
|
||||
max_length (int, optional, defaults to 100): maximum generation length.
|
||||
repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
|
||||
repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
|
||||
""" # noqa: E501
|
||||
# expand dimension
|
||||
batch_size = model_inputs["input_ids"].size(0)
|
||||
input: torch.Tensor = (
|
||||
model_inputs["input_ids"]
|
||||
.unsqueeze(1)
|
||||
.expand(batch_size, beam_size, -1)
|
||||
.contiguous()
|
||||
.view(batch_size * beam_size, -1)
|
||||
)
|
||||
length = (
|
||||
model_inputs["length"]
|
||||
.unsqueeze(1)
|
||||
.expand(batch_size, beam_size)
|
||||
.contiguous()
|
||||
.view(
|
||||
batch_size * beam_size,
|
||||
)
|
||||
)
|
||||
span: torch.Tensor = (
|
||||
model_inputs["span"]
|
||||
.unsqueeze(1)
|
||||
.expand(batch_size, beam_size, -1)
|
||||
.contiguous()
|
||||
.view(batch_size * beam_size, -1)
|
||||
)
|
||||
|
||||
context: torch.Tensor = (
|
||||
model_inputs["context"]
|
||||
.unsqueeze(1)
|
||||
.expand(batch_size, beam_size, -1)
|
||||
.contiguous()
|
||||
.view(batch_size * beam_size, -1)
|
||||
)
|
||||
|
||||
hidden_states: torch.Tensor = (
|
||||
model_inputs["hidden_states"]
|
||||
.unsqueeze(1)
|
||||
.expand(batch_size, beam_size, *model_inputs["hidden_states"].shape[1:])
|
||||
.contiguous()
|
||||
.view(batch_size * beam_size, *model_inputs["hidden_states"].shape[1:])
|
||||
)
|
||||
|
||||
done = [False for _ in range(batch_size)]
|
||||
|
||||
beam_scores = torch.zeros((batch_size, beam_size), dtype=torch.float, device=input.device)
|
||||
beam_scores[:, 1:] = -1e9
|
||||
beam_scores = beam_scores.view(-1)
|
||||
|
||||
# generated hypotheses
|
||||
generated_hyps = [
|
||||
BeamHypotheses(beam_size, max_length, length_penalty=length_penalty, early_stopping=False)
|
||||
for _ in range(batch_size)
|
||||
]
|
||||
|
||||
pred_start_index = input.size(-1)
|
||||
past_key_values = None
|
||||
|
||||
for i in range(max_length + 1):
|
||||
if i == 0:
|
||||
logits, _, past_key_values = self.model.llm.inference(
|
||||
input=input,
|
||||
context=context,
|
||||
span=span,
|
||||
length=length,
|
||||
past_key_values=past_key_values,
|
||||
hidden_states=hidden_states
|
||||
)
|
||||
else:
|
||||
logits, _, past_key_values = self.model.llm.inference(
|
||||
input=input[:, -1:],
|
||||
context=context,
|
||||
span=span,
|
||||
length=length,
|
||||
past_key_values=past_key_values
|
||||
)
|
||||
# skip all steps when we are done with each sentence
|
||||
if all(done):
|
||||
break
|
||||
|
||||
# (batch * beam, seqlen, model_dim)
|
||||
logits = logits[:, -1, :]
|
||||
if i == 0:
|
||||
logits[:, self.tokenizer.eos_id] = -float("inf")
|
||||
|
||||
apply_repetition_penalty(
|
||||
logits,
|
||||
batch_size,
|
||||
beam_size,
|
||||
input,
|
||||
repetition_penalty,
|
||||
pred_start_index,
|
||||
input.size(-1) - 1,
|
||||
repetition_window,
|
||||
)
|
||||
logits = logits / temperature
|
||||
if i < min_length:
|
||||
logits[:, self.tokenizer.eos_id] = -float("inf")
|
||||
|
||||
scores = F.log_softmax(logits, dim=-1)
|
||||
|
||||
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * beam_size, vocab_size)
|
||||
|
||||
# re-organize to group the beam together (we are keeping top hypothesis across beams)
|
||||
next_scores = next_scores.view(batch_size, -1) # (batch_size, beam_size * vocab_size)
|
||||
next_scores, next_words = torch.topk(next_scores, 2 * beam_size, dim=1, largest=True, sorted=True)
|
||||
|
||||
assert next_scores.size() == next_words.size() == (batch_size, 2 * beam_size)
|
||||
next_batch_beam = []
|
||||
|
||||
for sent_id in range(batch_size):
|
||||
# if we are done with this sentence
|
||||
done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(next_scores[sent_id].max().item(), i)
|
||||
if done[sent_id]:
|
||||
next_batch_beam.extend([(0, 0, 0)] * beam_size) # pad the batch
|
||||
continue
|
||||
|
||||
# next sentence beam content
|
||||
next_sent_beam = []
|
||||
|
||||
# next words for this sentence
|
||||
for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
|
||||
# get beam and word IDs
|
||||
beam_id = torch.div(idx, scores.size(-1), rounding_mode="floor")
|
||||
word_id = idx % scores.size(-1)
|
||||
|
||||
# end of sentence, or next word
|
||||
if word_id == self.tokenizer.eos_id or i == max_length:
|
||||
generated_hyps[sent_id].add(
|
||||
input[sent_id * beam_size + beam_id, pred_start_index:].clone().cpu().tolist(),
|
||||
value.item(),
|
||||
)
|
||||
else:
|
||||
next_sent_beam.append((value, word_id, sent_id * beam_size + beam_id))
|
||||
|
||||
# the beam for next step is full
|
||||
if len(next_sent_beam) == beam_size:
|
||||
break
|
||||
|
||||
# update next beam content
|
||||
assert len(next_sent_beam) == 0 if i == max_length else len(next_sent_beam) == beam_size
|
||||
if len(next_sent_beam) == 0:
|
||||
next_sent_beam = [(0, 0, 0)] * beam_size # pad the batch
|
||||
next_batch_beam.extend(next_sent_beam)
|
||||
assert len(next_batch_beam) == beam_size * (sent_id + 1)
|
||||
|
||||
# we have reached the last step
|
||||
if i == max_length:
|
||||
break
|
||||
|
||||
# sanity check / prepare next batch
|
||||
assert len(next_batch_beam) == batch_size * beam_size
|
||||
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
|
||||
beam_words = input.new([x[1] for x in next_batch_beam])
|
||||
beam_idx = torch.tensor([x[2] for x in next_batch_beam], device=input.device).long()
|
||||
# re-order batch and internal states
|
||||
input = input[beam_idx, :]
|
||||
|
||||
past_key_values["buffer"] = [list(each) if each is not None else each for each in past_key_values["buffer"]] # type: ignore # noqa: E501
|
||||
for key_value_layer in past_key_values["buffer"]:
|
||||
if key_value_layer is not None:
|
||||
key_value_layer[0] = key_value_layer[0][beam_idx]
|
||||
key_value_layer[1] = key_value_layer[1][beam_idx]
|
||||
|
||||
input = torch.cat([input, beam_words.unsqueeze(1)], dim=-1)
|
||||
context = torch.cat(
|
||||
[context, context[:, -1:]],
|
||||
dim=-1,
|
||||
)
|
||||
length += 1
|
||||
|
||||
span = torch.cat([span, span[:, -1:]], dim=-1)
|
||||
|
||||
# select the best hypotheses
|
||||
|
||||
results = []
|
||||
for i, hypotheses in enumerate(generated_hyps):
|
||||
best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
|
||||
results.append(best_hyp)
|
||||
|
||||
result_text = self._decode_text(results)
|
||||
return result_text
|
||||
|
||||
|
||||
class VLLMFM9GRandomSampling(VLLMFM9GGeneration):
|
||||
def _decode(
|
||||
self,
|
||||
model_inputs,
|
||||
min_length=24,
|
||||
max_length=50,
|
||||
top_k=0.0,
|
||||
top_p=1.0,
|
||||
temperature=1,
|
||||
repetition_penalty=1.0,
|
||||
repetition_window=None
|
||||
):
|
||||
"""
|
||||
Top-k and top-p sampling.
|
||||
Args:
|
||||
model_inputs (dict): input ids
|
||||
max_length (int, optional, defaults to 50): maximum generation length
|
||||
top_k (int, optional, defaults to 0): keep only top k tokens with highest probability. 0 means keeping all tokens.
|
||||
top_p (float, optional, defaults to 1.0): keep the top tokens with cumulative probability >= top_p.
|
||||
temperature (float, optional, defaults to 1.0): value used to temper the logits distribution before sampling.
|
||||
repetition_penalty (float, optional, defaults to 1.0): repetition penalty coefficient, 1.0 means no penalty.
|
||||
repetition_window (int, optional, defaults to None): window size of repetition penalty, None means that all output tokens are penalized.
|
||||
""" # noqa: E501
|
||||
max_length += 1
|
||||
|
||||
input = model_inputs["input_ids"]
|
||||
context = model_inputs["context"]
|
||||
length = model_inputs["length"]
|
||||
span = model_inputs["span"]
|
||||
hidden_states = model_inputs["hidden_states"]
|
||||
batch_size = input.size(0)
|
||||
|
||||
pred_start_index = input.size(-1)
|
||||
past_key_values = None
|
||||
done = [False for _ in range(batch_size)]
|
||||
results = [None for _ in range(batch_size)]
|
||||
for i in range(max_length):
|
||||
if i == 0:
|
||||
logits, _, past_key_values = self.model.llm.inference(
|
||||
input=input,
|
||||
context=context,
|
||||
length=length,
|
||||
span=span,
|
||||
past_key_values=past_key_values,
|
||||
hidden_states=hidden_states
|
||||
)
|
||||
else:
|
||||
logits, _, past_key_values = self.model.llm.inference(
|
||||
input=input[:, -1:],
|
||||
context=context,
|
||||
length=length,
|
||||
span=span,
|
||||
past_key_values=past_key_values
|
||||
)
|
||||
|
||||
logits = logits[:, -1, :]
|
||||
|
||||
if i == 0:
|
||||
logits[:, self.tokenizer.eos_id] = -float("inf")
|
||||
|
||||
apply_repetition_penalty(
|
||||
logits,
|
||||
batch_size,
|
||||
1,
|
||||
input,
|
||||
repetition_penalty,
|
||||
pred_start_index,
|
||||
input.size(-1) - 1,
|
||||
repetition_window,
|
||||
)
|
||||
|
||||
logits = logits / temperature
|
||||
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
|
||||
|
||||
probs = F.softmax(logits, dim=-1)
|
||||
next_token = torch.multinomial(probs, num_samples=1)
|
||||
|
||||
for idx in range(batch_size):
|
||||
if not done[idx] and (next_token[idx].item() == self.tokenizer.eos_id or i == max_length - 1):
|
||||
done[idx] = True
|
||||
results[idx] = input[idx, pred_start_index:].clone().cpu().tolist() # type: ignore # noqa: E501
|
||||
|
||||
if sum(done) == batch_size:
|
||||
break
|
||||
|
||||
# update input ids
|
||||
input = torch.cat([input, next_token], dim=-1)
|
||||
length += 1
|
||||
|
||||
context = torch.cat(
|
||||
[context, context[:, -1:]],
|
||||
dim=-1,
|
||||
)
|
||||
span = torch.cat(
|
||||
[span, span[:, -1:]],
|
||||
dim=-1,
|
||||
)
|
||||
|
||||
result_text = self._decode_text(results)
|
||||
return result_text
|
|
@ -11,5 +11,4 @@ from .position_embedding import ChatGLMRotaryEmbedding
|
|||
from .position_embedding import RotaryEmbedding
|
||||
from .position_embedding import RotaryEmbeddingESM
|
||||
from .position_embedding import SegmentPositionEmbedding
|
||||
from .transformer import Encoder
|
||||
#from _attention_pp_sp import OpAttnPipeSP
|
||||
from .transformer import Encoder
|
|
@ -0,0 +1,283 @@
|
|||
import math
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
|
||||
try:
|
||||
from .flash_triton import FlashAttnFunc
|
||||
except Exception:
|
||||
FlashAttnFunc = None
|
||||
import torch
|
||||
from einops import rearrange
|
||||
|
||||
from .linear import Linear
|
||||
from .position_embedding import apply_chatglm_rotary_pos_emb
|
||||
|
||||
try:
|
||||
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
|
||||
except Exception:
|
||||
flash_attn_unpadded_func = None
|
||||
try:
|
||||
from flash_attn.flash_attn_interface import flash_attn_varlen_func
|
||||
except Exception:
|
||||
flash_attn_varlen_func = None
|
||||
|
||||
try:
|
||||
from flash_attn.bert_padding import pad_input
|
||||
from flash_attn.bert_padding import unpad_input
|
||||
except Exception:
|
||||
pad_input = None
|
||||
unpad_input = None
|
||||
|
||||
|
||||
class FlashSelfAttention(torch.nn.Module):
|
||||
"""Implement the scaled dot product attention with softmax.
|
||||
Arguments
|
||||
---------
|
||||
softmax_scale: The temperature to use for the softmax attention.
|
||||
(default: 1/sqrt(d_keys) where d_keys is computed at
|
||||
runtime)
|
||||
attention_dropout: The dropout rate to apply to the attention
|
||||
(default: 0.0)
|
||||
"""
|
||||
|
||||
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
|
||||
super().__init__()
|
||||
assert flash_attn_unpadded_func is not None, (
|
||||
"Please install FlashAttention first, " "e.g., with pip install flash-attn"
|
||||
)
|
||||
assert rearrange is not None, "Please install einops first, e.g., with pip install einops"
|
||||
self.causal = causal
|
||||
self.softmax_scale = softmax_scale
|
||||
self.dropout_p = attention_dropout
|
||||
|
||||
def forward(self, q, k, v, attention_mask=None, length_mask=None, context_mask=None):
|
||||
"""Implements the multihead softmax attention.
|
||||
Arguments
|
||||
---------
|
||||
q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
|
||||
"""
|
||||
assert q.dtype in [torch.float16, torch.bfloat16], q.dtype
|
||||
assert q.is_cuda
|
||||
batch_size, seqlen = q.shape[0], q.shape[1]
|
||||
d = q.shape[-1]
|
||||
|
||||
if length_mask is not None:
|
||||
q, k, v = [rearrange(x, "b s h d -> b s (h d)") for x in [q, k, v]]
|
||||
q, indices_q, cu_seqlens, max_s = unpad_input(q, length_mask)
|
||||
k, _, _, _ = unpad_input(k, length_mask)
|
||||
v, _, _, _ = unpad_input(v, length_mask)
|
||||
q, k, v = [rearrange(x, "nnz (h d) -> nnz h d", d=d) for x in [q, k, v]]
|
||||
output = flash_attn_unpadded_func(
|
||||
q,
|
||||
k,
|
||||
v,
|
||||
cu_seqlens,
|
||||
cu_seqlens,
|
||||
max_s,
|
||||
max_s,
|
||||
self.dropout_p if self.training else 0.0,
|
||||
softmax_scale=self.softmax_scale,
|
||||
causal=self.causal,
|
||||
attention_mask=attention_mask,
|
||||
context_mask=context_mask[:, :max_s],
|
||||
)
|
||||
# TODO reimplement (un)pad_input to remove redundant rearranges.
|
||||
output = rearrange(output, "nnz h d -> nnz (h d)")
|
||||
output = pad_input(output, indices_q, batch_size, seqlen)
|
||||
output = rearrange(output, "b s (h d) -> b s h d", d=d)
|
||||
else:
|
||||
q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]]
|
||||
max_s = seqlen
|
||||
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, device=q.device)
|
||||
|
||||
output = flash_attn_unpadded_func(
|
||||
q,
|
||||
k,
|
||||
v,
|
||||
cu_seqlens,
|
||||
cu_seqlens,
|
||||
max_s,
|
||||
max_s,
|
||||
self.dropout_p if self.training else 0.0,
|
||||
softmax_scale=self.softmax_scale,
|
||||
causal=self.causal,
|
||||
attention_mask=attention_mask,
|
||||
context_mask=context_mask,
|
||||
)
|
||||
output = rearrange(output, "(b s) ... -> b s ...", b=batch_size)
|
||||
return output
|
||||
|
||||
|
||||
class Attention(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim_model: int,
|
||||
num_heads: int,
|
||||
num_kv_heads: int,
|
||||
dim_head: int,
|
||||
dtype: torch.dtype = torch.half,
|
||||
dropout_p: Optional[float] = None,
|
||||
scale: bool = True,
|
||||
add_qkv_bias: bool = False,
|
||||
use_flash_attn: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.dim_model = dim_model
|
||||
self.num_heads = num_heads
|
||||
self.num_kv_heads = num_kv_heads
|
||||
self.head_groups = num_heads // num_kv_heads
|
||||
self.dim_head = dim_head
|
||||
|
||||
self.project_q = Linear(
|
||||
self.dim_model, self.num_heads * self.dim_head, bias=add_qkv_bias, dtype=dtype, scale=scale
|
||||
)
|
||||
self.project_k = Linear(
|
||||
self.dim_model, self.num_kv_heads * self.dim_head, bias=add_qkv_bias, dtype=dtype, scale=scale
|
||||
)
|
||||
self.project_v = Linear(
|
||||
self.dim_model, self.num_kv_heads * self.dim_head, bias=add_qkv_bias, dtype=dtype, scale=scale
|
||||
)
|
||||
|
||||
self.attention_out = Linear(self.num_heads * self.dim_head, self.dim_model, dtype=dtype, scale=scale)
|
||||
|
||||
self.softmax = torch.nn.Softmax(dim=-1)
|
||||
|
||||
if dropout_p is not None:
|
||||
self.dropout = torch.nn.Dropout(p=dropout_p)
|
||||
self.dropout_p = dropout_p
|
||||
else:
|
||||
self.dropout = None
|
||||
|
||||
# if use_flash_attn:
|
||||
# self.core_attention_flash = FlashSelfAttention(causal=False, attention_dropout=0.0)
|
||||
self.use_flash_attn = use_flash_attn
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_q: torch.Tensor,
|
||||
hidden_kv: torch.Tensor,
|
||||
attention_mask: torch.BoolTensor,
|
||||
position_bias: torch.Tensor,
|
||||
use_cache: bool = False,
|
||||
past_kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
||||
pos_bias_type: Optional[str] = "relative",
|
||||
length_mask: Optional[torch.Tensor] = None,
|
||||
context_mask: Optional[torch.Tensor] = None,
|
||||
attention_mask_bias: Optional[torch.Tensor] = None,
|
||||
cu_seqlens: Optional[torch.Tensor] = None,
|
||||
max_seqlen: int = None,
|
||||
position_ids: Optional[torch.Tensor] = None,
|
||||
):
|
||||
"""This model inherits from bmt.DistributedModule.
|
||||
Args:
|
||||
hidden_q (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): Hidden states used to compute the queries.
|
||||
hidden_kv (:obj:`torch.Tensor` of shape ``(batch, len_k, dim_model)``): Hidden states used to compute the keys and values.
|
||||
attention_mask (:obj:`torch.Tensor` of shape ``(batch, len_q, len_k)``): Used to avoid performing attention on padding token indices.
|
||||
position_bias(:obj:`torch.Tensor` of shape ``(num_heads, len_q, len_k)`` or ``(1, num_heads, len_k, len_q)``): Provide positional information about tensor `key_value` and `query`.
|
||||
Return:
|
||||
out (:obj:`torch.Tensor` of shape ``(batch, len_q, dim_model)``): The attention output.
|
||||
""" # noqa: E501
|
||||
|
||||
batch_size = hidden_q.size(0)
|
||||
len_q = hidden_q.size(1)
|
||||
len_k = hidden_kv.size(1)
|
||||
|
||||
h_q = self.project_q(hidden_q)
|
||||
h_k = self.project_k(hidden_kv)
|
||||
h_v = self.project_v(hidden_kv)
|
||||
|
||||
if not self.use_flash_attn:
|
||||
h_q = h_q / math.sqrt(math.sqrt(self.dim_head))
|
||||
h_k = h_k / math.sqrt(math.sqrt(self.dim_head))
|
||||
|
||||
h_q = h_q.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
|
||||
h_k = h_k.view(batch_size, len_k, self.num_kv_heads, self.dim_head).permute(0, 2, 1, 3)
|
||||
h_v = h_v.view(batch_size, len_k, self.num_kv_heads, self.dim_head).permute(0, 2, 1, 3)
|
||||
|
||||
if pos_bias_type == "rotary":
|
||||
# b h s d
|
||||
h_q, h_k = position_bias(h_q, h_k, -2, offset=past_kv[0].size(-2) if past_kv is not None else 0)
|
||||
elif pos_bias_type == "chatglm_rotary":
|
||||
h_q = apply_chatglm_rotary_pos_emb(h_q, position_bias)
|
||||
h_k = apply_chatglm_rotary_pos_emb(h_k, position_bias)
|
||||
|
||||
if past_kv is not None:
|
||||
h_k = torch.cat([past_kv[0], h_k], dim=-2)
|
||||
h_v = torch.cat([past_kv[1], h_v], dim=-2)
|
||||
len_k = h_k.size(-2)
|
||||
|
||||
# (b, n_h, len_q, d_h) @ (b, n_h, d_h, len_k) -> (b, n_h, len_q, len_k)
|
||||
# (b, n_kv_h, n_h_groups*len_q, d_h) @ (b, n_kv_h, d_h, len_k) -> (b, n_kv_h, n_h_groups*len_q, len_k) -> (b, n_h, len_q, len_k)
|
||||
if self.head_groups == 1:
|
||||
score = torch.matmul(h_q, h_k.transpose(-1, -2))  # / math.sqrt(self.dim_head) already applied to h_q and h_k above
|
||||
else:
|
||||
score = torch.matmul(
|
||||
h_q.reshape(batch_size, self.num_kv_heads, self.head_groups * len_q, self.dim_head),
|
||||
h_k.transpose(-1, -2),
|
||||
).view(
|
||||
batch_size, self.num_heads, len_q, len_k
|
||||
)  # / math.sqrt(self.dim_head) already applied to h_q and h_k above
|
||||
if pos_bias_type == "relative":
|
||||
if len_q == 1: # inference with cache
|
||||
if len(position_bias.size()) == 4:
|
||||
position_bias = position_bias[:, :, -1:, :]
|
||||
else:
|
||||
position_bias = position_bias[:, -1:, :]
|
||||
score = score + position_bias
|
||||
score = torch.masked_fill(
|
||||
score,
|
||||
attention_mask.view(batch_size, 1, len_q, len_k) == False,
|
||||
torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
|
||||
)
|
||||
|
||||
score = self.softmax(score)
|
||||
|
||||
score = torch.masked_fill(
|
||||
score,
|
||||
attention_mask.view(batch_size, 1, len_q, len_k) == False,
|
||||
torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
|
||||
)
|
||||
|
||||
if self.dropout is not None:
|
||||
score = self.dropout(score)
|
||||
|
||||
# (b, n_h, len_q, len_k) @ (b, n_h, len_k, d_h) -> (b, n_h, len_q, d_h)
|
||||
# (b, n_kv_h, n_h_groups*len_q, len_k) @ (b, n_kv_h, len_k, d_h) -> (b, n_kv_h, n_h_groups*len_q, d_h) -> (b, n_h, len_q, d_h)
|
||||
score = torch.matmul(score.view(batch_size, self.num_kv_heads, self.head_groups * len_q, len_k), h_v).view(
|
||||
batch_size, self.num_heads, len_q, self.dim_head
|
||||
)
|
||||
|
||||
score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
|
||||
score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)
|
||||
|
||||
else:
|
||||
if attention_mask_bias is not None:
|
||||
assert pos_bias_type == "rotary"
|
||||
h_q = h_q.view(batch_size, len_q, self.num_heads, self.dim_head) # .permute(0, 2, 1, 3)
|
||||
h_k = h_k.view(batch_size, len_k, self.num_kv_heads, self.dim_head) # .permute(0, 2, 1, 3)
|
||||
h_v = h_v.view(batch_size, len_k, self.num_kv_heads, self.dim_head) # .permute(0, 2, 1, 3)
|
||||
h_q, h_k = position_bias(h_q, h_k, -3)
|
||||
score = FlashAttnFunc.apply(h_q, h_k, h_v, attention_mask_bias, False, None)
|
||||
else:
|
||||
if pos_bias_type == "chatglm_rotary":
|
||||
raise NotImplementedError("No FlashAttn version for ChatGLM at present!")
|
||||
h_q = h_q.view(len_q, self.num_heads, self.dim_head) # .permute(0, 2, 1, 3)
|
||||
h_k = h_k.view(len_k, self.num_kv_heads, self.dim_head) # .permute(0, 2, 1, 3)
|
||||
h_v = h_v.view(len_k, self.num_kv_heads, self.dim_head) # .permute(0, 2, 1, 3)
|
||||
h_q, h_k = position_bias(
|
||||
h_q, h_k, -3, cu_seqlens=cu_seqlens, max_length=max_seqlen, position_ids=position_ids
|
||||
)
|
||||
|
||||
score = flash_attn_varlen_func(
|
||||
h_q, h_k, h_v, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen, self.dropout_p, causal=True
|
||||
)
|
||||
|
||||
score = score.view(batch_size, len_q, self.num_heads * self.dim_head)
|
||||
|
||||
score = self.attention_out(score)
|
||||
|
||||
if use_cache:
|
||||
return score, (h_k, h_v)
|
||||
else:
|
||||
return score
|
|
@ -2,7 +2,6 @@ from typing import Optional
|
|||
from typing import Tuple
|
||||
from typing import Union
|
||||
|
||||
import bmtrain as bmt
|
||||
import torch
|
||||
|
||||
from .attention import Attention
|
||||
|
@ -12,7 +11,7 @@ from .position_embedding import RotaryEmbedding
|
|||
from .position_embedding import RotaryEmbeddingESM
|
||||
|
||||
|
||||
class SelfAttentionBlock(bmt.DistributedModule):
|
||||
class SelfAttentionBlock(torch.nn.Module):
|
||||
"""The whole cross-attention block. A sequence of operation. Consists of layernorm, self-attention and residual connection.
|
||||
|
||||
Args:
|
||||
|
@ -31,12 +30,11 @@ class SelfAttentionBlock(bmt.DistributedModule):
|
|||
num_kv_heads: int,
|
||||
dim_head: int,
|
||||
dtype=torch.half,
|
||||
eps: float = 1e-5,
|
||||
eps: float = 1e-6,
|
||||
dropout_p: Optional[float] = None,
|
||||
scale: bool = True,
|
||||
add_qkv_bias: bool = False,
|
||||
use_flash_attn: bool = False,
|
||||
tp: int = 0,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
|
@ -56,7 +54,6 @@ class SelfAttentionBlock(bmt.DistributedModule):
|
|||
scale=scale,
|
||||
add_qkv_bias=add_qkv_bias,
|
||||
use_flash_attn=use_flash_attn,
|
||||
tp=tp,
|
||||
)
|
||||
|
||||
if dropout_p:
|
||||
|
@ -73,6 +70,7 @@ class SelfAttentionBlock(bmt.DistributedModule):
|
|||
past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
||||
pos_bias_type: Optional[str] = "relative",
|
||||
length_mask: Optional[torch.Tensor] = None,
|
||||
context_mask: Optional[torch.Tensor] = None,
|
||||
attention_mask_bias: Optional[torch.Tensor] = None,
|
||||
cu_seqlens: Optional[torch.Tensor] = None,
|
||||
max_seqlen: int = None,
|
||||
|
@ -98,6 +96,7 @@ class SelfAttentionBlock(bmt.DistributedModule):
|
|||
past_key_value,
|
||||
pos_bias_type=pos_bias_type,
|
||||
length_mask=length_mask,
|
||||
context_mask=context_mask,
|
||||
attention_mask_bias=attention_mask_bias,
|
||||
cu_seqlens=cu_seqlens,
|
||||
max_seqlen=max_seqlen,
|
||||
|
@ -137,7 +136,6 @@ class FFNBlock(torch.nn.Module):
|
|||
eps: float = 1e-6,
|
||||
dropout_p: Optional[float] = 0,
|
||||
scale: bool = True,
|
||||
tp: int = 0,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
|
@ -154,7 +152,6 @@ class FFNBlock(torch.nn.Module):
|
|||
dtype=dtype,
|
||||
dropout_p=dropout_p,
|
||||
scale=scale,
|
||||
tp=tp,
|
||||
)
|
||||
|
||||
if dropout_p:
|
||||
|
@ -178,7 +175,9 @@ class FFNBlock(torch.nn.Module):
|
|||
x = self.ffn(x)
|
||||
if self.dropout is not None:
|
||||
x = self.dropout(x)
|
||||
hidden_states = hidden_states + x # / 1.05
|
||||
|
||||
hidden_states = hidden_states + x
|
||||
|
||||
return hidden_states
|
||||
|
||||
|
||||
|
@ -211,7 +210,6 @@ class TransformerBlock(torch.nn.Module):
|
|||
mask_att: bool = False,
|
||||
mask_ffn: bool = False,
|
||||
use_flash_attn: bool = False,
|
||||
tp: int = 0,
|
||||
):
|
||||
super().__init__()
|
||||
self.mask_att = mask_att
|
||||
|
@ -229,7 +227,6 @@ class TransformerBlock(torch.nn.Module):
|
|||
scale=scale,
|
||||
add_qkv_bias=add_qkv_bias,
|
||||
use_flash_attn=use_flash_attn,
|
||||
tp=tp,
|
||||
)
|
||||
|
||||
if not self.mask_ffn:
|
||||
|
@ -241,7 +238,6 @@ class TransformerBlock(torch.nn.Module):
|
|||
eps=eps,
|
||||
dropout_p=dropout_p,
|
||||
scale=scale,
|
||||
tp=tp,
|
||||
)
|
||||
|
||||
def forward(
|
||||
|
@ -253,6 +249,7 @@ class TransformerBlock(torch.nn.Module):
|
|||
past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
|
||||
pos_bias_type: Optional[str] = "relative",
|
||||
length_mask: Optional[torch.Tensor] = None,
|
||||
context_mask: Optional[torch.Tensor] = None,
|
||||
attention_mask_bias: Optional[torch.Tensor] = None,
|
||||
cu_seqlens: Optional[torch.Tensor] = None,
|
||||
max_seqlen: Optional[torch.Tensor] = None,
|
||||
|
@ -279,12 +276,12 @@ class TransformerBlock(torch.nn.Module):
|
|||
past_key_value=past_key_value,
|
||||
pos_bias_type=pos_bias_type,
|
||||
length_mask=length_mask,
|
||||
context_mask=context_mask,
|
||||
attention_mask_bias=attention_mask_bias,
|
||||
cu_seqlens=cu_seqlens,
|
||||
max_seqlen=max_seqlen,
|
||||
position_ids=position_ids,
|
||||
)
|
||||
|
||||
if use_cache:
|
||||
hidden_states, current_key_value = hidden_states
|
||||
else:
|
||||
|
@ -297,4 +294,4 @@ class TransformerBlock(torch.nn.Module):
|
|||
if use_cache:
|
||||
return hidden_states, current_key_value
|
||||
else:
|
||||
return hidden_states
|
||||
return hidden_states
|
|
@ -1,14 +1,13 @@
|
|||
import math
|
||||
from typing import Optional
|
||||
|
||||
import bmtrain as bmt
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
from .position_embedding import RotaryEmbedding
|
||||
|
||||
|
||||
class Embedding(bmt.DistributedModule):
|
||||
class Embedding(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
vocab_size: int,
|
||||
|
@ -21,10 +20,7 @@ class Embedding(bmt.DistributedModule):
|
|||
super().__init__()
|
||||
|
||||
self.dim_model = embedding_size
|
||||
self.weight = bmt.DistributedParameter(
|
||||
torch.empty(vocab_size, embedding_size, dtype=dtype),
|
||||
init_method=bmt.ParameterInitializer(torch.nn.init.normal_, mean=init_mean, std=init_std),
|
||||
)
|
||||
self.weight = torch.nn.parameter.Parameter(torch.empty(vocab_size, embedding_size, dtype=dtype))
|
||||
self.scale = scale
|
||||
|
||||
def forward(self, ids: torch.Tensor):
|
||||
|
@ -39,7 +35,7 @@ class Embedding(bmt.DistributedModule):
|
|||
embeds = F.embedding(ids, self.weight) / math.sqrt(self.dim_model)
|
||||
else:
|
||||
embeds = F.embedding(ids, self.weight)
|
||||
return embeds
|
||||
return embeds.clone()
|
||||
|
||||
def projection(self, x: torch.Tensor):
|
||||
"""
|
||||
|
@ -56,7 +52,7 @@ class Embedding(bmt.DistributedModule):
|
|||
return logits
|
||||
|
||||
|
||||
class EmbeddingExt(bmt.DistributedModule):
|
||||
class EmbeddingExt(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
vocab_size: int,
|
||||
|
@ -71,10 +67,10 @@ class EmbeddingExt(bmt.DistributedModule):
|
|||
self.dim_model = embedding_size
|
||||
self.rotary_emb = RotaryEmbedding(dim=embedding_size, distance_scale=distance_scale, dtype=dtype)
|
||||
|
||||
self.weight = bmt.DistributedParameter(
|
||||
self.weight = torch.nn.parameter.Parameter(
|
||||
torch.empty(vocab_size, embedding_size, dtype=dtype),
|
||||
init_method=bmt.ParameterInitializer(torch.nn.init.normal_, mean=init_mean, std=init_std),
|
||||
)
|
||||
torch.nn.init.normal_(self.weight, mean=init_mean, std=init_std)
|
||||
|
||||
def forward(self, ids: torch.Tensor, ids_sub: torch.Tensor):
|
||||
"""
|
||||
|
@ -101,69 +97,4 @@ class EmbeddingExt(bmt.DistributedModule):
|
|||
if ext_table is not None:
|
||||
logits_ext = F.linear(x, ext_table)
|
||||
logits = torch.cat([logits, logits_ext], dim=-1)
|
||||
return logits
|
||||
|
||||
|
||||
class VocabParallelEmbedding(bmt.DistributedModule):
|
||||
def __init__(
|
||||
self,
|
||||
vocab_size: int,
|
||||
embedding_size: int,
|
||||
dtype: torch.dtype = torch.half,
|
||||
scale: bool = True,
|
||||
init_mean: float = 0.0,
|
||||
init_std: float = 1,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
self.dim_model = embedding_size
|
||||
assert vocab_size % config["tp_size"] == 0
|
||||
self.vocab_size_per_partition = vocab_size // config["tp_size"]
|
||||
self.start_index = config["tp_rank"] * self.vocab_size_per_partition
|
||||
self.end_index = (config["tp_rank"] + 1) * self.vocab_size_per_partition
|
||||
self.weight = bmt.DistributedParameter(
|
||||
torch.empty(self.vocab_size_per_partition, embedding_size, dtype=dtype),
|
||||
init_method=bmt.ParameterInitializer(torch.nn.init.normal_, mean=init_mean, std=init_std),
|
||||
tp_split_dim=0,
|
||||
tp_mode=True,
|
||||
)
|
||||
|
||||
def forward(self, ids: torch.Tensor, gather_input=True):
|
||||
"""
|
||||
Args:
|
||||
ids (:obj:`torch.Tensor` of shape ``(batch_size, seq_len)``): Indices of input sequence tokens.
|
||||
gather_input (bool) : whether gather input is required between tensor parallel group)
|
||||
Return:
|
||||
:obj:`torch.Tensor` of shape ``(batch_size, seq_len, embedding_size)``: The embedding output.
|
||||
""" # noqa: E501
|
||||
|
||||
if gather_input:
|
||||
ids = all_gather(ids, comm=config["tp_comm"])
|
||||
input_mask = (ids < self.start_index) | (ids >= self.end_index)
|
||||
ids = ids.clone() - self.start_index
|
||||
ids[input_mask] = 0
|
||||
|
||||
embeds = F.embedding(ids, self.weight)
|
||||
|
||||
embeds[input_mask, :] = 0.0
|
||||
embeds = all_reduce(embeds, op="sum", comm=config["tp_comm"])
|
||||
embed_list = embeds.chunk(config["tp_size"], dim=0)
|
||||
embeds = embed_list[config["tp_rank"]].flatten(0, 1)
|
||||
|
||||
if self.scale:
|
||||
embeds = embeds / math.sqrt(self.dim_model)
|
||||
|
||||
return embeds
|
||||
|
||||
def projection(self, x: torch.Tensor, gather_output=False, gather_input=True):
|
||||
"""
|
||||
Projection based on embedding's weight. For example, embedding map vocab_size to embed_size, than projection map embed_size back to vocab_size.
|
||||
Args:
|
||||
x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_model)``): Input of projection
|
||||
Returns:
|
||||
:obj:`torch.Tensor` of shape ``(batch, seq_len, vocab_output_size)``: The projection output.
|
||||
""" # noqa: E501
|
||||
if self.scale:
|
||||
x = x / math.sqrt(self.dim_model)
|
||||
out = bmt.nn.OpParallelLinear.apply(x, self.weight, None, gather_input, gather_output, False, None)
|
||||
return out
|
||||
return logits
|
|
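The VocabParallelEmbedding above shards the vocabulary across tensor-parallel ranks: each rank holds only rows [start_index, end_index) of the table, zeroes the embeddings of ids owned by other ranks, and relies on a cross-rank sum to recover the full lookup. A minimal single-process sketch of that masking step (names and sizes here are illustrative, not from the repo):

```python
import torch
import torch.nn.functional as F

vocab_size, dim, tp_size, tp_rank = 8, 4, 2, 0
part = vocab_size // tp_size                       # rows held by this rank
start, end = tp_rank * part, (tp_rank + 1) * part  # local shard covers [start, end)
weight = torch.randn(part, dim)                    # local slice of the embedding table

ids = torch.tensor([[1, 5, 3, 7]])                 # token ids over the full vocabulary
mask = (ids < start) | (ids >= end)                # ids owned by some other rank
local_ids = ids.clone() - start
local_ids[mask] = 0                                # dummy row for foreign ids

embeds = F.embedding(local_ids, weight)
embeds[mask, :] = 0.0                              # zero them so the cross-rank sum is exact
# in the real module this is followed by all_reduce(embeds, op="sum") over the TP group
```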
@ -1,13 +1,11 @@
|
|||
from typing import Optional
|
||||
|
||||
import bmtrain as bmt
|
||||
import torch
|
||||
|
||||
from .linear import LastLinear
|
||||
from .linear import Linear
|
||||
|
||||
|
||||
class DenseGatedACT(bmt.DistributedModule):
|
||||
class DenseGatedACT(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim_in: int,
|
||||
|
@ -15,7 +13,6 @@ class DenseGatedACT(bmt.DistributedModule):
|
|||
activate_fn: str = "gelu",
|
||||
scale: bool = True,
|
||||
dtype=torch.half,
|
||||
tp: int = 0,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
|
@ -25,7 +22,6 @@ class DenseGatedACT(bmt.DistributedModule):
|
|||
dtype=dtype,
|
||||
scale=scale,
|
||||
scale_before=False,
|
||||
tp=tp,
|
||||
)
|
||||
|
||||
self.w_1 = Linear(
|
||||
|
@ -34,9 +30,7 @@ class DenseGatedACT(bmt.DistributedModule):
|
|||
dtype=dtype,
|
||||
scale=scale,
|
||||
scale_before=False,
|
||||
tp=tp,
|
||||
)
|
||||
|
||||
if activate_fn == "gelu":
|
||||
self.act = torch.nn.GELU()
|
||||
elif activate_fn == "silu":
|
||||
|
@ -45,8 +39,7 @@ class DenseGatedACT(bmt.DistributedModule):
|
|||
raise NotImplementedError(f"{activate_fn} is not supported")
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
"""This model inherits from bmt.DistributedModule.
|
||||
Transform an input tensor from one feature space to another via a nonlinear operation
|
||||
"""Transform an input tensor from one feature space to another via a nonlinear operation
|
||||
|
||||
Args:
|
||||
x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): Tensor that will be subject to nonlinear operations.
|
||||
|
@ -62,7 +55,7 @@ class DenseGatedACT(bmt.DistributedModule):
|
|||
return x
|
||||
|
||||
|
||||
class FeedForward(bmt.DistributedModule):
|
||||
class FeedForward(torch.nn.Module):
|
||||
r"""FeedForward module
|
||||
|
||||
Args:
|
||||
|
@ -85,7 +78,6 @@ class FeedForward(bmt.DistributedModule):
|
|||
dtype=torch.half,
|
||||
dropout_p: Optional[float] = None,
|
||||
scale: bool = True,
|
||||
tp: int = 0,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
|
@ -95,7 +87,6 @@ class FeedForward(bmt.DistributedModule):
|
|||
activate_fn=activate_fn,
|
||||
dtype=dtype,
|
||||
scale=scale,
|
||||
tp=tp,
|
||||
)
|
||||
|
||||
if dropout_p is not None:
|
||||
|
@ -103,13 +94,12 @@ class FeedForward(bmt.DistributedModule):
|
|||
else:
|
||||
self.dropout = None
|
||||
|
||||
self.w_out = LastLinear(
|
||||
self.w_out = Linear(
|
||||
dim_in=dim_ff,
|
||||
dim_out=dim_model,
|
||||
dtype=dtype,
|
||||
scale=scale,
|
||||
scale_before=False,
|
||||
tp=tp * 2,
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
|
@ -127,4 +117,4 @@ class FeedForward(bmt.DistributedModule):
|
|||
|
||||
x = self.w_out(x)
|
||||
|
||||
return x
|
||||
return x
|
|
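The FeedForward module above composes DenseGatedACT (two parallel projections, one of which gates the other through the activation) with an output projection back to the model dimension. A minimal pure-PyTorch sketch of that gated pattern, assuming SiLU activation and no input scaling (dimensions are illustrative):

```python
import torch

class GatedFFNSketch(torch.nn.Module):
    def __init__(self, dim_model: int, dim_ff: int):
        super().__init__()
        self.w_gate = torch.nn.Linear(dim_model, dim_ff, bias=False)  # gate branch (w_0)
        self.w_up = torch.nn.Linear(dim_model, dim_ff, bias=False)    # value branch (w_1)
        self.w_out = torch.nn.Linear(dim_ff, dim_model, bias=False)   # back to model dim
        self.act = torch.nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.w_out(self.act(self.w_gate(x)) * self.w_up(x))

x = torch.randn(2, 5, 16)
print(GatedFFNSketch(16, 64)(x).shape)  # torch.Size([2, 5, 16])
```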
@ -1063,4 +1063,4 @@ class FlashAttnFunc(torch.autograd.Function):
|
|||
return dq, dk, dv, None, None, None
|
||||
|
||||
|
||||
flash_attn_func = FlashAttnFunc.apply
|
||||
flash_attn_func = FlashAttnFunc.apply
|
|
@ -1,8 +1,7 @@
|
|||
import bmtrain as bmt
|
||||
import torch
|
||||
|
||||
|
||||
@torch.jit.script
|
||||
@torch.jit.script # type: ignore
|
||||
def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
|
||||
old_dtype = hidden.dtype
|
||||
variance = hidden.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
|
||||
|
@ -10,21 +9,22 @@ def rms_layernorm(hidden: torch.Tensor, weight: torch.Tensor, eps: float):
|
|||
return hidden * weight
|
||||
|
||||
|
||||
class LayerNorm(bmt.DistributedModule):
|
||||
class LayerNorm(torch.nn.Module):
|
||||
"""RMS LayerNorm"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim_norm: int,
|
||||
dtype: torch.dtype = torch.half,
|
||||
eps: float = 1e-5,
|
||||
eps: float = 1e-6,
|
||||
init_var: float = 1.0,
|
||||
):
|
||||
|
||||
super().__init__()
|
||||
|
||||
self.eps = eps
|
||||
self.dim_norm = dim_norm
|
||||
self.weight = bmt.DistributedParameter(torch.full((dim_norm,), init_var, dtype=dtype))
|
||||
self.weight = torch.nn.parameter.Parameter(torch.full((dim_norm,), init_var, dtype=dtype))
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
"""
|
||||
|
@ -34,4 +34,4 @@ class LayerNorm(bmt.DistributedModule):
|
|||
:obj:`torch.Tensor` of shape ``(batch_size, seq_len, dim_norm)``: The layernorm output.
|
||||
""" # noqa: E501
|
||||
assert x.size(-1) == self.dim_norm
|
||||
return rms_layernorm(x, self.weight, self.eps)
|
||||
return rms_layernorm(x, self.weight, self.eps)
|
|
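rms_layernorm above normalizes by the root-mean-square of the hidden state in float32 (no mean subtraction, no bias) and rescales by a learned weight. A quick numeric check of the same formula (sizes are illustrative):

```python
import torch

def rms_norm(hidden: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = hidden.float().pow(2).mean(dim=-1, keepdim=True)
    normed = (hidden.float() * torch.rsqrt(variance + eps)).to(hidden.dtype)
    return normed * weight

x = torch.randn(2, 3, 8)
w = torch.ones(8)
out = rms_norm(x, w)
# after normalization the per-token RMS is ~1
print(out.pow(2).mean(dim=-1).sqrt())
```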
@ -0,0 +1,45 @@
|
|||
import math
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
class Linear(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim_in: int,
|
||||
dim_out: int,
|
||||
bias: bool = False,
|
||||
dtype: torch.dtype = torch.half,
|
||||
init_mean: float = 0.0,
|
||||
init_std: float = 1,
|
||||
scale: bool = True,
|
||||
scale_before: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
self.dim_in = self.in_features = dim_in
|
||||
self.dim_out = self.out_features = dim_out
|
||||
self.scale = scale
|
||||
self.scale_before = scale_before
|
||||
self.weight = torch.nn.parameter.Parameter(torch.empty((dim_out, dim_in), dtype=dtype))
|
||||
self.bias = None
|
||||
if bias:
|
||||
self.bias = torch.nn.parameter.Parameter(torch.empty(dim_out, dtype=dtype))
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
"""
|
||||
Args:
|
||||
x (:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_in)``): The input of linear layer
|
||||
Returns:
|
||||
:obj:`torch.Tensor` of shape ``(batch, seq_len, dim_out)``: The output of the linear transform y.
|
||||
""" # noqa: E501
|
||||
if self.scale:
|
||||
if self.scale_before:
|
||||
x = x / math.sqrt(self.dim_in)
|
||||
x = F.linear(x, self.weight, bias=self.bias)
|
||||
else:
|
||||
x = F.linear(x, self.weight, bias=self.bias)
|
||||
x = x / math.sqrt(self.dim_in)
|
||||
else:
|
||||
x = F.linear(x, self.weight, bias=self.bias)
|
||||
return x
|
|
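The new Linear layer optionally divides by sqrt(dim_in), either before or after the matmul, depending on scale_before. The two orderings are mathematically equivalent; presumably the flag exists to control the numeric range under half precision. A small check in float32 (values are illustrative):

```python
import math
import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 16)
w = torch.randn(8, 16)

scale_before = F.linear(x / math.sqrt(16), w)   # scale the input, then project
scale_after = F.linear(x, w) / math.sqrt(16)    # project, then scale the output
print(torch.allclose(scale_before, scale_after, atol=1e-5))  # True in fp32
```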
@ -2,17 +2,11 @@ import math
|
|||
from typing import Tuple
|
||||
from typing import Union
|
||||
|
||||
import bmtrain as bmt
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
try:
|
||||
from flash_attn.layers.rotary import apply_rotary_emb_func
|
||||
except:
|
||||
apply_rotary_emb_func = None
|
||||
|
||||
|
||||
class SegmentPositionEmbedding(bmt.DistributedModule):
|
||||
class SegmentPositionEmbedding(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
num_heads: int,
|
||||
|
@ -32,10 +26,10 @@ class SegmentPositionEmbedding(bmt.DistributedModule):
|
|||
self.bidirectional = bidirectional
|
||||
self.num_segments = num_segments
|
||||
|
||||
self.relative_attention_bias = bmt.DistributedParameter(
|
||||
torch.empty(num_segments * num_segments + num_buckets, num_heads, dtype=dtype),
|
||||
init_method=bmt.ParameterInitializer(torch.nn.init.normal_, mean=init_mean, std=init_std),
|
||||
self.relative_attention_bias = torch.nn.parameter.Parameter(
|
||||
torch.empty(num_segments * num_segments + num_buckets, num_heads, dtype=dtype)
|
||||
)
|
||||
torch.nn.init.normal_(self.relative_attention_bias, mean=init_mean, std=init_std)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
|
@ -107,7 +101,7 @@ class SegmentPositionEmbedding(bmt.DistributedModule):
|
|||
return relative_buckets
|
||||
|
||||
|
||||
class BucketPositionBias(bmt.DistributedModule):
|
||||
class BucketPositionBias(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
num_heads: int,
|
||||
|
@ -125,10 +119,10 @@ class BucketPositionBias(bmt.DistributedModule):
|
|||
self.num_segment_bucket = num_segment_bucket
|
||||
self.max_distance = max_distance
|
||||
|
||||
self.relative_attention_bias = bmt.DistributedParameter(
|
||||
torch.empty(num_buckets + num_segment_bucket, num_heads, dtype=dtype),
|
||||
init_method=bmt.ParameterInitializer(torch.nn.init.normal_, mean=init_mean, std=init_std),
|
||||
self.relative_attention_bias = torch.nn.parameter.Parameter(
|
||||
torch.empty(num_buckets + num_segment_bucket, num_heads, dtype=dtype)
|
||||
)
|
||||
torch.nn.init.normal_(self.relative_attention_bias, mean=init_mean, std=init_std)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
|
@ -186,11 +180,11 @@ class BucketPositionBias(bmt.DistributedModule):
|
|||
return relative_buckets
|
||||
|
||||
|
||||
class RotaryEmbedding(bmt.DistributedModule):
|
||||
class RotaryEmbedding(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim,
|
||||
base: Union[int, float] = 10000,
|
||||
base=10000,
|
||||
distance_scale: Union[int, float] = 1,
|
||||
dtype: torch.dtype = torch.half,
|
||||
):
|
||||
|
@ -226,19 +220,19 @@ def rotate_half(x):
|
|||
|
||||
|
||||
def apply_rotary_pos_emb(x, cos, sin, seq_dim, offset):
|
||||
if x.size(seq_dim) < cos.size(seq_dim): # == do not need narrow
|
||||
if x.size(seq_dim) < cos.size(seq_dim):
|
||||
cos = cos.narrow(seq_dim, offset, x.size(seq_dim))
|
||||
sin = sin.narrow(seq_dim, offset, x.size(seq_dim))
|
||||
return (x * cos) + (rotate_half(x) * sin)
|
||||
|
||||
|
||||
def unpad_apply_rotary_pos_emb(x, cos, sin, seq_dim, position_ids):
|
||||
cos = cos.index_select(seq_dim, position_ids.view(-1))
|
||||
sin = sin.index_select(seq_dim, position_ids.view(-1))
|
||||
cos = cos.index_select(seq_dim, position_ids.squeeze(0))
|
||||
sin = sin.index_select(seq_dim, position_ids.squeeze(0))
|
||||
return (x * cos) + (rotate_half(x) * sin)
|
||||
|
||||
|
||||
class RotaryEmbeddingESM(bmt.DistributedModule):
|
||||
class RotaryEmbeddingESM(torch.nn.Module):
|
||||
"""
|
||||
Rotary position embeddings based on those in
|
||||
[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
|
||||
|
@ -274,7 +268,8 @@ class RotaryEmbeddingESM(bmt.DistributedModule):
|
|||
self.apply_rotary_pos_emb = apply_rotary_pos_emb
|
||||
self.unpad_apply_rotary_pos_emb = unpad_apply_rotary_pos_emb
|
||||
|
||||
def _update_cos_sin_tables(self, x, seq_dim, seq_len):
|
||||
def _update_cos_sin_tables(self, x, seq_dim, offset):
|
||||
seq_len = x.size(seq_dim) + offset
|
||||
if seq_len > self._seq_len_cached or self._cos_cached.device != x.device:
|
||||
self._seq_len_cached = seq_len
|
||||
t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
|
||||
|
@ -295,19 +290,11 @@ class RotaryEmbeddingESM(bmt.DistributedModule):
|
|||
self, q: torch.Tensor, k: torch.Tensor, seq_dim, offset=0, cu_seqlens=None, max_length=None, position_ids=None
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
seq_dim = (seq_dim + k.dim()) % k.dim()
|
||||
if cu_seqlens is None:
|
||||
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dim, k.size(seq_dim) + offset)
|
||||
return (
|
||||
self.apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached, seq_dim, offset),
|
||||
self.apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached, seq_dim, offset),
|
||||
)
|
||||
else:
|
||||
assert offset == 0, "past kv is not supported in flash attn"
|
||||
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dim, max_length)
|
||||
return (
|
||||
self.unpad_apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached, seq_dim, position_ids),
|
||||
self.unpad_apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached, seq_dim, position_ids),
|
||||
)
|
||||
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dim, offset)
|
||||
return (
|
||||
self.apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached, seq_dim, offset),
|
||||
self.apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached, seq_dim, offset),
|
||||
)
|
||||
|
||||
|
||||
@torch.jit.script
|
||||
|
@ -334,7 +321,7 @@ def apply_chatglm_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> t
|
|||
return ret
|
||||
|
||||
|
||||
class ChatGLMRotaryEmbedding(bmt.DistributedModule):
|
||||
class ChatGLMRotaryEmbedding(torch.nn.Module):
|
||||
def __init__(self, dim, device="cuda", dtype=torch.float16, persistent=True):
|
||||
super().__init__()
|
||||
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=dtype, device=device) / dim))
|
||||
|
@ -365,4 +352,4 @@ class ChatGLMRotaryEmbedding(bmt.DistributedModule):
|
|||
return cache
|
||||
|
||||
def forward(self, max_seq_len, offset: int = 0):
|
||||
return self.forward_impl(max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device)
|
||||
return self.forward_impl(max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device)
|
|
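The rotary embedding classes above rotate query/key pairs by position-dependent angles, with cos/sin tables cached per sequence length. A self-contained sketch of the rotation itself, assuming the last dimension is the head dimension and offset 0 (sizes are illustrative):

```python
import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

dim, seq_len = 8, 6
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
t = torch.arange(seq_len).float()
freqs = torch.einsum("i,j->ij", t, inv_freq)   # (seq_len, dim/2)
emb = torch.cat((freqs, freqs), dim=-1)        # (seq_len, dim)
cos, sin = emb.cos(), emb.sin()

q = torch.randn(seq_len, dim)
q_rot = (q * cos) + (rotate_half(q) * sin)     # same form as apply_rotary_pos_emb
print(q_rot.shape)  # torch.Size([6, 8])
```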
@ -2,14 +2,13 @@ from typing import List
|
|||
from typing import Optional
|
||||
from typing import Tuple
|
||||
|
||||
import bmtrain as bmt
|
||||
import torch
|
||||
|
||||
from .blocks import TransformerBlock
|
||||
from .layernorm import LayerNorm
|
||||
|
||||
|
||||
class Encoder(bmt.DistributedModule):
|
||||
class Encoder(torch.nn.Module):
|
||||
"""Layers of encoder transformer blocks plus an final layernorm.
|
||||
|
||||
Args:
|
||||
|
@ -19,7 +18,7 @@ class Encoder(bmt.DistributedModule):
|
|||
num_heads (int): num_heads used in :py:class:`model_center.layer.Attention`.
|
||||
dim_head (int): dim_head used in :py:class:`model_center.layer.Attention`.
|
||||
dtype (optional): Defaults to torch.half.
|
||||
eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-5.
|
||||
eps (float, optional): eps used in :py:class:`model_center.layer.LayerNorm`. Defaults to 1e-6.
|
||||
dropout_p (float, optional): Defaults to 0.
|
||||
""" # noqa: E501
|
||||
|
||||
|
@ -33,49 +32,47 @@ class Encoder(bmt.DistributedModule):
|
|||
num_kv_heads: int = -1,
|
||||
activate_fn: str = "gelu",
|
||||
dtype: torch.dtype = torch.half,
|
||||
eps: float = 1e-5,
|
||||
eps: float = 1e-6,
|
||||
dropout_p: Optional[float] = None,
|
||||
scale: bool = True,
|
||||
add_qkv_bias: bool = False,
|
||||
mask_modules: Optional[List[Tuple[bool, bool]]] = None,
|
||||
use_flash_attn: bool = False,
|
||||
tp: int = 0,
|
||||
disabled_checkpoint: Optional[int] = None,
|
||||
):
|
||||
super().__init__()
|
||||
if num_kv_heads == -1:
|
||||
num_kv_heads = num_heads
|
||||
self.num_layers = num_layers
|
||||
|
||||
if mask_modules is not None:
|
||||
assert len(mask_modules) == num_layers, "The total number of masks should equal to num_layers"
|
||||
for mask_module in mask_modules:
|
||||
assert len(mask_module) == 2, "For encoder, each mask should be (mask_att, mask_ffn)"
|
||||
else:
|
||||
mask_modules = [(False, False)] * num_layers
|
||||
self.layers = bmt.TransformerBlockList(
|
||||
|
||||
self.layers = torch.nn.ModuleList(
|
||||
[
|
||||
bmt.CheckpointBlock(
|
||||
TransformerBlock(
|
||||
dim_model=dim_model,
|
||||
dim_ff=dim_ff,
|
||||
num_heads=num_heads,
|
||||
num_kv_heads=num_kv_heads,
|
||||
dim_head=dim_head,
|
||||
activate_fn=activate_fn,
|
||||
dtype=dtype,
|
||||
eps=eps,
|
||||
dropout_p=dropout_p,
|
||||
scale=scale,
|
||||
add_qkv_bias=add_qkv_bias,
|
||||
mask_att=mask_modules[ith][0],
|
||||
mask_ffn=mask_modules[ith][1],
|
||||
use_flash_attn=use_flash_attn,
|
||||
tp=tp,
|
||||
),
|
||||
TransformerBlock(
|
||||
dim_model=dim_model,
|
||||
dim_ff=dim_ff,
|
||||
num_heads=num_heads,
|
||||
num_kv_heads=num_kv_heads,
|
||||
dim_head=dim_head,
|
||||
activate_fn=activate_fn,
|
||||
dtype=dtype,
|
||||
eps=eps,
|
||||
dropout_p=dropout_p,
|
||||
scale=scale,
|
||||
add_qkv_bias=add_qkv_bias,
|
||||
mask_att=mask_modules[ith][0],
|
||||
mask_ffn=mask_modules[ith][1],
|
||||
use_flash_attn=use_flash_attn,
|
||||
)
|
||||
for ith in range(num_layers)
|
||||
]
|
||||
)
|
||||
|
||||
self.output_layernorm = LayerNorm(dim_norm=dim_model, dtype=dtype, eps=eps)
|
||||
|
||||
def forward(
|
||||
|
@ -87,6 +84,7 @@ class Encoder(bmt.DistributedModule):
|
|||
past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
|
||||
pos_bias_type: Optional[str] = "relative",
|
||||
length_mask: Optional[torch.Tensor] = None,
|
||||
context_mask: Optional[torch.Tensor] = None,
|
||||
attention_mask_bias: Optional[torch.Tensor] = None,
|
||||
cu_seqlens: Optional[torch.Tensor] = None,
|
||||
max_seqlen: Optional[torch.Tensor] = None,
|
||||
|
@ -103,19 +101,21 @@ class Encoder(bmt.DistributedModule):
|
|||
|
||||
""" # noqa: E501
|
||||
if not use_cache:
|
||||
hidden_states = self.layers(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
position_bias,
|
||||
False,
|
||||
None,
|
||||
pos_bias_type,
|
||||
length_mask,
|
||||
attention_mask_bias,
|
||||
cu_seqlens,
|
||||
max_seqlen,
|
||||
position_ids,
|
||||
)
|
||||
for i, module in enumerate(self.layers):
|
||||
hidden_states = module(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
position_bias,
|
||||
False,
|
||||
None,
|
||||
pos_bias_type,
|
||||
length_mask,
|
||||
context_mask,
|
||||
attention_mask_bias,
|
||||
cu_seqlens,
|
||||
max_seqlen,
|
||||
position_ids,
|
||||
)
|
||||
hidden_states = self.output_layernorm(hidden_states)
|
||||
return hidden_states
|
||||
else:
|
||||
|
@ -131,6 +131,7 @@ class Encoder(bmt.DistributedModule):
|
|||
past_key_values[i] if past_key_values else None,
|
||||
pos_bias_type,
|
||||
length_mask,
|
||||
context_mask,
|
||||
attention_mask_bias,
|
||||
)
|
||||
if use_cache:
|
||||
|
@ -141,4 +142,4 @@ class Encoder(bmt.DistributedModule):
|
|||
if use_cache:
|
||||
return hidden_states, current_key_values, current_hidden_states
|
||||
else:
|
||||
return hidden_states
|
||||
return hidden_states
|
|
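The Encoder change swaps bmt.TransformerBlockList / bmt.CheckpointBlock for a plain torch.nn.ModuleList, so BMTrain's activation checkpointing is dropped along with it. If checkpointing were still wanted in the pure-PyTorch version, one hedged way to recover it (not what this repo does) is torch.utils.checkpoint around each block:

```python
import torch
from torch.utils.checkpoint import checkpoint

class TinyBlock(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.ff = torch.nn.Linear(dim, dim)

    def forward(self, x):
        return x + torch.relu(self.ff(x))

layers = torch.nn.ModuleList(TinyBlock(16) for _ in range(4))
x = torch.randn(2, 5, 16, requires_grad=True)
for block in layers:
    # recompute activations during backward instead of storing them
    x = checkpoint(block, x, use_reentrant=False)
x.sum().backward()
```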
@ -0,0 +1,237 @@
|
|||
from typing import List
|
||||
from typing import Optional
|
||||
from typing import Tuple
|
||||
|
||||
import torch
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from vis_fm9g.layers import Embedding
|
||||
from vis_fm9g.layers import Encoder
|
||||
from vis_fm9g.layers import RotaryEmbeddingESM
|
||||
from vis_fm9g.utils.config import Config
|
||||
|
||||
|
||||
class FM9GConfig(Config):
|
||||
def __init__(
|
||||
self,
|
||||
vocab_size=32000,
|
||||
dim_model=4096,
|
||||
num_heads=32,
|
||||
num_kv_heads=32,
|
||||
dim_head=128,
|
||||
dim_ff=11008,
|
||||
num_layers=32,
|
||||
dropout_p=0.0,
|
||||
activate_fn="silu",
|
||||
scale=True,
|
||||
eps=1e-5,
|
||||
bf16: bool = False,
|
||||
half: bool = True,
|
||||
mask_modules: Optional[List[Tuple[bool, bool]]] = None,
|
||||
use_flash_attn: bool = True,
|
||||
flash_attn_mask_shape="1d",
|
||||
flash_impl="cuda",
|
||||
base=10000,
|
||||
):
|
||||
super().__init__()
|
||||
self.vocab_size = vocab_size
|
||||
self.dim_model = dim_model
|
||||
self.num_heads = num_heads
|
||||
self.num_kv_heads = num_kv_heads
|
||||
self.dim_head = dim_head
|
||||
self.dim_ff = dim_ff
|
||||
self.num_layers = num_layers
|
||||
self.dropout_p = dropout_p
|
||||
self.activate_fn = activate_fn
|
||||
self.scale = scale
|
||||
self.eps = eps
|
||||
if bf16:
|
||||
self.dtype = torch.bfloat16
|
||||
elif half:
|
||||
self.dtype = torch.float16
|
||||
else:
|
||||
self.dtype = torch.float
|
||||
self.flash_impl = flash_impl
|
||||
self.mask_modules = mask_modules
|
||||
self.use_flash_attn = use_flash_attn
|
||||
self.flash_attn_mask_shape = flash_attn_mask_shape
|
||||
self.base = base
|
||||
|
||||
|
||||
class FM9GInferenceState(TypedDict):
|
||||
buffer_context: torch.Tensor
|
||||
buffer_sample_ids: torch.Tensor
|
||||
buffer: List[Tuple[torch.Tensor, torch.Tensor]]
|
||||
|
||||
|
||||
class FM9GTorch(torch.nn.Module):
|
||||
def __init__(self, config: FM9GConfig):
|
||||
super().__init__()
|
||||
self.config = config
|
||||
|
||||
self.encoder = Encoder(
|
||||
num_layers=config.num_layers,
|
||||
dim_model=config.dim_model,
|
||||
dim_ff=config.dim_ff,
|
||||
num_heads=config.num_heads,
|
||||
num_kv_heads=config.num_kv_heads,
|
||||
dim_head=config.dim_head,
|
||||
activate_fn=config.activate_fn,
|
||||
dtype=config.dtype,
|
||||
eps=config.eps,
|
||||
dropout_p=config.dropout_p,
|
||||
scale=config.scale,
|
||||
mask_modules=config.mask_modules,
|
||||
use_flash_attn=config.use_flash_attn,
|
||||
)
|
||||
|
||||
self.input_embedding = Embedding(
|
||||
vocab_size=config.vocab_size,
|
||||
embedding_size=config.dim_model,
|
||||
scale=config.scale,
|
||||
dtype=config.dtype,
|
||||
init_std=0.02,
|
||||
)
|
||||
|
||||
self.position_bias = RotaryEmbeddingESM(
|
||||
dim=config.dim_head, dtype=config.dtype, base=config.base, persistent=False, mixed_precision=True
|
||||
)
|
||||
|
||||
self.lm_head = Embedding(
|
||||
vocab_size=config.vocab_size,
|
||||
embedding_size=config.dim_model,
|
||||
scale=config.scale,
|
||||
dtype=config.dtype,
|
||||
init_std=0.02,
|
||||
)
|
||||
self.flash_impl = config.flash_impl
|
||||
self.use_flash_attn = config.use_flash_attn
|
||||
self.flash_attn_mask_shape = config.flash_attn_mask_shape
|
||||
|
||||
def forward(
|
||||
self,
|
||||
input: torch.Tensor, # (batch, seqlen) int32
|
||||
length: torch.Tensor = None, # (batch) int32
|
||||
context: torch.Tensor = None, # (batch, seqlen) bool
|
||||
span: torch.Tensor = None, # (batch, seqlen) int32
|
||||
cu_seqlens: torch.Tensor = None, # (real_batch+2) int32
|
||||
max_seqlen: int = None,
|
||||
position_ids: torch.Tensor = None, # (batch, seqlen) int32
|
||||
hidden_states: torch.Tensor = None
|
||||
):
|
||||
batch = input.size(0)
|
||||
seqlen = input.size(1)
|
||||
device = input.device
|
||||
|
||||
if length is not None and length.dim() == 1:
|
||||
length = torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) < length[:, None]
|
||||
|
||||
# processing masks and position bias bucket
|
||||
if not self.use_flash_attn or (self.flash_attn_mask_shape == "2d" and self.flash_impl == "triton"):
|
||||
with torch.no_grad():
|
||||
# directional mask
|
||||
directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(seqlen, device=device).view(
|
||||
-1, 1
|
||||
)
|
||||
# context mask
|
||||
attention_mask = context[:, None, :] | (
|
||||
context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
|
||||
)
|
||||
# span mask
|
||||
attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
|
||||
# length mask
|
||||
attention_mask = length.view(batch, seqlen, 1) & length.view(batch, 1, seqlen) & attention_mask
|
||||
|
||||
if hidden_states is None:
|
||||
hidden_states = self.input_embedding(input)
|
||||
|
||||
if self.use_flash_attn:
|
||||
if self.flash_attn_mask_shape == "1d":
|
||||
hidden_states = self.encoder(
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
position_bias=self.position_bias,
|
||||
pos_bias_type="rotary",
|
||||
length_mask=length,
|
||||
context_mask=context.to(torch.int16) + 2 * (span.to(torch.int16) + length.to(torch.int16)),
|
||||
)
|
||||
else:
|
||||
if self.flash_impl == "triton":
|
||||
mask = attention_mask.unsqueeze(dim=1).contiguous()
|
||||
attention_mask_bias = torch.zeros_like(mask, device="cuda", dtype=torch.float16)
|
||||
attention_mask_bias[mask == False] -= torch.inf
|
||||
else:
|
||||
attention_mask_bias = None
|
||||
assert cu_seqlens is not None, "cu_seqlens are needed in Flash Attention cuda impl"
|
||||
hidden_states = self.encoder(
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
position_bias=self.position_bias,
|
||||
pos_bias_type="rotary",
|
||||
length_mask=None,
|
||||
context_mask=None,
|
||||
attention_mask_bias=attention_mask_bias,
|
||||
cu_seqlens=cu_seqlens,
|
||||
max_seqlen=max_seqlen,
|
||||
position_ids=position_ids,
|
||||
)
|
||||
else:
|
||||
hidden_states = self.encoder(
|
||||
hidden_states, attention_mask=attention_mask, position_bias=self.position_bias, pos_bias_type="rotary"
|
||||
)
|
||||
|
||||
logits = self.lm_head.projection(hidden_states)
|
||||
|
||||
return logits, hidden_states
|
||||
|
||||
def inference(
|
||||
self,
|
||||
input: torch.Tensor, # (batch, len_q) int32
|
||||
length: torch.Tensor, # (batch) int32
|
||||
context: torch.Tensor, # (batch, seqlen) int16
|
||||
span: torch.Tensor, # (batch, seqlen) int32
|
||||
past_key_values: Optional[FM9GInferenceState] = None,
|
||||
hidden_states: torch.Tensor = None
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, FM9GInferenceState]:
|
||||
batch = input.size(0)
|
||||
len_q = input.size(1)
|
||||
len_buffer = 0
|
||||
if past_key_values is None:
|
||||
present_buffer = None
|
||||
else:
|
||||
present_buffer = past_key_values["buffer"]
|
||||
len_buffer = present_buffer[0][0].shape[-2]
|
||||
seqlen = len_buffer + len_q
|
||||
with torch.no_grad():
|
||||
device = input.device
|
||||
if length.dim() == 1:
|
||||
length = (torch.arange(seqlen, device=device)[None, :].repeat(batch, 1) + length[:, None]) >= seqlen
|
||||
directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(seqlen, device=device).view(-1, 1)
|
||||
# context mask
|
||||
attention_mask = context[:, None, :] | (
|
||||
context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
|
||||
)
|
||||
# span mask
|
||||
attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
|
||||
# length mask
|
||||
attention_mask = length.view(batch, seqlen, 1) & length.view(batch, 1, seqlen) & attention_mask
|
||||
|
||||
if hidden_states is None:
|
||||
hidden_states = self.input_embedding(input)
|
||||
|
||||
hidden_states, present_key_values, _ = self.encoder(
|
||||
hidden_states,
|
||||
attention_mask=attention_mask[:, len_buffer:],
|
||||
position_bias=self.position_bias,
|
||||
use_cache=True,
|
||||
past_key_values=present_buffer,
|
||||
pos_bias_type="rotary",
|
||||
)
|
||||
|
||||
logits = self.lm_head.projection(hidden_states)
|
||||
|
||||
return (
|
||||
logits,
|
||||
hidden_states,
|
||||
{"buffer": present_key_values},
|
||||
)
|
|
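FM9GTorch.forward builds its attention mask by combining a causal (directional) mask, a context mask, a span mask, and a length mask. A toy sketch of the same boolean algebra on a single 5-token sequence (all values are made up):

```python
import torch

seqlen = 5
context = torch.tensor([[1, 1, 0, 0, 0]], dtype=torch.bool)  # prompt tokens attend bidirectionally
span = torch.tensor([[0, 0, 0, 1, 1]])                       # tokens only attend within their span
length = torch.tensor([[1, 1, 1, 1, 0]], dtype=torch.bool)   # last position is padding

directional = torch.arange(seqlen) <= torch.arange(seqlen).view(-1, 1)        # lower-triangular
mask = context[:, None, :] | (context[:, :, None].logical_not() & directional)
mask = mask & (span[:, None, :] == span[:, :, None])                          # same-span only
mask = length[:, :, None] & length[:, None, :] & mask                         # drop padding
print(mask.int())
```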
@ -0,0 +1,158 @@
|
|||
# Copyright (c) Alibaba Cloud.
|
||||
#
|
||||
# This source code is licensed under the license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import math
|
||||
from functools import partial
|
||||
import numpy as np
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
from torch.nn import functional as F
|
||||
from torch.nn.init import trunc_normal_
|
||||
|
||||
|
||||
def get_abs_pos(abs_pos, tgt_size):
|
||||
# abs_pos: L, C
|
||||
# tgt_size: M
|
||||
# return: M, C
|
||||
src_size = int(math.sqrt(abs_pos.size(0)))
|
||||
tgt_size = int(math.sqrt(tgt_size))
|
||||
dtype = abs_pos.dtype
|
||||
|
||||
if src_size != tgt_size:
|
||||
return F.interpolate(
|
||||
abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
|
||||
size=(tgt_size, tgt_size),
|
||||
mode="bicubic",
|
||||
align_corners=False,
|
||||
).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
|
||||
else:
|
||||
return abs_pos
|
||||
|
||||
|
||||
# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
|
||||
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
|
||||
"""
|
||||
grid_size: int of the grid height and width
|
||||
return:
|
||||
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
|
||||
"""
|
||||
grid_h = np.arange(grid_size, dtype=np.float32)
|
||||
grid_w = np.arange(grid_size, dtype=np.float32)
|
||||
grid = np.meshgrid(grid_w, grid_h) # here w goes first
|
||||
grid = np.stack(grid, axis=0)
|
||||
|
||||
grid = grid.reshape([2, 1, grid_size, grid_size])
|
||||
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
|
||||
if cls_token:
|
||||
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
|
||||
return pos_embed
|
||||
|
||||
|
||||
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
|
||||
assert embed_dim % 2 == 0
|
||||
|
||||
# use half of dimensions to encode grid_h
|
||||
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
|
||||
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
|
||||
|
||||
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
|
||||
return emb
|
||||
|
||||
|
||||
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
|
||||
"""
|
||||
embed_dim: output dimension for each position
|
||||
pos: a list of positions to be encoded: size (M,)
|
||||
out: (M, D)
|
||||
"""
|
||||
assert embed_dim % 2 == 0
|
||||
omega = np.arange(embed_dim // 2, dtype=np.float32)
|
||||
omega /= embed_dim / 2.
|
||||
omega = 1. / 10000 ** omega # (D/2,)
|
||||
|
||||
pos = pos.reshape(-1) # (M,)
|
||||
out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
|
||||
|
||||
emb_sin = np.sin(out) # (M, D/2)
|
||||
emb_cos = np.cos(out) # (M, D/2)
|
||||
|
||||
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
|
||||
return emb
|
||||
|
||||
|
||||
class Resampler(nn.Module):
|
||||
"""
|
||||
A 2D perceiver-resampler network with one cross-attention layer, using
|
||||
(grid_size**2) learnable queries and 2d sincos pos_emb
|
||||
Outputs:
|
||||
A tensor with the shape of (grid_size**2, embed_dim)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
grid_size,
|
||||
embed_dim,
|
||||
num_heads,
|
||||
kv_dim=None,
|
||||
norm_layer=partial(nn.LayerNorm, eps=1e-6)
|
||||
):
|
||||
super().__init__()
|
||||
self.num_queries = grid_size ** 2
|
||||
self.embed_dim = embed_dim
|
||||
self.num_heads = num_heads
|
||||
|
||||
self.pos_embed = nn.Parameter(
|
||||
torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
|
||||
).requires_grad_(False)
|
||||
|
||||
self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
||||
trunc_normal_(self.query, std=.02)
|
||||
|
||||
if kv_dim is not None and kv_dim != embed_dim:
|
||||
self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
|
||||
else:
|
||||
self.kv_proj = nn.Identity()
|
||||
|
||||
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
|
||||
self.ln_q = norm_layer(embed_dim)
|
||||
self.ln_kv = norm_layer(embed_dim)
|
||||
|
||||
self.ln_post = norm_layer(embed_dim)
|
||||
self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
|
||||
|
||||
self.apply(self._init_weights)
|
||||
|
||||
def _init_weights(self, m):
|
||||
if isinstance(m, nn.Linear):
|
||||
trunc_normal_(m.weight, std=.02)
|
||||
if isinstance(m, nn.Linear) and m.bias is not None:
|
||||
nn.init.constant_(m.bias, 0)
|
||||
elif isinstance(m, nn.LayerNorm):
|
||||
nn.init.constant_(m.bias, 0)
|
||||
nn.init.constant_(m.weight, 1.0)
|
||||
|
||||
def forward(self, x, attn_mask=None):
|
||||
|
||||
pos_embed = get_abs_pos(self.pos_embed, x.size(1))
|
||||
|
||||
x = self.kv_proj(x)
|
||||
x = self.ln_kv(x).permute(1, 0, 2)
|
||||
|
||||
N = x.shape[1]
|
||||
q = self.ln_q(self.query)
|
||||
out = self.attn(
|
||||
self._repeat(q, N) + self.pos_embed.unsqueeze(1),
|
||||
x + pos_embed.unsqueeze(1),
|
||||
x,
|
||||
attn_mask=attn_mask)[0]
|
||||
x = out.permute(1, 0, 2)
|
||||
|
||||
x = self.ln_post(x)
|
||||
x = x @ self.proj
|
||||
return x
|
||||
|
||||
def _repeat(self, query, N: int):
|
||||
return query.unsqueeze(1).repeat(1, N, 1)
|
|
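The Resampler compresses a variable number of ViT patch features into a fixed set of grid_size**2 query tokens through one cross-attention layer. A hedged shape-level sketch of that cross-attention with made-up dimensions (the real module also adds 2d sincos position embeddings, layer norms, and an output projection):

```python
import torch

embed_dim, num_heads = 64, 8
num_queries, num_patches, batch = 16, 196, 2      # e.g. a 4x4 query grid over 14x14 patches

attn = torch.nn.MultiheadAttention(embed_dim, num_heads)   # seq-first, like the Resampler
query = torch.zeros(num_queries, embed_dim)
x = torch.randn(batch, num_patches, embed_dim)              # ViT patch features

q = query.unsqueeze(1).repeat(1, batch, 1)                  # (num_queries, batch, dim), cf. _repeat
kv = x.permute(1, 0, 2)                                      # (num_patches, batch, dim)
out, _ = attn(q, kv, kv)
print(out.permute(1, 0, 2).shape)                            # torch.Size([2, 16, 64])
```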
@ -0,0 +1,110 @@
|
|||
import math
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import timm
|
||||
import torch
|
||||
from transformers.utils import ModelOutput
|
||||
|
||||
from vis_fm9g.model.fm9g import FM9GTorch
|
||||
from vis_fm9g.model.resampler import Resampler
|
||||
|
||||
|
||||
@dataclass
|
||||
class CausalVLLMOutput(ModelOutput):
|
||||
loss: Optional[torch.FloatTensor] = None
|
||||
logits: torch.FloatTensor = None
|
||||
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
||||
|
||||
|
||||
class VLU_FM9G(torch.nn.Module):
|
||||
def __init__(self, llm: FM9GTorch, vpm: timm.models.VisionTransformer, vision_dim, query_num) -> None:
|
||||
super().__init__()
|
||||
self.vpm = vpm
|
||||
self.llm = llm
|
||||
|
||||
self.vision_dim = vision_dim
|
||||
self.query_num = query_num
|
||||
|
||||
embed_dim = self.llm.config.dim_model
|
||||
self.resampler = Resampler(
|
||||
grid_size=int(math.sqrt(query_num)),
|
||||
embed_dim=embed_dim,
|
||||
num_heads=embed_dim // 128,
|
||||
kv_dim=self.vpm.embed_dim,
|
||||
)
|
||||
|
||||
def get_vision_embedding(self, pixel_values):
|
||||
res = []
|
||||
dtype = self.vpm.pos_embed.data.dtype
|
||||
for pixel_value in pixel_values:
|
||||
vision_embedding = self.vpm.forward_features(pixel_value.unsqueeze(0).type(dtype))
|
||||
if hasattr(self.vpm, 'num_prefix_tokens') and self.vpm.num_prefix_tokens > 0:
|
||||
vision_embedding = vision_embedding[:, self.vpm.num_prefix_tokens:]
|
||||
res.append(self.resampler(vision_embedding))
|
||||
return torch.vstack(res)
|
||||
|
||||
|
||||
def get_vllm_embedding(self, data):
|
||||
if 'vision_hidden_states' not in data:
|
||||
pixel_values_list = data['pixel_values']
|
||||
vision_hidden_states = []
|
||||
for pixel_values in pixel_values_list:
|
||||
if len(pixel_values) > 0:
|
||||
vision_hidden_states.append(self.get_vision_embedding(pixel_values))
|
||||
else:
|
||||
vision_hidden_states.append([])
|
||||
else:
|
||||
vision_hidden_states = data['vision_hidden_states']
|
||||
|
||||
vllm_embedding = self.llm.input_embedding(data['input_ids'])
|
||||
vision_hidden_states = [i.type(vllm_embedding.dtype) if isinstance(
|
||||
i, torch.Tensor) else i for i in vision_hidden_states]
|
||||
|
||||
bs = len(data['input_ids'])
|
||||
for i in range(bs):
|
||||
cur_vs_hs = vision_hidden_states[i]
|
||||
if len(cur_vs_hs) > 0:
|
||||
cur_vllm_emb = vllm_embedding[i]
|
||||
cur_image_bound = data['image_bound'][i]
|
||||
|
||||
image_indices = torch.stack(
|
||||
[torch.arange(r[0], r[1], dtype=torch.long) for r in cur_image_bound]
|
||||
).to(vllm_embedding.device)
|
||||
|
||||
cur_vllm_emb.scatter_(0, image_indices.view(-1, 1).repeat(1, cur_vllm_emb.shape[-1]),
|
||||
cur_vs_hs.view(-1, cur_vs_hs.shape[-1]))
|
||||
|
||||
return vllm_embedding, vision_hidden_states
|
||||
|
||||
def forward(self, data, **kwargs):
|
||||
vllm_embedding, vision_hidden_states = self.get_vllm_embedding(data)
|
||||
|
||||
input_ids = data["input_ids"]
|
||||
input_length = data["length"]
|
||||
if self.llm.config.flash_impl == 'cuda':
|
||||
cu_seqlens = data["cu_seqlens"]
|
||||
max_seqlen = data["max_seqlen"]
|
||||
position_ids = data["position_ids"]
|
||||
logits, hidden_states = self.llm(
|
||||
input_ids,
|
||||
cu_seqlens=cu_seqlens,
|
||||
max_seqlen=max_seqlen,
|
||||
position_ids=position_ids,
|
||||
hidden_states=vllm_embedding
|
||||
)
|
||||
else:
|
||||
input_context = torch.zeros_like(input_ids).cuda().bool()
|
||||
input_span = data["spans"]
|
||||
logits, hidden_states = self.llm(
|
||||
input_ids,
|
||||
input_length,
|
||||
input_context,
|
||||
input_span,
|
||||
hidden_states=vllm_embedding
|
||||
)
|
||||
|
||||
return CausalVLLMOutput(
|
||||
logits=logits,
|
||||
hidden_states=hidden_states,
|
||||
)
|
|
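get_vllm_embedding splices the resampled image features into the token embedding sequence at the slots recorded in image_bound, using scatter_ on the sequence dimension. A toy sketch of that scatter for a single sample (shapes and bounds are illustrative):

```python
import torch

dim, seq_len, query_num = 4, 10, 3
token_emb = torch.zeros(seq_len, dim)          # embeddings for one sample
image_emb = torch.randn(1, query_num, dim)     # one image worth of resampled features
image_bound = torch.tensor([[2, 5]])           # tokens 2..4 are the <image> placeholder slots

indices = torch.stack([torch.arange(r[0], r[1]) for r in image_bound])   # (1, query_num)
token_emb.scatter_(0, indices.view(-1, 1).repeat(1, dim), image_emb.view(-1, dim))
print(token_emb[2:5])   # now holds the image features
```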
@ -1,12 +1,14 @@
|
|||
import os
|
||||
import io
|
||||
import json
|
||||
from typing import Dict
|
||||
from typing import IO
|
||||
from typing import List
|
||||
|
||||
import pkg_resources
|
||||
from pytrie import StringTrie
|
||||
from transformers import LlamaTokenizer
|
||||
from transformers.tokenization_utils import Trie
|
||||
|
||||
file_path = os.path.dirname(__file__)
|
||||
|
||||
def load_vocab(fp: IO[bytes]) -> Dict[str, int]:
|
||||
"""Loads a vocabulary file into a dictionary."""
|
||||
|
@ -23,50 +25,66 @@ def load_vocab(fp: IO[bytes]) -> Dict[str, int]:
|
|||
|
||||
|
||||
class FM9GTokenizer(object):
|
||||
def __init__(self, path=None):
|
||||
def __init__(self, vocabs_path=None):
|
||||
self.unk_token = "<unk>"
|
||||
self.bos_token = "<s>"
|
||||
self.eos_token = "</s>"
|
||||
self.im_start = "<image>"
|
||||
self.im_end = "</image>"
|
||||
self.ref_start = "<ref>"
|
||||
self.ref_end = "</ref>"
|
||||
self.box_start = "<quad>"
|
||||
self.box_end = "</quad>"
|
||||
self.quad_start = "<quad>"
|
||||
self.quad_end = "</quad>"
|
||||
|
||||
self.tire = Trie()
|
||||
|
||||
self.byte_list = ["<0x0{}>".format(hex(i).upper()[2:]) for i in range(0x10)] + [
|
||||
"<0x{}>".format(hex(i).upper()[2:]) for i in range(0x10, 0x100)
|
||||
]
|
||||
|
||||
self._special_token_set = set([self.unk_token, self.bos_token, self.eos_token] + self.byte_list)
|
||||
self._special_token_set = {self.unk_token, self.bos_token, self.eos_token,
|
||||
self.im_start, self.im_end,
|
||||
self.ref_start, self.ref_end,
|
||||
self.box_start, self.box_end,
|
||||
self.quad_start, self.quad_end}
|
||||
|
||||
if path:
|
||||
all_tokens = load_vocab(io.FileIO(path, "rb"))
|
||||
else:
|
||||
all_tokens = load_vocab(pkg_resources.resource_stream("fm9g", "/fm9g/vocabs/fm9g.txt"))
|
||||
self._byte_set = set(self.byte_list)
|
||||
|
||||
# never split special tokens
|
||||
for t in self._special_token_set:
|
||||
self.tire.add(t)
|
||||
|
||||
if not vocabs_path:
|
||||
vocabs_path = os.path.join(file_path, "../config/caterpillar.txt")
|
||||
|
||||
all_tokens = load_vocab(io.FileIO(vocabs_path, "rb"))
|
||||
|
||||
self.encoder: Dict[str, int] = {}
|
||||
self._special_encoder: Dict[str, int] = {}
|
||||
for token, token_id in all_tokens.items():
|
||||
if token in self._special_token_set:
|
||||
self.encoder[token] = token_id
|
||||
self._special_encoder[token] = token_id
|
||||
elif token in self._byte_set:
|
||||
self._special_encoder[token] = token_id
|
||||
else:
|
||||
self.encoder[token] = token_id
|
||||
|
||||
|
||||
self.decoder = {v: k for k, v in self.encoder.items()}
|
||||
self._byte_decoder = {self._special_encoder[token]: i for i, token in enumerate(self.byte_list)}
|
||||
|
||||
self._max_word_len = max([len(x) for x in self.encoder.keys()])
|
||||
|
||||
self._len_word_first = {}
|
||||
for x in self.encoder.keys():
|
||||
if not x[0] in self._len_word_first:
|
||||
self._len_word_first[x[0]] = 1
|
||||
if len(x) > self._len_word_first[x[0]]:
|
||||
self._len_word_first[x[0]] = len(x)
|
||||
self.tencoder = StringTrie(self.encoder)
|
||||
|
||||
def get_piece(self, text: str) -> str:
|
||||
if text[0] in self._len_word_first:
|
||||
text = text[: self._len_word_first[text[0]]]
|
||||
len_text = len(text)
|
||||
for i in range(len(text)):
|
||||
sub = text[: len_text - i]
|
||||
if sub in self.encoder:
|
||||
return sub
|
||||
text = text[: self._max_word_len]
|
||||
len_text = len(text)
|
||||
for i in range(len(text)):
|
||||
sub = text[: len_text - i]
|
||||
if sub in self.encoder:
|
||||
return sub
|
||||
return text[0]
|
||||
|
||||
@property
|
||||
|
@ -85,16 +103,26 @@ class FM9GTokenizer(object):
|
|||
def unk_id(self):
|
||||
return self._special_encoder[self.unk_token]
|
||||
|
||||
@property
|
||||
def im_start_id(self):
|
||||
return self._special_encoder[self.im_start]
|
||||
|
||||
@property
|
||||
def im_end_id(self):
|
||||
return self._special_encoder[self.im_end]
|
||||
|
||||
def __len__(self):
|
||||
return len(self.encoder) + len(self._special_encoder)
|
||||
|
||||
def tokenize(self, text: str) -> List[str]:
|
||||
texts = self.tire.split(text)
|
||||
output_tokens: List[str] = []
|
||||
st = 0
|
||||
while st < len(text):
|
||||
piece = self.get_piece(text[st:])
|
||||
output_tokens.append(piece)
|
||||
st += len(piece)
|
||||
for text in texts:
|
||||
st = 0
|
||||
while st < len(text):
|
||||
piece = self.get_piece(text[st:])
|
||||
output_tokens.append(piece)
|
||||
st += len(piece)
|
||||
return output_tokens
|
||||
|
||||
@staticmethod
|
||||
|
@ -106,8 +134,6 @@ class FM9GTokenizer(object):
|
|||
return text
|
||||
|
||||
def encode(self, text: str) -> List[int]:
|
||||
#if len(text) > 20480:
|
||||
# return [0 for _ in range(20480)]
|
||||
ret = []
|
||||
for x in self.tokenize(text):
|
||||
if x in self.encoder:
|
||||
|
@ -127,25 +153,6 @@ class FM9GTokenizer(object):
|
|||
st += 1
|
||||
elif tokens[st] in self._byte_decoder:
|
||||
if (
|
||||
st + 3 < len(tokens)
|
||||
and tokens[st + 1] in self._byte_decoder
|
||||
and tokens[st + 2] in self._byte_decoder
|
||||
and tokens[st + 3] in self._byte_decoder
|
||||
):
|
||||
first_id = self._byte_decoder[tokens[st]]
|
||||
plane_id = self._byte_decoder[tokens[st + 1]]
|
||||
row_id = self._byte_decoder[tokens[st + 2]]
|
||||
cell_id = self._byte_decoder[tokens[st + 3]]
|
||||
int_bytes = int.to_bytes(first_id << 24 | plane_id << 16 | row_id << 8 | cell_id, 4, "big")
|
||||
try:
|
||||
decoded_str = int_bytes.decode("utf-8", errors="replace")
|
||||
ret.append(decoded_str)
|
||||
#print(decoded_str)
|
||||
except UnicodeDecodeError as e:
|
||||
print(f"UnicodeDecodeError: {e}")
|
||||
|
||||
st += 4
|
||||
elif (
|
||||
st + 2 < len(tokens)
|
||||
and tokens[st + 1] in self._byte_decoder
|
||||
and tokens[st + 2] in self._byte_decoder
|
||||
|
@ -153,33 +160,16 @@ class FM9GTokenizer(object):
|
|||
plane_id = self._byte_decoder[tokens[st]]
|
||||
row_id = self._byte_decoder[tokens[st + 1]]
|
||||
cell_id = self._byte_decoder[tokens[st + 2]]
|
||||
int_bytes = int.to_bytes(plane_id << 16 | row_id << 8 | cell_id, 3, "big")
|
||||
try:
|
||||
decoded_str = int_bytes.decode("utf-8", errors="replace")
|
||||
ret.append(decoded_str)
|
||||
except UnicodeDecodeError as e:
|
||||
print(f"UnicodeDecodeError: {e}")
|
||||
ret.append(int.to_bytes(plane_id << 16 | row_id << 8 | cell_id, 3, "big").decode("utf-8"))
|
||||
st += 3
|
||||
elif st + 1 < len(tokens) and tokens[st + 1] in self._byte_decoder:
|
||||
row_id = self._byte_decoder[tokens[st]]
|
||||
cell_id = self._byte_decoder[tokens[st + 1]]
|
||||
int_bytes = int.to_bytes(row_id << 8 | cell_id, 2, "big")
|
||||
try:
|
||||
decoded_str = int_bytes.decode("utf-8", errors="replace")
|
||||
ret.append(decoded_str)
|
||||
except UnicodeDecodeError as e:
|
||||
print(f"UnicodeDecodeError: {e}")
|
||||
#ret.append(int.to_bytes(row_id << 8 | cell_id, 2, "big").decode("utf-8"))
|
||||
ret.append(int.to_bytes(row_id << 8 | cell_id, 2, "big").decode("utf-8"))
|
||||
st += 2
|
||||
else:
|
||||
cell_id = self._byte_decoder[tokens[st]]
|
||||
int_bytes = int.to_bytes(cell_id, 1, "big")
|
||||
try:
|
||||
decoded_str = int_bytes.decode("utf-8", errors="replace")
|
||||
ret.append(decoded_str)
|
||||
except UnicodeDecodeError as e:
|
||||
print(f"UnicodeDecodeError: {e}")
|
||||
#ret.append(int.to_bytes(cell_id, 1, "big").decode("utf-8"))
|
||||
ret.append(int.to_bytes(cell_id, 1, "big").decode("utf-8"))
|
||||
st += 1
|
||||
elif tokens[st] == self.eos_id:
|
||||
ret.append(self.eos_token)
|
||||
|
@ -196,16 +186,53 @@ class FM9GTokenizer(object):
|
|||
# wrap unicode encoding into a helper function
|
||||
ids = []
|
||||
utf8_id = token.encode("utf-8")
|
||||
for _id in utf8_id:
|
||||
ids.append(self._special_encoder[self.byte_list[_id]])
|
||||
plane_id = utf8_id[-3] if len(utf8_id) >= 3 else 0
|
||||
row_id = utf8_id[-2] if len(utf8_id) >= 2 else 0
|
||||
cell_id = utf8_id[-1] if len(utf8_id) >= 1 else 0
|
||||
if plane_id > 0:
|
||||
ids.append(self._special_encoder[self.byte_list[plane_id]])
|
||||
if row_id > 0:
|
||||
ids.append(self._special_encoder[self.byte_list[row_id]])
|
||||
ids.append(self._special_encoder[self.byte_list[cell_id]])
|
||||
return ids
|
||||
|
||||
def next_token(self, text):
|
||||
# fast next token matching
|
||||
token, token_id = self.tencoder.longest_prefix_item(text, (None, None))
|
||||
if token is None:
|
||||
token = text[0]
|
||||
token_ids = self._encode_unicode(token)
|
||||
else:
|
||||
token_ids = [token_id]
|
||||
return token, token_ids
|
||||
|
||||
class LlamaTokenizerWrapper(LlamaTokenizer):
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self.im_start = "<image>"
|
||||
self.im_end = "</image>"
|
||||
self.ref_start = "<ref>"
|
||||
self.ref_end = "</ref>"
|
||||
self.box_start = "<box>"
|
||||
self.box_end = "</box>"
|
||||
self.quad_start = "<quad>"
|
||||
self.quad_end = "</quad>"
|
||||
|
||||
@property
|
||||
def eos_id(self):
|
||||
return self.sp_model.eos_id()
|
||||
|
||||
@property
|
||||
def bos_id(self):
|
||||
return self.sp_model.bos_id()
|
||||
|
||||
@property
|
||||
def unk_id(self):
|
||||
return self.sp_model.unk_id()
|
||||
|
||||
@property
|
||||
def im_start_id(self):
|
||||
return self._convert_token_to_id(self.im_start)
|
||||
|
||||
@property
|
||||
def im_end_id(self):
|
||||
return self._convert_token_to_id(self.im_end)
|
||||
|
||||
@staticmethod
|
||||
def escape(text: str) -> str:
|
||||
return text
|
||||
|
||||
@staticmethod
|
||||
def unescape(text: str) -> str:
|
||||
return text
|
|
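When a piece is not in the vocabulary, FM9GTokenizer falls back to its UTF-8 bytes and emits one <0xNN> token per byte; decode later reassembles multi-byte characters from runs of byte tokens. A standalone sketch of that byte fallback (the token strings follow the byte_list naming above; the index math is a simplification of the real encoder maps):

```python
byte_list = ["<0x{:02X}>".format(i) for i in range(0x100)]

def encode_unicode(piece: str) -> list:
    # one byte token per UTF-8 byte of the out-of-vocabulary piece
    return [byte_list[b] for b in piece.encode("utf-8")]

tokens = encode_unicode("好")            # a 3-byte UTF-8 character
print(tokens)                            # ['<0xE5>', '<0xA5>', '<0xBD>']

raw = bytes(byte_list.index(t) for t in tokens)
print(raw.decode("utf-8", errors="replace"))   # '好'
```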
@ -0,0 +1,56 @@
|
|||
import json
|
||||
import os
|
||||
import shutil
|
||||
import pandas as pd
|
||||
import torch
|
||||
import torch.distributed
|
||||
from deepspeed.utils import logger
|
||||
|
||||
from utils.utils import is_main_process
|
||||
|
||||
|
||||
def export(vllm_engine, cur_epoch_step, global_step, epoch, args):
|
||||
if args.save_deepspeed:
|
||||
logger.info(f'start to save deepspeed ckpt, save_dir={args.exp_ckpt_dir}')
|
||||
vllm_engine.save_checkpoint(save_dir=args.exp_ckpt_dir, tag=f'global_step{global_step}', client_state={
|
||||
'checkpoint_step': global_step, 'epoch': epoch, 'cur_epoch_step': cur_epoch_step})
|
||||
|
||||
export_model_dir = os.path.join(args.exp_ckpt_dir, f'{args.exp_name}_epoch_{epoch}_ckpt_{global_step}')
|
||||
os.makedirs(export_model_dir, exist_ok=True)
|
||||
base_file_name = f'{args.exp_name}_{cur_epoch_step}_{global_step}'
|
||||
|
||||
# model files
|
||||
if is_main_process():
|
||||
model_state_dict_path = os.path.join(export_model_dir, base_file_name + '.pt')
|
||||
# store the config and vocabs together with the model file
|
||||
model_cfg_path = os.path.join(export_model_dir, 'config.json')
|
||||
model_vocab_path = os.path.join(export_model_dir, 'vocabs.txt')
|
||||
paths = [model_state_dict_path, model_cfg_path, model_vocab_path]
|
||||
|
||||
torch.save(vllm_engine.module.state_dict(), model_state_dict_path)
|
||||
shutil.copy(args.llm_path, model_cfg_path)
|
||||
shutil.copy(args.vocabs_path, model_vocab_path)
|
||||
|
||||
info = {
|
||||
'global_step': global_step,
|
||||
'epoch': epoch,
|
||||
'cur_epoch_step': cur_epoch_step,
|
||||
'last_ckpt': model_state_dict_path,
|
||||
'config': model_cfg_path,
|
||||
'vocab': model_vocab_path
|
||||
}
|
||||
with open(os.path.join(export_model_dir, 'lastest_info'), 'w') as f:
|
||||
json.dump(info, f, indent=2)
|
||||
logger.info(f'Successfully save model files! {paths}')
|
||||
torch.distributed.barrier()
|
||||
|
||||
|
||||
def export_eval_file(df, global_step, args):
|
||||
export_dir = os.path.join(args.exp_ckpt_dir, 'pretrain_eval')
|
||||
os.makedirs(export_dir, exist_ok=True)
|
||||
base_file_name = f'{"_".join(args.model_checkpoint.split("/")[-2:])}_{args.vision_encoder.split("_")[0]}_{global_step}'
|
||||
|
||||
if is_main_process():
|
||||
eval_result_path = os.path.join(export_dir, base_file_name + '.csv')
|
||||
logger.info(f'save eval result file to {eval_result_path}')
|
||||
df.to_csv(eval_result_path, index=False)
|
|
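exporter.export writes the merged state dict, config, and vocab only on the main process and then synchronizes all ranks so no worker reads a half-written checkpoint. A minimal sketch of that rank-0-plus-barrier pattern (paths are illustrative; the process group is assumed to be initialized elsewhere):

```python
import os
import torch
import torch.distributed as dist

def save_on_main(module: torch.nn.Module, out_dir: str, step: int) -> None:
    if not dist.is_initialized() or dist.get_rank() == 0:
        os.makedirs(out_dir, exist_ok=True)
        torch.save(module.state_dict(), os.path.join(out_dir, f"ckpt_{step}.pt"))
    if dist.is_initialized():
        dist.barrier()   # make sure every rank sees the finished checkpoint
```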
@ -0,0 +1,212 @@
|
|||
import os
|
||||
import glob
|
||||
import argparse
|
||||
import numpy as np
|
||||
from datetime import datetime
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
|
||||
from utils.logger import init_logger
|
||||
|
||||
logger = init_logger(__name__, level="INFO")
|
||||
|
||||
|
||||
def get_args():
|
||||
parser = argparse.ArgumentParser('VLLM pre-training script', add_help=False)
|
||||
|
||||
parser.add_argument('--self_dir', type=str)
|
||||
parser.add_argument('--train_file', type=str)
|
||||
parser.add_argument('--eval_file', type=str)
|
||||
parser.add_argument('--test_file', type=str)
|
||||
parser.add_argument('--exp_name', type=str, default='multimodal')
|
||||
|
||||
parser.add_argument('--batch_size', default=2, type=int)
|
||||
parser.add_argument('--split_by_rank', action='store_true', default=False, help="split parquet by rank")
|
||||
parser.add_argument('--epochs', default=100, type=int)
|
||||
parser.add_argument('--log_step', default=50, type=int)
|
||||
parser.add_argument('--save_step', default=100, type=int)
|
||||
parser.add_argument('--sft', action='store_true', help='whether to train all parameters')
|
||||
parser.add_argument('--tune_vision', action='store_true', help='whether to train the vision parameters')
|
||||
parser.add_argument('--tune_resampler', action='store_true', help='whether to train the resampler parameters')
|
||||
parser.add_argument('--tune_llm', action='store_true', help='whether to train the llm parameters')
|
||||
parser.add_argument('--delta_tuning', action='store_true', help='whether to use lora')
|
||||
|
||||
# Model parameters
|
||||
parser.add_argument('--img_size', default=224, type=int)
|
||||
parser.add_argument('--llm_path', default=None, help='Path to LLM model to use', type=str)
|
||||
parser.add_argument('--vocabs_path', default=None, help='Path to vocabs to use', type=str)
|
||||
parser.add_argument('--model_checkpoint', default=None, help='Path to VLLM model to use', type=str)
|
||||
parser.add_argument('--llm_checkpoint', default=None, help='Path to LLM model to use', type=str)
|
||||
parser.add_argument('--data_state_dict_path', default=None, help='Path to dataset state dict', type=str)
|
||||
parser.add_argument('--vpm_path', help='Path to VPM model to use', type=str)
|
||||
parser.add_argument('--vpm_checkpoint', help='Path to VPM model to use', type=str)
|
||||
parser.add_argument('--vision_encoder', default='eva02_enormous_patch14_clip_224.laion2b_plus',
|
||||
choices=['eva02_enormous_patch14_clip_224.laion2b_plus', 'vit_so400m_patch14_siglip_384.webli'], type=str)
|
||||
parser.add_argument('--drop_vision_last_layer', action='store_true', help='is drop last vit layer')
|
||||
parser.add_argument('--prefix', default=None, help='Path prefix to save file', type=str)
|
||||
|
||||
# deepspeed
|
||||
parser.add_argument('--deepspeed_config', default=None, help='Path to deepspeed config to use', type=str)
|
||||
parser.add_argument('--save_deepspeed', action='store_true', default=False, help="is save deepspeed checkpoint")
|
||||
|
||||
# vlu
|
||||
parser.add_argument('--skip_overlength', action='store_true', default=False, help="is skip over length data")
|
||||
parser.add_argument('--skip_no_image', action='store_true', default=False, help="is skip no image data")
|
||||
parser.add_argument("--flash", default="none", choices=["none", "1d", "triton", "cuda"])
|
||||
|
||||
parser.add_argument('--max_length', default=256, type=int, help='max length of input')
|
||||
|
||||
# ----- Training -----
|
||||
parser.add_argument('--device', default='cuda',
|
||||
help='device to use for training / testing')
|
||||
parser.add_argument('--query_num', default=32, type=int,
|
||||
help='query numbers')
|
||||
parser.add_argument('--max_len', default=96, type=int,
|
||||
help='max len')
|
||||
parser.add_argument('--seed', default=0, type=int)
|
||||
parser.add_argument('--start_epoch', default=0, type=int)
|
||||
parser.add_argument('--start_step', default=0, type=int)
|
||||
parser.add_argument('--skip_step', default=0, type=int)
|
||||
parser.add_argument('--num_workers', default=5, type=int)
|
||||
parser.add_argument('--pin_mem', action='store_true',
|
||||
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
|
||||
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem',
|
||||
help='')
|
||||
parser.add_argument('--eval', action='store_true', default=False,
|
||||
help="Perform evaluation only")
|
||||
parser.add_argument('--eval_step', default=5000, type=int, help='evaluate step')
|
||||
parser.add_argument('--save_dataset', action='store_true', default=False, help="is save dataset state dict")
|
||||
parser.add_argument('--export_dir', default=None, help='Path to export', type=str)
|
||||
|
||||
|
||||
# After a training interruption, this flag indicates whether to resume from the deepspeed states (gradients, optimizer, etc.) saved at the time of interruption
|
||||
# replaces the previous load_ckpt_dir / load_ckpt_tag arguments
|
||||
parser.add_argument('--need_resume', action='store_true', default=False,
|
||||
help="resume with deepspeed states")
|
||||
parser.add_argument('--need_resume_tag')
|
||||
|
||||
# ----- distributed training parameters -----
|
||||
parser.add_argument('--world_size', default=1, type=int,
|
||||
help='number of distributed processes')
|
||||
parser.add_argument('--local_rank', default=-1, type=int)
|
||||
parser.add_argument('--dist_on_itp', action='store_true')
|
||||
parser.add_argument('--dist_url', default='env://',
|
||||
help='url used to set up distributed training')
|
||||
|
||||
args = parser.parse_args()
|
||||
# 1. file saving / export related settings
|
||||
args.tensorboard = '{base}/{timestamp}'.format(
|
||||
base=os.path.join(args.export_dir, args.exp_name, 'tensorboard'), timestamp=datetime.now().strftime("%Y%m%d%H%M%S"))
|
||||
args.exp_ckpt_dir= os.path.join(args.export_dir, args.exp_name)
|
||||
os.makedirs(args.tensorboard, exist_ok=True)
|
||||
|
||||
# ----- in-repo path arguments -----
|
||||
# model config, copied from the base model's config
|
||||
if not args.llm_path:
|
||||
args.llm_path = _check_default_path(os.path.join(args.self_dir, 'config/config.json'))
|
||||
if not args.vocabs_path:
|
||||
args.vocabs_path = _check_default_path(os.path.join(args.self_dir, 'config/vocabs.txt'))
|
||||
if not args.deepspeed_config:
|
||||
args.deepspeed_config = _check_default_path(os.path.join(args.self_dir, 'config/deepspeed.json'))
|
||||
|
||||
logger.info("get_args() done")
|
||||
return args
|
||||
|
||||
|
||||
|
||||
def _check_default_path(path: str):
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def _extract_ckpt_path(base_dir: str):
|
||||
paths = glob.glob(base_dir + '/*.pt')
|
||||
if len(paths) > 0:
|
||||
return paths[0]
|
||||
else:
|
||||
logger.warning(f'.pt file not found in base_dir({base_dir})')
|
||||
return None
|
||||
|
||||
|
||||
def setup_for_distributed(is_master):
|
||||
"""
|
||||
This function disables printing when not in the master process
|
||||
"""
|
||||
import builtins as __builtin__
|
||||
builtin_print = __builtin__.print
|
||||
|
||||
def print(*args, **kwargs):
|
||||
force = kwargs.pop('force', False)
|
||||
if is_master or force:
|
||||
builtin_print(*args, **kwargs)
|
||||
|
||||
__builtin__.print = print
|
||||
|
||||
|
||||
def is_dist_avail_and_initialized():
|
||||
if not dist.is_available():
|
||||
return False
|
||||
if not dist.is_initialized():
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def get_rank():
|
||||
if not is_dist_avail_and_initialized():
|
||||
return 0
|
||||
return dist.get_rank()
|
||||
|
||||
|
||||
def is_main_process():
|
||||
return get_rank() == 0
|
||||
|
||||
|
||||
def init_distributed_mode(args):
|
||||
if args.dist_on_itp:
|
||||
logger.info('init_distributed_mode dist_on_itp')
|
||||
args.rank = int(os.environ["RANK"])
|
||||
args.world_size = os.environ["WORLD_SIZE"]
|
||||
args.gpu = int(os.environ["LOCAL_RANK"])
|
||||
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
|
||||
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
|
||||
logger.info('init_distributed_mode LOCAL_RANK')
|
||||
args.rank = int(os.environ["RANK"])
|
||||
args.world_size = int(os.environ['WORLD_SIZE'])
|
||||
args.gpu = int(os.environ['LOCAL_RANK'])
|
||||
elif 'SLURM_PROCID' in os.environ:
|
||||
logger.info('init_distributed_mode SLURM_PROCID')
|
||||
args.rank = int(os.environ['SLURM_PROCID'])
|
||||
args.gpu = args.rank % torch.cuda.device_count()
|
||||
else:
|
||||
logger.info('Not using distributed mode')
|
||||
args.distributed = False
|
||||
return
|
||||
|
||||
args.distributed = True
|
||||
|
||||
torch.cuda.set_device(args.gpu)
|
||||
torch.set_num_threads(1)
|
||||
torch.multiprocessing.set_sharing_strategy('file_system')
|
||||
args.dist_backend = 'nccl'
|
||||
print('| distributed init (rank {}): {}, gpu {}'.format(
|
||||
args.rank, args.dist_url, args.gpu), flush=True)
|
||||
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
|
||||
world_size=args.world_size, rank=args.rank)
|
||||
torch.distributed.barrier()
|
||||
setup_for_distributed(args.rank == 0)
|
||||
|
||||
|
||||
def setup(args):
|
||||
# init dist
|
||||
init_distributed_mode(args)
|
||||
rank = get_rank()
|
||||
logger.info(f"rank={rank} init_distributed_mode done")
|
||||
|
||||
seed = args.seed + rank
|
||||
torch.manual_seed(seed)
|
||||
np.random.seed(seed)
|
||||
|
||||
logger.info(f"rank={rank} setup(args) done")
|
|
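init_distributed_mode reads RANK / WORLD_SIZE / LOCAL_RANK from the environment, which is what a torchrun launch exports for every worker. A tiny sketch of how those variables would be picked up (the launch command and defaults are assumptions, not from the repo):

```python
import os

# e.g. `torchrun --nproc_per_node=8 train.py ...` exports these for every worker
rank = int(os.environ.get("RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))
local_gpu = int(os.environ.get("LOCAL_RANK", 0))
print(f"rank {rank}/{world_size}, local gpu {local_gpu}")
```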
@ -0,0 +1,397 @@
# import sys
import os
# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import json
import math
import time
import gc
import pandas as pd
from copy import deepcopy
from typing import Dict, List

from timm.data.constants import *
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, DistributedSampler
from torchvision.transforms import transforms, InterpolationMode

from vis_fm9g.dataset.data import register_data_path
from utils import utils
from vis_fm9g.dataset.utils import SkipBatchSampler
from vis_fm9g.tokenizer.fm9g_tokenizer import FM9GTokenizer
from vis_fm9g.generation.vllm_fm9g import VLLMFM9GBeamSearch

import torch
import datetime
import deepspeed
import timm
import torch.distributed
import torch.utils.data

from vis_fm9g.dataset.itembuilder import FM9GImageTextBuilder, FM9GCollater
from vis_fm9g.dataset.datasets import SingleDataSourceDataset, MultiDataSourceDataset
from vis_fm9g.model.vlu_fm9g import VLU_FM9G
from vis_fm9g.model.fm9g import FM9GConfig, FM9GTorch

from deepspeed.utils import logger

from vis_fm9g.train import exporter, initializer
from vis_fm9g.utils.constants import bot_indicator

import safetensors
from safetensors.torch import load_file

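# collect_statsd_metric() records a wall-clock timestamp per phase; the training loop
# later subtracts consecutive entries to report dataload/forward/backward/optim costs.
# convert_data_to_cuda() moves tensors (and lists of tensors) in a batch dict onto the
# current GPU in place.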
def collect_statsd_metric(name, time_monitor):
    time_monitor[name] = time.time()
    return time_monitor


def convert_data_to_cuda(data: Dict):
    for k, v in data.items():
        if isinstance(v, torch.Tensor):
            data[k] = data[k].cuda()

        if isinstance(v, List) and len(v) > 0 and isinstance(v[0], torch.Tensor):
            for i in range(len(v)):
                v[i] = v[i].cuda()
    return data


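# The data file is a JSON list with one entry per data source, e.g. (names below are
# illustrative only):
# [
#   {"data_source_name": "some_registered_source", "data_source_weight": 2},
#   {"data_source_name": "/path/to/a/local/dataset_dir", "data_source_weight": 1}
# ]
# Directory paths are passed back into their register_data_path handler; when more than
# one source is given, the sources are mixed according to data_source_weights.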
def create_multi_data_source_dataset(file, item_builder):
    with open(file) as f:
        data_list = json.load(f)
    data_source_names = [i['data_source_name'] for i in data_list]
    data_source_weights = [i['data_source_weight'] for i in data_list]

    ds_list = []
    for name in data_source_names:
        if os.path.isdir(name):
            ds = SingleDataSourceDataset(name, item_builder, *register_data_path[name](name))
        else:
            ds = SingleDataSourceDataset(name, item_builder, *register_data_path[name]())
        ds_list.append(ds)
    if len(ds_list) > 1:
        ds = MultiDataSourceDataset(ds_list, data_source_weights)
    return ds


def get_transform(args):
    mean = IMAGENET_DEFAULT_MEAN
    std = IMAGENET_DEFAULT_STD

    transform = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size), interpolation=InterpolationMode.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])

    return transform


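# get_dataloader() shards the mixed dataset across ranks with a DistributedSampler; when
# resuming mid-epoch (args.skip_step > 0) the sampler is wrapped in SkipBatchSampler to
# skip already-consumed batches. Sequences are unpadded only on the CUDA flash-attention
# path during training.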
def get_dataloader(tokenizer, data_file, args, is_training=True):
    transform = get_transform(args)

    builder = FM9GImageTextBuilder(
        tokenizer=tokenizer,
        max_len=args.max_len,
        transform=transform,
        query_len=args.query_num,
        min_resolution=0,
        skip_overlength=args.skip_overlength
    )

    dataset = create_multi_data_source_dataset(data_file, builder)

    datasampler = DistributedSampler(dataset, num_replicas=args.world_size, rank=args.rank)

    if args.skip_step > 0:
        datasampler = SkipBatchSampler(datasampler, args.skip_step)

    unpad = (args.flash == 'cuda') and is_training
    dataloader = DataLoader(
        dataset,
        sampler=datasampler,
        batch_size=args.batch_size,
        pin_memory=True,
        num_workers=2,
        collate_fn=FM9GCollater(tokenizer=tokenizer, max_len=args.max_len, unpad=unpad)
    )

    return dataloader


def load_llm_tokenizer(args):
    return FM9GTokenizer(args.vocabs_path)


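# train() optionally freezes the vision encoder (vpm), resampler, or LLM branch via the
# tune_* flags, hands the model to deepspeed.initialize() for engine/optimizer setup,
# then runs the loop: forward -> CrossEntropyLoss -> engine.backward() -> engine.step(),
# with periodic logging, checkpoint export, and evaluation.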
def train(vllm_model, args):
    vllm_model.train()
    if not args.tune_vision:
        vllm_model.vpm.requires_grad_(False)
    if not args.tune_resampler:
        vllm_model.resampler.requires_grad_(False)
    if not args.tune_llm:
        vllm_model.llm.requires_grad_(False)

    if args.drop_vision_last_layer:
        vllm_model.norm.requires_grad_(True)

    vllm_engine, vllm_optim, _, _ = deepspeed.initialize(
        args=args, model=vllm_model, model_parameters=vllm_model.parameters()
    )
    torch.cuda.synchronize()
    logger.info(f'rank={utils.get_rank()} load model successful')

    tokenizer = load_llm_tokenizer(args)
    dataloader_train = get_dataloader(tokenizer, data_file=args.train_file, args=args, is_training=True)
    if args.eval and args.eval_file:
        dataloader_eval = get_dataloader(tokenizer, data_file=args.eval_file, args=args, is_training=False)
    else:
        dataloader_eval = None
    logger.info(f'rank={utils.get_rank()} load dataloader successful')

    global_step = args.start_step
    log_loss = 0

    if args.need_resume:
        load_path, client_state = vllm_engine.load_checkpoint(
            args.exp_ckpt_dir, tag=args.need_resume_tag)
        logger.info(f'Load pre-trained checkpoint from {load_path}, states: {client_state}')
        global_step = client_state['checkpoint_step']
        args.start_epoch = client_state.get('epoch', args.start_epoch)
        logger.info(f'rank={utils.get_rank()} load grad successful')

    # init tensorboard writer
    if args.tensorboard is not None and utils.is_main_process():
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter(log_dir=args.tensorboard)
    else:
        writer = None

    # target positions labelled -100 are ignored by the loss
    loss_fct = CrossEntropyLoss(reduction='mean', ignore_index=-100)
    for epoch in range(args.start_epoch, args.epochs):
        dataloader_train.sampler.set_epoch(epoch)

        logger.info(f'start epoch={epoch}')
        time_monitor = {}
        collect_statsd_metric("init", time_monitor)
        for step, batch in enumerate(dataloader_train):
            batch = convert_data_to_cuda(batch)
            collect_statsd_metric('dataload', time_monitor)
            vllm_model.zero_grad()
            output = vllm_model(data=batch)

            logits = output.logits.view(-1, output.logits.shape[-1]).contiguous()
            target = batch['target'].view(-1).type(torch.long).contiguous()
            loss = loss_fct(logits, target)
            # logger.info(f'epoch={epoch}, logits={logits}, target={target}, loss={loss}')
            collect_statsd_metric("forward", time_monitor)
            vllm_engine.backward(loss)
            collect_statsd_metric("backward", time_monitor)

            vllm_engine.step()
            collect_statsd_metric("optim", time_monitor)

            cost_info = f'dataload cost={(time_monitor["dataload"] - time_monitor["init"]): .2f} ' \
                + f'forward cost={(time_monitor["forward"] - time_monitor["dataload"]): .2f} ' \
                + f'backward cost={(time_monitor["backward"] - time_monitor["forward"]): .2f} ' \
                + f'optim cost={(time_monitor["optim"] - time_monitor["backward"]): .2f}'

            log_loss += loss.item()
            global_step += 1

            if args.tensorboard is not None and utils.is_main_process():
                writer.add_scalar("Loss/train", loss.item(), global_step)

            if global_step % args.log_step == 0:
                log_loss = utils.mean(utils.all_gather(log_loss))
                if utils.is_main_process():
                    logger.info(
                        f'Datetime: {datetime.datetime.now()} Step: {global_step-args.log_step} - {global_step}: loss: {log_loss/args.log_step: .4f}')
                    logger.info(f'time cost info {cost_info}')
                log_loss = 0

            if global_step % args.save_step == 0:
                exporter.export(vllm_engine, step, global_step, epoch, args)

            # end step
            collect_statsd_metric('init', time_monitor)

            if args.eval and global_step % args.eval_step == 0:
                evaluate(vllm_model, tokenizer, dataloader_eval, global_step, args)
                vllm_model.train()

    # export the final model
    exporter.export(vllm_engine, 0, global_step, args.epochs-1, args)
    if args.eval:
        evaluate(vllm_model, tokenizer, dataloader_eval, global_step, args)


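# evaluate() rebuilds the model without flash attention for inference, copies the current
# training weights into it, and runs beam search with beam_size=1 over the eval set.
# Per-source and average exact-match accuracy are printed, and the raw predictions are
# exported via exporter.export_eval_file().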
def evaluate(vllm_model, tokenizer, dataloader_eval, global_step, args):
    vllm_model.eval()
    torch.cuda.empty_cache()
    config = deepcopy(vllm_model.llm.config)
    # inference does not use flash_attn, so initialize a fresh model
    config.use_flash_attn = False
    llm = FM9GTorch(config)

    vpm = load_vpm(args)
    vision_dim = vpm.embed_dim
    eval_model = VLU_FM9G(llm, vpm, vision_dim, args.query_num)
    eval_model.eval().cuda()
    eval_model.load_state_dict(vllm_model.state_dict())

    torch.cuda.synchronize()
    logger.info(f'rank={utils.get_rank()} start to eval')
    transform = get_transform(args)
    beam_search = VLLMFM9GBeamSearch(eval_model, tokenizer, transform)

    results = []
    for step, batch in enumerate(dataloader_eval):
        batch = convert_data_to_cuda(batch)

        for pixel_values, raw, source in zip(batch['pixel_values'], deepcopy(batch['raw_data']), batch['source']):
            raw = raw[3:-4]  # strip the <s> and </s> tokens
            last_idx = raw.rfind(bot_indicator) + len(bot_indicator)
            data_list = [{'input': raw[:last_idx]}]
            gt = raw[last_idx:]

            with torch.inference_mode():
                res = beam_search.generate(
                    img_list=[pixel_values],
                    data_list=data_list,
                    use_transform=False,
                    beam_size=1,
                    max_length=1,
                )
            results.append(
                {
                    'y_pred': res[0],
                    'y_true': gt,
                    'source': source
                }
            )

    compose_results = utils.all_gather(results)
    compose_results_flatten = []
    for r in compose_results:
        compose_results_flatten.extend(r)

    df = pd.DataFrame.from_dict(compose_results_flatten)
    exporter.export_eval_file(df, global_step=global_step, args=args)

    source = sorted(list(set(df['source'])))
    for ds in source:
        ds_df = df[df['source'] == ds]
        print(ds, '%.3f' % (sum(ds_df['y_pred'] == ds_df['y_true']) / len(ds_df)), len(ds_df))
    print('avg', '%.3f' % (sum(df['y_pred'] == df['y_true']) / len(df)), len(df))

    del eval_model
    gc.collect()
    torch.cuda.empty_cache()
    vllm_model.train()


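# load_llm() maps the --flash flag onto the FM9G config: "none" disables flash attention
# entirely, "1d" selects the 1d attention-mask shape, and otherwise the 2d mask is used,
# with "triton"/"cuda" selecting the kernel implementation.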
def load_llm(args):
    config = FM9GConfig.from_json_file(args.llm_path)

    if args.flash == "none":
        config.use_flash_attn = False
    else:
        config.use_flash_attn = True
        if args.flash == "1d":
            config.flash_attn_mask_shape = "1d"
        else:
            config.flash_attn_mask_shape = "2d"
            if args.flash == "triton":
                config.flash_impl = "triton"
            elif args.flash == "cuda":
                config.flash_impl = "cuda"

    cpm_model = FM9GTorch(config)
    return cpm_model


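# load_vpm() builds the vision tower with timm (no classifier head, dynamic image size
# and padding). For plain ViTs the attention-pooling head is replaced with Identity, and
# the last transformer block can optionally be dropped as well.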
def load_vpm(args):
    model = timm.create_model(
        args.vision_encoder,
        pretrained=False,
        num_classes=0,
        dynamic_img_size=True,
        dynamic_img_pad=True
    )

    if isinstance(model, timm.models.VisionTransformer):
        if model.attn_pool is not None:
            model.attn_pool = torch.nn.Identity()

    if args.drop_vision_last_layer:
        model.blocks[-1] = torch.nn.Identity()
    return model

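# load_sharded_safetensors() merges every *.safetensors shard found in a folder into a
# single state dict; shard order does not matter because parameter names are unique.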
def load_sharded_safetensors(sharded_folder):
    safetensors_files = sorted([f for f in os.listdir(sharded_folder) if f.endswith('.safetensors')])

    if not safetensors_files:
        raise FileNotFoundError(f"No safetensors files found in {sharded_folder}")

    merged_state_dict = {}
    for file_name in safetensors_files:
        file_path = os.path.join(sharded_folder, file_name)
        logger.info(f"Loading safetensors shard from {file_path}")
        state_dict = load_file(file_path)
        merged_state_dict.update(state_dict)

    return merged_state_dict

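# setup_model() assembles the LLM and vision tower into VLU_FM9G, then loads weights from
# either a single .safetensors file, a sharded safetensors folder, or a regular torch
# checkpoint (torch.load), depending on what args.model_checkpoint points to.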
def setup_model(args):
    start = time.time()

    llm = load_llm(args)
    vpm = load_vpm(args)
    vision_dim = vpm.embed_dim
    model = VLU_FM9G(llm, vpm, vision_dim, args.query_num)
    if args.model_checkpoint:
        logger.info(f'load model_checkpoint from {args.model_checkpoint}')

        model_checkpoint = args.model_checkpoint
        file_extension = os.path.splitext(model_checkpoint)[1]

        if file_extension == '.safetensors':
            logger.info(f"Loading safetensors checkpoint from {model_checkpoint}")
            state_dict = load_file(model_checkpoint)
            info = model.load_state_dict(state_dict, strict=True)
            logger.info(f"Loaded checkpoint info={info}")

        elif os.path.isdir(model_checkpoint):
            logger.info(f"Loading safetensors from sharded folder: {model_checkpoint}")
            state_dict = load_sharded_safetensors(model_checkpoint)
            info = model.load_state_dict(state_dict, strict=True)
            logger.info(f"Loaded checkpoint info={info}")

        else:
            state_dict = torch.load(args.model_checkpoint, map_location='cpu')
            info = model.load_state_dict(state_dict, strict=True)
            logger.info(f"load checkpoint info={info}")

        del state_dict
        gc.collect()

    model.cuda()
    torch.cuda.empty_cache()
    return model


def main():
    args = initializer.get_args()
    # setup file and device
    initializer.setup(args)
    # load model
    model = setup_model(args)
    # train
    train(model, args)


if __name__ == '__main__':
    main()

@ -1,11 +1,7 @@
import copy
import json
import os
from typing import Any
from typing import Dict
from typing import Union

from .log import logger
import copy
from typing import Any, Dict, Union


class Config(object):

@ -0,0 +1,6 @@
from datetime import datetime

current_time = datetime.now().strftime("%Y年%m月%d日")  # Chinese date format, e.g. "2024年06月01日"
usr_indicator = "<用户>"  # "<user>" turn marker
bot_indicator = "<AI>"
# SYSTEM prompt (Chinese): "<user>Your name is Jiuge (九格), a multimodal large language model
# developed by QiYuan Lab.\nYour knowledge cutoff is April 2022; the current date is {current_time}."
SYSTEM = f"{usr_indicator}你叫九格,是由启元实验室研发的多模态大型语言模型。\n你的知识库截止至2022年4月,当前时间是{current_time}。"

@ -1,4 +0,0 @@
# !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright @2024, QiYuan Inc