Update llamafy_internlm2.py
This commit is contained in:
parent 484becae1b
commit 28135d787d
@@ -40,7 +40,7 @@ def save_weight(
     llama2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
     for key, value in tqdm(internlm2_state_dict.items(), desc="Convert format"):
         if "output" in key:
-            llama2_state_dict["lm_head"] = value
+            llama2_state_dict[key.replace("output", "lm_head")] = value
         elif "tok_embeddings" in key:
             llama2_state_dict[key.replace("tok_embeddings", "embed_tokens")] = value
         elif "attention_norm" in key:
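For context, a minimal sketch of what this one-line change does: the old code assigned the output projection tensor to a bare "lm_head" key, dropping whatever suffix the original key carried, while key.replace("output", "lm_head") keeps the rest of the key (e.g. a ".weight" suffix) intact, matching how the other branches of the loop already convert keys. The state dict and tensor below are illustrative placeholders, not values taken from the repository.

from collections import OrderedDict
from typing import Dict

import torch

# Illustrative InternLM2-style key; the real checkpoint contains many more entries.
internlm2_state_dict: Dict[str, torch.Tensor] = OrderedDict(
    {"output.weight": torch.zeros(4, 4)}
)

llama2_state_dict: Dict[str, torch.Tensor] = OrderedDict()
for key, value in internlm2_state_dict.items():
    if "output" in key:
        # Before the fix: llama2_state_dict["lm_head"] = value  (".weight" suffix lost)
        # After the fix: only the "output" substring is renamed, the suffix is preserved.
        llama2_state_dict[key.replace("output", "lm_head")] = value

print(list(llama2_state_dict))  # ['lm_head.weight']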