forked from p04798526/LLaMA-Factory-Mirror

Merge pull request #26 from BUAADreamer/main

Add code for reading from multiple files in one directory

commit e3f380c1be
dataset_info.json

@@ -1,107 +1,116 @@
 {
   "alpaca_en": {
     "hf_hub_url": "tatsu-lab/alpaca"
   },
   "alpaca_zh": {
     "file_name": "alpaca_data_zh_51k.json",
     "file_sha1": "e655af3db557a4197f7b0cf92e1986b08fae6311"
   },
   "alpaca_gpt4_en": {
     "file_name": "alpaca_gpt4_data_en.json",
     "file_sha1": "647f4ad447bd993e4b6b6223d1be15208bab694a"
   },
   "alpaca_gpt4_zh": {
     "file_name": "alpaca_gpt4_data_zh.json",
     "file_sha1": "3eaa3bda364ccdd59925d7448a698256c31ef845"
   },
   "belle_0.5m": {
     "hf_hub_url": "BelleGroup/train_0.5M_CN"
   },
   "belle_1m": {
     "hf_hub_url": "BelleGroup/train_1M_CN"
   },
   "belle_2m": {
     "hf_hub_url": "BelleGroup/train_2M_CN"
   },
   "belle_dialog": {
     "hf_hub_url": "BelleGroup/generated_chat_0.4M"
   },
   "belle_math": {
     "hf_hub_url": "BelleGroup/school_math_0.25M"
   },
   "belle_multiturn": {
     "hf_hub_url": "BelleGroup/multiturn_chat_0.8M"
   },
   "guanaco": {
     "hf_hub_url": "JosephusCheung/GuanacoDataset"
   },
   "firefly": {
     "hf_hub_url": "YeungNLP/firefly-train-1.1M",
     "columns": {
       "prompt": "input",
       "query": "",
       "response": "target",
       "history": ""
     }
   },
   "codealpaca": {
     "hf_hub_url": "sahil2801/CodeAlpaca-20k"
   },
   "alpaca_cot": {
     "hf_hub_url": "QingyiSi/Alpaca-CoT"
   },
   "webqa": {
     "hf_hub_url": "suolyer/webqa",
     "columns": {
       "prompt": "input",
       "query": "",
       "response": "output",
       "history": ""
     }
   },
   "ultra_chat": {
     "script_url": "ultra_chat",
     "columns": {
       "prompt": "instruction",
       "query": "",
       "response": "output",
       "history": "history"
     }
   },
   "example": {
     "script_url": "example_dataset",
     "columns": {
       "prompt": "instruction",
       "query": "input",
       "response": "output",
       "history": "history"
     }
   },
   "comparison_gpt4_en": {
     "file_name": "comparison_gpt4_data_en.json",
     "file_sha1": "eeb295ce0ab011c37af52596460c8a57d07ad19f"
   },
   "comparison_gpt4_zh": {
     "file_name": "comparison_gpt4_data_zh.json",
     "file_sha1": "b99a41c1c864019d9b0c07dbcd5df0560cf33ce0"
   },
   "hh_rlhf_en": {
     "script_url": "hh_rlhf_en",
     "columns": {
       "prompt": "instruction",
       "query": "",
       "response": "output",
       "history": "history"
     }
   },
   "wiki_demo": {
     "file_name": "wiki_demo.txt",
     "file_sha1": "b2288edb05b233e5b35250fd4b308a5fa21fa66d",
     "columns": {
       "prompt": "text",
       "query": "",
       "response": "",
       "history": ""
     }
-  }
+  },
+  "pretrain_data": {
+    "file_name": "pretrain_data",
+    "columns": {
+      "prompt": "content",
+      "query": "",
+      "response": "",
+      "history": ""
+    }
+  }
 }
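Note on the new entry: `pretrain_data` sets `file_name` to a directory rather than a single file; with this change, every file found inside that directory is loaded as part of the dataset, and each record's `content` field is mapped to the `prompt` column. A minimal sketch of the expected layout under the dataset directory (the individual file names here are hypothetical):

    <dataset_dir>/
    ├── dataset_info.json
    └── pretrain_data/        # "file_name": "pretrain_data" points at this directory
        ├── part_a.json       # hypothetical names; each contained file is loaded as its own entry
        ├── part_b.json
        └── part_c.jsonl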
@@ -0,0 +1,7 @@
+[
+  {
+    "id": 0,
+    "title": "拥有自己的航空器",
+    "content": "想自己驾驶飞机或从事通用航空事业的人,大都想拥有自己的航空器。\"自己的\"意思包括自己购买、自己制造、可供自己使用(租用或借用)等等。\n花自己的钱买一架飞机来开一开,国内有些人或企业已实现了这个愿望。现在一架国产超轻型的“蜜蜂”飞机售价在l0万元以下,进口的一些单发的双座飞机售价在100万元之内。据估计,全国大约有几十万人具有这种购买能力。\n自己造一架飞机来开也是一个好创意。美国的通航飞机中有l/5是自制的。有的自制飞机甚至还创造了世界飞行纪录。今天自己造飞机比当年莱特兄弟容易多了。飞机的基本构造已无秘密可言,各种飞机部件和材料都不难买到。尤其主要的是,技术进步大大改进了配件的性能,与此同时,配件的重量也下降了很多。莱特兄弟当年使用的12马力汽油发动机比现在30马力的同类产品还重。如果有人有志于此而且具备造飞机的种种条件,应该说这个目标也是可以实现的。有两点值得注意,一是在莱特兄弟造飞机时没有前人经验,全靠自己摸索。现在不同了,航空制造已有了上百年的知识和经验可供后人学习和利用。现在如果谁想自己造飞机就不用闭门造车了。制造者本人首先应该去学习和掌握一些必要知识和经验才行。其次,在莱特兄弟时代,没有国家民航当局,他们的航空活动不受法规约束。今天就不一样了,所有要升空的航空器必须先接受民航当局的鉴定,以保证飞行安全。绝不允许以生命为赌注的任何冒险行为。\n租用飞机也是实现自驾飞机的方式之一。国内也还有另一种形式,即参加飞行驾驶学校接受培训,当然所交的学费价格是不菲的。预计未来在我国必将出现出各类飞行俱乐部。到那时,飞行爱好者可以租用飞机去上天过一把瘾了。"
+  }
+]
@@ -0,0 +1,12 @@
+[
+  {
+    "id": 0,
+    "title": "大卫·亨利",
+    "content": "大卫·亨利\n\n大卫·克莱顿·亨利(David Clayton Henrie,),美国演员。近来在迪士尼频道原创电视影集《少年魔法师》(Wizards of Waverly Place)当中演出贾斯汀·鲁索(Justin Russo)一角。\n\n大卫·亨利出生在加州Mission Viejo,在凤凰城长大。他的胞弟劳伦斯·亨利(Lorenzo Henrie)也是演员。大卫·亨利就读夏安传统学校。家中是信奉罗马天主教。 \n\n大卫在2007年拍摄少年魔法师期间认识女演员露西·海尔(Lucy Hale),之后与其交往,于2009年分手。\n\n10岁时,大卫·亨利和SAG在凤凰城签订了合约,并开始走出去试镜。 9岁的时候,在沙加缅度进行商业拍摄,SAG董事建议大卫·亨利搬到洛杉矶。在10岁那年夏天,他和他的家人搬到了好莱坞。他预定他的前2支商业试镜,扮演主要角色为汉堡王和桂格燕麦。他初演电视节目为Providence。 \n\n到了13岁,大卫有了他的第一次重大突破,在福克斯公司的喜剧The Pitts饰演 Petey Pitt一角。大卫下出作品为的Hallmark movie为Monster Maker,和琳达布莱儿、乔治甘迺迪共同演出,并要求回来Hallmark movie公司。 \n\n在18岁时,大卫得到了迪士尼频道原创系列演出机会,该节目2007年10月12日首播。大卫2008年参加了迪士尼频道的游戏节目。他是绿色团队的队长,隔年,为旋风队队长。他在迪士尼原创电影《少年魔法师》之后在《酷爸的疯狂假期》中有饰演一角。\n"
+  },
+  {
+    "id": 1,
+    "title": "大卫·亨利",
+    "content": "大卫·亨利\n\n大卫·克莱顿·亨利(David Clayton Henrie,),美国演员。近来在迪士尼频道原创电视影集《少年魔法师》(Wizards of Waverly Place)当中演出贾斯汀·鲁索(Justin Russo)一角。\n\n大卫·亨利出生在加州Mission Viejo,在凤凰城长大。他的胞弟劳伦斯·亨利(Lorenzo Henrie)也是演员。大卫·亨利就读夏安传统学校。家中是信奉罗马天主教。 \n\n大卫在2007年拍摄少年魔法师期间认识女演员露西·海尔(Lucy Hale),之后与其交往,于2009年分手。\n\n10岁时,大卫·亨利和SAG在凤凰城签订了合约,并开始走出去试镜。 9岁的时候,在沙加缅度进行商业拍摄,SAG董事建议大卫·亨利搬到洛杉矶。在10岁那年夏天,他和他的家人搬到了好莱坞。他预定他的前2支商业试镜,扮演主要角色为汉堡王和桂格燕麦。他初演电视节目为Providence。 \n\n到了13岁,大卫有了他的第一次重大突破,在福克斯公司的喜剧The Pitts饰演 Petey Pitt一角。大卫下出作品为的Hallmark movie为Monster Maker,和琳达布莱儿、乔治甘迺迪共同演出,并要求回来Hallmark movie公司。 \n\n在18岁时,大卫得到了迪士尼频道原创系列演出机会,该节目2007年10月12日首播。大卫2008年参加了迪士尼频道的游戏节目。他是绿色团队的队长,隔年,为旋风队队长。他在迪士尼原创电影《少年魔法师》之后在《酷爸的疯狂假期》中有饰演一角。\n"
+  }
+]
@@ -0,0 +1,2 @@
+{"id": 0,"title": "大卫·亨利","content": "大卫·亨利\n\n大卫·克莱顿·亨利(David Clayton Henrie,),美国演员。近来在迪士尼频道原创电视影集《少年魔法师》(Wizards of Waverly Place)当中演出贾斯汀·鲁索(Justin Russo)一角。\n\n大卫·亨利出生在加州Mission Viejo,在凤凰城长大。他的胞弟劳伦斯·亨利(Lorenzo Henrie)也是演员。大卫·亨利就读夏安传统学校。家中是信奉罗马天主教。 \n\n大卫在2007年拍摄少年魔法师期间认识女演员露西·海尔(Lucy Hale),之后与其交往,于2009年分手。\n\n10岁时,大卫·亨利和SAG在凤凰城签订了合约,并开始走出去试镜。 9岁的时候,在沙加缅度进行商业拍摄,SAG董事建议大卫·亨利搬到洛杉矶。在10岁那年夏天,他和他的家人搬到了好莱坞。他预定他的前2支商业试镜,扮演主要角色为汉堡王和桂格燕麦。他初演电视节目为Providence。 \n\n到了13岁,大卫有了他的第一次重大突破,在福克斯公司的喜剧The Pitts饰演 Petey Pitt一角。大卫下出作品为的Hallmark movie为Monster Maker,和琳达布莱儿、乔治甘迺迪共同演出,并要求回来Hallmark movie公司。 \n\n在18岁时,大卫得到了迪士尼频道原创系列演出机会,该节目2007年10月12日首播。大卫2008年参加了迪士尼频道的游戏节目。他是绿色团队的队长,隔年,为旋风队队长。他在迪士尼原创电影《少年魔法师》之后在《酷爸的疯狂假期》中有饰演一角。\n"}
+{"id": 1,"title": "大卫·亨利","content": "大卫·亨利\n\n大卫·克莱顿·亨利(David Clayton Henrie,),美国演员。近来在迪士尼频道原创电视影集《少年魔法师》(Wizards of Waverly Place)当中演出贾斯汀·鲁索(Justin Russo)一角。\n\n大卫·亨利出生在加州Mission Viejo,在凤凰城长大。他的胞弟劳伦斯·亨利(Lorenzo Henrie)也是演员。大卫·亨利就读夏安传统学校。家中是信奉罗马天主教。 \n\n大卫在2007年拍摄少年魔法师期间认识女演员露西·海尔(Lucy Hale),之后与其交往,于2009年分手。\n\n10岁时,大卫·亨利和SAG在凤凰城签订了合约,并开始走出去试镜。 9岁的时候,在沙加缅度进行商业拍摄,SAG董事建议大卫·亨利搬到洛杉矶。在10岁那年夏天,他和他的家人搬到了好莱坞。他预定他的前2支商业试镜,扮演主要角色为汉堡王和桂格燕麦。他初演电视节目为Providence。 \n\n到了13岁,大卫有了他的第一次重大突破,在福克斯公司的喜剧The Pitts饰演 Petey Pitt一角。大卫下出作品为的Hallmark movie为Monster Maker,和琳达布莱儿、乔治甘迺迪共同演出,并要求回来Hallmark movie公司。 \n\n在18岁时,大卫得到了迪士尼频道原创系列演出机会,该节目2007年10月12日首播。大卫2008年参加了迪士尼频道的游戏节目。他是绿色团队的队长,隔年,为旋风队队长。他在迪士尼原创电影《少年魔法师》之后在《酷爸的疯狂假期》中有饰演一角。\n"}
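The three new files above are example pretrain records with `id`/`title`/`content` fields, matching the `"columns": {"prompt": "content", ...}` mapping of the `pretrain_data` entry. A minimal sketch, assuming the Hugging Face `datasets` library and a hypothetical file path, of how one such file ends up in the `prompt` column:

    # Sketch only: load one of the example files and rename "content" to "prompt",
    # mirroring the "columns" mapping in dataset_info.json. The path is hypothetical.
    from datasets import load_dataset

    raw = load_dataset("json", data_files="data/pretrain_data/part_a.json")["train"]
    dataset = raw.rename_column("content", "prompt")
    print(dataset[0]["prompt"][:30])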
@@ -56,7 +56,6 @@ require_version("accelerate>=0.19.0", "To fix: pip install accelerate>=0.19.0")
 require_version("peft>=0.3.0", "To fix: pip install peft>=0.3.0")
 require_version("trl>=0.4.1", "To fix: pip install trl>=0.4.1")
-
 
 logger = get_logger(__name__)
 
 
@@ -92,10 +91,12 @@ def _init_adapter(
 
     if model_args.checkpoint_dir is not None:
         if finetuning_args.finetuning_type != "lora":
-            assert is_mergeable and len(model_args.checkpoint_dir) == 1, "Only LoRA tuning accepts multiple checkpoints."
+            assert is_mergeable and len(
+                model_args.checkpoint_dir) == 1, "Only LoRA tuning accepts multiple checkpoints."
             load_trainable_params(model, model_args.checkpoint_dir[0]) # load model checkpoints for non-peft methods
         else:
-            assert is_mergeable or len(model_args.checkpoint_dir) == 1, "Quantized model only accepts a single checkpoint."
+            assert is_mergeable or len(
+                model_args.checkpoint_dir) == 1, "Quantized model only accepts a single checkpoint."
 
     if finetuning_args.finetuning_type == "lora":
         logger.info("Fine-tuning method: LoRA")
@@ -105,7 +106,8 @@ def _init_adapter(
             assert os.path.exists(os.path.join(model_args.checkpoint_dir[0], CONFIG_NAME)), \
                 "The given checkpoint is not a LoRA checkpoint, please specify `--finetuning_type full/freeze` instead."
 
-            if (is_trainable and model_args.resume_lora_training) or (not is_mergeable): # continually train on the lora weights
+            if (is_trainable and model_args.resume_lora_training) or (
+                    not is_mergeable): # continually train on the lora weights
                 checkpoints_to_merge, lastest_checkpoint = model_args.checkpoint_dir[:-1], model_args.checkpoint_dir[-1]
             else:
                 checkpoints_to_merge = model_args.checkpoint_dir
@@ -117,10 +119,10 @@ def _init_adapter(
             if len(checkpoints_to_merge) > 0:
                 logger.info("Merged {} model checkpoint(s).".format(len(checkpoints_to_merge)))
 
             if lastest_checkpoint is not None: # resume lora training or quantized inference
                 model = PeftModel.from_pretrained(model, lastest_checkpoint, is_trainable=is_trainable)
 
         if is_trainable and lastest_checkpoint is None: # create new lora weights while training
             lora_config = LoraConfig(
                 task_type=TaskType.CAUSAL_LM,
                 inference_mode=False,
@@ -168,7 +170,7 @@ def load_pretrained(
         padding_side="left",
         **config_kwargs
     )
     tokenizer.pad_token_id = 0 if tokenizer.pad_token_id is None else tokenizer.pad_token_id # set as the <unk> token
 
     config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
     is_mergeable = True
@@ -184,9 +186,11 @@ def load_pretrained(
             )
         elif model_args.quantization_bit == 4:
             require_version("bitsandbytes>=0.39.0", "To fix: pip install bitsandbytes>=0.39.0")
-            require_version("transformers>=4.30.0.dev0", "To fix: pip install git+https://github.com/huggingface/transformers.git")
+            require_version("transformers>=4.30.0.dev0",
+                            "To fix: pip install git+https://github.com/huggingface/transformers.git")
             require_version("peft>=0.4.0.dev0", "To fix: pip install git+https://github.com/huggingface/peft.git")
-            require_version("accelerate>=0.20.0.dev0", "To fix: pip install git+https://github.com/huggingface/accelerate.git")
+            require_version("accelerate>=0.20.0.dev0",
+                            "To fix: pip install git+https://github.com/huggingface/accelerate.git")
             config_kwargs["load_in_4bit"] = True
             config_kwargs["quantization_config"] = BitsAndBytesConfig(
                 load_in_4bit=True,
@@ -214,10 +218,10 @@ def load_pretrained(
     model = prepare_model_for_training(model) if is_trainable else model
     model = _init_adapter(model, model_args, finetuning_args, is_trainable, is_mergeable)
 
     if stage == "rm" or stage == "ppo": # add value head
         model = AutoModelForCausalLMWithValueHead.from_pretrained(model)
 
         if stage == "ppo": # load reward model
             assert is_trainable, "PPO stage cannot be performed at evaluation."
             assert model_args.reward_model is not None, "Reward model is necessary for PPO training."
             logger.info("Load reward model from {}".format(model_args.reward_model))
@@ -230,8 +234,8 @@ def load_pretrained(
             model._is_int8_training_enabled = True
 
     if not is_trainable:
         model.requires_grad_(False) # fix all model params
         model = model.half() if model_args.quantization_bit is None else model # cast from fp32 to fp16
 
     print_trainable_params(model)
 
@@ -241,11 +245,11 @@ def load_pretrained(
 def prepare_args(
         stage: Literal["pt", "sft", "rm", "ppo"]
 ) -> Tuple[ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments, FinetuningArguments]:
-
     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments, FinetuningArguments))
 
     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # Provide arguments with a json file.
-        model_args, data_args, training_args, finetuning_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+        model_args, data_args, training_args, finetuning_args = parser.parse_json_file(
+            json_file=os.path.abspath(sys.argv[1]))
     else:
         model_args, data_args, training_args, finetuning_args = parser.parse_args_into_dataclasses()
 
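Usage note (a sketch, not taken from the patch): because of the `len(sys.argv) == 2 and sys.argv[1].endswith(".json")` branch above, all training arguments can be supplied through a single JSON file instead of command-line flags, along the lines of (script and file names are hypothetical):

    python src/train_sft.py my_training_args.json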
@@ -286,7 +290,7 @@ def prepare_args(
         logger.warning("`ddp_find_unused_parameters` needs to be set as False in DDP training.")
         training_args.ddp_find_unused_parameters = False
 
     training_args.optim = "adamw_torch" if training_args.optim == "adamw_hf" else training_args.optim # suppress warning
 
     if model_args.quantization_bit is not None:
         if training_args.fp16:
@@ -310,10 +314,9 @@ def prepare_args(
 
 
 def prepare_infer_args() -> Tuple[ModelArguments, DataTrainingArguments, FinetuningArguments]:
-
     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, FinetuningArguments))
 
     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # Provide arguments with a json file.
         model_args, data_args, finetuning_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
     else:
         model_args, data_args, finetuning_args = parser.parse_args_into_dataclasses()
@@ -331,7 +334,6 @@ def prepare_data(
         model_args: ModelArguments,
         data_args: DataTrainingArguments
 ) -> Dataset:
-
     def checksum(file_path, hash):
         with open(file_path, "rb") as datafile:
             binary_data = datafile.read()
@@ -340,7 +342,7 @@ def prepare_data(
                 logger.warning("Checksum failed for {}. It may vary depending on the platform.".format(file_path))
 
     max_samples = data_args.max_samples
     all_datasets: List[Dataset] = [] # support multiple datasets
 
     for dataset_attr in data_args.dataset_list:
 
@@ -356,12 +358,10 @@ def prepare_data(
         elif dataset_attr.load_from == "file":
             data_file = os.path.join(data_args.dataset_dir, dataset_attr.file_name)
             extension = dataset_attr.file_name.split(".")[-1]
-
             if dataset_attr.file_sha1 is not None:
                 checksum(data_file, dataset_attr.file_sha1)
             else:
                 logger.warning("Checksum failed: missing SHA-1 hash value in dataset_info.json.")
-
             raw_datasets = load_dataset(
                 extension if extension in ["csv", "json"] else "text",
                 data_files=data_file,
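For directory-backed datasets, each file discovered in the directory becomes its own `file` entry (see the `dataset_info.json` change above and the config change below), so every contained file goes through this same loading path individually. A small sketch of how the builder is chosen from the extension (the file name is hypothetical):

    # Sketch: extension-based builder selection matching the load_dataset call above.
    extension = "pretrain_data/part_a.json".split(".")[-1]           # -> "json"
    builder = extension if extension in ["csv", "json"] else "text"  # other extensions fall back to "text"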
@@ -383,11 +383,11 @@ def prepare_data(
             ("query_column", "query"),
             ("response_column", "response"),
             ("history_column", "history")
         ]: # every dataset will have 4 columns same as each other
             if getattr(dataset_attr, column_name) != target_name:
                 if getattr(dataset_attr, column_name):
                     dataset = dataset.rename_column(getattr(dataset_attr, column_name), target_name)
                 else: # None or empty string
                     dataset = dataset.add_column(target_name, dummy_data)
         all_datasets.append(dataset)
 
@@ -406,7 +406,6 @@ def preprocess_data(
         training_args: Seq2SeqTrainingArguments,
         stage: Literal["pt", "sft", "rm", "ppo"]
 ) -> Dataset:
-
     column_names = list(dataset.column_names)
     prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
     prompt_template = Template(data_args.prompt_template)
@@ -429,7 +428,8 @@ def preprocess_data(
         # we drop the small remainder, and if the total_length < block_size, we exclude this batch
         total_length = (total_length // data_args.max_source_length) * data_args.max_source_length
         # split by chunks of max_source_length
-        result = [concatenated_ids[i: i+data_args.max_source_length] for i in range(0, total_length, data_args.max_source_length)]
+        result = [concatenated_ids[i: i + data_args.max_source_length] for i in
+                  range(0, total_length, data_args.max_source_length)]
         return {
             "input_ids": result,
             "labels": result.copy()
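A tiny worked example of the chunking above (values made up): with `max_source_length = 4` and 10 concatenated token ids, the remainder is dropped and two chunks of 4 remain.

    # Illustration of the grouping logic, not part of the patch.
    max_source_length = 4
    concatenated_ids = list(range(10))
    total_length = (len(concatenated_ids) // max_source_length) * max_source_length  # 8
    result = [concatenated_ids[i: i + max_source_length]
              for i in range(0, total_length, max_source_length)]
    # result == [[0, 1, 2, 3], [4, 5, 6, 7]]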
@@ -442,9 +442,9 @@ def preprocess_data(
             source_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
             target_ids = tokenizer.encode(text=answer, add_special_tokens=False)
 
             if len(source_ids) > data_args.max_source_length - 1: # bos token
                 source_ids = source_ids[:data_args.max_source_length - 1]
             if len(target_ids) > data_args.max_target_length - 1: # eos token
                 target_ids = target_ids[:data_args.max_target_length - 1]
 
             input_ids = source_ids + [tokenizer.bos_token_id] + target_ids + [tokenizer.eos_token_id]
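For reference, a sketch of the sequence layout built above for one supervised example (token ids are made up; 1 and 2 stand in for the bos/eos ids):

    # Illustration only.
    source_ids = [101, 102]                            # prompt, truncated to max_source_length - 1
    target_ids = [201, 202, 203]                       # answer, truncated to max_target_length - 1
    input_ids = source_ids + [1] + target_ids + [2]    # X [BOS] Y [EOS]
    # -> [101, 102, 1, 201, 202, 203, 2]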
@@ -461,9 +461,9 @@ def preprocess_data(
             source_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
             target_ids = tokenizer.encode(text=answer, add_special_tokens=False)
 
             if len(source_ids) > data_args.max_source_length - 1: # bos token
                 source_ids = source_ids[:data_args.max_source_length - 1]
             if len(target_ids) > data_args.max_target_length - 1: # bos token
                 target_ids = target_ids[:data_args.max_target_length - 1]
 
             input_ids = source_ids + [tokenizer.bos_token_id]
@@ -481,11 +481,11 @@ def preprocess_data(
             accept_ids = tokenizer.encode(text=answer[0], add_special_tokens=False)
             reject_ids = tokenizer.encode(text=answer[1], add_special_tokens=False)
 
             if len(source_ids) > data_args.max_source_length - 1: # bos token
                 source_ids = source_ids[:data_args.max_source_length - 1]
             if len(accept_ids) > data_args.max_target_length - 1: # eos token
                 accept_ids = accept_ids[:data_args.max_target_length - 1]
             if len(reject_ids) > data_args.max_target_length - 1: # eos token
                 reject_ids = reject_ids[:data_args.max_target_length - 1]
 
             accept_ids = source_ids + [tokenizer.bos_token_id] + accept_ids + [tokenizer.eos_token_id]
@@ -7,7 +7,6 @@ from dataclasses import asdict, dataclass, field
 
 @dataclass
 class DatasetAttr:
-
     load_from: str
     dataset_name: Optional[str] = None
     file_name: Optional[str] = None
@@ -68,7 +67,8 @@ class ModelArguments:
     )
     checkpoint_dir: Optional[str] = field(
         default=None,
-        metadata={"help": "Path to the directory(s) containing the delta model checkpoints as well as the configurations."}
+        metadata={
+            "help": "Path to the directory(s) containing the delta model checkpoints as well as the configurations."}
     )
     reward_model: Optional[str] = field(
         default=None,
@@ -76,7 +76,8 @@ class ModelArguments:
     )
     resume_lora_training: Optional[bool] = field(
         default=True,
-        metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
+        metadata={
+            "help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
     )
     plot_loss: Optional[bool] = field(
         default=False,
@@ -84,7 +85,7 @@ class ModelArguments:
     )
 
     def __post_init__(self):
         if self.checkpoint_dir is not None: # support merging multiple lora weights
             self.checkpoint_dir = [cd.strip() for cd in self.checkpoint_dir.split(",")]
 
 
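Usage note (a sketch, not from the patch): since `checkpoint_dir` is split on commas, several LoRA checkpoints can be merged by passing one comma-separated value; non-LoRA fine-tuning accepts only a single checkpoint (see the assertions in `_init_adapter` above). Paths are hypothetical:

    --checkpoint_dir path_to_lora_1,path_to_lora_2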
@@ -146,7 +147,7 @@ class DataTrainingArguments:
         metadata={"help": "Which template to use for constructing prompts in training and inference."}
     )
 
     def __post_init__(self): # support mixing multiple datasets
         dataset_names = [ds.strip() for ds in self.dataset.split(",")]
         with open(os.path.join(self.dataset_dir, "dataset_info.json"), "r") as f:
             dataset_info = json.load(f)
@@ -155,25 +156,42 @@ class DataTrainingArguments:
         for name in dataset_names:
             if name not in dataset_info:
                 raise ValueError("Undefined dataset {} in dataset_info.json.".format(name))
-
+            dataset_attrs = []
+            dataset_attr = None
             if "hf_hub_url" in dataset_info[name]:
                 dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"])
             elif "script_url" in dataset_info[name]:
                 dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"])
-            else:
+            elif os.path.isfile(os.path.join(self.dataset_dir, dataset_info[name]["file_name"])):
                 dataset_attr = DatasetAttr(
                     "file",
                     file_name=dataset_info[name]["file_name"],
                     file_sha1=dataset_info[name]["file_sha1"] if "file_sha1" in dataset_info[name] else None
                 )
-
-            if "columns" in dataset_info[name]:
-                dataset_attr.prompt_column = dataset_info[name]["columns"].get("prompt", None)
-                dataset_attr.query_column = dataset_info[name]["columns"].get("query", None)
-                dataset_attr.response_column = dataset_info[name]["columns"].get("response", None)
-                dataset_attr.history_column = dataset_info[name]["columns"].get("history", None)
-
-            self.dataset_list.append(dataset_attr)
+            else:
+                # Support Directory
+                for file_name in os.listdir(os.path.join(self.dataset_dir, dataset_info[name]["file_name"])):
+                    path = os.path.join(dataset_info[name]["file_name"], file_name)
+                    dataset_attrs.append(DatasetAttr(
+                        "file",
+                        file_name=path,
+                        file_sha1=dataset_info[name]["file_sha1"] if "file_sha1" in dataset_info[name] else None
+                    ))
+            if dataset_attr is not None:
+                if "columns" in dataset_info[name]:
+                    dataset_attr.prompt_column = dataset_info[name]["columns"].get("prompt", None)
+                    dataset_attr.query_column = dataset_info[name]["columns"].get("query", None)
+                    dataset_attr.response_column = dataset_info[name]["columns"].get("response", None)
+                    dataset_attr.history_column = dataset_info[name]["columns"].get("history", None)
+                self.dataset_list.append(dataset_attr)
+            else:
+                for i, dataset_attr in enumerate(dataset_attrs):
+                    if "columns" in dataset_info[name]:
+                        dataset_attr.prompt_column = dataset_info[name]["columns"].get("prompt", None)
+                        dataset_attr.query_column = dataset_info[name]["columns"].get("query", None)
+                        dataset_attr.response_column = dataset_info[name]["columns"].get("response", None)
+                        dataset_attr.history_column = dataset_info[name]["columns"].get("history", None)
+                    self.dataset_list.append(dataset_attr)
 
 
 @dataclass
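For clarity, a standalone sketch (not part of the patch) of what the new branch does for a directory-backed entry such as `pretrain_data`: when `file_name` is not a regular file under `dataset_dir`, every file inside the directory is registered as its own file-based dataset entry. File names below are hypothetical.

    import os

    dataset_dir = "data"                      # assumed dataset directory
    entry = {"file_name": "pretrain_data"}    # the dataset_info.json entry added above

    expanded = []
    if not os.path.isfile(os.path.join(dataset_dir, entry["file_name"])):
        # "file_name" is a directory: list it and register each contained file separately
        for file_name in os.listdir(os.path.join(dataset_dir, entry["file_name"])):
            expanded.append(os.path.join(entry["file_name"], file_name))

    print(expanded)  # e.g. ['pretrain_data/part_a.json', 'pretrain_data/part_b.json', ...]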
@@ -216,14 +234,16 @@ class FinetuningArguments:
 
     def __post_init__(self):
         if isinstance(self.lora_target, str):
-            self.lora_target = [target.strip() for target in self.lora_target.split(",")] # support custom target modules of LoRA
+            self.lora_target = [target.strip() for target in
+                                self.lora_target.split(",")] # support custom target modules of LoRA
 
         if self.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0
-            trainable_layer_ids = [27-k for k in range(self.num_layer_trainable)]
+            trainable_layer_ids = [27 - k for k in range(self.num_layer_trainable)]
         else: # fine-tuning the first n layers if num_layer_trainable < 0
             trainable_layer_ids = [k for k in range(-self.num_layer_trainable)]
 
-        self.trainable_layers = ["layers.{:d}.{}".format(idx, self.name_module_trainable) for idx in trainable_layer_ids]
+        self.trainable_layers = ["layers.{:d}.{}".format(idx, self.name_module_trainable) for idx in
+                                 trainable_layer_ids]
 
         assert self.finetuning_type in ["none", "freeze", "lora", "full"], "Invalid fine-tuning method."
 