update webui

commit ba998c67ab
parent 26912cd816
@@ -200,6 +200,9 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments):
         if self.stage == "ppo" and self.reward_model_type == "lora" and self.finetuning_type != "lora":
             raise ValueError("Freeze/Full PPO training needs `reward_model_type=full`.")

+        if self.use_llama_pro and self.finetuning_type != "freeze":
+            raise ValueError("`use_llama_pro` is only valid for the Freeze method.")
+
     def save_to_json(self, json_path: str):
         r"""Saves the content of this instance in JSON format inside `json_path`."""
         json_string = json.dumps(asdict(self), indent=2, sort_keys=True) + "\n"
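
Note: the new guard ties `use_llama_pro` to the freeze tuning method, since LLaMA Pro trains only the newly expanded blocks while the original weights stay frozen. A minimal sketch of this kind of dataclass post-init validation, assuming only the two fields involved (the class name is illustrative):

    from dataclasses import dataclass

    @dataclass
    class Args:
        finetuning_type: str = "lora"
        use_llama_pro: bool = False

        def __post_init__(self):
            # Fail fast on incompatible settings, as the added lines above do.
            if self.use_llama_pro and self.finetuning_type != "freeze":
                raise ValueError("`use_llama_pro` is only valid for the Freeze method.")
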
@@ -26,6 +26,7 @@ def save_model(
     max_shard_size: int,
     export_quantization_bit: int,
     export_quantization_dataset: str,
+    export_legacy_format: bool,
     export_dir: str,
 ) -> Generator[str, None, None]:
     error = ""
@@ -61,6 +62,7 @@ def save_model(
         export_size=max_shard_size,
         export_quantization_bit=int(export_quantization_bit) if export_quantization_bit in GPTQ_BITS else None,
         export_quantization_dataset=export_quantization_dataset,
+        export_legacy_format=export_legacy_format,
     )

     yield ALERTS["info_exporting"][lang]
@@ -73,6 +75,7 @@ def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
         max_shard_size = gr.Slider(value=1, minimum=1, maximum=100)
         export_quantization_bit = gr.Dropdown(choices=["none", "8", "4", "3", "2"], value="none")
         export_quantization_dataset = gr.Textbox(value="data/c4_demo.json")
+        export_legacy_format = gr.Checkbox()

     export_dir = gr.Textbox()
     export_btn = gr.Button()
@@ -90,6 +93,7 @@ def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
             max_shard_size,
             export_quantization_bit,
             export_quantization_dataset,
+            export_legacy_format,
             export_dir,
         ],
         [info_box],
@@ -99,6 +103,7 @@ def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
         max_shard_size=max_shard_size,
         export_quantization_bit=export_quantization_bit,
         export_quantization_dataset=export_quantization_dataset,
+        export_legacy_format=export_legacy_format,
         export_dir=export_dir,
         export_btn=export_btn,
         info_box=info_box,
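
Note: `export_legacy_format` is threaded from the new checkbox through `save_model` into the export call; per its locale text below, it means "do not use safetensors". A hedged sketch of what such a flag typically toggles downstream, using the standard transformers API (not necessarily this project's exact call):

    # `model` is an already-loaded transformers PreTrainedModel.
    model.save_pretrained(
        export_dir,
        max_shard_size="{}GB".format(max_shard_size),
        safe_serialization=not export_legacy_format,  # legacy => .bin shards instead of .safetensors
    )
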
@@ -30,7 +30,7 @@ def create_top() -> Dict[str, "Component"]:
             quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none")
             template = gr.Dropdown(choices=list(templates.keys()), value="default")
             rope_scaling = gr.Radio(choices=["none", "linear", "dynamic"], value="none")
-            booster = gr.Radio(choices=["none", "flash_attn", "unsloth"], value="none")
+            booster = gr.Radio(choices=["none", "flashattn", "unsloth"], value="none")

     model_name.change(list_adapters, [model_name, finetuning_type], [adapter_path], queue=False).then(
         get_model_path, [model_name], [model_path], queue=False

@@ -52,8 +52,8 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
         )

         with gr.Row():
-            batch_size = gr.Slider(value=4, minimum=1, maximum=512, step=1)
-            gradient_accumulation_steps = gr.Slider(value=4, minimum=1, maximum=512, step=1)
+            batch_size = gr.Slider(value=4, minimum=1, maximum=1024, step=1)
+            gradient_accumulation_steps = gr.Slider(value=4, minimum=1, maximum=1024, step=1)
             lr_scheduler_type = gr.Dropdown(choices=[scheduler.value for scheduler in SchedulerType], value="cosine")
             max_grad_norm = gr.Textbox(value="1.0")
             val_size = gr.Slider(value=0, minimum=0, maximum=1, step=0.001)
@@ -76,11 +76,13 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
                 warmup_steps = gr.Slider(value=0, minimum=0, maximum=5000, step=1)
                 neftune_alpha = gr.Slider(value=0, minimum=0, maximum=10, step=0.1)

-            with gr.Column():
-                sft_packing = gr.Checkbox(value=False)
-                upcast_layernorm = gr.Checkbox(value=False)
+            with gr.Row():
+                resize_vocab = gr.Checkbox()
+                sft_packing = gr.Checkbox()
+                upcast_layernorm = gr.Checkbox()
+                use_llama_pro = gr.Checkbox()

-        input_elems.update({logging_steps, save_steps, warmup_steps, neftune_alpha, sft_packing, upcast_layernorm})
+        input_elems.update({logging_steps, save_steps, warmup_steps, neftune_alpha, resize_vocab, sft_packing, upcast_layernorm, use_llama_pro})
         elem_dict.update(
             dict(
                 extra_tab=extra_tab,
@@ -88,20 +90,25 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
                 save_steps=save_steps,
                 warmup_steps=warmup_steps,
                 neftune_alpha=neftune_alpha,
+                resize_vocab=resize_vocab,
                 sft_packing=sft_packing,
                 upcast_layernorm=upcast_layernorm,
+                use_llama_pro=use_llama_pro,
             )
         )

         with gr.Accordion(label="LoRA config", open=False) as lora_tab:
             with gr.Row():
-                lora_rank = gr.Slider(value=8, minimum=1, maximum=1024, step=1, scale=1)
-                lora_dropout = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1)
-                lora_target = gr.Textbox(scale=1)
-                additional_target = gr.Textbox(scale=1)
-                create_new_adapter = gr.Checkbox(scale=1)
+                lora_rank = gr.Slider(value=8, minimum=1, maximum=1024, step=1)
+                lora_dropout = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01)
+                lora_target = gr.Textbox()
+                additional_target = gr.Textbox()
+
+                with gr.Column():
+                    use_rslora = gr.Checkbox()
+                    create_new_adapter = gr.Checkbox()

-        input_elems.update({lora_rank, lora_dropout, lora_target, additional_target, create_new_adapter})
+        input_elems.update({lora_rank, lora_dropout, lora_target, additional_target, use_rslora, create_new_adapter})
         elem_dict.update(
             dict(
                 lora_tab=lora_tab,
@@ -109,6 +116,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
                 lora_dropout=lora_dropout,
                 lora_target=lora_target,
                 additional_target=additional_target,
+                use_rslora=use_rslora,
                 create_new_adapter=create_new_adapter,
             )
         )
@@ -143,7 +151,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
             output_dir = gr.Textbox()

         with gr.Row():
-            resume_btn = gr.Checkbox(visible=False, interactive=False, value=False)
+            resume_btn = gr.Checkbox(visible=False, interactive=False)
             process_bar = gr.Slider(visible=False, interactive=False)

         with gr.Box():
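
Note: `use_rslora` enables rank-stabilized LoRA, which replaces the usual adapter scaling alpha / r with alpha / sqrt(r) so the update magnitude does not shrink as the rank grows; recent peft releases expose this as `LoraConfig(use_rslora=True)`. A small sketch of the two scaling rules:

    import math

    def lora_scaling(lora_alpha: int, r: int, use_rslora: bool) -> float:
        # Standard LoRA: alpha / r. rsLoRA: alpha / sqrt(r).
        return lora_alpha / math.sqrt(r) if use_rslora else lora_alpha / r

    print(lora_scaling(16, 64, use_rslora=False))  # 0.25
    print(lora_scaling(16, 64, use_rslora=True))   # 2.0
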
@@ -452,6 +452,20 @@ LOCALES = {
             "info": "嵌入向量所添加的噪声大小。",
         },
     },
+    "resize_vocab": {
+        "en": {
+            "label": "Resize token embeddings",
+            "info": "Resize the tokenizer vocab and the embedding layers.",
+        },
+        "ru": {
+            "label": "Изменение размера токенных эмбеддингов",
+            "info": "Изменить размер словаря токенизатора и слоев эмбеддинга.",
+        },
+        "zh": {
+            "label": "更改词表大小",
+            "info": "更改分词器词表和嵌入层的大小。",
+        },
+    },
     "sft_packing": {
         "en": {
             "label": "Pack sequences",
@@ -480,6 +494,20 @@ LOCALES = {
             "info": "将归一化层权重缩放至 32 位精度。",
         },
     },
+    "use_llama_pro": {
+        "en": {
+            "label": "Enable LLaMA Pro",
+            "info": "Make the parameters in the expanded blocks trainable.",
+        },
+        "ru": {
+            "label": "Включить LLaMA Pro",
+            "info": "Сделать параметры в расширенных блоках обучаемыми.",
+        },
+        "zh": {
+            "label": "使用 LLaMA Pro",
+            "info": "仅训练块扩展后的参数。",
+        },
+    },
     "lora_tab": {
         "en": {
             "label": "LoRA configurations",
@@ -550,18 +578,32 @@ LOCALES = {
             "info": "除 LoRA 层以外的可训练模块名称。使用英文逗号分隔多个名称。",
         },
     },
+    "use_rslora": {
+        "en": {
+            "label": "Use rslora",
+            "info": "Use the rank stabilization scaling factor for LoRA layer.",
+        },
+        "ru": {
+            "label": "Использовать rslora",
+            "info": "Использовать коэффициент масштабирования стабилизации ранга для слоя LoRA.",
+        },
+        "zh": {
+            "label": "使用 rslora",
+            "info": "对 LoRA 层使用秩稳定缩放方法。",
+        },
+    },
     "create_new_adapter": {
         "en": {
             "label": "Create new adapter",
-            "info": "Whether to create a new adapter with randomly initialized weight or not.",
+            "info": "Create a new adapter with randomly initialized weight upon the existing one.",
         },
         "ru": {
             "label": "Создать новый адаптер",
-            "info": "Создать новый адаптер с случайной инициализацией веса или нет.",
+            "info": "Создать новый адаптер с случайной инициализацией веса на основе существующего.",
         },
         "zh": {
             "label": "新建适配器",
-            "info": "是否创建一个经过随机初始化的新适配器。",
+            "info": "在现有的适配器上创建一个随机初始化后的新适配器。",
         },
     },
     "rlhf_tab": {
@@ -859,7 +901,7 @@ LOCALES = {
     },
     "export_quantization_dataset": {
         "en": {
-            "label": "Export quantization dataset.",
+            "label": "Export quantization dataset",
             "info": "The calibration dataset used for quantization.",
         },
         "ru": {
@@ -871,6 +913,20 @@ LOCALES = {
             "info": "量化过程中使用的校准数据集。",
         },
     },
+    "export_legacy_format": {
+        "en": {
+            "label": "Export legacy format",
+            "info": "Do not use safetensors to save the model.",
+        },
+        "ru": {
+            "label": "Экспорт в устаревший формат",
+            "info": "Не использовать safetensors для сохранения модели.",
+        },
+        "zh": {
+            "label": "导出旧格式",
+            "info": "不使用 safetensors 格式保存模型。",
+        },
+    },
     "export_dir": {
         "en": {
             "label": "Export dir",
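
Note: each new component gets a `LOCALES` entry keyed by element name, then by language code ("en"/"ru"/"zh"), each holding a `label` and an `info` string. A minimal sketch of how such a table is typically consumed (the `localize` helper is illustrative, not from this codebase):

    LOCALES = {
        "resize_vocab": {
            "en": {"label": "Resize token embeddings",
                   "info": "Resize the tokenizer vocab and the embedding layers."},
        },
    }

    def localize(elem_id: str, lang: str, key: str = "label") -> str:
        # Fall back to English when a language entry is missing.
        entry = LOCALES[elem_id]
        return entry.get(lang, entry["en"])[key]

    print(localize("resize_vocab", "ru"))  # falls back to the English label
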
@@ -125,12 +125,15 @@ class Runner:
             save_steps=get("train.save_steps"),
             warmup_steps=get("train.warmup_steps"),
             neftune_noise_alpha=get("train.neftune_alpha") or None,
+            resize_vocab=get("train.resize_vocab"),
             sft_packing=get("train.sft_packing"),
             upcast_layernorm=get("train.upcast_layernorm"),
+            use_llama_pro=get("train.use_llama_pro"),
             lora_rank=get("train.lora_rank"),
             lora_dropout=get("train.lora_dropout"),
             lora_target=get("train.lora_target") or get_module(get("top.model_name")),
             additional_target=get("train.additional_target") or None,
+            use_rslora=get("train.use_rslora"),
             create_new_adapter=get("train.create_new_adapter"),
             output_dir=get_save_dir(get("top.model_name"), get("top.finetuning_type"), get("train.output_dir")),
             fp16=(get("train.compute_type") == "fp16"),
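
Note: `resize_vocab=get("train.resize_vocab")` forwards the checkbox into the training arguments; the mechanism behind it is the standard transformers pattern of growing the embedding matrix to match an extended tokenizer. A hedged sketch (the added token is a made-up example):

    # `model` and `tokenizer` are a loaded transformers model/tokenizer pair.
    num_added = tokenizer.add_special_tokens({"additional_special_tokens": ["<tool>"]})
    if num_added > 0:
        # New embedding rows are freshly initialized and learned during training.
        model.resize_token_embeddings(len(tokenizer))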