refactor adapter hparam

hiyouga 2023-12-15 20:53:11 +08:00
parent d4c351f1ec
commit 0716f5e470
21 changed files with 302 additions and 311 deletions

View File

@@ -274,8 +274,8 @@ CUDA_VISIBLE_DEVICES=0 USE_MODELSCOPE_HUB=1 python src/train_web.py
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage pt \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--dataset wiki_demo \
--finetuning_type lora \
--lora_target q_proj,v_proj \
@@ -297,8 +297,8 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
@@ -321,14 +321,14 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage rm \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_sft_checkpoint \
--create_new_adapter \
--dataset comparison_gpt4_en \
--template default \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_rm_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
@@ -346,14 +346,14 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage ppo \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_sft_checkpoint \
--create_new_adapter \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--reward_model path_to_rm_checkpoint \
--output_dir path_to_ppo_checkpoint \
--per_device_train_batch_size 2 \
@@ -377,14 +377,14 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage dpo \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_sft_checkpoint \
--create_new_adapter \
--dataset comparison_gpt4_en \
--template default \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_dpo_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
@@ -472,9 +472,9 @@ deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
```bash
python src/export_model.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--export_dir path_to_export
```
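The new `--adapter_name_or_path` flag points `src/export_model.py` at the LoRA adapter to fold into the base weights before export. For readers who want a rough picture of what that merge amounts to, here is a minimal sketch using standard `peft`/`transformers` calls (paths are the same placeholders as above; the real script also handles templates and sharding):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("path_to_llama_model")
model = PeftModel.from_pretrained(base, "path_to_checkpoint")  # attach the LoRA adapter
model = model.merge_and_unload()                               # fold adapter deltas into the base weights
model.save_pretrained("path_to_export")

tokenizer = AutoTokenizer.from_pretrained("path_to_llama_model")
tokenizer.save_pretrained("path_to_export")
```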
@@ -486,9 +486,9 @@ python src/export_model.py \
```bash
python src/api_demo.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
--finetuning_type lora
```
> [!TIP]
@@ -499,9 +499,9 @@ python src/api_demo.py \
```bash
python src/cli_demo.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
--finetuning_type lora
```
### Web Demo
@@ -509,9 +509,9 @@ python src/cli_demo.py \
```bash
python src/web_demo.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
--finetuning_type lora
```
### Evaluation
@@ -519,9 +519,9 @@ python src/web_demo.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
--model_name_or_path path_to_llama_model \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--adapter_name_or_path path_to_checkpoint \
--template vanilla \
--finetuning_type lora \
--task mmlu \
--split test \
--lang en \
@@ -534,12 +534,12 @@ CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_llama_model \
--do_predict \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--dataset alpaca_gpt4_en \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_predict_result \
--per_device_eval_batch_size 8 \
--max_samples 100 \

View File

@@ -274,8 +274,8 @@ CUDA_VISIBLE_DEVICES=0 USE_MODELSCOPE_HUB=1 python src/train_web.py
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage pt \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--dataset wiki_demo \
--finetuning_type lora \
--lora_target q_proj,v_proj \
@@ -297,8 +297,8 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--dataset alpaca_gpt4_zh \
--template default \
--finetuning_type lora \
@@ -321,14 +321,14 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage rm \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_sft_checkpoint \
--create_new_adapter \
--dataset comparison_gpt4_zh \
--template default \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_rm_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
@@ -346,14 +346,14 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage ppo \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_sft_checkpoint \
--create_new_adapter \
--dataset alpaca_gpt4_zh \
--template default \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--reward_model path_to_rm_checkpoint \
--output_dir path_to_ppo_checkpoint \
--per_device_train_batch_size 2 \
@@ -377,14 +377,14 @@ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage dpo \
--model_name_or_path path_to_llama_model \
--do_train \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_sft_checkpoint \
--create_new_adapter \
--dataset comparison_gpt4_zh \
--template default \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--resume_lora_training False \
--checkpoint_dir path_to_sft_checkpoint \
--output_dir path_to_dpo_checkpoint \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 4 \
@@ -472,9 +472,9 @@ deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
```bash
python src/export_model.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--export_dir path_to_export
```
@@ -486,9 +486,9 @@ python src/export_model.py \
```bash
python src/api_demo.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
--finetuning_type lora
```
> [!TIP]
@@ -499,9 +499,9 @@ python src/api_demo.py \
```bash
python src/cli_demo.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
--finetuning_type lora
```
### 浏览器测试
@@ -509,9 +509,9 @@ python src/cli_demo.py \
```bash
python src/web_demo.py \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint
--finetuning_type lora
```
### 模型评估
@@ -519,9 +519,9 @@ python src/web_demo.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
--model_name_or_path path_to_llama_model \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--adapter_name_or_path path_to_checkpoint \
--template vanilla \
--finetuning_type lora \
--task ceval \
--split validation \
--lang zh \
@@ -534,12 +534,12 @@ CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
--stage sft \
--model_name_or_path path_to_llama_model \
--do_predict \
--model_name_or_path path_to_llama_model \
--adapter_name_or_path path_to_checkpoint \
--dataset alpaca_gpt4_zh \
--template default \
--finetuning_type lora \
--checkpoint_dir path_to_checkpoint \
--output_dir path_to_predict_result \
--per_device_eval_batch_size 8 \
--max_samples 100 \

View File

@@ -1,5 +1,5 @@
torch>=1.13.1
transformers>=4.36.0
transformers>=4.36.1
datasets>=2.14.3
accelerate>=0.21.0
peft>=0.7.0

View File

@@ -617,9 +617,6 @@ register_template(
)
r"""
Supports language model inference without histories.
"""
register_template(
name="vanilla",
prefix=[],

View File

@@ -15,7 +15,7 @@ class FreezeArguments:
LLaMA choices: [\"mlp\", \"self_attn\"], \
BLOOM & Falcon & ChatGLM choices: [\"mlp\", \"self_attention\"], \
Qwen choices: [\"mlp\", \"attn\"], \
Phi-1.5 choices: [\"mlp\", \"mixer\"], \
Phi choices: [\"mlp\", \"mixer\"], \
Others choices: the same as LLaMA."}
)
num_layer_trainable: Optional[int] = field(
@@ -33,9 +33,9 @@ class LoraArguments:
default=None,
metadata={"help": "Name(s) of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint."}
)
lora_alpha: Optional[float] = field(
lora_alpha: Optional[int] = field(
default=None,
metadata={"help": "The scale factor for LoRA fine-tuning (default: lora_rank * 2.0)."}
metadata={"help": "The scale factor for LoRA fine-tuning (default: lora_rank * 2)."}
)
lora_dropout: Optional[float] = field(
default=0.1,
@@ -52,12 +52,12 @@ class LoraArguments:
BLOOM & Falcon & ChatGLM choices: [\"query_key_value\", \"dense\", \"dense_h_to_4h\", \"dense_4h_to_h\"], \
Baichuan choices: [\"W_pack\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
Qwen choices: [\"c_attn\", \"attn.c_proj\", \"w1\", \"w2\", \"mlp.c_proj\"], \
Phi-1.5 choices: [\"Wqkv\", \"out_proj\", \"fc1\", \"fc2\"], \
Phi choices: [\"Wqkv\", \"out_proj\", \"fc1\", \"fc2\"], \
Others choices: the same as LLaMA."}
)
resume_lora_training: Optional[bool] = field(
default=True,
metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
create_new_adapter: Optional[bool] = field(
default=False,
metadata={"help": "Whether to create a new adapter with randomly initialized weight or not."}
)
@@ -98,9 +98,9 @@ class RLHFArguments:
default=None,
metadata={"help": "Path to the reference model used for the PPO or DPO training."}
)
ref_model_checkpoint: Optional[str] = field(
ref_model_adapters: Optional[str] = field(
default=None,
metadata={"help": "Path to the directory(s) containing the model checkpoints of the reference model."}
metadata={"help": "Path to the adapters of the reference model."}
)
ref_model_quantization_bit: Optional[int] = field(
default=None,
@@ -108,11 +108,11 @@
)
reward_model: Optional[str] = field(
default=None,
metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
metadata={"help": "Path to the reward model used for the PPO training."}
)
reward_model_checkpoint: Optional[str] = field(
reward_model_adapters: Optional[str] = field(
default=None,
metadata={"help": "Path to the directory(s) containing the model checkpoints of the reward model."}
metadata={"help": "Path to the adapters of the reward model."}
)
reward_model_quantization_bit: Optional[int] = field(
default=None,
@@ -161,11 +161,11 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments):
return arg
self.name_module_trainable = split_arg(self.name_module_trainable)
self.lora_alpha = self.lora_alpha or float(self.lora_rank * 2.0)
self.lora_alpha = self.lora_alpha or self.lora_rank * 2
self.lora_target = split_arg(self.lora_target)
self.additional_target = split_arg(self.additional_target)
self.ref_model_checkpoint = split_arg(self.ref_model_checkpoint)
self.reward_model_checkpoint = split_arg(self.reward_model_checkpoint)
self.ref_model_adapters = split_arg(self.ref_model_adapters)
self.reward_model_adapters = split_arg(self.reward_model_adapters)
assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."
assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
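Two behavioral details of this `__post_init__` hunk are easy to miss: `lora_alpha` now defaults to the integer `lora_rank * 2` instead of a float, and the renamed `*_adapters` arguments go through the same `split_arg` helper as `lora_target`. A minimal sketch of the resulting behavior (the `split_arg` body is reconstructed from its call sites, so treat it as an approximation):

```python
from typing import List, Optional, Union

def split_arg(arg: Optional[str]) -> Optional[Union[str, List[str]]]:
    # Comma-separated strings become lists; None passes through untouched.
    if isinstance(arg, str):
        return [item.strip() for item in arg.split(",")]
    return arg

lora_rank, lora_alpha = 8, None
lora_alpha = lora_alpha or lora_rank * 2               # 16 (int), previously 16.0 (float)
ref_model_adapters = split_arg("adapter_a,adapter_b")  # ['adapter_a', 'adapter_b']
```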

View File

@@ -8,12 +8,15 @@ class ModelArguments:
Arguments pertaining to which model/config/tokenizer we are going to fine-tune.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from \
huggingface.co/models or modelscope.cn/models."}
metadata={"help": "Path to the model weight or identifier from huggingface.co/models or modelscope.cn/models."}
)
adapter_name_or_path: Optional[str] = field(
default=None,
metadata={"help": "Path to the adapter weight or identifier from huggingface.co/models."}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co."}
metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn."}
)
use_fast_tokenizer: Optional[bool] = field(
default=True,
@@ -43,10 +46,6 @@ class ModelArguments:
default=None,
metadata={"help": "Adopt scaled rotary positional embeddings."}
)
checkpoint_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to the directory(s) containing the model checkpoints as well as the configurations."}
)
flash_attn: Optional[bool] = field(
default=False,
metadata={"help": "Enable FlashAttention-2 for faster training."}
@@ -71,8 +70,8 @@ class ModelArguments:
if self.split_special_tokens and self.use_fast_tokenizer:
raise ValueError("`split_special_tokens` is only supported for slow tokenizers.")
if self.checkpoint_dir is not None: # support merging multiple lora weights
self.checkpoint_dir = [cd.strip() for cd in self.checkpoint_dir.split(",")]
if self.adapter_name_or_path is not None: # support merging multiple lora weights
self.adapter_name_or_path = [path.strip() for path in self.adapter_name_or_path.split(",")]
assert self.quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."

View File

@@ -27,8 +27,8 @@ def init_adapter(
Note that the trainable parameters must be cast to float32.
"""
if (not is_trainable) and model_args.checkpoint_dir is None:
logger.info("Checkpoint is not found at evaluation, load the original model.")
if (not is_trainable) and model_args.adapter_name_or_path is None:
logger.info("Adapter is not found at evaluation, load the base model.")
return model
if finetuning_args.finetuning_type == "full" and is_trainable:
@@ -44,6 +44,7 @@ def init_adapter(
)
if not num_layers:
raise ValueError("Current model does not support freeze tuning.")
if finetuning_args.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0
trainable_layer_ids = [num_layers - k - 1 for k in range(finetuning_args.num_layer_trainable)]
else: # fine-tuning the first n layers if num_layer_trainable < 0
@@ -62,30 +63,31 @@ def init_adapter(
if finetuning_args.finetuning_type == "lora":
logger.info("Fine-tuning method: LoRA")
checkpoint_to_resume = None
adapter_to_resume = None
if model_args.checkpoint_dir is not None:
if model_args.adapter_name_or_path is not None:
is_mergeable = True
if getattr(model, "quantization_method", None): # merge lora in quantized model is unstable
assert len(model_args.checkpoint_dir) == 1, "Quantized model only accepts a single checkpoint."
assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter."
is_mergeable = False
if (is_trainable and finetuning_args.resume_lora_training) or (not is_mergeable):
checkpoints_to_merge, checkpoint_to_resume = model_args.checkpoint_dir[:-1], model_args.checkpoint_dir[-1]
if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable):
adapter_to_merge = model_args.adapter_name_or_path[:-1]
adapter_to_resume = model_args.adapter_name_or_path[-1]
else:
checkpoints_to_merge = model_args.checkpoint_dir
adapter_to_merge = model_args.adapter_name_or_path
for checkpoint in checkpoints_to_merge:
model = PeftModel.from_pretrained(model, checkpoint)
for adapter in adapter_to_merge:
model = PeftModel.from_pretrained(model, adapter)
model = model.merge_and_unload()
if len(checkpoints_to_merge) > 0:
logger.info("Merged {} model checkpoint(s).".format(len(checkpoints_to_merge)))
if len(adapter_to_merge) > 0:
logger.info("Merged {} adapter(s).".format(len(adapter_to_merge)))
if checkpoint_to_resume is not None: # resume lora training
model = PeftModel.from_pretrained(model, checkpoint_to_resume, is_trainable=is_trainable)
if adapter_to_resume is not None: # resume lora training
model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable)
if is_trainable and checkpoint_to_resume is None: # create new lora weights while training
if is_trainable and adapter_to_resume is None: # create new lora weights while training
if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
target_modules = find_all_linear_modules(model)
else:
@@ -105,7 +107,7 @@ def init_adapter(
for param in filter(lambda p: p.requires_grad, model.parameters()):
param.data = param.data.to(torch.float32)
if model_args.checkpoint_dir is not None:
logger.info("Loaded fine-tuned model from checkpoint(s): {}".format(",".join(model_args.checkpoint_dir)))
if model_args.adapter_name_or_path is not None:
logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path)))
return model
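In short, the renamed LoRA branch merges every adapter except the last into the base model and keeps the last one attached for continued training; with `create_new_adapter` set it merges all of them and a fresh adapter is created later. A condensed restatement of that control flow (simplified; the quantization guard and the new-adapter path are omitted, so this is not the exact repo code):

```python
from peft import PeftModel

def attach_adapters(model, adapter_paths, is_trainable, create_new_adapter):
    # Simplified sketch of the init_adapter LoRA branch above.
    adapter_to_resume = None
    if is_trainable and not create_new_adapter:
        # Merge all but the last adapter, then resume training on the last one.
        to_merge, adapter_to_resume = adapter_paths[:-1], adapter_paths[-1]
    else:
        to_merge = adapter_paths  # merge everything; a fresh adapter is created elsewhere

    for path in to_merge:
        model = PeftModel.from_pretrained(model, path).merge_and_unload()

    if adapter_to_resume is not None:
        model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable)
    return model
```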

View File

@@ -1,41 +1,26 @@
import math
import torch
from types import MethodType
from typing import TYPE_CHECKING, Optional, Tuple
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
PretrainedConfig,
PreTrainedModel,
PreTrainedTokenizerBase
)
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.utils.versions import require_version
from trl import AutoModelForCausalLMWithValueHead
try:
from transformers.integrations import is_deepspeed_zero3_enabled
except ImportError: # https://github.com/huggingface/transformers/releases/tag/v4.33.1
from transformers.deepspeed import is_deepspeed_zero3_enabled
from llmtuner.extras.logging import get_logger
from llmtuner.extras.misc import count_parameters, get_current_device, infer_optim_dtype, try_download_model_from_ms
from llmtuner.extras.misc import count_parameters, get_current_device, try_download_model_from_ms
from llmtuner.extras.packages import is_flash_attn2_available
from llmtuner.hparams import FinetuningArguments
from llmtuner.model.adapter import init_adapter
from llmtuner.model.patches import patch_config, patch_model, patch_valuehead_model, patch_tokenizer, register_autoclass
from llmtuner.model.utils import load_valuehead_params, prepare_model_for_training, resize_embedding_layer
if TYPE_CHECKING:
from transformers import PreTrainedTokenizer
from transformers import PreTrainedModel, PreTrainedTokenizer
from llmtuner.hparams import ModelArguments
logger = get_logger(__name__)
require_version("transformers>=4.36.0", "To fix: pip install transformers>=4.36.0")
require_version("transformers>=4.36.1", "To fix: pip install transformers>=4.36.1")
require_version("datasets>=2.14.3", "To fix: pip install datasets>=2.14.3")
require_version("accelerate>=0.21.0", "To fix: pip install accelerate>=0.21.0")
require_version("peft>=0.7.0", "To fix: pip install peft>=0.7.0")
@@ -47,7 +32,7 @@ def load_model_and_tokenizer(
finetuning_args: "FinetuningArguments",
is_trainable: Optional[bool] = False,
add_valuehead: Optional[bool] = False
) -> Tuple[PreTrainedModel, "PreTrainedTokenizer"]:
) -> Tuple["PreTrainedModel", "PreTrainedTokenizer"]:
r"""
Loads pretrained model and tokenizer.
@@ -70,73 +55,15 @@ def load_model_and_tokenizer(
padding_side="right", # training with left-padded tensors in fp16 precision may cause overflow
**config_kwargs
)
patch_tokenizer(tokenizer)
if finetuning_args.finetuning_type != "lora" and model_args.checkpoint_dir is not None:
logger.info("Use `model_name_or_path` to specify the model trained with full/freeze method.")
model_to_load = model_args.checkpoint_dir[0]
else:
model_to_load = model_args.model_name_or_path
config = AutoConfig.from_pretrained(model_to_load, **config_kwargs)
# Fix tokenizer (for ChatGLM2 and ChatGLM3)
if getattr(config, "model_type", None) == "chatglm":
tokenizer._pad = MethodType(PreTrainedTokenizerBase._pad, tokenizer)
# Set model dtype
if model_args.compute_dtype is None: # priority: bf16 > fp16 > fp32
model_args.compute_dtype = infer_optim_dtype(model_dtype=getattr(config, "torch_dtype", None))
setattr(config, "torch_dtype", model_args.compute_dtype)
# Fix config (for Qwen)
if getattr(config, "model_type", None) == "qwen":
for dtype_name, dtype in [("fp16", torch.float16), ("bf16", torch.bfloat16), ("fp32", torch.float32)]:
setattr(config, dtype_name, getattr(config, "torch_dtype", None) == dtype)
# Set RoPE scaling
if model_args.rope_scaling is not None:
if not hasattr(config, "rope_scaling"):
logger.warning("Current model does not support RoPE scaling.")
else:
if is_trainable:
if model_args.rope_scaling == "dynamic":
logger.warning(
"Dynamic NTK may not work well with fine-tuning. "
"See: https://github.com/huggingface/transformers/pull/24653"
)
current_max_length = getattr(config, "max_position_embeddings", None)
if current_max_length and model_args.model_max_length > current_max_length:
scaling_factor = float(math.ceil(model_args.model_max_length / current_max_length))
else:
logger.warning("Input length is smaller than max length. Consider increase input length.")
scaling_factor = 1.0
else:
scaling_factor = 2.0
setattr(config, "rope_scaling", {"type": model_args.rope_scaling, "factor": scaling_factor})
logger.info("Using {} scaling strategy and setting scaling factor to {}".format(
model_args.rope_scaling, scaling_factor
))
# Set shift short attention (S^2-Attn)
if is_trainable and model_args.shift_attn:
logger.warning("Shift short attention is temporarily invalid due to breaking changes.")
# if getattr(config, "model_type", None) == "llama":
# setattr(config, "group_size_ratio", 0.25)
# logger.info("Using shift short attention with group_size_ratio=1/4.")
# else:
# logger.warning("Current model does not support shift short attention.")
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
patch_config(config, model_args, is_trainable)
# Set FlashAttention-2
if model_args.flash_attn:
if not is_flash_attn2_available():
logger.warning("FlashAttention-2 is not installed.")
elif getattr(config, "model_type", None) == "qwen":
logger.info("Current model automatically enables FlashAttention if installed.")
else:
config_kwargs["use_flash_attention_2"] = True
logger.info("Using FlashAttention-2 for faster training and inference.")
if model_args.flash_attn and is_flash_attn2_available():
config_kwargs["use_flash_attention_2"] = True
logger.info("Using FlashAttention-2 for faster training and inference.")
# Quantization configurations (using gptq or awq)
if getattr(config, "quantization_config", None):
@@ -168,33 +95,16 @@ def load_model_and_tokenizer(
# Load pre-trained models (without valuehead)
model = AutoModelForCausalLM.from_pretrained(
model_to_load,
model_args.model_name_or_path,
config=config,
torch_dtype=model_args.compute_dtype,
low_cpu_mem_usage=(not is_deepspeed_zero3_enabled()),
**config_kwargs
)
# Resize token embeddings
patch_model(model)
register_autoclass(config, model, tokenizer)
resize_embedding_layer(model, tokenizer)
# Disable custom generate method (for Qwen and Baichuan2)
if isinstance(model, PreTrainedModel) and "GenerationMixin" not in str(model.generate.__func__):
model.generate = MethodType(PreTrainedModel.generate, model)
# Fix LM head (for ChatGLM2 and ChatGLM3)
if getattr(config, "model_type", None) == "chatglm":
setattr(model, "lm_head", model.transformer.output_layer)
setattr(model, "_keys_to_ignore_on_save", ["lm_head.weight"])
# Register auto class to save the custom code files
if isinstance(config, PretrainedConfig) and "AutoConfig" in getattr(config, "auto_map", {}):
config.__class__.register_for_auto_class()
if isinstance(model, PreTrainedModel) and "AutoModelForCausalLM" in getattr(config, "auto_map", {}):
model.__class__.register_for_auto_class()
if isinstance(tokenizer, PreTrainedTokenizerBase) and "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}):
tokenizer.__class__.register_for_auto_class()
# Initialize adapters
model = prepare_model_for_training(model=model, finetuning_args=finetuning_args) if is_trainable else model
model = init_adapter(model, model_args, finetuning_args, is_trainable)
@@ -202,19 +112,10 @@ def load_model_and_tokenizer(
# Prepare model with valuehead for RLHF
if add_valuehead:
model: "AutoModelForCausalLMWithValueHead" = AutoModelForCausalLMWithValueHead.from_pretrained(model)
def get_input_embeddings(self: "AutoModelForCausalLMWithValueHead") -> torch.nn.Module:
return self.pretrained_model.get_input_embeddings()
setattr(model, "get_input_embeddings", MethodType(get_input_embeddings, model))
ignore_modules = [name for name, _ in model.named_parameters() if "pretrained_model" in name]
setattr(model, "_keys_to_ignore_on_save", ignore_modules)
setattr(model, "tie_weights", MethodType(lambda _: None, model)) # use empty method
vhead_path = (
model_args.checkpoint_dir[-1] if model_args.checkpoint_dir is not None else model_args.model_name_or_path
)
vhead_params = load_valuehead_params(vhead_path, model_args)
patch_valuehead_model(model)
vhead_params = load_valuehead_params(model_args)
if vhead_params is not None:
model.load_state_dict(vhead_params, strict=False)
logger.info("Loaded valuehead from checkpoint: {}".format(vhead_path))
# Prepare model for inference
if not is_trainable:

View File

@@ -41,15 +41,19 @@ _EVAL_CLS = Tuple[
def _verify_model_args(model_args: "ModelArguments", finetuning_args: "FinetuningArguments") -> None:
if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
raise ValueError("Quantization is only compatible with the LoRA method.")
if model_args.checkpoint_dir is not None and len(model_args.checkpoint_dir) != 1:
if model_args.quantization_bit is not None:
if finetuning_args.finetuning_type != "lora":
raise ValueError("Multiple checkpoints are only available for LoRA tuning.")
raise ValueError("Quantization is only compatible with the LoRA method.")
if finetuning_args.create_new_adapter:
raise ValueError("Cannot create new adapter upon a quantized model.")
if model_args.adapter_name_or_path is not None and len(model_args.adapter_name_or_path) != 1:
if finetuning_args.finetuning_type != "lora":
raise ValueError("Multiple adapters are only available for LoRA tuning.")
if model_args.quantization_bit is not None:
raise ValueError("Quantized model only accepts a single checkpoint. Merge them first.")
raise ValueError("Quantized model only accepts a single adapter. Merge them first.")
def parse_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
@@ -139,11 +143,17 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
training_args_dict.update(dict(ddp_find_unused_parameters=False))
training_args = Seq2SeqTrainingArguments(**training_args_dict)
if finetuning_args.stage in ["rm", "ppo"] and finetuning_args.finetuning_type in ["full", "freeze"]:
can_resume_from_checkpoint = False
else:
can_resume_from_checkpoint = True
if (
training_args.resume_from_checkpoint is None
and training_args.do_train
and os.path.isdir(training_args.output_dir)
and not training_args.overwrite_output_dir
and can_resume_from_checkpoint
):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
@@ -158,7 +168,7 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
))
if finetuning_args.stage in ["rm", "ppo"] and training_args.resume_from_checkpoint is not None:
logger.warning("Add {} to `checkpoint_dir` to resume training from checkpoint.".format(
logger.warning("Add {} to `adapter_name_or_path` to resume training from checkpoint.".format(
training_args.resume_from_checkpoint
))
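The tightened `_verify_model_args` above rejects two combinations up front: creating a fresh adapter on a quantized model, and stacking multiple adapters outside LoRA. A self-contained repro of those rules with example calls (a hypothetical condensation, not the repo function):

```python
def verify(quantization_bit, finetuning_type, create_new_adapter, adapters):
    # Mirrors the checks in _verify_model_args, flattened to plain arguments.
    if quantization_bit is not None:
        if finetuning_type != "lora":
            raise ValueError("Quantization is only compatible with the LoRA method.")
        if create_new_adapter:
            raise ValueError("Cannot create new adapter upon a quantized model.")
    if adapters is not None and len(adapters) != 1:
        if finetuning_type != "lora":
            raise ValueError("Multiple adapters are only available for LoRA tuning.")
        if quantization_bit is not None:
            raise ValueError("Quantized model only accepts a single adapter. Merge them first.")

verify(4, "lora", False, ["sft_adapter"])  # OK: QLoRA resuming a single adapter
verify(None, "lora", False, ["a", "b"])    # OK: multiple adapters, no quantization
```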

View File

@@ -0,0 +1,94 @@
import math
import torch
from types import MethodType
from typing import TYPE_CHECKING
from transformers import PreTrainedModel, PreTrainedTokenizerBase
from llmtuner.extras.logging import get_logger
from llmtuner.extras.misc import infer_optim_dtype
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedTokenizer
from trl import AutoModelForCausalLMWithValueHead
from llmtuner.hparams import ModelArguments
logger = get_logger(__name__)
def patch_config(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool):
if model_args.compute_dtype is None: # priority: bf16 > fp16 > fp32
model_args.compute_dtype = infer_optim_dtype(model_dtype=getattr(config, "torch_dtype", None))
setattr(config, "torch_dtype", model_args.compute_dtype)
if getattr(config, "model_type", None) == "qwen":
for dtype_name, dtype in [("fp16", torch.float16), ("bf16", torch.bfloat16), ("fp32", torch.float32)]:
setattr(config, dtype_name, getattr(config, "torch_dtype", None) == dtype)
if model_args.rope_scaling is not None:
if not hasattr(config, "rope_scaling"):
logger.warning("Current model does not support RoPE scaling.")
else:
if is_trainable:
if model_args.rope_scaling == "dynamic":
logger.warning(
"Dynamic NTK may not work well with fine-tuning. "
"See: https://github.com/huggingface/transformers/pull/24653"
)
current_max_length = getattr(config, "max_position_embeddings", None)
if current_max_length and model_args.model_max_length > current_max_length:
scaling_factor = float(math.ceil(model_args.model_max_length / current_max_length))
else:
logger.warning("Input length is smaller than max length. Consider increase input length.")
scaling_factor = 1.0
else:
scaling_factor = 2.0
setattr(config, "rope_scaling", {"type": model_args.rope_scaling, "factor": scaling_factor})
logger.info("Using {} scaling strategy and setting scaling factor to {}".format(
model_args.rope_scaling, scaling_factor
))
# Set shift short attention (S^2-Attn)
if is_trainable and model_args.shift_attn:
logger.warning("Shift short attention is temporarily invalid due to breaking changes.")
# if getattr(config, "model_type", None) == "llama":
# setattr(config, "group_size_ratio", 0.25)
# logger.info("Using shift short attention with group_size_ratio=1/4.")
# else:
# logger.warning("Current model does not support shift short attention.")
def patch_model(model: "PreTrainedModel"):
if "GenerationMixin" not in str(model.generate.__func__):
model.generate = MethodType(PreTrainedModel.generate, model)
if getattr(model.config, "model_type", None) == "chatglm":
setattr(model, "lm_head", model.transformer.output_layer)
setattr(model, "_keys_to_ignore_on_save", ["lm_head.weight"])
def patch_valuehead_model(model: "AutoModelForCausalLMWithValueHead"):
def get_input_embeddings(self: "AutoModelForCausalLMWithValueHead") -> torch.nn.Module:
return self.pretrained_model.get_input_embeddings()
setattr(model, "get_input_embeddings", MethodType(get_input_embeddings, model))
ignore_modules = [name for name, _ in model.named_parameters() if "pretrained_model" in name]
setattr(model, "_keys_to_ignore_on_save", ignore_modules)
setattr(model, "tie_weights", MethodType(lambda _: None, model)) # use empty method
def patch_tokenizer(tokenizer: "PreTrainedTokenizer"):
if "PreTrainedTokenizerBase" not in str(tokenizer._pad.__func__):
tokenizer._pad = MethodType(PreTrainedTokenizerBase._pad, tokenizer)
def register_autoclass(config: "PretrainedConfig", model: "PreTrainedModel", tokenizer: "PreTrainedTokenizerBase"):
if "AutoConfig" in getattr(config, "auto_map", {}):
config.__class__.register_for_auto_class()
if "AutoModelForCausalLM" in getattr(config, "auto_map", {}):
model.__class__.register_for_auto_class()
if "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}):
tokenizer.__class__.register_for_auto_class()
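The new `patches.py` module collects the fixes that previously lived inline in `loader.py`. Based on the call sites visible in the loader diff above, they are applied in roughly this order; the snippet below wires them up against a small public model as a stand-in (an illustration only, assuming the import paths of this commit):

```python
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from llmtuner.hparams import ModelArguments
from llmtuner.model.patches import patch_config, patch_model, patch_tokenizer, register_autoclass

model_args = ModelArguments(model_name_or_path="gpt2")  # placeholder base model

tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
patch_tokenizer(tokenizer)                     # restores _pad if a custom tokenizer overrode it

config = AutoConfig.from_pretrained(model_args.model_name_or_path)
patch_config(config, model_args, is_trainable=False)  # dtype, Qwen flags, RoPE scaling

model = AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, config=config)
patch_model(model)                             # generic generate(); ChatGLM lm_head fix
register_autoclass(config, model, tokenizer)   # keep custom code files when saving
```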

View File

@ -1,5 +1,4 @@
import torch
import inspect
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
from transformers.utils import cached_file
@@ -86,29 +85,26 @@ def get_modelcard_args(
}
def load_valuehead_params(
path_or_repo_id: str,
model_args: "ModelArguments"
) -> Dict[str, torch.Tensor]:
def load_valuehead_params(model_args: "ModelArguments") -> Dict[str, torch.Tensor]:
r"""
Loads value head parameters from Hugging Face Hub or local disk.
Returns: dict with keys `v_head.summary.weight` and `v_head.summary.bias`.
"""
if model_args.adapter_name_or_path is not None:
path_or_repo_id = model_args.adapter_name_or_path[-1]
else:
path_or_repo_id = model_args.model_name_or_path
kwargs = {
"path_or_repo_id": path_or_repo_id,
"cache_dir": model_args.cache_dir
"cache_dir": model_args.cache_dir,
"token": model_args.hf_hub_token
}
if "token" in inspect.signature(cached_file).parameters:
kwargs["token"] = model_args.hf_hub_token
elif "use_auth_token" in inspect.signature(cached_file).parameters: # for transformers==4.31.0
kwargs["use_auth_token"] = model_args.hf_hub_token
else:
logger.warning("Ignore `hf_hub_token` since matched parameter is not found.")
try:
vhead_file = cached_file(filename=WEIGHTS_NAME, **kwargs)
logger.info("Loaded valuehead from {}".format(path_or_repo_id))
return torch.load(vhead_file, map_location="cpu")
except Exception as err:
logger.info("Failed to load {}: {}".format(WEIGHTS_NAME, str(err)))
@@ -116,6 +112,7 @@ def load_valuehead_params(
try:
from safetensors import safe_open
vhead_file = cached_file(filename=SAFE_WEIGHTS_NAME, **kwargs)
logger.info("Loaded valuehead from {}".format(path_or_repo_id))
with safe_open(vhead_file, framework="pt", device="cpu") as f:
return {
"v_head.summary.weight": f.get_tensor("v_head.summary.weight"),

View File

@@ -46,7 +46,7 @@ def create_ref_model(
ref_model_args_dict = model_args.to_dict()
ref_model_args_dict.update(dict(
model_name_or_path=finetuning_args.ref_model,
checkpoint_dir=finetuning_args.ref_model_checkpoint,
adapter_name_or_path=finetuning_args.ref_model_adapters,
quantization_bit=finetuning_args.ref_model_quantization_bit
))
ref_model_args = ModelArguments(**ref_model_args_dict)
@@ -96,7 +96,7 @@ def create_reward_model(
reward_model_args_dict = model_args.to_dict()
reward_model_args_dict.update(dict(
model_name_or_path=finetuning_args.reward_model,
checkpoint_dir=finetuning_args.reward_model_checkpoint,
adapter_name_or_path=finetuning_args.reward_model_adapters,
quantization_bit=finetuning_args.reward_model_quantization_bit
))
reward_model_args = ModelArguments(**reward_model_args_dict)

View File

@@ -63,17 +63,17 @@ class WebChatModel(ChatModel):
yield error
return
if get("top.checkpoints"):
checkpoint_dir = ",".join([
get_save_dir(get("top.model_name"), get("top.finetuning_type"), ckpt) for ckpt in get("top.checkpoints")
])
if get("top.adapter_path"):
adapter_name_or_path = ",".join([
get_save_dir(get("top.model_name"), get("top.finetuning_type"), adapter)
for adapter in get("top.adapter_path")])
else:
checkpoint_dir = None
adapter_name_or_path = None
yield ALERTS["info_loading"][lang]
args = dict(
model_name_or_path=get("top.model_path"),
checkpoint_dir=checkpoint_dir,
adapter_name_or_path=adapter_name_or_path,
finetuning_type=get("top.finetuning_type"),
quantization_bit=int(get("top.quantization_bit")) if get("top.quantization_bit") in ["8", "4"] else None,
template=get("top.template"),

View File

@@ -2,14 +2,7 @@ import os
import json
import gradio as gr
from typing import Any, Dict, Optional
from transformers.utils import (
WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
ADAPTER_WEIGHTS_NAME,
ADAPTER_SAFE_WEIGHTS_NAME
)
from peft.utils import WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME
from llmtuner.extras.constants import (
DEFAULT_MODULE,
@@ -22,18 +15,11 @@ from llmtuner.extras.misc import use_modelscope
from llmtuner.hparams.data_args import DATA_CONFIG
ADAPTER_NAMES = {WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME}
DEFAULT_CACHE_DIR = "cache"
DEFAULT_DATA_DIR = "data"
DEFAULT_SAVE_DIR = "saves"
USER_CONFIG = "user.config"
CKPT_NAMES = [
WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
ADAPTER_WEIGHTS_NAME,
ADAPTER_SAFE_WEIGHTS_NAME
]
def get_save_dir(*args) -> os.PathLike:
@@ -90,18 +76,18 @@ def get_template(model_name: str) -> str:
return "default"
def list_checkpoint(model_name: str, finetuning_type: str) -> Dict[str, Any]:
checkpoints = []
if model_name:
def list_adapters(model_name: str, finetuning_type: str) -> Dict[str, Any]:
adapters = []
if model_name and finetuning_type == "lora": # full and freeze have no adapter
save_dir = get_save_dir(model_name, finetuning_type)
if save_dir and os.path.isdir(save_dir):
for checkpoint in os.listdir(save_dir):
for adapter in os.listdir(save_dir):
if (
os.path.isdir(os.path.join(save_dir, checkpoint))
and any([os.path.isfile(os.path.join(save_dir, checkpoint, name)) for name in CKPT_NAMES])
os.path.isdir(os.path.join(save_dir, adapter))
and any([os.path.isfile(os.path.join(save_dir, adapter, name)) for name in ADAPTER_NAMES])
):
checkpoints.append(checkpoint)
return gr.update(value=[], choices=checkpoints)
adapters.append(adapter)
return gr.update(value=[], choices=adapters)
def load_dataset_info(dataset_dir: str) -> Dict[str, Dict[str, Any]]:

View File

@@ -14,7 +14,7 @@ def save_model(
lang: str,
model_name: str,
model_path: str,
checkpoints: List[str],
adapter_path: List[str],
finetuning_type: str,
template: str,
max_shard_size: int,
@@ -25,8 +25,8 @@
error = ALERTS["err_no_model"][lang]
elif not model_path:
error = ALERTS["err_no_path"][lang]
elif not checkpoints:
error = ALERTS["err_no_checkpoint"][lang]
elif not adapter_path:
error = ALERTS["err_no_adapter"][lang]
elif not export_dir:
error = ALERTS["err_no_export_dir"][lang]
@@ -37,7 +37,7 @@
args = dict(
model_name_or_path=model_path,
checkpoint_dir=",".join([get_save_dir(model_name, finetuning_type, ckpt) for ckpt in checkpoints]),
adapter_name_or_path=",".join([get_save_dir(model_name, finetuning_type, adapter) for adapter in adapter_path]),
finetuning_type=finetuning_type,
template=template,
export_dir=export_dir,
@@ -63,7 +63,7 @@ def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
engine.manager.get_elem_by_name("top.lang"),
engine.manager.get_elem_by_name("top.model_name"),
engine.manager.get_elem_by_name("top.model_path"),
engine.manager.get_elem_by_name("top.checkpoints"),
engine.manager.get_elem_by_name("top.adapter_path"),
engine.manager.get_elem_by_name("top.finetuning_type"),
engine.manager.get_elem_by_name("top.template"),
max_shard_size,

View File

@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Dict
from llmtuner.data.template import templates
from llmtuner.extras.constants import METHODS, SUPPORTED_MODELS
from llmtuner.webui.common import get_model_path, get_template, list_checkpoint, save_config
from llmtuner.webui.common import get_model_path, get_template, list_adapters, save_config
from llmtuner.webui.utils import can_quantize
if TYPE_CHECKING:
@@ -20,7 +20,7 @@ def create_top() -> Dict[str, "Component"]:
with gr.Row():
finetuning_type = gr.Dropdown(choices=METHODS, value="lora", scale=1)
checkpoints = gr.Dropdown(multiselect=True, scale=5)
adapter_path = gr.Dropdown(multiselect=True, scale=5)
refresh_btn = gr.Button(scale=1)
with gr.Accordion(label="Advanced config", open=False) as advanced_tab:
@@ -34,7 +34,7 @@ def create_top() -> Dict[str, "Component"]:
shift_attn = gr.Checkbox(value=False)
model_name.change(
list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False
list_adapters, [model_name, finetuning_type], [adapter_path], queue=False
).then(
get_model_path, [model_name], [model_path], queue=False
).then(
@@ -44,13 +44,13 @@ def create_top() -> Dict[str, "Component"]:
model_path.change(save_config, inputs=[lang, model_name, model_path], queue=False)
finetuning_type.change(
list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False
list_adapters, [model_name, finetuning_type], [adapter_path], queue=False
).then(
can_quantize, [finetuning_type], [quantization_bit], queue=False
)
refresh_btn.click(
list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False
list_adapters, [model_name, finetuning_type], [adapter_path], queue=False
)
return dict(
@@ -58,7 +58,7 @@ def create_top() -> Dict[str, "Component"]:
model_name=model_name,
model_path=model_path,
finetuning_type=finetuning_type,
checkpoints=checkpoints,
adapter_path=adapter_path,
refresh_btn=refresh_btn,
advanced_tab=advanced_tab,
quantization_bit=quantization_bit,

View File

@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Dict
from transformers.trainer_utils import SchedulerType
from llmtuner.extras.constants import TRAINING_STAGES
from llmtuner.webui.common import list_checkpoint, list_dataset, DEFAULT_DATA_DIR
from llmtuner.webui.common import list_adapters, list_dataset, DEFAULT_DATA_DIR
from llmtuner.webui.components.data import create_preview_box
from llmtuner.webui.utils import gen_plot
@@ -60,7 +60,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
lr_scheduler_type=lr_scheduler_type, max_grad_norm=max_grad_norm, val_size=val_size
))
with gr.Accordion(label="Advanced config", open=False) as advanced_tab:
with gr.Accordion(label="Extra config", open=False) as extra_tab:
with gr.Row():
logging_steps = gr.Slider(value=5, minimum=5, maximum=1000, step=5)
save_steps = gr.Slider(value=100, minimum=10, maximum=5000, step=10)
@@ -73,7 +73,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
input_elems.update({logging_steps, save_steps, warmup_steps, neftune_alpha, train_on_prompt, upcast_layernorm})
elem_dict.update(dict(
advanced_tab=advanced_tab, logging_steps=logging_steps, save_steps=save_steps, warmup_steps=warmup_steps,
extra_tab=extra_tab, logging_steps=logging_steps, save_steps=save_steps, warmup_steps=warmup_steps,
neftune_alpha=neftune_alpha, train_on_prompt=train_on_prompt, upcast_layernorm=upcast_layernorm
))
@@ -83,12 +83,12 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
lora_dropout = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1)
lora_target = gr.Textbox(scale=1)
additional_target = gr.Textbox(scale=1)
resume_lora_training = gr.Checkbox(value=True, scale=1)
create_new_adapter = gr.Checkbox(scale=1)
input_elems.update({lora_rank, lora_dropout, lora_target, additional_target, resume_lora_training})
input_elems.update({lora_rank, lora_dropout, lora_target, additional_target, create_new_adapter})
elem_dict.update(dict(
lora_tab=lora_tab, lora_rank=lora_rank, lora_dropout=lora_dropout, lora_target=lora_target,
additional_target=additional_target, resume_lora_training=resume_lora_training,
additional_target=additional_target, create_new_adapter=create_new_adapter
))
with gr.Accordion(label="RLHF config", open=False) as rlhf_tab:
@@ -98,7 +98,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
refresh_btn = gr.Button(scale=1)
refresh_btn.click(
list_checkpoint,
list_adapters,
[engine.manager.get_elem_by_name("top.model_name"), engine.manager.get_elem_by_name("top.finetuning_type")],
[reward_model],
queue=False

View File

@@ -33,20 +33,20 @@ LOCALES = {
"label": "微调方法"
}
},
"checkpoints": {
"adapter_path": {
"en": {
"label": "Checkpoints"
"label": "Adapter path"
},
"zh": {
"label": "模型断点"
"label": "适配器路径"
}
},
"refresh_btn": {
"en": {
"value": "Refresh checkpoints"
"value": "Refresh adapters"
},
"zh": {
"value": "刷新断点"
"value": "刷新适配器"
}
},
"advanced_tab": {
@@ -285,6 +285,14 @@ LOCALES = {
"info": "验证集占全部样本的百分比。"
}
},
"extra_tab": {
"en": {
"label": "Extra configurations"
},
"zh": {
"label": "其它参数设置"
}
},
"logging_steps": {
"en": {
"label": "Logging steps",
@@ -393,14 +401,14 @@ LOCALES = {
"info": "除 LoRA 层以外的可训练模块名称。使用英文逗号分隔多个名称。"
}
},
"resume_lora_training": {
"create_new_adapter": {
"en": {
"label": "Resume LoRA training",
"info": "Whether to resume training from the last LoRA weights or create new lora weights."
"label": "Create new adapter",
"info": "Whether to create a new adapter with randomly initialized weight or not."
},
"zh": {
"label": "继续上次的训练",
"info": "接着上次的 LoRA 权重训练或创建一个新的 LoRA 权重"
"label": "新建适配器",
"info": "是否创建一个经过随机初始化的新适配器"
}
},
"rlhf_tab": {
@@ -629,9 +637,9 @@ ALERTS = {
"en": "Please choose a dataset.",
"zh": "请选择数据集。"
},
"err_no_checkpoint": {
"en": "Please select a checkpoint.",
"zh": "请选择断点"
"err_no_adapter": {
"en": "Please select an adapter.",
"zh": "请选择一个适配器"
},
"err_no_export_dir": {
"en": "Please provide export dir.",

View File

@@ -21,7 +21,7 @@ class Manager:
self.all_elems["top"]["lang"],
self.all_elems["top"]["model_name"],
self.all_elems["top"]["model_path"],
self.all_elems["top"]["checkpoints"],
self.all_elems["top"]["adapter_path"],
self.all_elems["top"]["finetuning_type"],
self.all_elems["top"]["quantization_bit"],
self.all_elems["top"]["template"],

View File

@@ -86,19 +86,19 @@ class Runner:
get = lambda name: data[self.manager.get_elem_by_name(name)]
user_config = load_config()
if get("top.checkpoints"):
checkpoint_dir = ",".join([
get_save_dir(get("top.model_name"), get("top.finetuning_type"), ckpt) for ckpt in get("top.checkpoints")
])
if get("top.adapter_path"):
adapter_name_or_path = ",".join([
get_save_dir(get("top.model_name"), get("top.finetuning_type"), adapter)
for adapter in get("top.adapter_path")])
else:
checkpoint_dir = None
adapter_name_or_path = None
args = dict(
stage=TRAINING_STAGES[get("train.training_stage")],
model_name_or_path=get("top.model_path"),
do_train=True,
model_name_or_path=get("top.model_path"),
adapter_name_or_path=adapter_name_or_path,
cache_dir=user_config.get("cache_dir", None),
checkpoint_dir=checkpoint_dir,
finetuning_type=get("top.finetuning_type"),
quantization_bit=int(get("top.quantization_bit")) if get("top.quantization_bit") in ["8", "4"] else None,
template=get("top.template"),
@@ -125,17 +125,14 @@ class Runner:
lora_dropout=get("train.lora_dropout"),
lora_target=get("train.lora_target") or get_module(get("top.model_name")),
additional_target=get("train.additional_target") if get("train.additional_target") else None,
resume_lora_training=get("train.resume_lora_training"),
create_new_adapter=get("train.create_new_adapter"),
output_dir=get_save_dir(get("top.model_name"), get("top.finetuning_type"), get("train.output_dir"))
)
args[get("train.compute_type")] = True
args["disable_tqdm"] = True
if TRAINING_STAGES[get("train.training_stage")] in ["rm", "ppo", "dpo"]:
args["resume_lora_training"] = (args["quantization_bit"] is not None)
if args["quantization_bit"] is not None:
args["upcast_layernorm"] = True
args["create_new_adapter"] = (args["quantization_bit"] is None)
if args["stage"] == "ppo":
args["reward_model"] = get_save_dir(
@@ -158,20 +155,19 @@ class Runner:
get = lambda name: data[self.manager.get_elem_by_name(name)]
user_config = load_config()
if get("top.checkpoints"):
checkpoint_dir = ",".join([
get_save_dir(get("top.model_name"), get("top.finetuning_type"), ckpt) for ckpt in get("top.checkpoints")
])
if get("top.adapter_path"):
adapter_name_or_path = ",".join([
get_save_dir(get("top.model_name"), get("top.finetuning_type"), adapter)
for adapter in get("top.adapter_path")])
else:
checkpoint_dir = None
adapter_name_or_path = None
args = dict(
stage="sft",
model_name_or_path=get("top.model_path"),
do_eval=True,
predict_with_generate=True,
model_name_or_path=get("top.model_path"),
adapter_name_or_path=adapter_name_or_path,
cache_dir=user_config.get("cache_dir", None),
checkpoint_dir=checkpoint_dir,
finetuning_type=get("top.finetuning_type"),
quantization_bit=int(get("top.quantization_bit")) if get("top.quantization_bit") in ["8", "4"] else None,
template=get("top.template"),
@@ -183,6 +179,7 @@ class Runner:
cutoff_len=get("eval.cutoff_len"),
max_samples=int(get("eval.max_samples")),
per_device_eval_batch_size=get("eval.batch_size"),
predict_with_generate=True,
max_new_tokens=get("eval.max_new_tokens"),
top_p=get("eval.top_p"),
temperature=get("eval.temperature"),

View File

@@ -47,7 +47,7 @@ def gen_cmd(args: Dict[str, Any]) -> str:
current_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0")
cmd_lines = ["CUDA_VISIBLE_DEVICES={} python src/train_bash.py ".format(current_devices)]
for k, v in args.items():
if v is not None and v != "":
if v is not None and v is not False and v != "":
cmd_lines.append(" --{} {} ".format(k, str(v)))
cmd_text = "\\\n".join(cmd_lines)
cmd_text = "```bash\n{}\n```".format(cmd_text)
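The added `v is not False` guard keeps store-true options that the UI reports as `False` (such as the new `create_new_adapter`) out of the rendered command entirely, instead of emitting a misleading `--flag False`. A standalone illustration of the filter, re-implementing just the loop condition:

```python
args = {"stage": "sft", "do_train": True, "create_new_adapter": False, "neftune_alpha": None}

rendered = [
    "--{} {}".format(key, value)
    for key, value in args.items()
    if value is not None and value is not False and value != ""
]
print(rendered)  # ['--stage sft', '--do_train True']; False and None entries are dropped
```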