tiny fix
parent 91611d68c4
commit 5a13b3baa6
@@ -1,10 +1,6 @@
 #!/bin/bash
 # DO NOT use GPTQ/AWQ model in FSDP+QLoRA
 
-pip install "transformers>=4.39.1"
-pip install "accelerate>=0.28.0"
-pip install "bitsandbytes>=0.43.0"
-
 CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
     --config_file examples/accelerate/fsdp_config.yaml \
     src/train.py examples/extras/fsdp_qlora/llama3_lora_sft.yaml
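The launch command above reads its FSDP settings from examples/accelerate/fsdp_config.yaml in the repository. As a rough sketch only, an accelerate FSDP config of this kind typically carries fields like the following; the values here are illustrative assumptions, not the contents of the repo's actual file:

    # Sketch of an accelerate FSDP config (assumed values, not the repo's file)
    compute_environment: LOCAL_MACHINE
    distributed_type: FSDP
    fsdp_config:
      fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
      fsdp_offload_params: true
      fsdp_sharding_strategy: 1  # FULL_SHARD
    mixed_precision: fp16
    num_machines: 1
    num_processes: 2  # matches CUDA_VISIBLE_DEVICES=0,1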
@@ -104,10 +104,10 @@ def block_expansion(
     print("Model weights saved in {}".format(output_dir))
 
     print("Fine-tune this model with:")
-    print("    --model_name_or_path {} \\".format(output_dir))
-    print("    --finetuning_type freeze \\")
-    print("    --freeze_trainable_layers {} \\".format(num_expand))
-    print("    --use_llama_pro")
+    print("    model_name_or_path: {}".format(output_dir))
+    print("    finetuning_type: freeze")
+    print("    freeze_trainable_layers: {}".format(num_expand))
+    print("    use_llama_pro: true")
 
 
 if __name__ == "__main__":
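With this change, block_expansion prints a ready-to-paste YAML snippet instead of CLI flags. For example, assuming a hypothetical run with output_dir set to models/llama3-pro and num_expand set to 8, the printed hint would read:

    # Hypothetical output of the updated prints (values are assumed)
    model_name_or_path: models/llama3-pro
    finetuning_type: freeze
    freeze_trainable_layers: 8
    use_llama_pro: true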
@@ -179,7 +179,7 @@ def get_dataset(
         if training_args.should_save:
             dataset.save_to_disk(data_args.tokenized_path)
             logger.info("Tokenized dataset saved at {}.".format(data_args.tokenized_path))
-            logger.info("Please restart the training with `--tokenized_path {}`.".format(data_args.tokenized_path))
+            logger.info("Please restart the training with `tokenized_path: {}`.".format(data_args.tokenized_path))
 
         sys.exit(0)
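The hint in get_dataset follows the same convention: resuming from a pre-tokenized dataset is now expressed as a YAML key rather than a --tokenized_path flag. Assuming the dataset was saved to a hypothetical path saves/tokenized, the training config would gain one line:

    # Hypothetical: reuse a dataset tokenized and saved by a previous run
    tokenized_path: saves/tokenized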