Merge branch 'main' of https://osredm.com/p04798526/LLaMA-Factory-310P3
commit 9bbf989502

@@ -0,0 +1 @@
bash run_once.sh lora_sft Qwen-7B 4 50
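The four positional arguments map onto run_once.sh's run_type, model, gpu_cnt and max_steps variables, so this line requests a LoRA SFT run of Qwen-7B on 4 devices for 50 optimizer steps. As a sketch of the single-device variant, which run_once.sh routes through ASCEND_RT_VISIBLE_DEVICES=0 instead of torchrun:

bash run_once.sh lora_sft Qwen-7B 1 50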

@@ -1,16 +1,17 @@
 import json
+import sys
 import pynvml
 import time
 import psutil

-UNIT = 1024 * 1024 * 1024

-def main():
+def gpu_status(output_path = "./results/gpu_status", print_status = False, sleep_time = 60):
+    UNIT = 1024 * 1024 * 1024

     pynvml.nvmlInit()
     gpuDeviceCount = pynvml.nvmlDeviceGetCount()
     start_time = time.time()
+    first_loop = True

     while time.time() - start_time < 3600 *24:
         # print(time.time() - start_time)
         all_gpu_status = []

@@ -43,14 +44,26 @@ def main():
             all_gpu_status = all_gpu_status,
             all_processes_status = all_processes_status
         )
-        formatted_time = time.strftime('%Y%m%d%H%M%S', time.localtime())
-        with open(f"./results/gpu_status/gpu_status_{formatted_time}.json", "a", encoding="utf-8") as f:
-            f.write(json.dumps(logs) + "\n")
-        print(logs)
-
-        time.sleep(60)
+        with open(f"{output_path}/gpu_status.json", "a", encoding="utf-8") as f:
+            f.write(json.dumps(logs) + "\n")
+
+        if first_loop:
+            print("Start run gpu_status.py")
+            first_loop = False
+
+        if print_status:
+            print(logs)
+
+        time.sleep(sleep_time)

     pynvml.nvmlShutdown()


+def main():
+    output_path = sys.argv[1]
+    print_status = bool(int(sys.argv[2]))  # argv values are strings; "0" disables printing
+    sleep_time = int(sys.argv[3])  # sampling interval in seconds
+    gpu_status(output_path, print_status, sleep_time)
+

 if __name__ == "__main__":
     main()
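With this rework gpu_status.py can be launched on its own. A minimal invocation sketch (the output directory is a placeholder and must already exist), matching the commented-out call in run_once.sh further down, passes the output directory, a 0/1 print flag and the sampling interval in seconds; each sample is appended as one JSON record to gpu_status.json in that directory:

python gpu_status.py ./results/my_run 1 10 &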

@@ -0,0 +1,54 @@
import sys
import yaml


def main():
    run_type = sys.argv[1]
    model = sys.argv[2]
    max_steps = sys.argv[3]
    run_name = sys.argv[4]
    output_dir = sys.argv[5]

    if run_type == "lora_sft":
        yaml_file = './results/lora_sft_template.yaml'
    elif run_type == "inference":
        yaml_file = './results/predict_template.yaml'

    model_name_or_path = ""
    template = ""
    if model == "9g-8B":
        model_name_or_path = "../../models/sft_8b_v2"
        template = ""
    elif model == "Baichuan2-7B":
        model_name_or_path = "../../models/Baichuan2-7B-Base"
        template = "baichuan2"
    elif model == "ChatGLM2-6B":
        model_name_or_path = "../../models/chatglm2-6b"
        template = "chatglm2"
    elif model == "Llama2-7B":
        model_name_or_path = "../../models/llama-2-7b-ms"
        template = "llama2"
    elif model == "Qwen-7B":
        model_name_or_path = "../../models/Qwen-7B"
        template = "qwen"
    else:
        print("ERROR: model not supported.")
        sys.exit()

    config = None
    with open(yaml_file, 'r', encoding='utf-8') as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)

    config['model_name_or_path'] = model_name_or_path
    config['template'] = template
    config['output_dir'] = output_dir
    if run_type == "lora_sft":
        config['max_steps'] = int(max_steps)

    with open(f'{output_dir}/{run_name}.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(data=config, stream=f, allow_unicode=True)

    print(f"yaml file saved to {output_dir}/{run_name}.yaml")


if __name__ == "__main__":
    main()
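run_once.sh drives this script with five positional arguments: run type, model name, step budget, run name and output directory. A standalone sketch (run name and output directory are placeholders; the directory must already exist) would be:

python prepare_yaml_file.py lora_sft Qwen-7B 50 my_run ./results/my_run

This reads ./results/lora_sft_template.yaml, swaps in the Qwen-7B checkpoint path, chat template and step budget, and writes the merged config to ./results/my_run/my_run.yaml.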

@@ -0,0 +1,31 @@
bf16: true
cutoff_len: 1024
dataset: belle_1m
ddp_timeout: 180000000
do_train: true
eval_steps: 500
eval_strategy: steps
finetuning_type: lora
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
include_tokens_per_second: true
learning_rate: 0.0001
logging_steps: 3
lora_target: all
lr_scheduler_type: cosine
max_samples: 10000
max_steps: '50'
model_name_or_path: ../../models/Qwen-7B
num_train_epochs: 10.0
output_dir: ./results/lora_sft_Qwen-7B_4_gpu_50_step_20240905070656
overwrite_cache: true
overwrite_output_dir: true
per_device_eval_batch_size: 2
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
save_steps: 500
stage: sft
template: qwen
val_size: 0.1
warmup_ratio: 0.1
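The effective batch size of this generated run follows directly from the values above: per_device_train_batch_size 2 × gradient_accumulation_steps 8 gives 16 sequences per device per optimizer step, or 64 sequences per step across the 4 devices requested by the command at the top of this commit (assuming plain data parallelism over all 4).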

@@ -0,0 +1,42 @@
### model
model_name_or_path: ../../llm/baichuan

### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: belle_1m
template: baichuan
cutoff_len: 1024
max_samples: 10000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: ./results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single_step500
logging_steps: 3
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 10.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
ddp_timeout: 180000000
max_steps: 500
include_num_input_tokens_seen: true
include_tokens_per_second: true

### eval
val_size: 0.1
per_device_eval_batch_size: 2
eval_strategy: steps
eval_steps: 500
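The model_name_or_path, template, output_dir and max_steps entries in this template are only defaults: prepare_yaml_file.py overwrites them before every lora_sft run, which is why the generated Qwen-7B file earlier in this commit carries qwen settings while the template still points at the baichuan checkpoint.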

@@ -0,0 +1,23 @@
### model
model_name_or_path: ../../llm/baichuan

### method
do_predict: true

### dataset
eval_dataset: alpaca_gpt4_zh
template: baichuan
cutoff_len: 1024
max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 16
include_tokens_per_second: true

### output
output_dir: ./results/inference/Baichuan2-7B/Baichuan2_predict_1
overwrite_output_dir: true

### eval
per_device_eval_batch_size: 2
predict_with_generate: true
ddp_timeout: 180000000
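This predict template is selected when run_once.sh is called with run_type inference; the model path, chat template and output directory are overridden per run just as for training, while max_steps is left untouched because it only applies to lora_sft. As a sketch, a single-device inference run could be launched as (the final step argument is still passed positionally but ignored for inference):

bash run_once.sh inference Qwen-7B 1 50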

@@ -0,0 +1,52 @@
#!/bin/bash

run_type="$1"
model="$2"
gpu_cnt="$3"
max_steps="$4"

current_datetime=$(date +%Y%m%d%H%M%S)

if [ "${run_type}" = "lora_sft" ]; then
    run_name="${run_type}_${model}_${gpu_cnt}_gpu_${max_steps}_step_${current_datetime}"
else
    run_name="${run_type}_${model}_${gpu_cnt}_gpu_${current_datetime}"
fi

output_dir="./results/${run_name}"

if [ ! -d "$output_dir" ]; then
    mkdir -p "$output_dir"
    echo "output_dir created: $output_dir"
else
    echo "output_dir exists: $output_dir"
fi

# echo "${run_type} ${model} ${gpu_cnt} ${max_steps} ${run_name} ${output_dir}"
python prepare_yaml_file.py ${run_type} ${model} ${max_steps} ${run_name} ${output_dir}

export USE_MODELSCOPE_HUB=1

# echo "Start recording gpu status "
# # 0 means not printing gpu status
# python gpu_status.py ${output_dir} 1 10 &
# gpu_status_pid=$!
# echo "${gpu_status_pid}"

if [ "${gpu_cnt}" = "1" ]; then
    ASCEND_RT_VISIBLE_DEVICES=0 llamafactory-cli train ${output_dir}/${run_name}.yaml | tee "${output_dir}/log.txt" &
    train_pid=$!
    echo "Start train"
else
    FORCE_TORCHRUN=1 llamafactory-cli train ${output_dir}/${run_name}.yaml | tee "${output_dir}/log.txt" &
    train_pid=$!
    echo "Start train"
fi

wait $train_pid
echo "Train ended"

# sleep 90
# kill $gpu_status_pid
# echo "Gpu status ended"
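Device monitoring is stubbed out above. Re-enabling it amounts to uncommenting the two blocks so the sampler runs in the background for the whole training window and is torn down once training finishes:

# before the training branch
python gpu_status.py ${output_dir} 1 10 &
gpu_status_pid=$!

# after "Train ended"
sleep 90
kill $gpu_status_pid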