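#!/bin/bash
# Builds a run name and output directory, generates the training YAML, and
# (in the commented-out block below) launches a LLaMA-Factory training run.
# Usage: bash <this_script> <run_type> <model> <gpu_cnt> <max_steps>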
run_type=$1
model=$2
gpu_cnt=$3
max_steps=$4

current_datetime=$(date +%Y%m%d%H%M%S)

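# lora_sft runs encode max_steps in the run name; other run types omit it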
if [ "${run_type}" = "lora_sft" ]; then
|
||
|
run_name="${run_type}_${model}_${gpu_cnt}_gpu_${max_steps}_step_${current_datetime}"
|
||
|
|
||
|
else
|
||
|
run_name="${run_type}_${model}_${gpu_cnt}_gpu_${current_datetime}"
|
||
|
fi
|
||
|
|
||
|
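# All artifacts for this run (YAML config, training log) are collected here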
output_dir="./results/${run_name}"

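# Create the run's output directory if it does not already exist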
if [ ! -d "$output_dir" ]; then
    mkdir -p "$output_dir"
    echo "Path did not exist; created: $output_dir"
else
    echo "Path already exists: $output_dir"
fi

echo "${run_type} ${model} ${gpu_cnt} ${max_steps} ${run_name} ${output_dir}"
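# Generate the YAML config; the llamafactory-cli commands below expect ${output_dir}/${run_name}.yml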
python prepare_yaml_file.py ${run_type} ${model} ${max_steps} ${run_name} ${output_dir}

# export USE_MODELSCOPE_HUB=1

# # 0 means not printing gpu status
# python gpu_status.py ${output_dir} 0 &
# gpu_status_pid=$!
# echo "Start recording gpu status"

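# Single GPU: pin one Ascend NPU device; multi-GPU: FORCE_TORCHRUN=1 launches via torchrun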
# if [ "${gpu_cnt}" = "1" ]; then
#     ASCEND_RT_VISIBLE_DEVICES=0 llamafactory-cli train ${output_dir}/${run_name}.yml \
#         | tee ${output_dir}/log.txt &
#     train_pid=$!
#     echo "Start train"
# else
#     FORCE_TORCHRUN=1 llamafactory-cli train ${output_dir}/${run_name}.yml \
#         | tee ${output_dir}/log.txt &
#     train_pid=$!
#     echo "Start train"
# fi

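# Block until training exits, then give the GPU monitor time to flush before stopping it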
# wait $train_pid
# echo "Train ended"
# sleep 90
# kill $gpu_status_pid
# echo "Gpu status ended"