diff --git a/run_once.sh b/run_once.sh
index bd8b5d87..2c93c026 100644
--- a/run_once.sh
+++ b/run_once.sh
@@ -23,35 +23,32 @@
 else
     echo "output_dir exists: $output_dir"
 fi
 
-# echo "${run_type} ${model} ${gpu_cnt} ${max_steps} ${run_name} ${output_dir}"
 python prepare_yaml_file.py ${run_type} ${model} ${max_steps} ${run_name} ${output_dir}
 
 export USE_MODELSCOPE_HUB=1
 
-echo "Start recording gpu status "
-# 0 means not printing gpu status
-python gpu_status.py ${output_dir} 1 10 &
-gpu_status_pid=$!
-echo "${gpu_status_pid}"
+# echo "Start recording gpu status "
+# # 0 means not printing gpu status
+# python gpu_status.py ${output_dir} 1 10 &
+# gpu_status_pid=$!
+# echo "${gpu_status_pid}"
 
-sleep 60
+if [ "${gpu_cnt}" = "1" ]; then
+    ASCEND_RT_VISIBLE_DEVICES=0 llamafactory-cli train ${output_dir}/${run_name}.yml \
+        | tee ${output_dir}/log.txt &
+    train_pid=$!
+    echo "Start train"
+else
+    FORCE_TORCHRUN=1 llamafactory-cli train ${output_dir}/${run_name}.yml \
+        | tee ${output_dir}/log.txt &
+    train_pid=$!
+    echo "Start train"
+fi
 
-# if [ "${gpu_cnt}"="1" ]; then
-# ASCEND_RT_VISIBLE_DEVICES=0 llamafactory-cli train ${output_dir}/${run_name}.yml \
-# | tee ${output_dir}/log.txt" &
-# train_pid=$!
-# echo "Start train"
-# else
-# FORCE_TORCHRUN=1 llamafactory-cli train ${output_dir}/${run_name}.yml \
-# | tee ${output_dir}/log.txt" &
-# train_pid=$!
-# echo "Start train"
-# fi
+wait $train_pid
+echo "Train ended"
 
-# wait $train_pid
-# echo "Train ended"
 # sleep 90
-
-kill $gpu_status_pid
-echo "Gpu status ended"
\ No newline at end of file
+# kill $gpu_status_pid
+# echo "Gpu status ended"
\ No newline at end of file