feat: add run mindie test script and prepare for test

This commit is contained in:
wql 2024-11-29 16:54:03 +08:00
parent ed8235c4dd
commit 612dcc6b72
2 changed files with 73 additions and 42669 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,75 @@
#!/bin/bash
# Run a single MindIE inference performance test on Ascend 310P NPUs and
# collect the result CSVs plus NPU utilization logs into a timestamped
# results directory.
#
# Usage: bash run_test_once.sh <model> <gpu_cnt>
#   model:   one of 9g-8B | Baichuan2-7B | ChatGLM2-6B | Llama2-7B | Qwen-7B
#   gpu_cnt: number of NPUs (tensor-parallel size); "8" is labeled 多卡
#            (multi-card), anything else 单卡 (single-card).
#
# Example of the underlying modeltest invocation:
#   bash /usr/local/Ascend/llm_model/tests/modeltest/run.sh pa_fp16 performance_single [[256,256]] input.txt 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B 1

model="$1"
gpu_cnt="$2"
echo "model: ${model}"
echo "gpu_cnt: ${gpu_cnt}"

current_datetime=$(date +%Y%m%d%H%M%S)

# BUG FIX: the original wrote [ "${gpu_cnt}"="8" ] (no spaces around '='),
# which [ treats as one non-empty string — always true. With spaces it is a
# real comparison.
if [ "${gpu_cnt}" = "8" ]; then
    run_name_gpu_cnt="多卡"
else
    run_name_gpu_cnt="单卡"
fi

# Map the model argument to its local checkpoint path, the modeltest model
# name, and the folder name modeltest writes its results under.
# BUG FIX: the original elif chain also used the space-less '=' so every run
# matched the first branch; a case statement makes the dispatch explicit.
model_path=""
model_name=""
case "${model}" in
    "9g-8B")
        model_path="/home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qy/sft_8b_v2"
        model_name="cpm"
        result_folder_name=""
        ;;
    "Baichuan2-7B")
        model_path="/home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/baichuan-inc/Baichuan2-7B-Base"
        model_name="baichuan2_7b"
        result_folder_name="baichuan2_7b"
        ;;
    "ChatGLM2-6B")
        model_path="/home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/zhipuai/chatglm2-6b"
        model_name="chatglm2_6b"
        result_folder_name="chatglm2_6b"
        ;;
    "Llama2-7B")
        model_path="/home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/modelscope/llama-2-7b-ms"
        model_name="llama"
        result_folder_name="llama2_7b"
        ;;
    "Qwen-7B")
        model_path="/home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B"
        model_name="qwen"
        result_folder_name="qwen_7b"
        ;;
    *)
        echo "ERROR: model not supported or model name wrong" >&2
        exit 1
        ;;
esac

run_name="昇腾310p_推理_${model}_${run_name_gpu_cnt}_${current_datetime}"
output_dir="./results/${run_name}"
mkdir -p "${output_dir}"
echo "output_dir created: ${output_dir}"

# Record NPU status every 60s in the background while the test runs.
echo "Start recording npu status "
bash ../npu_status.sh "${output_dir}" 60 0 &
npu_status_pid=$!
# echo "${npu_status_pid}"

# BUG FIX: the original passed $(model_name), $(model_path), $(gpu_cnt) —
# command substitution (executes a command by that name), not the variable
# expansion ${model_name} etc. The test command therefore never received the
# selected model.
bash /usr/local/Ascend/llm_model/tests/modeltest/run.sh pa_fp16 performance_single "[[256,256]]" input.txt 1 "${model_name}" "${model_path}" "${gpu_cnt}" &
train_pid=$!
wait "${train_pid}"
train_status=$?
echo "inference ended"

# Let the status recorder capture one final sample before stopping it.
sleep 60
kill "${npu_status_pid}"
echo "Npu status ended"

echo "train_status ${train_status}"
if [ "${train_status}" -ne 0 ]; then
    # BUG FIX: the original only renamed the variable, never the directory,
    # so the cp targets below pointed at a path that did not exist. Rename
    # the directory on disk so the _fail suffix keeps the collected logs.
    mv "${output_dir}" "${output_dir}_fail"
    output_dir="${output_dir}_fail"
fi

# Copy the modeltest result CSVs into the run's output directory.
# BUG FIX: same $(var) -> ${var} correction as above.
result_base="/usr/local/Ascend/llm_model/tests/modeltest/outputs/results/NPU/performance_test/performance_single/fp16/${result_folder_name}"
cp "${result_base}/performance_single_pa_batch1_tp${gpu_cnt}_formatted_result.csv" "${output_dir}"
cp "${result_base}/performance_single_pa_batch1_tp${gpu_cnt}_result.csv" "${output_dir}"