chore: add edited command for mindie
parent 3f2f80db4d
commit 9ba88b0295
@@ -50,6 +50,7 @@ source /usr/local/Ascend/nnal/atb/set_env.sh
source set_env.sh
# Set which device IDs to use
export ASCEND_RT_VISIBLE_DEVICES="[device IDs]" # NPU scenario, e.g. "0,1,2,3,4,5,6,7"
export ASCEND_RT_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
# or
export CUDA_VISIBLE_DEVICES="[device IDs]" # GPU scenario, e.g. "0,1,2,3,4,5,6,7"
```
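For instance, a minimal sketch of restricting the job to a subset of cards, assuming a machine where NPUs 0-3 are the ones you want to use (adjust the IDs to your setup):

```
# Hypothetical example: expose only the first four NPUs to the run
export ASCEND_RT_VISIBLE_DEVICES="0,1,2,3"
# Confirm the value before launching run.sh
echo "$ASCEND_RT_VISIBLE_DEVICES"
```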
@@ -103,6 +104,16 @@ pip install -r requirements.txt
## Single-node scenario
bash run.sh pa_fp16 performance [case_pair] [batch_size] [model_name] ([is_chat_model]) [weight_dir] [chip_num] ([max_position_embedding/max_sequence_length])
bash run.sh pa_fp16 performance [[256,256]] 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B 8
bash run.sh pa_fp16 performance [[256,256]] 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B 1
bash run.sh pa_fp16 performance [[256,256]] 1 llama /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/modelscope/llama-2-7b-ms 8
bash run.sh pa_fp16 performance [[256,256]] 1 llama /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/modelscope/llama-2-7b-ms 1
bash run.sh pa_fp16 performance [[256,256]] 1 chatglm2_6b /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/zhipuai/chatglm2-6b 8
bash run.sh pa_fp16 performance [[256,256]] 1 chatglm2_6b /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/zhipuai/chatglm2-6b 1
bash run.sh pa_fp16 performance [[256,256]] 1 baichuan2_7b /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/baichuan-inc/Baichuan2-7B-Base 8
bash run.sh pa_fp16 performance [[256,256]] 1 baichuan2_7b /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/baichuan-inc/Baichuan2-7B-Base 1
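The optional trailing `[max_position_embedding/max_sequence_length]` argument from the template above can also be passed explicitly; a hedged sketch, assuming a value of 4096 (not taken from any model config):

```
# Hypothetical: the Qwen-7B run above with an assumed max_position_embedding of 4096 appended
bash run.sh pa_fp16 performance [[256,256]] 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B 8 4096
```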
## Multi-node scenario
bash run.sh pa_fp16 performance [case_pair] [batch_size] [model_name] ([is_chat_model]) [weight_dir] [rank_table_file] [world_size] [node num] [rank_id_start] ([max_position_embedding/max_sequence_length])
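No concrete multi-node command is given here, so the following is only a sketch of how the template might be filled in, assuming two 8-card nodes (world_size 16) and a hypothetical rank table file at `/path/to/ranktable_16p.json`, with the command issued once per node:

```
# Hypothetical two-node run (16 chips total); the rank table path is a placeholder
# On node 0 (ranks 0-7):
bash run.sh pa_fp16 performance [[256,256]] 1 llama /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/modelscope/llama-2-7b-ms /path/to/ranktable_16p.json 16 2 0
# On node 1 (ranks 8-15):
bash run.sh pa_fp16 performance [[256,256]] 1 llama /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/modelscope/llama-2-7b-ms /path/to/ranktable_16p.json 16 2 8
```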
@@ -161,6 +172,8 @@ bash run.sh pa_fp16 performance_maxbs [case_pair] [batch_range] [time_limit] [mo
## Single-node scenario
bash run.sh pa_fp16 [dataset] ([shots]) [batch_size] [model_name] ([is_chat_model]) [weight_dir] [chip_num] ([max_position_embedding/max_sequence_length])
bash run.sh pa_fp16 full_MMLU 0 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B 8
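Mirroring the 8-card/1-card pattern used in the performance section, the same evaluation could presumably also be launched on a single card:

```
# Hypothetical single-card variant of the MMLU evaluation above (chip_num = 1)
bash run.sh pa_fp16 full_MMLU 0 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B 1
```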
## Multi-node scenario
bash run.sh pa_fp16 [dataset] ([shots]) [batch_size] [model_name] ([is_chat_model]) [weight_dir] [rank_table_file] [world_size] [node num] [rank_id_start] ([max_position_embedding/max_sequence_length])
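As above, a hedged sketch of the multi-node form, assuming two 8-card nodes and the same hypothetical rank table file:

```
# Hypothetical two-node MMLU evaluation (16 chips total); the rank table path is a placeholder
# On node 0 (ranks 0-7):
bash run.sh pa_fp16 full_MMLU 0 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B /path/to/ranktable_16p.json 16 2 0
# On node 1 (ranks 8-15):
bash run.sh pa_fp16 full_MMLU 0 1 qwen /home/user/repo/LLaMA-Factory-310P3/ms_cache/hub/qwen/Qwen-7B /path/to/ranktable_16p.json 16 2 8
```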