# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import argparse


def argsparser():
    """Build the command-line argument parser for detection + keypoint inference."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--det_model_dir",
        type=str,
        default=None,
        help=("Directory including 'model.pdiparams', 'model.pdmodel' and "
              "'infer_cfg.yml', created by tools/export_model.py."),
        required=True)
    parser.add_argument(
        "--keypoint_model_dir",
        type=str,
        default=None,
        help=("Directory including 'model.pdiparams', 'model.pdmodel' and "
              "'infer_cfg.yml', created by tools/export_model.py."),
        required=True)
    parser.add_argument(
        "--image_file", type=str, default=None, help="Path of image file.")
    parser.add_argument(
        "--image_dir",
        type=str,
        default=None,
        help="Directory of image files; `image_file` has higher priority.")
    parser.add_argument(
        "--video_file",
        type=str,
        default=None,
        help="Path of video file; `video_file` or `camera_id` has the highest priority."
    )
    parser.add_argument(
        "--camera_id",
        type=int,
        default=-1,
        help="Device id of the camera to predict from.")
    parser.add_argument(
        "--det_threshold", type=float, default=0.5, help="Threshold of detection score.")
    parser.add_argument(
        "--keypoint_threshold",
        type=float,
        default=0.5,
        help="Threshold of keypoint score.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory of output visualization files.")
    parser.add_argument(
        "--run_mode",
        type=str,
        default='fluid',
        help="Running mode (fluid/trt_fp32/trt_fp16/trt_int8).")
    parser.add_argument(
        "--use_gpu",
        type=ast.literal_eval,
        default=False,
        help="Whether to predict with GPU.")
    parser.add_argument(
        "--run_benchmark",
        type=ast.literal_eval,
        default=False,
        help="Whether to predict an image_file repeatedly for benchmarking.")
    parser.add_argument(
        "--enable_mkldnn",
        type=ast.literal_eval,
        default=False,
        help="Whether to use MKL-DNN with CPU.")
    parser.add_argument(
        "--cpu_threads", type=int, default=1, help="Number of CPU threads.")
    parser.add_argument(
        "--use_dynamic_shape",
        type=ast.literal_eval,
        default=False,
        help="Whether to use dynamic shape for TensorRT.")
    parser.add_argument(
        "--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.")
    parser.add_argument(
        "--trt_max_shape",
        type=int,
        default=1280,
        help="max_shape for TensorRT.")
    parser.add_argument(
        "--trt_opt_shape",
        type=int,
        default=640,
        help="opt_shape for TensorRT.")
    parser.add_argument(
        "--trt_calib_mode",
        type=ast.literal_eval,  # plain bool() would treat any non-empty string as True
        default=False,
        help="If the model is produced by TRT offline quantitative "
        "calibration, trt_calib_mode needs to be set to True.")

    return parser
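

# --- Illustrative usage sketch (not part of the original module) ---
# The module above only defines argsparser(); how the returned parser is
# consumed depends on the surrounding deploy scripts, which are not shown
# here. A minimal, hypothetical driver could look like the following, run
# with the two required model directories (paths below are placeholders):
#
#     python <this_script>.py \
#         --det_model_dir ./output_inference/det_model \
#         --keypoint_model_dir ./output_inference/keypoint_model \
#         --image_file demo.jpg --use_gpu True
#
if __name__ == "__main__":
    FLAGS = argsparser().parse_args()
    # Echo the parsed configuration so the flag plumbing can be checked
    # without loading any model.
    for name, value in sorted(vars(FLAGS).items()):
        print("{}: {}".format(name, value))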