# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
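"""Run a PP-Tracking multi-object tracking model on a video with FastDeploy
and visualize the tracked trajectories frame by frame."""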
import fastdeploy as fd
import cv2
import os


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True, help="Path of model.")
    parser.add_argument(
        "--video", type=str, required=True, help="Path of test video file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, supports 'cpu' or 'gpu'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
    return parser.parse_args()
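# build_option maps the CLI flags onto a FastDeploy RuntimeOption: CPU
# inference by default, CUDA when "--device gpu" is passed, and the TensorRT
# backend when "--use_trt True" is also given.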
def build_option(args):
    option = fd.RuntimeOption()
    if args.device.lower() == "gpu":
        option.use_gpu()
    if args.use_trt:
        option.use_trt_backend()
    return option
args = parse_arguments()

# Configure the runtime and load the model
runtime_option = build_option(args)
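# The --model directory is expected to contain the exported Paddle inference
# files model.pdmodel and model.pdiparams plus the deploy config infer_cfg.yml.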
model_file = os.path.join(args.model, "model.pdmodel")
params_file = os.path.join(args.model, "model.pdiparams")
config_file = os.path.join(args.model, "infer_cfg.yml")
model = fd.vision.tracking.PPTracking(
    model_file, params_file, config_file, runtime_option=runtime_option)
# Initialize the trail recorder
recorder = fd.vision.tracking.TrailRecorder()
# Bind the recorder. Note: every prediction inserts data into trail_recorder,
# so memory keeps growing as more frames are processed; call unbind_recorder()
# to release the binding.
model.bind_recorder(recorder)
# Read the video and run tracking prediction frame by frame
cap = cv2.VideoCapture(args.video)
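# The commented-out counter below sketches how the recorder could be unbound
# after a fixed number of frames to stop the trail history (and its memory
# use) from growing further; it is left disabled here.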
# count = 0
while True:
    _, frame = cap.read()
    if frame is None:
        break
    result = model.predict(frame)
    # count += 1
    # if count == 10:
    #     model.unbind_recorder()
    img = fd.vision.vis_mot(frame, result, 0.0, recorder)
    cv2.imshow("video", img)
    if cv2.waitKey(30) == ord("q"):
        break
model.unbind_recorder()
cap.release()
cv2.destroyAllWindows()
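# Example invocation (the model directory and video file names below are
# illustrative placeholders; substitute the paths to your own exported
# PP-Tracking model and test video):
#   python infer.py --model ./pptracking_model --video ./test_video.mp4 \
#       --device gpu --use_trt False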