[cherry-pick] Fix run_benchmark (#7815)
* fix run_benchmark so small models report accurate inference speed
* fix run_benchmark for the other detection models

nemonameless authored Feb 23, 2023
1 parent 05ab413 commit 93e2d43
Showing 1 changed file with 38 additions and 8 deletions.
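The change follows one pattern across all three `predict()` implementations (`Detector`, `DetectorSOLOv2`, `DetectorPicoDet`): when `run_benchmark=True`, the method only executes the inference engine and synchronizes the CUDA device, returning placeholder outputs, so output fetching and postprocessing never land inside the timed loop. Below is a minimal sketch of that pattern, assuming a CUDA device and a `paddle.inference` predictor created elsewhere; the `benchmark_predict` helper and its timing harness are illustrative, not part of the commit:

```python
import time

import paddle


def benchmark_predict(predictor, repeats=100, warmup=50):
    """Time raw engine execution, skipping output fetch and postprocess."""
    for _ in range(warmup):  # warm up kernels, caches, and autotuning
        predictor.run()
    paddle.device.cuda.synchronize()  # drain any pending warmup work

    start = time.perf_counter()
    for _ in range(repeats):
        predictor.run()
        paddle.device.cuda.synchronize()  # wait for queued GPU work
    return (time.perf_counter() - start) / repeats  # avg seconds per run
```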
deploy/python/infer.py (38 additions, 8 deletions)
@@ -181,7 +181,7 @@ def filter_box(self, result, threshold):
         filter_res = {'boxes': boxes, 'boxes_num': filter_num}
         return filter_res
 
-    def predict(self, repeats=1):
+    def predict(self, repeats=1, run_benchmark=False):
         '''
         Args:
             repeats (int): repeats number for prediction
@@ -193,6 +193,15 @@ def predict(self, repeats=1):
         '''
         # model prediction
         np_boxes_num, np_boxes, np_masks = np.array([0]), None, None
+
+        if run_benchmark:
+            for i in range(repeats):
+                self.predictor.run()
+                paddle.device.cuda.synchronize()
+            result = dict(
+                boxes=np_boxes, masks=np_masks, boxes_num=np_boxes_num)
+            return result
+
         for i in range(repeats):
             self.predictor.run()
             output_names = self.predictor.get_output_names()
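The explicit `paddle.device.cuda.synchronize()` is what makes the measurement trustworthy: GPU kernel launches are asynchronous in general, so without a synchronize the host-side timer can stop while device work is still in flight, and the error is most visible on small, fast models where launch overhead and compute are of similar magnitude. An illustrative contrast, again assuming a CUDA device and an already-built predictor (`time_once` is a hypothetical helper, not part of this commit):

```python
import time

import paddle


def time_once(predictor, sync):
    start = time.perf_counter()
    predictor.run()
    if sync:
        paddle.device.cuda.synchronize()  # block until GPU work completes
    return time.perf_counter() - start

# time_once(pred, sync=False) may report little more than launch overhead;
# time_once(pred, sync=True) reports the actual device latency.
```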
@@ -272,9 +281,9 @@ def predict_image_slice(self,
             self.det_times.preprocess_time_s.end()
 
             # model prediction
-            result = self.predict(repeats=50)  # warmup
+            result = self.predict(repeats=50, run_benchmark=True)  # warmup
             self.det_times.inference_time_s.start()
-            result = self.predict(repeats=repeats)
+            result = self.predict(repeats=repeats, run_benchmark=True)
             self.det_times.inference_time_s.end(repeats=repeats)
 
             # postprocess
@@ -370,9 +379,9 @@ def predict_image(self,
             self.det_times.preprocess_time_s.end()
 
             # model prediction
-            result = self.predict(repeats=50)  # warmup
+            result = self.predict(repeats=50, run_benchmark=True)  # warmup
             self.det_times.inference_time_s.start()
-            result = self.predict(repeats=repeats)
+            result = self.predict(repeats=repeats, run_benchmark=True)
             self.det_times.inference_time_s.end(repeats=repeats)
 
             # postprocess
@@ -568,7 +577,7 @@ def __init__(
             output_dir=output_dir,
             threshold=threshold, )
 
-    def predict(self, repeats=1):
+    def predict(self, repeats=1, run_benchmark=False):
         '''
         Args:
             repeats (int): repeat number for prediction
@@ -577,7 +586,20 @@ def predict(self, repeats=1):
             'cate_label': label of segm, shape:[N]
             'cate_score': confidence score of segm, shape:[N]
         '''
-        np_label, np_score, np_segms = None, None, None
+        np_segms, np_label, np_score, np_boxes_num = None, None, None, np.array(
+            [0])
+
+        if run_benchmark:
+            for i in range(repeats):
+                self.predictor.run()
+                paddle.device.cuda.synchronize()
+            result = dict(
+                segm=np_segms,
+                label=np_label,
+                score=np_score,
+                boxes_num=np_boxes_num)
+            return result
+
         for i in range(repeats):
             self.predictor.run()
             output_names = self.predictor.get_output_names()
@@ -659,7 +681,7 @@ def postprocess(self, inputs, result):
         result = dict(boxes=np_boxes, boxes_num=np_boxes_num)
         return result
 
-    def predict(self, repeats=1):
+    def predict(self, repeats=1, run_benchmark=False):
         '''
         Args:
             repeats (int): repeat number for prediction
@@ -668,6 +690,14 @@ def predict(self, repeats=1):
             matrix element:[class, score, x_min, y_min, x_max, y_max]
         '''
         np_score_list, np_boxes_list = [], []
+
+        if run_benchmark:
+            for i in range(repeats):
+                self.predictor.run()
+                paddle.device.cuda.synchronize()
+            result = dict(boxes=np_score_list, boxes_num=np_boxes_list)
+            return result
+
         for i in range(repeats):
             self.predictor.run()
             np_score_list.clear()
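Note that on the benchmark path every `predict()` returns its placeholder outputs untouched (`None` for boxes, masks, and segm, or empty lists for PicoDet), so a benchmark result is useful only for timing. A hypothetical call site, assuming `detector` is a `Detector` built from `deploy/python/infer.py` with a valid exported model:

```python
# Timing-only run: outputs are placeholders, not real detections.
result = detector.predict(repeats=100, run_benchmark=True)
print(result['boxes_num'])  # np.array([0]); skip postprocess/visualization
```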
