update times to end-to-end latency (inference + NMS)

pull/1/head
Glenn Jocher 5 years ago
parent 8f17a96ff0
commit ad71d2d513

@@ -62,7 +62,6 @@ def detect(save_img=False):
         # Inference
         t1 = torch_utils.time_synchronized()
         pred = model(img, augment=opt.augment)[0]
-        t2 = torch_utils.time_synchronized()
 
         # to float
         if half:
@@ -71,6 +70,7 @@ def detect(save_img=False):
         # Apply NMS
         pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
                                    fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)
+        t2 = torch_utils.time_synchronized()
 
         # Apply Classifier
         if classify:
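
The change only moves where t2 is sampled: it is now taken after non_max_suppression(), so the interval (t2 - t1) reported downstream covers inference plus NMS rather than inference alone. Below is a minimal sketch of the timing pattern, assuming torch_utils.time_synchronized() synchronizes CUDA before reading the wall clock; the helper and the usage names are illustrative stand-ins, not the repo's exact code.

    import time

    import torch


    def time_synchronized():
        # Wait for queued CUDA kernels to finish before reading the clock;
        # without the synchronize, asynchronous GPU work would fall outside
        # the measured interval.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return time.time()


    # Usage mirroring the diff above (model, img and non_max_suppression are
    # placeholders for the repo's objects):
    # t1 = time_synchronized()
    # pred = model(img, augment=opt.augment)[0]                        # inference
    # pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres)  # NMS
    # t2 = time_synchronized()
    # print('Inference + NMS: %.3fs' % (t2 - t1))

Synchronizing in both samples matters: taking t2 without a synchronize would stop the timer while GPU kernels are still running, understating the end-to-end latency the commit title refers to.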
