diff --git a/detect.py b/detect.py
index 53b63ebf..f6efbeac 100644
--- a/detect.py
+++ b/detect.py
@@ -63,6 +63,9 @@ def detect(save_img=False):
     # Run inference
     if device.type != 'cpu':
         model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
+    old_img_w = old_img_h = imgsz
+    old_img_b = 1
+
     t0 = time.time()
     for path, img, im0s, vid_cap in dataset:
         img = torch.from_numpy(img).to(device)
@@ -71,13 +74,22 @@ def detect(save_img=False):
         if img.ndimension() == 3:
             img = img.unsqueeze(0)
 
+        # Warmup
+        if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
+            old_img_b = img.shape[0]
+            old_img_h = img.shape[2]
+            old_img_w = img.shape[3]
+            for i in range(3):
+                model(img, augment=opt.augment)[0]
+
         # Inference
         t1 = time_synchronized()
         pred = model(img, augment=opt.augment)[0]
+        t2 = time_synchronized()
 
         # Apply NMS
         pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
-        t2 = time_synchronized()
+        t3 = time_synchronized()
 
         # Apply Classifier
         if classify:
@@ -93,7 +105,6 @@ def detect(save_img=False):
             p = Path(p) # to Path
             save_path = str(save_dir / p.name) # img.jpg
             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
-            s += '%gx%g ' % img.shape[2:] # print string
             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
             if len(det):
                 # Rescale boxes from img_size to im0 size
@@ -117,7 +128,7 @@ def detect(save_img=False):
                         plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
 
             # Print time (inference + NMS)
-            #print(f'{s}Done. ({t2 - t1:.3f}s)')
+            #print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
 
             # Stream results
             if view_img:
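For context, the sketch below is a minimal stand-alone illustration (not part of the patch) of the pattern the diff introduces: on GPU, re-run the model a few times whenever the input shape changes so that kernel selection and memory allocation are not billed to the first timed inference, and measure inference separately from NMS. The Conv2d stand-in, the run helper, and the example sizes are illustrative assumptions; only imgsz, the old_img_* names, the 3-iteration warmup, and time_synchronized mirror names used in the diff.

# Minimal sketch, assuming a stand-in torch module in place of the YOLOv7 detector.
import time

import torch


def time_synchronized():
    # Wait for pending CUDA work so wall-clock timings are meaningful.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.nn.Conv2d(3, 16, 3, padding=1).to(device).eval()  # stand-in for the detector

imgsz = 640
old_img_b, old_img_h, old_img_w = 1, imgsz, imgsz


def run(img):
    global old_img_b, old_img_h, old_img_w
    # Re-warm only when the input shape changes on GPU, so autotuning and
    # allocation overhead are not counted against the first timed inference.
    if device.type != 'cpu' and (old_img_b, old_img_h, old_img_w) != (img.shape[0], img.shape[2], img.shape[3]):
        old_img_b, old_img_h, old_img_w = img.shape[0], img.shape[2], img.shape[3]
        for _ in range(3):
            model(img)

    t1 = time_synchronized()
    pred = model(img)  # timed inference (NMS would be timed separately, as t3 - t2 in the diff)
    t2 = time_synchronized()
    return pred, t2 - t1


with torch.no_grad():
    img = torch.zeros(1, 3, imgsz, imgsz, device=device)
    _, dt = run(img)
    print(f'Inference: {1E3 * dt:.1f}ms')

Tracking the last-seen batch/height/width means the extra forward passes are paid only when the input shape actually changes, not on every frame.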