Simplify autoshape() post-process (#1603)

* Simplify autoshape() post-process

* cleanup
Glenn Jocher 2020-12-09 07:43:58 -08:00 committed by GitHub
parent 6b1fe3e9dd
commit 61fb2dbd20
4 changed files with 7 additions and 8 deletions


@@ -94,7 +94,7 @@ def yolov3_tiny(pretrained=False, channels=3, classes=80):

 if __name__ == '__main__':
     model = create(name='yolov3', pretrained=True, channels=3, classes=80)  # example
-    model = model.fuse().autoshape()  # for PIL/cv2/np inputs and NMS
+    model = model.autoshape()  # for PIL/cv2/np inputs and NMS

     # Verify inference
     from PIL import Image
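For context, a minimal usage sketch of the hub entrypoint after this change. The hub path, weight name, and image filename below are assumptions for illustration, not part of this commit.

```python
# Minimal sketch: load the hub model and run autoshaped inference.
# The repo string and image path are assumptions, not part of this commit.
import torch
from PIL import Image

model = torch.hub.load('ultralytics/yolov3', 'yolov3', pretrained=True)
model = model.autoshape()  # wrap for PIL/cv2/np inputs and built-in NMS

img = Image.open('zidane.jpg')  # hypothetical example image
results = model(img, size=640)  # autoShape handles resizing and NMS
results.print()                 # per-image detection summary
```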


@@ -167,8 +167,7 @@ class autoShape(nn.Module):

         # Post-process
         for i in batch:
-            if y[i] is not None:
-                y[i][:, :4] = scale_coords(shape1, y[i][:, :4], shape0[i])
+            scale_coords(shape1, y[i][:, :4], shape0[i])

         return Detections(imgs, y, self.names)
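The dropped assignment works because `scale_coords()` rescales its coordinate tensor in place and `y[i][:, :4]` is a view into `y[i]`, so the parent tensor is already updated. A minimal sketch of that behavior, using a simplified stand-in for `scale_coords` (gain only, no padding or clipping):

```python
# Minimal sketch of why the reassignment is redundant. scale_coords_sketch is a
# simplified stand-in for utils.general.scale_coords (gain only, no padding/clip).
import torch

def scale_coords_sketch(img1_shape, coords, img0_shape):
    gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
    coords[:, :4] /= gain  # in-place: writes through the view into the parent tensor
    return coords

y = [torch.tensor([[320., 240., 480., 360., 0.9, 0.]])]   # one fake (xyxy, conf, cls) row
scale_coords_sketch((640, 640), y[0][:, :4], (1280, 720))  # no reassignment needed
print(y[0])  # box coordinates already rescaled; conf and cls untouched
```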
@@ -177,13 +176,13 @@ class Detections:
     # detections class for YOLOv5 inference results
     def __init__(self, imgs, pred, names=None):
         super(Detections, self).__init__()
+        d = pred[0].device  # device
+        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
         self.imgs = imgs  # list of images as numpy arrays
         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
         self.names = names  # class names
         self.xyxy = pred  # xyxy pixels
         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
-        d = pred[0].device  # device
-        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
         self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
         self.n = len(self.pred)
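For reference, each `gn` gain holds `[w, h, w, h, 1., 1.]` for its image, so dividing an `(xyxy, conf, cls)` row by it normalizes the box to the 0-1 range while leaving confidence and class untouched. The shapes in this sketch are made up for illustration:

```python
# Minimal sketch (made-up shapes): how the gn gains normalize predictions.
import torch

im = torch.zeros(480, 640, 3)  # HWC image array: h=480, w=640
pred = torch.tensor([[64., 48., 320., 240., 0.9, 0.]])  # one (xyxy, conf, cls) row

d = pred.device
gn = torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d)
print(gn)         # tensor([640., 480., 640., 480., 1., 1.])
print(pred / gn)  # boxes scaled to 0-1; confidence and class unchanged
```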


@@ -26,5 +26,5 @@ pandas
 # scikit-learn==0.19.2  # for coreml quantization

 # extras --------------------------------------
-# thop  # FLOPS computation
-# pycocotools>=2.0  # COCO mAP
+thop  # FLOPS computation
+pycocotools>=2.0  # COCO mAP
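With `thop` promoted from an optional extra to a hard requirement, here is a hedged sketch of the kind of FLOPS reporting it enables. The hub path and the 640x640 input size are assumptions, not part of this commit.

```python
# Minimal sketch: FLOPS/parameter count with thop. The hub path, weight name,
# and input size are assumptions for illustration only.
import torch
from thop import profile

model = torch.hub.load('ultralytics/yolov3', 'yolov3', pretrained=False)
img = torch.zeros(1, 3, 640, 640)  # dummy batch of one image
macs, params = profile(model, inputs=(img,), verbose=False)
print(f'{2 * macs / 1e9:.1f} GFLOPS, {params / 1e6:.1f}M parameters')  # FLOPS ~ 2 * MACs
```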


@@ -258,7 +258,7 @@ def wh_iou(wh1, wh2):
     return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


-def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, classes=None, agnostic=False, labels=()):
+def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
     """Performs Non-Maximum Suppression (NMS) on inference results

     Returns:
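The new defaults raise the confidence threshold (0.1 to 0.25) and lower the IoU threshold (0.6 to 0.45), so fewer low-confidence candidates survive and overlapping boxes are suppressed more aggressively. A minimal sketch of how the two thresholds act, built on torchvision's NMS op rather than the repo's full multi-class implementation:

```python
# Minimal sketch of how conf_thres and iou_thres act; torchvision.ops.nms is used
# here as the underlying op. This is not the repo's full non_max_suppression.
import torch
import torchvision

def nms_sketch(boxes, scores, conf_thres=0.25, iou_thres=0.45):
    keep = scores > conf_thres                           # 1) drop low-confidence boxes
    boxes, scores = boxes[keep], scores[keep]
    idx = torchvision.ops.nms(boxes, scores, iou_thres)  # 2) suppress overlapping boxes
    return boxes[idx], scores[idx]

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [50., 50., 60., 60.]])
scores = torch.tensor([0.90, 0.80, 0.20])
print(nms_sketch(boxes, scores))  # 2nd box suppressed by IoU, 3rd dropped by conf_thres
```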