In the previous tutorial we learned about PyTorch's built-in image preprocessing methods. Those transforms do not handle the corresponding labels, so they are not suitable for tasks such as object detection. In this tutorial we will look at image preprocessing methods for object detection, where the bounding boxes are transformed together with the image.
```python
# Download an image used to visualize the results of the transforms in this tutorial
!wget http://www.ruanyifeng.com/blogimg/asset/2017/bg2017121301.jpg
```
```python
import cv2
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import torchvision.transforms as T
from PIL import Image


def visual(img, boxes, scores):
    """Draw each bounding box and its score onto the image."""
    COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
    FONT = cv2.FONT_HERSHEY_SIMPLEX
    for i in range(boxes.shape[0]):
        cv2.rectangle(img,
                      (int(boxes[i][0]), int(boxes[i][1])),
                      (int(boxes[i][2]), int(boxes[i][3])),
                      COLORS[i % 3], 2)
        cv2.putText(img, 'Object: %.2f' % scores[i],
                    (int(boxes[i][0]) - 3, int(boxes[i][1]) - 5),
                    FONT, 0.4, (0, 0, 0), 2)
    return img


img = np.array(Image.open('bg2017121301.jpg'))
plt.imshow(img)
```
```python
# Manually define a dummy bounding box [x1, y1, x2, y2] and a score
boxes = np.array([[80, 50, 300, 300]])
scores = [0.6]
img = visual(img, boxes, scores)
plt.imshow(img)
```
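To make concrete why the image-only transforms from the previous tutorial are not enough, here is a small sketch (my addition, not part of the original notebook): a standard torchvision transform changes the image, but the box coordinates stay untouched.

```python
# Sketch: torchvision transforms only see the image, so the dummy box is NOT updated.
pil_img = Image.open('bg2017121301.jpg')
cropped = T.RandomResizedCrop(224)(pil_img)      # the image is cropped and resized...
print(pil_img.size, '->', cropped.size, boxes)   # ...but boxes is still [[80, 50, 300, 300]]
```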
```python
# Randomly expand the image onto a larger canvas ("zoom out");
# the detection labels (bounding boxes) are shifted accordingly.
def random_expand(img, boxes, fill, p):
    if np.random.random() > p:
        return img, boxes
    height, width, depth = img.shape
    for _ in range(50):
        scale = np.random.uniform(1, 2)
        min_ratio = max(0.5, 1. / scale / scale)
        max_ratio = min(2, scale * scale)
        ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio))
        ws = scale * ratio
        hs = scale / ratio
        if ws < 1 or hs < 1:
            continue
        w = int(ws * width)
        h = int(hs * height)
        # Randomly choose where the original image is pasted on the new canvas
        left = np.random.randint(0, w - width)
        top = np.random.randint(0, h - height)
        # Shift the boxes by the same offset
        boxes_t = boxes.copy()
        boxes_t[:, :2] += (left, top)
        boxes_t[:, 2:] += (left, top)
        # Fill the canvas with the given color and paste the original image
        expand_img = np.empty((h, w, depth), dtype=img.dtype)
        expand_img[:, :] = fill
        expand_img[top:top + height, left:left + width] = img
        img = expand_img
        return img, boxes_t


img = np.array(Image.open('bg2017121301.jpg'))
img_t, boxes_t = random_expand(img, boxes, (127.5, 127.5, 127.5), 0.9)
plt.imshow(visual(img_t, boxes_t, scores))
```
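As a quick sanity check (my addition, reusing `random_expand` and `boxes` from above): since the whole image is only translated on the new canvas, the pixels inside the shifted box must be exactly the pixels inside the original box.

```python
# Force the expansion with p=1.0 and verify that the shifted box
# still covers the same image content.
img = np.array(Image.open('bg2017121301.jpg'))
img_e, boxes_e = random_expand(img, boxes, (127.5, 127.5, 127.5), 1.0)

x1, y1, x2, y2 = boxes[0]
ex1, ey1, ex2, ey2 = boxes_e[0]
assert np.array_equal(img[y1:y2, x1:x2], img_e[ey1:ey2, ex1:ex2])
```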
```python
# Randomly crop the image; boxes whose centers fall outside the crop are dropped,
# and the remaining boxes are clipped and shifted into the crop's coordinates.
def matrix_iou(a, b):
    """Return the IoU matrix of boxes a and b (numpy version, for data augmentation)."""
    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
    return area_i / (area_a[:, np.newaxis] + area_b - area_i + 1e-12)


def random_crop(img, boxes, classes=0, ratios=None):
    height, width, _ = img.shape
    if len(boxes) == 0:
        return img, boxes, classes
    while True:
        # (min_iou, max_iou) constraint for the sampled crop; None means no crop
        mode = random.choice((
            None,
            (0.3, None),
            (0.5, None),
            (0.7, None),
        ))
        if mode is None:
            return img, boxes, classes
        min_iou, max_iou = mode
        if min_iou is None:
            min_iou = float('-inf')
        if max_iou is None:
            max_iou = float('inf')
        for _ in range(50):
            scale = random.uniform(0.3, 1.)
            min_ratio = max(0.5, scale * scale)
            max_ratio = min(2, 1. / scale / scale)
            ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
            w = int(scale * ratio * width)
            h = int((scale / ratio) * height)
            l = random.randrange(width - w)
            t = random.randrange(height - h)
            roi = np.array((l, t, l + w, t + h))

            # Reject crops that violate the IoU constraint
            iou = matrix_iou(boxes, roi[np.newaxis])
            if not (min_iou <= iou.min() and iou.max() <= max_iou):
                continue

            img_t = img[roi[1]:roi[3], roi[0]:roi[2]]

            # Keep only the boxes whose centers lie inside the crop
            centers = (boxes[:, :2] + boxes[:, 2:]) / 2
            mask = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
            boxes_t = boxes[mask].copy()
            classes_t = classes[mask].copy()
            if ratios is not None:
                ratios_t = ratios[mask].copy()
            else:
                ratios_t = None
            if len(boxes_t) == 0:
                continue

            # Clip the boxes to the crop and shift them into crop coordinates
            boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
            boxes_t[:, :2] -= roi[:2]
            boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
            boxes_t[:, 2:] -= roi[:2]

            return img_t, boxes_t, classes_t


img = np.array(Image.open('bg2017121301.jpg'))
img_t, boxes_t, classes_t = random_crop(img, boxes, classes=np.array([0]), ratios=None)
plt.imshow(visual(img_t, boxes_t, classes_t))
```
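A tiny check of `matrix_iou` (my addition): for two 2×2 boxes offset by (1, 1), the intersection area is 1 and the union is 4 + 4 - 1 = 7, so the IoU should be 1/7 ≈ 0.143.

```python
a = np.array([[0., 0., 2., 2.]])
b = np.array([[1., 1., 3., 3.]])
print(matrix_iou(a, b))   # expected: [[0.14285714]]
```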
Inspecting `boxes_t` after the crop:

```python
boxes_t
```

```
array([[ 80,  50, 300, 300]])
```

In this run the box is unchanged: when `random_crop` samples the `None` mode it returns the image and boxes untouched. Re-running the cell can also produce a cropped image with a clipped, shifted box.
```python
# Random affine transform (rotation, scale, translation and shear);
# the bounding boxes are warped with the same transformation matrix.
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1),
                  scale=(.9, 1.1), shear=(-2, 2), borderValue=(127.5, 127.5, 127.5)):
    border = 0
    height, width, _ = img.shape

    # Rotation and Scale
    R = np.eye(3)
    a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
    s = random.random() * (scale[1] - scale[0]) + scale[0]
    # angle: rotation angle, center: rotation center, scale: scaling factor after rotation
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180)  # y shear (deg)

    M = S @ T @ R  # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
    imw = cv2.warpPerspective(img, M, dsize=(width, height),
                              flags=cv2.INTER_LINEAR, borderValue=borderValue)  # BGR order borderValue

    if targets is not None and len(targets) > 0:
        n = targets.shape[0]
        points = targets[:, 0:4].copy()
        area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])

        # warp points
        xy = np.ones((n * 4, 3))
        xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new boxes
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # apply angle-based reduction
        radians = a * math.pi / 180
        reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        x = (xy[:, 2] + xy[:, 0]) / 2
        y = (xy[:, 3] + xy[:, 1]) / 2
        w = (xy[:, 2] - xy[:, 0]) * reduction
        h = (xy[:, 3] - xy[:, 1]) * reduction
        xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # reject warped points outside of image
        x1 = np.clip(xy[:, 0], 0, width)
        y1 = np.clip(xy[:, 1], 0, height)
        x2 = np.clip(xy[:, 2], 0, width)
        y2 = np.clip(xy[:, 3], 0, height)
        boxes = np.concatenate((x1, y1, x2, y2)).reshape(4, n).T
        return imw, boxes, M
    else:
        return imw


img = np.array(Image.open('bg2017121301.jpg'))
img_t, boxes_t, _ = random_affine(img, boxes, borderValue=(127.5, 127.5, 127.5))
plt.imshow(visual(img_t, boxes_t, classes_t))
```
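Finally, a possible way to chain the three augmentations into a single pipeline (my own sketch; the `augment` helper and the probabilities used here are not part of the original code):

```python
def augment(img, boxes, classes):
    # Each step transforms the image together with its boxes.
    img, boxes = random_expand(img, boxes, fill=(127.5, 127.5, 127.5), p=0.5)
    img, boxes, classes = random_crop(img, boxes, classes)
    img, boxes, _ = random_affine(img, boxes)
    return img, boxes, classes


img = np.array(Image.open('bg2017121301.jpg'))
img_t, boxes_t, classes_t = augment(img, boxes, np.array([0]))
plt.imshow(visual(img_t, boxes_t, [1.0] * len(boxes_t)))
```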