The following 17 code examples, extracted from open-source Python projects, illustrate how to use cv2.BORDER_REPLICATE.
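Before the extracted examples, here is a minimal standalone sketch (not taken from any of the projects below; the array values are purely illustrative) showing what BORDER_REPLICATE does: new border pixels are filled by repeating the nearest edge pixel of the source image.

import numpy as np
import cv2

# Minimal illustration: with BORDER_REPLICATE, cv2.copyMakeBorder fills the
# added border by copying the closest edge pixel of the input array.
src = np.array([[1, 2, 3],
                [4, 5, 6],
                [7, 8, 9]], dtype=np.uint8)
padded = cv2.copyMakeBorder(src, 1, 1, 1, 1, borderType=cv2.BORDER_REPLICATE)
# padded[0] is [1, 1, 2, 3, 3]: corner and edge values repeat outward.
print(padded)

The same border mode appears below as the borderMode argument of cv2.warpAffine, cv2.warpPerspective, and cv2.remap, and as the borderType argument of cv2.blur and cv2.GaussianBlur.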
def affine_skew(self, tilt, phi, img, mask=None):
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c, -s], [s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32(np.dot(corners, A.T))
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1, -1, 2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8 * np.sqrt(tilt * tilt - 1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai
def distort_affine_cv2(image, alpha_affine=10, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3

    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)

    M = cv2.getAffineTransform(pts1, pts2)
    distorted_image = cv2.warpAffine(
        image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE)  # cv2.BORDER_REFLECT_101)

    return distorted_image
def test_box_filter_edge(self):
    I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
    r = 2
    ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='edge')
    ret2 = cv2.blur(I, (5, 5), borderType=cv2.BORDER_REPLICATE)
    self.assertTrue(np.array_equal(ret1, ret2))
def get_normalized_image(img, rr, debug=False):
    box = cv2.boxPoints(rr)
    extbox = cv2.boundingRect(box)

    if extbox[2] * extbox[3] > img.shape[0] * img.shape[1]:
        print("Too big proposal: {0}x{1}".format(extbox[2], extbox[3]))
        return None, None

    extbox = [extbox[0], extbox[1], extbox[2], extbox[3]]
    extbox[2] += extbox[0]
    extbox[3] += extbox[1]
    extbox = np.array(extbox, np.int)

    extbox[0] = max(0, extbox[0])
    extbox[1] = max(0, extbox[1])
    extbox[2] = min(img.shape[1], extbox[2])
    extbox[3] = min(img.shape[0], extbox[3])

    tmp = img[extbox[1]:extbox[3], extbox[0]:extbox[2]]
    center = (tmp.shape[1] / 2, tmp.shape[0] / 2)
    rot_mat = cv2.getRotationMatrix2D(center, rr[2], 1)

    if tmp.shape[0] == 0 or tmp.shape[1] == 0:
        return None, rot_mat

    if debug:
        vis.draw_box_points(img, np.array(extbox, dtype="int"), color=(0, 255, 0))
        cv2.imshow('scaled', img)

    rot_mat[0, 2] += rr[1][0] / 2.0 - center[0]
    rot_mat[1, 2] += rr[1][1] / 2.0 - center[1]
    try:
        norm_line = cv2.warpAffine(tmp, rot_mat, (int(rr[1][0]), int(rr[1][1])), borderMode=cv2.BORDER_REPLICATE)
    except:
        return None, rot_mat
    return norm_line, rot_mat
def remap_image(name, img, small, page_dims, params):
    height = 0.5 * page_dims[1] * OUTPUT_ZOOM * img.shape[0]
    height = round_nearest_multiple(height, REMAP_DECIMATE)
    width = round_nearest_multiple(height * page_dims[0] / page_dims[1], REMAP_DECIMATE)

    print ' output will be {}x{}'.format(width, height)

    height_small = height / REMAP_DECIMATE
    width_small = width / REMAP_DECIMATE

    page_x_range = np.linspace(0, page_dims[0], width_small)
    page_y_range = np.linspace(0, page_dims[1], height_small)
    page_x_coords, page_y_coords = np.meshgrid(page_x_range, page_y_range)

    page_xy_coords = np.hstack((page_x_coords.flatten().reshape((-1, 1)),
                                page_y_coords.flatten().reshape((-1, 1))))
    page_xy_coords = page_xy_coords.astype(np.float32)

    image_points = project_xy(page_xy_coords, params)
    image_points = norm2pix(img.shape, image_points, False)

    image_x_coords = image_points[:, 0, 0].reshape(page_x_coords.shape)
    image_y_coords = image_points[:, 0, 1].reshape(page_y_coords.shape)

    image_x_coords = cv2.resize(image_x_coords, (width, height), interpolation=cv2.INTER_CUBIC)
    image_y_coords = cv2.resize(image_y_coords, (width, height), interpolation=cv2.INTER_CUBIC)

    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    remapped = cv2.remap(img_gray, image_x_coords, image_y_coords,
                         cv2.INTER_CUBIC, None, cv2.BORDER_REPLICATE)

    thresh = cv2.adaptiveThreshold(remapped, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, ADAPTIVE_WINSZ, 25)

    pil_image = Image.fromarray(thresh)
    pil_image = pil_image.convert('1')

    threshfile = name + '_thresh.png'
    pil_image.save(threshfile, dpi=(OUTPUT_DPI, OUTPUT_DPI))

    if DEBUG_LEVEL >= 1:
        height = small.shape[0]
        width = int(round(height * float(thresh.shape[1]) / thresh.shape[0]))
        display = cv2.resize(thresh, (width, height), interpolation=cv2.INTER_AREA)
        debug_show(name, 6, 'output', display)

    return threshfile
def frame(image, top=2, bottom=2, left=2, right=2, borderType=cv.BORDER_CONSTANT, color=[255, 0, 0]):
    '''
    Add borders around :image:.

    :param image: has to be in RGB color scheme. Use `convert_to_rgb` if it's in OpenCV BGR scheme.
    :param color: array representing an RGB color.
    :param borderType: other options are: cv.BORDER_REFLECT, cv.BORDER_REFLECT_101,
        cv.BORDER_DEFAULT, cv.BORDER_REPLICATE, cv.BORDER_WRAP
    '''
    return cv.copyMakeBorder(image, top, bottom, left, right, borderType, value=color)
def add_border(src):
    dst = np.empty(shape=(src.shape[0],
                          src.shape[1] + 2 * image_border,
                          src.shape[2] + 2 * image_border))
    for c in xrange(src.shape[0]):
        dst[c] = cv2.copyMakeBorder(src[c], top=image_border, bottom=image_border,
                                    left=image_border, right=image_border,
                                    borderType=cv2.BORDER_REPLICATE)
    return dst
def __call__(self, image, *args):
    size = self.size
    if self.type == 'constant':
        image = cv2.copyMakeBorder(image, size, size, size, size,
                                   cv2.BORDER_CONSTANT, value=self.constant_color)
    elif self.type == 'reflect':
        image = cv2.copyMakeBorder(image, size, size, size, size, cv2.BORDER_REFLECT)
    elif self.type == 'replicate':
        image = cv2.copyMakeBorder(image, size, size, size, size, cv2.BORDER_REPLICATE)
    if len(args):
        return (image, *args)
    else:
        return image
def _augment(self, img, s):
    return cv2.GaussianBlur(img, s, sigmaX=0, sigmaY=0, borderType=cv2.BORDER_REPLICATE)
def __init__(self, max_deg, center_range=(0, 1),
             interp=cv2.INTER_CUBIC, border=cv2.BORDER_REPLICATE):
    """
    :param max_deg: max abs value of the rotation degree
    :param center_range: the location of the rotation center
    """
    super(Rotation, self).__init__()
    self._init(locals())
def random_zoom_out(self, img, boxes):
    '''Randomly zoom out the image and adjust the bbox locations.

    For bbox (xmin, ymin, xmax, ymax), with zoom-out coefficient coef,
    the zoomed-out bbox is:
        ((1-coef)*w/2 + coef*xmin, (1-coef)*h/2 + coef*ymin,
         (1-coef)*w/2 + coef*xmax, (1-coef)*h/2 + coef*ymax)

    Args:
        img: (PIL.Image) image.
        boxes: (tensor) bbox locations, sized [#obj, 4].

    Return:
        img: (PIL.Image) randomly zoomed out image.
        boxes: (tensor) randomly zoomed out bbox locations, sized [#obj, 4].
    '''
    coef = random.uniform(0.5, 1)
    w = img.width
    h = img.height

    xmin = (1-coef)*w/2 + coef*boxes[:, 0]
    xmax = (1-coef)*w/2 + coef*boxes[:, 2]
    ymin = (1-coef)*h/2 + coef*boxes[:, 1]
    ymax = (1-coef)*h/2 + coef*boxes[:, 3]
    boxes[:, 0] = xmin
    boxes[:, 1] = ymin
    boxes[:, 2] = xmax
    boxes[:, 3] = ymax

    top = int(h/2*(1-coef)/coef)
    bottom = int(h/2*(1-coef)/coef)
    left = int(w/2*(1-coef)/coef)
    right = int(w/2*(1-coef)/coef)

    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_REPLICATE)
    img = cv2.resize(img, (w, h))
    return img, boxes
def align_face_3(im, key_points):
    '''
    Align face image by affine transformation. The transformation matrix is
    computed from 3 pairs of points.

    input:
        im: input image
        key_points: [(xi, yi)], list of 21-key-point or 106-key-point

    output:
        im_out
    '''
    key_points = np.array(key_points, dtype=np.float32)

    dst_points = np.array([[70.745, 112.0], [108.237, 112.0], [89.4324, 153.514]], dtype=np.float32)
    dst_sz = (178, 218)

    src_points = np.zeros((3, 2), dtype=np.float32)
    if key_points.shape[0] == 21:
        src_points[0] = key_points[16]
        src_points[1] = key_points[17]
        src_points[2] = (key_points[19] + key_points[20]) / 2.0
    elif key_points.shape[0] == 106:
        src_points[0] = key_points[104]
        src_points[1] = key_points[105]
        src_points[2] = (key_points[84] + key_points[90]) / 2.0
    else:
        raise Exception('invalid number of face keypoints')

    trans_mat = cv2.getAffineTransform(src_points, dst_points)
    im_out = cv2.warpAffine(im, trans_mat, dsize=dst_sz, borderMode=cv2.BORDER_REPLICATE)
    return im_out
def align_face_21(im, key_points):
    '''
    Align face image by affine transformation. The transformation matrix is
    computed from 21 pairs of points.

    input:
        im: input image
        key_points: [(xi, yi)], list of 21-key-point or 106-key-point

    output:
        im_out
    '''
    dst_sz = (178, 218)
    src_points = np.array(key_points, dtype=np.float32)
    assert src_points.shape[0] == 21, 'invalid number of face keypoints (21)'
    dst_points = mean_pose_21

    X = np.zeros((42, 4), dtype=np.float32)
    U = np.zeros((42, 1), dtype=np.float32)

    X[0:21, 0:2] = src_points
    X[0:21, 2] = 1
    X[21::, 0] = src_points[:, 1]
    X[21::, 1] = -src_points[:, 0]
    X[21::, 3] = 1

    U[0:21, 0] = dst_points[:, 0]
    U[21::, 0] = dst_points[:, 1]

    M = np.linalg.pinv(X).dot(U).flatten()
    trans_mat = np.array([[M[0], M[1], M[2]],
                          [-M[1], M[0], M[3]]], dtype=np.float32)

    im_out = cv2.warpAffine(im, trans_mat, dsize=dst_sz, borderMode=cv2.BORDER_REPLICATE)
    return im_out
def align_face_19(im, key_points):
    '''
    For AFLW. Align face image by affine transformation. The transformation
    matrix is computed from 19 pairs of points.
    '''
    dst_sz = (178, 218)
    src_points = np.array(key_points, dtype=np.float32)
    assert src_points.shape[0] == 19, 'invalid number of face keypoints (19)'
    src_points = src_points[0:18, :]
    dst_points = mean_pose_19

    X = np.zeros((36, 4), dtype=np.float32)
    U = np.zeros((36, 1), dtype=np.float32)

    X[0:18, 0:2] = src_points
    X[0:18, 2] = 1
    X[18::, 0] = src_points[:, 1]
    X[18::, 1] = -src_points[:, 0]
    X[18::, 3] = 1

    U[0:18, 0] = dst_points[:, 0]
    U[18::, 0] = dst_points[:, 1]

    M = np.linalg.pinv(X).dot(U).flatten()
    trans_mat = np.array([[M[0], M[1], M[2]],
                          [-M[1], M[0], M[3]]], dtype=np.float32)

    im_out = cv2.warpAffine(im, trans_mat, dsize=dst_sz, borderMode=cv2.BORDER_REPLICATE)
    return im_out
def compute_derivs_through_H(I, H):
    """ Compute derivatives after additional homography step.

    For J(x,y) = I( H (x,y) ), compute dJ/dx and dJ/dy.
    """
    h, w = I.shape[:2]
    Hinv = np.linalg.inv(H)

    H_xp1_only = np.array([[1.0, 0, -1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    H_xm1_only = np.array([[1.0, 0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    H_yp1_only = np.array([[1.0, 0, 0], [0.0, 1.0, -1.0], [0.0, 0.0, 1.0]])
    H_ym1_only = np.array([[1.0, 0, 0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])
    H_only_array = [H_xp1_only, H_xm1_only, H_yp1_only, H_ym1_only]
    [H_xp1, H_xm1, H_yp1, H_ym1] = [Hinv.dot(H_).dot(H) for H_ in H_only_array]

    y, x = np.mgrid[:h, :w]
    xy_ar = np.c_[x.flatten(), y.flatten()].astype('float')

    # Compute displacements of finite difference samples.
    xy_ar_xp1 = ptransform(xy_ar, H_xp1)
    xy_ar_xm1 = ptransform(xy_ar, H_xm1)
    xy_ar_yp1 = ptransform(xy_ar, H_yp1)
    xy_ar_ym1 = ptransform(xy_ar, H_ym1)

    d_xp1 = np.linalg.norm(xy_ar - xy_ar_xp1, axis=1).reshape((h, w))
    d_xm1 = np.linalg.norm(xy_ar - xy_ar_xm1, axis=1).reshape((h, w))
    d_yp1 = np.linalg.norm(xy_ar - xy_ar_yp1, axis=1).reshape((h, w))
    d_ym1 = np.linalg.norm(xy_ar - xy_ar_ym1, axis=1).reshape((h, w))

    if I.ndim > 2:
        d_xp1 = d_xp1[:, :, np.newaxis]
        d_xm1 = d_xm1[:, :, np.newaxis]
        d_yp1 = d_yp1[:, :, np.newaxis]
        d_ym1 = d_ym1[:, :, np.newaxis]

    I_xp1 = cv2.warpPerspective(I, H_xp1, (w, h), borderMode=cv2.BORDER_REPLICATE)
    I_xm1 = cv2.warpPerspective(I, H_xm1, (w, h), borderMode=cv2.BORDER_REPLICATE)
    I_yp1 = cv2.warpPerspective(I, H_yp1, (w, h), borderMode=cv2.BORDER_REPLICATE)
    I_ym1 = cv2.warpPerspective(I, H_ym1, (w, h), borderMode=cv2.BORDER_REPLICATE)

    dx = 0.5 * ((I_xp1 - I) * d_xp1 + (I - I_xm1) * d_xm1)
    dy = 0.5 * ((I_yp1 - I) * d_yp1 + (I - I_ym1) * d_ym1)

    return dx, dy
def interp_lin(I, xn, yn, compute_derivs=True):
    """ Perform linear interpolation of I.

    I is evaluated at xn, yn.

    Returns
    -------
    I_warped : array_like
        Warped image
    dI_warped_dx : array_like
        Derivative of warped image in x direction
    dI_warped_dy : array_like
        Derivative of warped image in y direction
    """
    I_warped = cv2.remap(I.astype('float32'),
                         xn.astype('float32'),
                         yn.astype('float32'),
                         borderMode=cv2.BORDER_REPLICATE,
                         interpolation=cv2.INTER_CUBIC)

    if compute_derivs:
        if True:
            dI_dy, dI_dx = np.gradient(I)[:2]
            dI_warped_dy = cv2.remap(dI_dy.astype('float32'),
                                     xn.astype('float32'),
                                     yn.astype('float32'),
                                     borderMode=cv2.BORDER_REPLICATE,
                                     interpolation=cv2.INTER_CUBIC)
            dI_warped_dx = cv2.remap(dI_dx.astype('float32'),
                                     xn.astype('float32'),
                                     yn.astype('float32'),
                                     borderMode=cv2.BORDER_REPLICATE,
                                     interpolation=cv2.INTER_CUBIC)
        else:
            dI_warped_dy, dI_warped_dx = np.gradient(I_warped)[:2]

        return I_warped, dI_warped_dx, dI_warped_dy

    # If we don't want to compute the derivatives
    return I_warped
def __image_generator(self):
    def id_generator(size=16, max_letter=6):
        _str = ''
        _letter_cnt = 0
        for i in range(size):
            if _letter_cnt < max_letter:
                _c = random.choice(string.ascii_uppercase + string.digits)
                if _c in string.ascii_uppercase:
                    _letter_cnt += 1
            else:
                _c = random.choice(string.digits)
            _str += _c
        return _str

    def blur_method(_im, m):
        if m == 0:
            return _im
        elif m == 1:
            return cv2.GaussianBlur(_im, (5, 5), 0)
        elif m == 2:
            return cv2.blur(_im, (5, 5))
        elif m == 3:
            return cv2.medianBlur(_im, 5)
        else:
            return _im

    def brightness(_im):
        _brightness_offset = np.random.randint(-50, 50)
        return cv2.convertScaleAbs(_im, alpha=1, beta=_brightness_offset)

    _dmtx = DMTX(shape=3)  # shape=3 is 16x16
    while True:
        # 022RDXBTH4001093
        _str = id_generator(16, 2)
        _dmtx.encode(_str)
        _im = np.array(_dmtx.image)  # [:,:,::-1]
        _im = cv2.cvtColor(_im, cv2.COLOR_RGB2GRAY)
        _im = cv2.resize(_im, (self.im_shape[1]-12, self.im_shape[0]-12))
        _h, _w = _im.shape[:2]

        # random micro rotation
        _angle = np.random.randint(-6, 6) / 2.0
        _rot_mat = cv2.getRotationMatrix2D((_w / 2, _h / 2), _angle, 1)
        _im = cv2.warpAffine(_im, _rot_mat, (_w, _h))

        # get label
        _label = cv2.resize(_im, (self.la_shape[1], self.la_shape[0]))

        # low-resolution
        _scale = np.random.choice(range(1, 6))
        _im = cv2.resize(_im, (0, 0), fx=1/float(_scale), fy=1/float(_scale))
        _im = cv2.resize(_im, (self.im_shape[1]-12, self.im_shape[0]-12))

        # add border, needed by the net: 112 -> 100
        _im = cv2.copyMakeBorder(_im, 6, 6, 6, 6, cv2.BORDER_REPLICATE)

        # add noise
        _im = blur_method(_im, np.random.choice(range(0, 4)))
        _im = brightness(_im)

        # to caffe data format
        _im = _im.astype(np.float32, copy=False)
        _label = _label.astype(np.float32, copy=False)
        _im *= 0.0039215684
        _label *= 0.0039215684

        yield _im, _label