We extracted the following 50 code examples from open source Python projects to illustrate how to use cv2.INTER_AREA.
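For orientation before the examples: cv2.INTER_AREA resamples using pixel-area relation, and OpenCV's documentation recommends it for shrinking (decimating) images. A minimal sketch of the basic call (the file name is a placeholder):

import cv2

# Shrink an image to half size. INTER_AREA averages over source-pixel
# areas, which avoids the moiré patterns that can appear when
# downscaling with INTER_LINEAR or INTER_CUBIC.
img = cv2.imread('input.jpg')  # placeholder path
small = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)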
def createTrainingInstances(self, images):
    start = time.time()
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        # print img
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        pairing = Instance(descriptor, label)
        instances.append(pairing)
    end = time.time() - start
    self.training_instances = instances
    print "HOG TRAIN SERIAL: %d images -> %f" % (len(images), end)
def createTestingInstances(self, images):
    start = time.time()
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        # print img
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        pairing = Instance(descriptor, label)
        instances.append(pairing)
    end = time.time() - start
    self.testing_instances = instances
    print "HOG TEST SERIAL: %d images -> %f" % (len(images), end)
def findSquare(self, frame):
    image = frame  # the body below refers to `image`; bind it to the argument
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 0)
    edged = cv2.Canny(blurred, 60, 60)

    # find contours in the edge map
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    # loop over our contours to find hexagon
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:50]
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.004 * peri, True)
        # if our approximated contour has four points, then
        # we can assume that we have found our square
        if len(approx) >= 4:
            screenCnt = approx
            x, y, w, h = cv2.boundingRect(c)
            cv2.drawContours(image, [approx], -1, (0, 0, 255), 1)
            # cv2.imshow("Screen", image)

            # create the mask and remove the rest of the background
            mask = np.zeros(image.shape[:2], dtype="uint8")
            cv2.drawContours(mask, [screenCnt], -1, 255, -1)
            masked = cv2.bitwise_and(image, image, mask=mask)
            # cv2.imshow("Masked", masked)

            # crop the masked image to be compared to the reference image
            cropped = masked[y:y + h, x:x + w]
            # scale the image so it is the same fixed size as the reference image
            cropped = cv2.resize(cropped, (200, 200),
                                 interpolation=cv2.INTER_AREA)
            return cropped
def get_whole_rotated_image(crop, mask, angle, crop_size, before_rotate_size, scale):
    # Better for larger: pixels_to_jitter = 35 * scale
    # For dates:
    pixels_to_jitter = 4  # old way
    center_x = before_rotate_size / 2 + (random.random() * pixels_to_jitter * 2) - pixels_to_jitter
    center_y = before_rotate_size / 2 + (random.random() * pixels_to_jitter * 2) - pixels_to_jitter
    rot_image = crop.copy()
    rot_image = rotate(rot_image, angle, center_x, center_y,
                       before_rotate_size, before_rotate_size)
    # This is hard-coded for 28x28: resize to 41x41, then crop the central 28x28.
    rot_image = cv2.resize(rot_image, (41, 41), interpolation=cv2.INTER_AREA)
    rot_image = rot_image[6:34, 6:34]
    # rot_image = rot_image * mask
    return rot_image
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """Resize an image so that its shorter side equals mini_size.

    :param img_path: path to the image
    :param mini_size: target length of the shorter side
    :param jpeg_quality: JPEG compression quality
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]  # note: shape[0] is the height (rows)
    img_h = org_img.shape[1]  # note: shape[1] is the width (columns)
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)  # cv2.resize expects (width, height)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    cv2.imwrite(img_name, r_image, params)
def resize_to_nearest_aspect_ratio(img, divide_base=4, resize_base=256):
    w, h = img.shape[0], img.shape[1]
    # print(w, h)
    if w < h:
        if resize_base == 0:
            resize_base = w - w % divide_base
        s0 = resize_base
        s1 = int(h * resize_base / w)
        s1 = s1 - s1 % divide_base
    else:
        if resize_base == 0:
            resize_base = h - h % divide_base
        s1 = resize_base
        s0 = int(w * resize_base / h)
        s0 = s0 - s0 % divide_base
    # print(s1, s0)
    return cv2.resize(img, (s1, s0), interpolation=cv2.INTER_AREA)

# Input imgs format: (batch, channels, width, height)
def _modify_observation(self, observation):
    # convert color to grayscale using the luma components
    observation = (
        observation[:, :, 0] * 0.299 +
        observation[:, :, 1] * 0.587 +
        observation[:, :, 2] * 0.114
    )
    observation = cv2.resize(
        observation, (84, 110), interpolation=cv2.INTER_AREA
    )
    observation = observation[18:102, :]
    assert observation.shape == (84, 84)
    # store as 8-bit integer values (0-255)
    observation = np.array(observation, dtype=np.uint8)
    return observation
def scale(self):
    self.original_image = self.image.copy()
    self.image_height, self.image_width = self.image.shape[:2]
    if max(self.image_width, self.image_height) > MAX_DIMENSION:
        # Need to shrink
        if self.image_width > self.image_height:
            new_width = MAX_DIMENSION
            new_height = int(self.image_height * new_width / self.image_width)
        else:
            new_height = MAX_DIMENSION
            new_width = int(self.image_width * new_height / self.image_height)
        print 'Resizing to {}x{}'.format(new_width, new_height)
        self.image = cv2.resize(self.image, (new_width, new_height),
                                interpolation=cv2.INTER_AREA)
        self.image_height, self.image_width = self.image.shape[:2]
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)  # @UndefinedVariable
    return im_data

    # This method is kept for debugging purpose
    # h = img.shape[0]
    # w = img.shape[1]
    # hs, ws = sz
    # dx = float(w) / ws
    # dy = float(h) / hs
    # im_data = np.zeros((hs, ws, 3))
    # for a1 in range(0, hs):
    #     for a2 in range(0, ws):
    #         for a3 in range(0, 3):
    #             im_data[a1, a2, a3] = img[int(floor(a1 * dy)), int(floor(a2 * dx)), a3]
    # return im_data
def __init__(self, img):
    # making two copies of the same image
    original_img = np.array(img)
    new_img = np.array(img)

    # resizing keeping the aspect ratio constant
    a_ratio = new_img.shape[0] / new_img.shape[1]
    # new_row = int(new_img.shape[0])
    new_row = 128
    new_colm = int(new_row / a_ratio)
    new_img = cv2.resize(new_img, (new_colm, new_row), interpolation=cv2.INTER_AREA)
    original_img = cv2.resize(original_img, (new_colm, new_row), interpolation=cv2.INTER_AREA)

    # convert the new copy to grayscale
    new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
    self.original_img = original_img
    self.new_img = new_img
def output_resized_mask():
    import pandas as pd
    os.makedirs('output/resize', exist_ok=True)
    imgs = [0, 10, 20, 30]
    df = pd.read_csv(TRAIN_INDEX)
    for i in imgs:
        fns = df.iloc[i]
        img_fn = fns['img']
        mask_fn = fns['mask']
        print('mask_fn', mask_fn)
        mask = carvana_pad_to_std(np.load(mask_fn))
        for downsample in [1.0, 1.5, 2.0, 4.0]:
            h = int(1280 / downsample)
            w = int(1920 / downsample)
            out_fn = os.path.join('output/resize/{}_{}x{}.png'.format(i, w, h))
            print(mask.shape)
            print((h, w))
            m = cv2.resize(mask, dsize=(w, h), interpolation=cv2.INTER_AREA)
            print(m.shape)
            draw_mask(out_fn, img_fn, mask_fn, m)
def get_mnist_data(is_train, image_size, batchsize):
    ds = MNISTCh('train' if is_train else 'test', shuffle=True)

    if is_train:
        augs = [
            imgaug.RandomApplyAug(imgaug.RandomResize((0.8, 1.2), (0.8, 1.2)), 0.3),
            imgaug.RandomApplyAug(imgaug.RotationAndCropValid(15), 0.5),
            imgaug.RandomApplyAug(imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01), 0.25),
            imgaug.Resize((224, 224), cv2.INTER_AREA)
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 128 * 10, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 256, 4)
    else:
        # no augmentation, only resizing
        augs = [
            imgaug.Resize((image_size, image_size), cv2.INTER_CUBIC),
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 20, 2)
    return ds
def get_heatmap(self, target_size):
    heatmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width))

    for joints in self.joint_list:
        for idx, point in enumerate(joints):
            if point[0] < 0 or point[1] < 0:
                continue
            CocoMetadata.put_heatmap(heatmap, idx, point, self.sigma)

    heatmap = heatmap.transpose((1, 2, 0))

    # background channel
    heatmap[:, :, -1] = np.clip(1 - np.amax(heatmap, axis=2), 0.0, 1.0)

    if target_size:
        heatmap = cv2.resize(heatmap, target_size, interpolation=cv2.INTER_AREA)

    return heatmap
def linearToPolar(img, center=None,
                  final_radius=None,
                  initial_radius=None,
                  phase_width=None,
                  interpolation=cv2.INTER_AREA, maps=None,
                  borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
    '''
    map a 2d (x, y) Cartesian array to a polar (r, phi) array
    using opencv.remap
    '''
    if maps is None:
        mapY, mapX = linearToPolarMaps(img.shape[:2], center, final_radius,
                                       initial_radius, phase_width)
    else:
        mapY, mapX = maps

    o = {'interpolation': interpolation,
         'borderValue': borderValue,
         'borderMode': borderMode}
    o.update(opts)
    return cv2.remap(img, mapY, mapX, **o)
def polarToLinear(img, shape=None, center=None, maps=None,
                  interpolation=cv2.INTER_AREA,
                  borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
    '''
    map a 2d polar (r, phi) array to a Cartesian (x, y) array
    using opencv.remap
    '''
    if maps is None:
        mapY, mapX = polarToLinearMaps(img.shape[:2], shape, center)
    else:
        mapY, mapX = maps

    o = {'interpolation': interpolation,
         'borderValue': borderValue,
         'borderMode': borderMode}
    o.update(opts)
    return cv2.remap(img, mapY, mapX, **o)
def resize_to_screen(src, maxw=1380, maxh=600, copy=False):
    height, width = src.shape[:2]
    scl_x = float(width) / maxw
    scl_y = float(height) / maxh
    scl = int(np.ceil(max(scl_x, scl_y)))
    if scl > 1.0:
        inv_scl = 1.0 / scl
        img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)
    elif copy:
        img = src.copy()
    else:
        img = src
    return img
def resize_to_screen(src, maxw=1280, maxh=700, copy=False):
    height, width = src.shape[:2]
    scl_x = float(width) / maxw
    scl_y = float(height) / maxh
    scl = int(np.ceil(max(scl_x, scl_y)))
    if scl > 1.0:
        inv_scl = 1.0 / scl
        img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)
    elif copy:
        img = src.copy()
    else:
        img = src
    return img
def crop_lowres(self, cv_image):
    self.ltob.d_img_raw_npy = np.asarray(cv_image)
    if self.instance_type == 'main':
        img = cv2.resize(cv_image, (0, 0), fx=1 / 16., fy=1 / 16.,
                         interpolation=cv2.INTER_AREA)
        startrow = 3
        startcol = 27
        img = imutils.rotate_bound(img, 180)
    else:
        img = cv2.resize(cv_image, (0, 0), fx=1 / 15., fy=1 / 15.,
                         interpolation=cv2.INTER_AREA)
        startrow = 2
        startcol = 27
    endcol = startcol + 64
    endrow = startrow + 64
    # crop image:
    img = img[startrow:endrow, startcol:endcol]
    assert img.shape == (64, 64, 3)
    return img
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)  # pylint: disable=no-member
    return im_data

    # This method is kept for debugging purpose
    # h = img.shape[0]
    # w = img.shape[1]
    # hs, ws = sz
    # dx = float(w) / ws
    # dy = float(h) / hs
    # im_data = np.zeros((hs, ws, 3))
    # for a1 in range(0, hs):
    #     for a2 in range(0, ws):
    #         for a3 in range(0, 3):
    #             im_data[a1, a2, a3] = img[int(floor(a1 * dy)), int(floor(a2 * dx)), a3]
    # return im_data
def imresize(img, scale):
    """Depending on if we scale the image up or down, we use an
    interpolation technique as per OpenCV recommendation.

    :param img: 3D numpy array of image.
    :param scale: float to scale image by in both axes.
    """
    if scale > 1.0:
        # use cubic interpolation for upscaling.
        img = cv2.resize(img, None, interpolation=cv2.INTER_CUBIC,
                         fx=scale, fy=scale)
    elif scale < 1.0:
        # area relation sampling for downscaling.
        img = cv2.resize(img, None, interpolation=cv2.INTER_AREA,
                         fx=scale, fy=scale)
    return img
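A minimal usage sketch for the imresize helper above (the input array here is synthetic): the same call covers both directions, with the interpolation picked per OpenCV's recommendation.

import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)  # synthetic test image
half = imresize(img, 0.5)     # scale < 1 -> INTER_AREA (downscale)
double = imresize(img, 2.0)   # scale > 1 -> INTER_CUBIC (upscale)
print(half.shape, double.shape)  # (50, 50, 3) (200, 200, 3)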
def test_crop_bb():
    # Given one sample image and the following parameters
    image = helpers.get_one_sample_image()
    parameters = {"bb": (0, 10, 10, 20),
                  "pad": 2,
                  "desired_size": (6, 6),
                  }

    # When perform crop_bb()
    patch = utils.crop_bb(image,
                          bb=parameters["bb"],
                          padding=parameters["pad"],
                          dst_size=parameters["desired_size"])

    # Then it should be same with manually cropped one.
    bb = parameters["bb"]
    pad = parameters["pad"]
    desired_size = parameters["desired_size"]
    crop_manual = image[max(bb[0], bb[0] - pad): min(image.shape[0], bb[1] + pad),
                        max(bb[2], bb[2] - pad): min(image.shape[1], bb[3] + pad)]
    crop_manual = cv2.resize(crop_manual, desired_size, interpolation=cv2.INTER_AREA)

    # compare element-wise; the original `patch.all() == crop_manual.all()`
    # only compared two booleans and could pass spuriously
    assert np.array_equal(patch, crop_manual), "utils.crop_bb() unit test failed!!"
def CalculateDim(data, V, c, screen_width, screen_height, datarate):
    n_points = data.shape[1]
    m_lines = data.shape[0]
    time0 = n_points / datarate
    Ltotal = points2range(n_points, datarate, c)
    Lpx = Ltotal / n_points
    Hpx = V * time0 / 2
    Htotal = Hpx * m_lines
    scale = Hpx / Lpx
    if screen_width == -1:
        screen_width = round(screen_height / scale)
    elif screen_height == -1:
        screen_height = round(screen_width * scale)
    dim = (screen_width, screen_height)
    # data = cv2.resize(data, dim, interpolation=cv2.INTER_AREA)
    return dim
def read_img(path, s_size):
    image1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if image1.shape[0] < image1.shape[1]:
        s0 = s_size
        s1 = int(image1.shape[1] * (s_size / image1.shape[0]))
        s1 = s1 - s1 % 16
    else:
        s1 = s_size
        s0 = int(image1.shape[0] * (s_size / image1.shape[1]))
        s0 = s0 - s0 % 16
    image1 = np.asarray(image1, np.float32)
    image1 = cv2.resize(image1, (s1, s0), interpolation=cv2.INTER_AREA)
    if image1.ndim == 2:
        image1 = image1[:, :, np.newaxis]
    return image1.transpose(2, 0, 1), False
def resize_sample(sample, shape=None, use_interp=True, scale=None):
    if (shape and scale) or (not shape and not scale):
        raise ValueError('Must specify exactly one of shape or scale, '
                         'but got shape=\'{}\', scale=\'{}\''.format(shape, scale))

    # Use INTER_AREA for shrinking and INTER_LINEAR for enlarging:
    interp = cv2.INTER_NEAREST
    if use_interp:
        # targetWidth < sampleWidth
        target_is_smaller = (shape and shape[1] < sample.shape[1]) or (scale and scale < 1)
        interp = cv2.INTER_AREA if target_is_smaller else cv2.INTER_LINEAR

    if shape:
        resized = cv2.resize(sample, (shape[1], shape[0]), interpolation=interp)
    else:
        resized = cv2.resize(sample, None, fx=scale, fy=scale, interpolation=interp)
    return resized
def scale(image, new_size, kind='width'):
    '''
    resize :image: to :new_size: while preserving aspect ratio.
    '''
    # obtain image height & width.
    h, w, channels = image.shape
    # aspect ratio = original width / original height.
    aspect_ratio = w / h
    if kind == 'width':
        if w > new_size:
            # adjusted height.
            new_height = int(new_size // aspect_ratio)
            # INTER_AREA as the resizing algorithm parameter.
            return cv.resize(image, (new_size, new_height),
                             interpolation=cv.INTER_AREA)
        return image  # already no wider than new_size
    elif kind == 'height':
        if h > new_size:
            # adjusted width; width = height * aspect_ratio (the original
            # divided by the ratio here, which inverted it).
            new_width = int(new_size * aspect_ratio)
            # INTER_AREA as the resizing algorithm parameter.
            return cv.resize(image, (new_width, new_size),
                             interpolation=cv.INTER_AREA)
        return image  # already no taller than new_size
    else:
        raise ValueError('Not supported option.')
def resize(images, size=(100, 100)):
    """ Function to resize the number of pixels in an image.

    To achieve a standardized pixel number across different images, it is
    desirable to make every picture of the same pixel size. By using an
    OpenCV method we increase or reduce the number of pixels accordingly.
    """
    images_norm = []
    for image in images:
        is_color = len(image.shape) == 3
        if is_color:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # use a different OpenCV method if enlarging or shrinking
        # (note: this compares shape tuples lexicographically, and the
        # branches are inverted relative to OpenCV's usual advice of
        # INTER_AREA for shrinking and INTER_CUBIC for enlarging)
        if image.shape < size:
            image_norm = cv2.resize(image, size, interpolation=cv2.INTER_AREA)
        else:
            image_norm = cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)
        images_norm.append(image_norm)
    return images_norm
def random_augmentation(image, crop_size, resize_size):
    image = image.transpose(1, 2, 0)
    h, w, _ = image.shape

    # random cropping at a randomly chosen position
    if crop_size != h:
        top = random.randint(0, h - crop_size - 1)
        left = random.randint(0, w - crop_size - 1)
        bottom = top + crop_size
        right = left + crop_size
        image = image[top:bottom, left:right, :]

    # random flipping
    if random.randint(0, 1):
        image = image[:, ::-1, :]

    # resize if a different output size was requested (the original
    # discarded the cv2.resize result; it must be assigned back)
    if resize_size != crop_size:
        image = cv2.resize(image, (resize_size, resize_size),
                           interpolation=cv2.INTER_AREA)
    return image.transpose(2, 0, 1)
def multi_feat_match(template, image, options=None):
    """
    Match template and image by extracting multiple features (specified) from it.

    :param template: Template image
    :param image: Search image
    :param options: Options include
        - features: List of options for each feature
    :return:
    """
    h, w = image.shape[:2]
    scale = 1
    if options is not None and 'features' in options:
        heatmap = np.zeros((h, w), dtype=np.float64)
        for foptions in options['features']:
            f_hmap, _ = feature_match(template, image, foptions)
            heatmap += cv.resize(f_hmap, (w, h), interpolation=cv.INTER_AREA)
        heatmap /= len(options['features'])
    else:
        heatmap, scale = feature_match(template, image, options)
    return heatmap, scale
def generate_data(train_path, test_path):
    index = 1
    output_index = 1
    for (dirpath, dirnames, filenames) in os.walk(input_path):
        # shuffle the files in each folder; per group of 10, 8 go to
        # training and 2 to testing
        random.shuffle(filenames)
        for filename in filenames:
            if filename.endswith('.bmp'):
                img_path = dirpath + '/' + filename
                # read the image with OpenCV
                img_data = cv2.imread(img_path)
                # resize the image to 28 * 28
                img_data = cv2.resize(img_data, (28, 28), interpolation=cv2.INTER_AREA)
                if index < 3:
                    cv2.imwrite(test_path + '/' + str(output_index) + '/' + str(index) + '.jpg', img_data)
                    index += 1
                elif 10 >= index >= 3:
                    cv2.imwrite(train_path + '/' + str(output_index) + '/' + str(index) + '.jpg', img_data)
                    index += 1
                if index > 10:
                    output_index += 1
                    index = 1
def feed(self, im):
    assert im.dtype == np.uint8
    im = cv2.resize(im, dsize=self.im_shape, interpolation=cv2.INTER_AREA)
    im = im - 127.5

    ss = [None] * len(self.Vs)
    cs = [None] * len(self.Vs)
    inp = im
    for i, Vn in enumerate(self.Vs):
        n = i + 1
        if self.Vs[i].use_feedback and (i + 1) < len(self.Vs):
            context = self.gen_context(self.Vs[i + 1],
                                       self.im_shape[0] / self.Vs[0].Xb / (2 ** i),
                                       self.im_shape[1] / self.Vs[0].Yb / (2 ** i))
        else:
            context = None  # top level doesn't have any feedback
        s, c = self.Vs[i].sparsify(inp, context=context)
        ss[i] = s
        cs[i] = c
        if c is None:
            # sparsify returns None if a layer isn't trained enough to return a response
            break
        if n < len(self.Vs):
            # input for next level
            inp = self.group_NxN_input(c, 2, self.Vs[0].K + 1,
                                       self.im_shape[0] / self.Vs[0].Xb / (2 ** i),
                                       self.im_shape[1] / self.Vs[0].Yb / (2 ** i))
    return ss, cs
def createTrainingInstances(self, images):
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        print img
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        pairing = Instance(descriptor, label)
        instances.append(pairing)
    self.training_instances = instances
def createTestingInstances(self, images):
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        print img
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        pairing = Instance(descriptor, label)
        instances.append(pairing)
    self.testing_instances = instances
def local_hog(image):
    HOGDESC = cv2.HOGDescriptor()
    img, label = image
    img = read_color_image(img)
    img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
    descriptor = HOGDESC.compute(img)
    if descriptor is None:
        descriptor = []
    else:
        descriptor = descriptor.ravel()
    pairing = Instance(descriptor, label)
    return pairing
def local_par_hog(images):
    inst = list()
    HOGDESC = cv2.HOGDescriptor()
    for image in images:
        img, label = image
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = HOGDESC.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        pairing = Instance(descriptor, label)
        inst.append(pairing)
    return inst
def compare_io(X_val, model_raw, directory='%s/image_comparison' % cfg['dir']['validation']):
    # Creates tensors to compare to source images, plots both side by side,
    # and saves the plots
    road = Roadgen(cfg)

    curves_to_print = model_raw.shape[0]

    # reshaping the model output vector to make it easier to work with
    model_out = road.model_interpret(model_raw)
    print('predictions denormalized')

    # initialize the model view tensor
    model_view = np.zeros((curves_to_print, road.input_size[1],
                           road.input_size[0], road.n_channels), dtype=np.uint8)

    for prnt_i in range(curves_to_print):
        patch = road.road_generator(model_out[prnt_i], road.line_width, rand_gen=0)
        model_view[prnt_i] = cv2.resize(patch, road.input_size,
                                        interpolation=cv2.INTER_AREA)

    road.save_images(X_val, model_view, directory)

# Plots the curves against the training data and saves the plots to files.
# Because the model outputs are not put through a drawing function, this makes
# the model output data easier for audiences to understand.
# FIXME: function is still built to work like v1 generation and may also have
# bugs in the plotter function
def resize(image, width=None, height=None, interpolation=cv2.INTER_AREA):
    # initialize the dimensions of the image to be resized and grab the image size
    dim = None
    (h, w) = image.shape[:2]

    # if both the width and height are None, then return the original image
    if width is None and height is None:
        return image

    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)

    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # resize the image
    resized = cv2.resize(image, dim, interpolation=interpolation)

    # return the resized image
    return resized
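A minimal usage sketch for the resize helper above (the file name is a placeholder): pass only one of width or height, and the other dimension follows from the aspect ratio.

import cv2

img = cv2.imread('photo.jpg')    # placeholder path
thumb = resize(img, width=320)   # height is derived from the aspect ratio
print(thumb.shape)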
def load_images(queue: PriorityQueue, source: int, file_path: str,
                target_width: int, target_height: int,
                display_progress: bool=False):
    window = 'image'
    if display_progress:
        cv2.namedWindow(window)

    for file in iglob(path.join(file_path, '**', '*.jpg'), recursive=True):
        buffer = cv2.imread(file)
        buffer = cv2.resize(buffer, (target_width, target_height),
                            interpolation=cv2.INTER_AREA)
        random_priority = random()
        queue.put((random_priority, (buffer, source)))

        if display_progress:
            cv2.imshow(window, buffer)
            if (cv2.waitKey(33) & 0xff) == 27:
                break

    if display_progress:
        cv2.destroyWindow(window)
def pad(im, scale=1.3, size=224):
    """Pad im with pad_value (128)"""
    h = im.shape[0]
    w = im.shape[1]
    pad_value = 128
    if h > scale * w:
        scale = 1.0 * h / size
        new_im = cv2.resize(im, (int(w / scale), size), interpolation=cv2.INTER_AREA)
        pad_size = (size - new_im.shape[1]) // 2  # integer division: np.pad needs ints
        padding = ((0, 0), (pad_size, pad_size), (0, 0))
        new_im = np.pad(new_im, padding, mode='constant',
                        constant_values=(pad_value, pad_value))
        return new_im
    if w > scale * h:
        scale = 1.0 * w / size
        new_im = cv2.resize(im, (size, int(h / scale)), interpolation=cv2.INTER_AREA)
        pad_size = (size - new_im.shape[0]) // 2  # integer division: np.pad needs ints
        padding = ((pad_size, pad_size), (0, 0), (0, 0))
        new_im = np.pad(new_im, padding, mode='constant',
                        constant_values=(pad_value, pad_value))
        return new_im
    return im
def pad(im, scale=10, size=224):
    """Pad im with pad_value"""
    pad_value = 128
    h = im.shape[0]
    w = im.shape[1]
    if h > scale * w:
        scale = 1.0 * h / size
        # new_im = im
        new_im = cv2.resize(im, (int(w / scale), size), interpolation=cv2.INTER_AREA)
        pad_size = (size - new_im.shape[1]) // 2  # integer division: np.pad needs ints
        padding = ((0, 0), (pad_size, pad_size), (0, 0))
        new_im = np.pad(new_im, padding, mode='constant',
                        constant_values=(pad_value, pad_value))
        return new_im
    if w > scale * h:
        scale = 1.0 * w / size
        # new_im = im
        new_im = cv2.resize(im, (size, int(h / scale)), interpolation=cv2.INTER_AREA)
        pad_size = (size - new_im.shape[0]) // 2  # integer division: np.pad needs ints
        padding = ((pad_size, pad_size), (0, 0), (0, 0))
        new_im = np.pad(new_im, padding, mode='constant',
                        constant_values=(pad_value, pad_value))
        return new_im
    return cv2.resize(im, (size, size), interpolation=cv2.INTER_AREA)