We extracted the following 50 code examples from open-source Python projects to illustrate how to use skimage.transform().
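As a starting point, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two calls that dominate these examples: skimage.transform.resize to a fixed shape, and skimage.transform.warp driven by a geometric transform. The test image from skimage.data and the shift amounts are arbitrary.

import skimage.data
import skimage.transform

img = skimage.data.astronaut()                     # (512, 512, 3) uint8
small = skimage.transform.resize(img, (224, 224))  # float image scaled to [0, 1] by default

# Shift the content 20 px right and 10 px down. warp() interprets the transform
# as a mapping from output coordinates to input coordinates, so the translation
# is the negative of the visual shift.
tform = skimage.transform.SimilarityTransform(translation=(-20, -10))
shifted = skimage.transform.warp(img, tform, mode='constant')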
def load_image_array_flowers(image_file, image_size):
    img = skimage.io.imread(image_file)
    # GRAYSCALE
    if len(img.shape) == 2:
        img_new = np.ndarray((img.shape[0], img.shape[1], 3), dtype='uint8')
        img_new[:, :, 0] = img
        img_new[:, :, 1] = img
        img_new[:, :, 2] = img
        img = img_new

    img_resized = skimage.transform.resize(img, (image_size, image_size))

    # FLIP HORIZONTAL WITH A PROBABILITY 0.5
    if random.random() > 0.5:
        img_resized = np.fliplr(img_resized)

    return img_resized.astype('float32')
def load_image(path):
    # load image
    img = skimage.io.imread(path)
    img = img / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()
    # print "Original Image Shape: ", img.shape
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
    # resize to 224, 224
    resized_img = skimage.transform.resize(crop_img, (224, 224))
    return resized_img


# returns the top1 string
def load_image2(path, height=None, width=None):
    # load image
    img = skimage.io.imread(path)
    img = img / 255.0
    if height is not None and width is not None:
        ny = height
        nx = width
    elif height is not None:
        ny = height
        # cast to int so resize() receives an integer output shape
        nx = int(img.shape[1] * ny / img.shape[0])
    elif width is not None:
        nx = width
        ny = int(img.shape[0] * nx / img.shape[1])
    else:
        ny = img.shape[0]
        nx = img.shape[1]
    return skimage.transform.resize(img, (ny, nx))
def load_augment(fname, w, h, aug_params=no_augmentation_params,
                 transform=None, sigma=0.0, color_vec=None):
    """Load augmented image with output shape (w, h).

    Default arguments return non augmented image of shape (w, h).
    To apply a fixed transform (color augmentation) specify transform (color_vec).
    To generate a random augmentation specify aug_params and sigma.
    """
    img = load_image(fname)
    img = perturb(img, augmentation_params=aug_params, target_shape=(w, h))
    #if transform is None:
    #    img = perturb(img, augmentation_params=aug_params, target_shape=(w, h))
    #else:
    #    img = perturb_fixed(img, tform_augment=transform, target_shape=(w, h))
    #randString = str(np.random.normal(0,1,1))
    #im = Image.fromarray(img.transpose(1,2,0).astype('uint8'))
    #figName = fname.split("/")[-1]
    #im.save("imgs/"+figName+randString+".jpg")
    np.subtract(img, MEAN[:, np.newaxis, np.newaxis], out=img)
    #np.divide(img, STD[:, np.newaxis, np.newaxis], out=img)
    #img = augment_color(img, sigma=sigma, color_vec=color_vec)
    return img
def load_augment(fname, w, h, aug_params=no_augmentation_params,
                 transform=None, sigma=0.0, color_vec=None):
    """Load augmented image with output shape (w, h).

    Default arguments return non augmented image of shape (w, h).
    To apply a fixed transform (color augmentation) specify transform (color_vec).
    To generate a random augmentation specify aug_params and sigma.
    """
    img = load_image(fname)
    if transform is None:
        img = perturb(img, augmentation_params=aug_params, target_shape=(w, h))
    else:
        img = perturb_fixed(img, tform_augment=transform, target_shape=(w, h))
    np.subtract(img, MEAN[:, np.newaxis, np.newaxis], out=img)
    np.divide(img, STD[:, np.newaxis, np.newaxis], out=img)
    img = augment_color(img, sigma=sigma, color_vec=color_vec)
    return img
def build_centering_transform(image_shape, target_shape):
    """Image centering transform

    Args:
        image_shape: tuple(rows, cols), input image shape
        target_shape: tuple(rows, cols), output image shape

    Returns:
        a centering transform instance
    """
    rows, cols = image_shape
    trows, tcols = target_shape
    shift_x = (cols - tcols) / 2.0
    shift_y = (rows - trows) / 2.0
    return skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))
def build_center_uncenter_transforms(image_shape):
    """Center/uncenter transforms

    These are used to ensure that zooming and rotation happen around the
    center of the image. Use these transforms to center and uncenter the
    image around such a transform.

    Args:
        image_shape: tuple(rows, cols), input image shape

    Returns:
        a center and an uncenter transform instance
    """
    center_shift = np.array([image_shape[1], image_shape[0]]) / 2.0 - 0.5  # need to swap rows and cols here apparently! confusing!
    tform_uncenter = skimage.transform.SimilarityTransform(translation=-center_shift)
    tform_center = skimage.transform.SimilarityTransform(translation=center_shift)
    return tform_center, tform_uncenter
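The center/uncenter idiom above is easiest to see in use. The following is a minimal sketch (not from the original project), assuming build_center_uncenter_transforms above is in scope; the helper name demo_rotate_about_center and the 30-degree angle are purely illustrative. Note that warp(), like the fast_warp helpers elsewhere on this page, applies the transform as the output-to-input coordinate map.

import numpy as np
import skimage.data
import skimage.transform

def demo_rotate_about_center(img, degrees=30):
    # hypothetical helper: warp about the image center using the
    # center/uncenter idiom from build_center_uncenter_transforms
    tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape[:2])
    tform_rotate = skimage.transform.AffineTransform(rotation=np.deg2rad(degrees))
    # composition applies the left-hand transform first:
    # move the center to the origin, rotate, move it back
    tform = tform_uncenter + tform_rotate + tform_center
    return skimage.transform.warp(img, tform, mode='constant')

rotated = demo_rotate_about_center(skimage.data.astronaut())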
def perturb_fixed(img, tform_augment, target_shape=(50, 50), mode='constant', mode_cval=0):
    """Perturb an image deterministically.

    Applies a fixed augmentation transform with deterministic params;
    used for validation/testing data.

    Args:
        img: a `ndarray`, input image
        tform_augment: the augmentation transform instance to apply
        target_shape: a tuple(rows, cols), output image shape
        mode: mode for transformation
            available modes: {`constant`, `edge`, `symmetric`, `reflect`, `wrap`}
        mode_cval: float, used in conjunction with mode `constant`,
            the value outside the image boundaries

    Returns:
        a `ndarray` of transformed image
    """
    shape = img.shape[1:]
    tform_centering = build_centering_transform(shape, target_shape)
    tform_center, tform_uncenter = build_center_uncenter_transforms(shape)
    # shift to center, augment, shift back (for the rotation/shearing)
    tform_augment = tform_uncenter + tform_augment + tform_center
    return fast_warp(img, tform_centering + tform_augment,
                     output_shape=target_shape, mode=mode, mode_cval=mode_cval)
def transform(patch, flip=False, mirror=False, rotations=[]):
    """Perform data augmentation on a patch.

    Args:
        patch (numpy array): The patch to be processed.
        flip (bool, optional): Up/down symmetry.
        mirror (bool, optional): Left/right symmetry.
        rotations (int list, optional): Rotations to perform (angles in deg).

    Returns:
        array list: list of augmented patches
    """
    transformed_patches = [patch]
    for angle in rotations:
        transformed_patches.append(skimage.img_as_ubyte(skimage.transform.rotate(patch, angle)))
    if flip:
        transformed_patches.append(np.flipud(patch))
    if mirror:
        transformed_patches.append(np.fliplr(patch))
    return transformed_patches


# In[4]:
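A small usage sketch for the helper above (assuming it and its numpy/skimage imports are in scope); the random patch is just a stand-in for real data.

import numpy as np

patch = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
augmented = transform(patch, flip=True, mirror=True, rotations=[90, 180, 270])
print(len(augmented))  # 1 original + 3 rotations + 1 flip + 1 mirror = 6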
def load_image(path, size=224):
    # load image
    img = skimage.io.imread(path)
    img = img / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()
    # print "Original Image Shape: ", img.shape
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
    # resize to 224, 224
    resized_img = skimage.transform.resize(crop_img, (size, size))
    return resized_img


# returns the top1 string
def rescale(img, input_height, input_width):
    print("Original image shape:" + str(img.shape) + " and remember it should be in H, W, C!")
    print("Model's input shape is %dx%d" % (input_height, input_width))
    aspect = img.shape[1] / float(img.shape[0])
    print("Original aspect ratio: " + str(aspect))
    if aspect > 1:
        # landscape orientation - wide image
        res = int(aspect * input_height)
        imgScaled = skimage.transform.resize(img, (input_width, res))
    if aspect < 1:
        # portrait orientation - tall image
        res = int(input_width / aspect)
        imgScaled = skimage.transform.resize(img, (res, input_height))
    if aspect == 1:
        imgScaled = skimage.transform.resize(img, (input_width, input_height))
    # pyplot.figure()
    # pyplot.imshow(imgScaled)
    # pyplot.axis('on')
    # pyplot.title('Rescaled image')
    print("New image shape:" + str(imgScaled.shape) + " in HWC")
    return imgScaled
def build_rescale_transform_slow(downscale_factor, image_shape, target_shape):
    """
    This mimics the skimage.transform.resize function.
    The resulting image is centered.
    """
    rows, cols = image_shape
    trows, tcols = target_shape
    col_scale = row_scale = downscale_factor
    src_corners = np.array([[1, 1], [1, rows], [cols, rows]]) - 1
    dst_corners = np.zeros(src_corners.shape, dtype=np.double)
    # take into account that 0th pixel is at position (0.5, 0.5)
    dst_corners[:, 0] = col_scale * (src_corners[:, 0] + 0.5) - 0.5
    dst_corners[:, 1] = row_scale * (src_corners[:, 1] + 0.5) - 0.5

    tform_ds = skimage.transform.AffineTransform()
    tform_ds.estimate(src_corners, dst_corners)

    # centering
    shift_x = cols / (2.0 * downscale_factor) - tcols / 2.0
    shift_y = rows / (2.0 * downscale_factor) - trows / 2.0
    tform_shift_ds = skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))
    return tform_shift_ds + tform_ds
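A hedged usage sketch for build_rescale_transform_slow (not from the original project): the composed transform can be handed to the public skimage.transform.warp, which treats it as an output-to-input coordinate map, just like the fast_warp helpers elsewhere on this page. The 2x downscale factor and the random 256x256 image are arbitrary.

import numpy as np
import skimage.transform

image = np.random.rand(256, 256)
tform = build_rescale_transform_slow(2.0, image.shape, (128, 128))
small = skimage.transform.warp(image, tform, output_shape=(128, 128))
print(small.shape)  # (128, 128)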
def perturb_multiscale(img, scale_factors, augmentation_params, target_shapes, rng=np.random):
    """
    scale is a DOWNSCALING factor.
    """
    tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
    tform_augment = random_perturbation_transform(rng=rng, **augmentation_params)
    tform_augment = tform_uncenter + tform_augment + tform_center  # shift to center, augment, shift back (for the rotation/shearing)

    output = []
    for scale, target_shape in zip(scale_factors, target_shapes):
        if isinstance(scale, skimage.transform.ProjectiveTransform):
            tform_rescale = scale
        else:
            tform_rescale = build_rescale_transform(scale, img.shape, target_shape)  # also does centering
        output.append(fast_warp(img, tform_rescale + tform_augment,
                                output_shape=target_shape, mode='constant').astype('float32'))

    return output
def perturb_multiscale_fixed(img, scale_factors, tform_augment, target_shapes):
    """
    scale is a DOWNSCALING factor.
    """
    tform_center, tform_uncenter = build_center_uncenter_transforms(img.shape)
    tform_augment = tform_uncenter + tform_augment + tform_center  # shift to center, augment, shift back (for the rotation/shearing)

    output = []
    for scale, target_shape in zip(scale_factors, target_shapes):
        if isinstance(scale, skimage.transform.ProjectiveTransform):
            tform_rescale = scale
        else:
            tform_rescale = build_rescale_transform(scale, img.shape, target_shape)  # also does centering
        output.append(fast_warp(img, tform_rescale + tform_augment,
                                output_shape=target_shape, mode='constant').astype('float32'))

    return output
def load_image(path):
    # Load image [height, width, depth]
    img = skimage.io.imread(path) / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()

    # Crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    shape = list(img.shape)
    crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
    resized_img = skimage.transform.resize(crop_img, (shape[0], shape[1]))
    return resized_img, shape


# Return a resized numpy array of an image specified by its path
def load_image2(path, height=None, width=None):
    # Load image
    img = skimage.io.imread(path) / 255.0
    if height is not None and width is not None:
        ny = height
        nx = width
    elif height is not None:
        ny = height
        nx = int(img.shape[1] * ny / img.shape[0])
    elif width is not None:
        nx = width
        ny = int(img.shape[0] * nx / img.shape[1])
    else:
        ny = img.shape[0]
        nx = img.shape[1]
    return skimage.transform.resize(img, (ny, nx))


# Render the generated image given a tensorflow session and a variable image (x)
def load_img(path, grayscale=False, resize=None, order=1):
    # Load image
    img = io.imread(path)

    # Resize
    # print('Desired resize: ' + str(resize))
    if resize is not None:
        img = skimage.transform.resize(img, resize, order=order, preserve_range=True)
        # print('Final resize: ' + str(img.shape))

    # Color conversion
    if len(img.shape) == 2 and not grayscale:
        img = gray2rgb(img)
    elif len(img.shape) > 2 and img.shape[2] == 3 and grayscale:
        img = rgb2gray(img)

    # Return image
    return img
def load_image(path):
    # load image
    nImgs = len(path)
    rImg = np.zeros([nImgs, 224, 224, 3])
    for i in range(nImgs):
        img = cv2.imread(path[i])
        img = img / 255.0
        assert (0 <= img).all() and (img <= 1.0).all()
        # print "Original Image Shape: ", img.shape
        # we crop image from center
        short_edge = min(img.shape[:2])
        yy = int((img.shape[0] - short_edge) / 2)
        xx = int((img.shape[1] - short_edge) / 2)
        crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
        # resize to 224, 224
        resized_img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)  # skimage.transform.resize(crop_img, (224, 224))
        rImg[i] = resized_img
    return rImg


# returns the top1 string
def load_img(path, grayscale=False, resize=None, order=1):
    # Load image
    img = io.imread(path)

    # Resize
    # print('Desired resize: ' + str(resize))
    if resize is not None:
        img = skimage.transform.resize(img, resize, order=order, preserve_range=True)
        # print('Final resize: ' + str(img.shape))

    # Color conversion
    if len(img.shape) == 2 and not grayscale:
        img = gray2rgb(img)
    elif len(img.shape) > 2 and img.shape[2] == 3 and grayscale:
        img = rgb2gray(img)

    # Return image
    return img
def load_image(path):
    # load image
    img = skimage.io.imread(path)
    img = img / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()
    # print "Original Image Shape: ", img.shape
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
    # resize to 224, 224
    resized_img = skimage.transform.resize(crop_img, (224, 224))
    if len(resized_img.shape) < 3:
        resized_img = skimage.color.gray2rgb(resized_img)
    return resized_img


# returns the top1 string
def load_image_array(image_file, image_size):
    img = skimage.io.imread(image_file)
    # GRAYSCALE
    if len(img.shape) == 2:
        img_new = np.ndarray((img.shape[0], img.shape[1], 3), dtype='uint8')
        img_new[:, :, 0] = img
        img_new[:, :, 1] = img
        img_new[:, :, 2] = img
        img = img_new

    img_resized = skimage.transform.resize(img, (image_size, image_size))

    # FLIP HORIZONTAL WITH A PROBABILITY 0.5
    if random.random() > 0.5:
        img_resized = np.fliplr(img_resized)

    return img_resized.astype('float32')
def load_image(path, height=None, width=None):
    img = skimage.io.imread(path)
    if len(img.shape) == 2:
        img = skimage.color.gray2rgb(img)
    img = img / 255.0
    if height is not None and width is not None:
        ny = height
        nx = width
    elif height is not None:
        ny = height
        nx = int(img.shape[1] * ny / img.shape[0])
    elif width is not None:
        nx = width
        ny = int(img.shape[0] * nx / img.shape[1])
    else:
        ny = img.shape[0]
        nx = img.shape[1]
    return skimage.transform.resize(img, (ny, nx))
def rescale(input_image, scale_factor):
    # round to an integer shape so resize() accepts non-integer scale factors
    sz = [int(round(s * scale_factor)) for s in input_image.shape[:2]]
    rescaled = skimage.transform.resize(input_image, sz, mode='reflect')
    return rescaled
def resize(input_image, size):
    dtype = input_image.dtype
    ret = skimage.transform.resize(input_image, size)
    if dtype == np.uint8:
        ret = (255 * ret).astype(dtype)
    elif dtype == np.uint16:
        ret = (65535 * ret).astype(dtype)
    elif dtype == np.float32 or dtype == np.float64:
        ret = ret.astype(dtype)
    else:
        raise ValueError('resize not implemented for type {}'.format(dtype))
    return ret


# ----- I/O -------------------------------------------------------------------
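For comparison, a minimal alternative sketch (an assumption, not taken from the original project): skimage.transform.resize accepts preserve_range=True, which skips the rescaling to [0, 1] so the result can be cast straight back to the input dtype. The helper name resize_keep_dtype is hypothetical.

import skimage.transform

def resize_keep_dtype(input_image, size):
    # keep values in the original range instead of rescaling to [0, 1]
    out = skimage.transform.resize(input_image, size, preserve_range=True)
    return out.astype(input_image.dtype)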
def load_image_array(image_file, image_size, image_id, data_dir='Data/datasets/mscoco/train2014', mode='train'):
    img = None
    if os.path.exists(image_file):
        # print('found' + image_file)
        img = skimage.io.imread(image_file)
    else:
        print('notfound' + image_file)
        img = skimage.io.imread('http://mscoco.org/images/%d' % (image_id))
        img_path = os.path.join(data_dir, 'COCO_%s2014_%.12d.jpg' % (mode, image_id))
        skimage.io.imsave(img_path, img)

    # GRAYSCALE
    if len(img.shape) == 2:
        img_new = np.ndarray((img.shape[0], img.shape[1], 3), dtype='uint8')
        img_new[:, :, 0] = img
        img_new[:, :, 1] = img
        img_new[:, :, 2] = img
        img = img_new

    img_resized = skimage.transform.resize(img, (image_size, image_size))

    # FLIP HORIZONTAL WITH A PROBABILITY 0.5
    if random.random() > 0.5:
        img_resized = np.fliplr(img_resized)

    return img_resized.astype('float32')
def load_image_inception(image_file, image_size=128):
    img = skimage.io.imread(image_file)
    # GRAYSCALE
    if len(img.shape) == 2:
        img_new = np.ndarray((img.shape[0], img.shape[1], 3), dtype='uint8')
        img_new[:, :, 0] = img
        img_new[:, :, 1] = img
        img_new[:, :, 2] = img
        img = img_new

    if image_size != 0:
        img = skimage.transform.resize(img, (image_size, image_size), mode='reflect')

    return img.astype('int32')
def test():
    img = skimage.io.imread("./test_data/starry_night.jpg")
    ny = 300
    nx = int(img.shape[1] * ny / img.shape[0])
    img = skimage.transform.resize(img, (ny, nx))
    skimage.io.imsave("./test_data/test/output.jpg", img)
def fast_warp(img, tf, output_shape, mode='constant', order=0):
    """
    This wrapper function is faster than skimage.transform.warp
    """
    m = tf.params
    t_img = np.zeros((img.shape[0],) + output_shape, img.dtype)
    for i in range(t_img.shape[0]):
        t_img[i] = _warp_fast(img[i], m, output_shape=output_shape, mode=mode, order=order)
    return t_img
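These fast_warp wrappers rely on _warp_fast, a private Cython routine inside scikit-image (in the releases these projects target it is imported as from skimage.transform._warps_cy import _warp_fast). Below is a slower but roughly equivalent sketch using only the public API, under the same channels-first layout assumption; the name slow_warp is hypothetical.

import numpy as np
import skimage.transform

def slow_warp(img, tf, output_shape, mode='constant', order=0):
    # per-channel warp with the public API; `tf` is the output-to-input transform
    t_img = np.zeros((img.shape[0],) + output_shape, img.dtype)
    for i in range(img.shape[0]):
        t_img[i] = skimage.transform.warp(img[i], tf, output_shape=output_shape,
                                          mode=mode, order=order, preserve_range=True)
    return t_img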
def build_centering_transform(image_shape, target_shape):
    rows, cols = image_shape
    trows, tcols = target_shape
    shift_x = (cols - tcols) / 2.0
    shift_y = (rows - trows) / 2.0
    return skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))
def build_center_uncenter_transforms(image_shape):
    """
    These are used to ensure that zooming and rotation happens around the center of the image.
    Use these transforms to center and uncenter the image around such a transform.
    """
    center_shift = np.array([image_shape[1], image_shape[0]]) / 2.0 - 0.5  # need to swap rows and cols here apparently! confusing!
    tform_uncenter = skimage.transform.SimilarityTransform(translation=-center_shift)
    tform_center = skimage.transform.SimilarityTransform(translation=center_shift)
    return tform_center, tform_uncenter
def build_augmentation_transform(zoom=(1.0, 1.0), rotation=0, shear=0, translation=(0, 0), flip=False):
    if flip:
        shear += 180
        rotation += 180
        # shear by 180 degrees is equivalent to rotation by 180 degrees + flip.
        # So after that we rotate it another 180 degrees to get just the flip.

    tform_augment = skimage.transform.AffineTransform(scale=(1/zoom[0], 1/zoom[1]),
                                                      rotation=np.deg2rad(rotation),
                                                      shear=np.deg2rad(shear),
                                                      translation=translation)
    return tform_augment
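The flip trick in the comment above can be checked numerically. This is a small sketch under the classic AffineTransform parametrization (x' = sx*x*cos(rotation) - sy*y*sin(rotation + shear) + tx), which is what these projects rely on; newer scikit-image releases may parametrize shear differently.

import numpy as np
import skimage.transform

# shear 180 deg combined with rotation 180 deg maps (x, y) -> (-x, y),
# i.e. a flip along x about the origin
tform = skimage.transform.AffineTransform(rotation=np.deg2rad(180),
                                          shear=np.deg2rad(180))
assert np.allclose(tform([[1.0, 2.0]]), [[-1.0, 2.0]], atol=1e-9)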
def transform(self, x):
    return prj(self.H.dot(np.append(x, 1)))
def load_augmented_images(fnames, preprocessor, w, h, is_training, aug_params=no_augmentation_params, transform=None,
                          bbox=None, fill_mode='constant', fill_mode_cval=0, standardizer=None, save_to_dir=None):
    return np.array(
        [load_augment(f, preprocessor, w, h, is_training, aug_params, transform, bbox, fill_mode, fill_mode_cval,
                      standardizer, save_to_dir) for f in fnames])
def load_augment(fname, preprocessor, w, h, is_training, aug_params=no_augmentation_params, transform=None,
                 bbox=None, fill_mode='constant', fill_mode_cval=0, standardizer=None, save_to_dir=None):
    """Load augmented image with output shape (h, w, c)

    Default arguments return non augmented image of shape (h, w, c).
    To apply a fixed transform (and color augmentation) specify transform
    (and color_vec in standardizer).
    To generate a random augmentation specify aug_params (and sigma in standardizer).
    """
    img = _load_image_th(fname, preprocessor)  # img shape - (c, h, w)
    if bbox is not None:
        img = _definite_crop(img, bbox)
        # print(img.shape)
        # import cv2
        # cv2.imshow("test", np.asarray(img[1,:,:], dtype=np.uint8))
        # cv2.waitKey(0)
        if bbox[4] == 1:
            img = img[:, :, ::-1]
    elif transform is not None:
        img = _perturb_fixed(img, tform_augment=transform, target_shape=(w, h),
                             mode=fill_mode, mode_cval=fill_mode_cval)
    else:
        img = _perturb(img, augmentation_params=aug_params, target_shape=(w, h),
                       mode=fill_mode, mode_cval=fill_mode_cval)

    if save_to_dir is not None:
        file_full_name = os.path.basename(fname)
        file_name, file_ext = os.path.splitext(file_full_name)
        fname2 = "%s/%s_DA_%d%s" % (save_to_dir, file_name, np.random.randint(1e4), file_ext)
        _save_image_th(img, fname2)

    if standardizer is not None:
        img = standardizer(img, is_training)

    # convert to shape (h, w, c)
    return img.transpose(1, 2, 0)
def build_augmentation_transform(zoom=(1.0, 1.0), rotation=0, shear=0, translation=(0, 0), flip=False):
    if flip:
        shear += 180
        rotation += 180
        # shear by 180 degrees is equivalent to rotation by 180 degrees + flip.
        # So after that we rotate it another 180 degrees to get just the flip.

    tform_augment = skimage.transform.AffineTransform(scale=(1 / zoom[0], 1 / zoom[1]),
                                                      rotation=np.deg2rad(rotation),
                                                      shear=np.deg2rad(shear),
                                                      translation=translation)
    return tform_augment


# internal stuff below
def _fast_warp(img, tf, output_shape, mode='constant', mode_cval=0, order=0):
    """This wrapper function is faster than skimage.transform.warp
    """
    m = tf.params
    t_img = np.zeros((img.shape[0],) + output_shape, img.dtype)
    for i in range(t_img.shape[0]):
        t_img[i] = _warp_fast(img[i], m, output_shape=output_shape, mode=mode, cval=mode_cval, order=order)
    return t_img
def _build_centering_transform(image_shape, target_shape):
    cols, rows = image_shape
    tcols, trows = target_shape
    shift_x = (cols - tcols) / 2.0
    shift_y = (rows - trows) / 2.0
    return skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))
def fast_warp(img, tf, output_shape=None, mode='constant', order=0):
    """
    This wrapper function is faster than skimage.transform.warp
    """
    m = tf.params
    if output_shape is None:
        t_img = np.zeros_like(img)
    else:
        t_img = np.zeros((img.shape[0],) + output_shape, img.dtype)
    for i in range(t_img.shape[0]):
        t_img[i] = _warp_fast(img[i], m, output_shape=output_shape, mode=mode, order=order)
    return t_img
def build_centering_transform(image_shape, target_shape):
    rows, cols = image_shape
    if target_shape is None:
        trows, tcols = image_shape
    else:
        trows, tcols = target_shape
    shift_x = (cols - tcols) / 2.0
    shift_y = (rows - trows) / 2.0
    return skimage.transform.SimilarityTransform(translation=(shift_x, shift_y))