The following nine code examples, extracted from open-source Python projects, illustrate how to use skimage.transform.SimilarityTransform().
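Before the project examples, here is a minimal standalone sketch of the pattern most of them share (the skimage.data.camera() test image is only an assumption for illustration): a SimilarityTransform combines scale, rotation and translation, and skimage.transform.warp applies it to an image. Note that warp expects an inverse coordinate map, so tform.inverse is passed in order to apply tform as a forward transform.

import numpy as np
from skimage import data, transform

image = data.camera()                                 # assumed grayscale test image
tform = transform.SimilarityTransform(scale=1.0,
                                      rotation=np.deg2rad(15),
                                      translation=(20, -10))
print(tform.params)                                   # 3x3 homogeneous matrix
warped = transform.warp(image, tform.inverse)         # apply tform to the image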
import math

import numpy as np
from skimage import transform


def affine_zoom(img, zoom, spin=0):
    '''Returns a new image derived from img, after a central-origin affine
    transform has been applied.'''
    img_copy = img.copy()
    # Shift transforms allow the affine to be applied with the centre of the
    # image as (0, 0).
    shift_y, shift_x, _ = (np.array(img_copy.shape) - 1) / 2.
    shift_fwd = transform.SimilarityTransform(translation=[-shift_x, -shift_y])
    shift_back = transform.SimilarityTransform(translation=[shift_x, shift_y])
    affine = transform.AffineTransform(
        scale=(zoom, zoom),
        rotation=(spin * math.pi / 180))
    img_copy = transform.warp(
        img_copy,
        (shift_fwd + (affine + shift_back)).inverse,
        order=3, clip=False, preserve_range=True,
        mode='reflect').astype(np.float32)
    return img_copy
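A hypothetical call to affine_zoom might look like this (the skimage.data.astronaut() input is only an assumption; the function unpacks three shape values, so it expects a 3-channel image):

import numpy as np
from skimage import data

rgb = data.astronaut().astype(np.float32) / 255.0     # assumed RGB test image
zoomed = affine_zoom(rgb, zoom=1.2, spin=15)          # zoom factor 1.2, 15 degree rotation
print(zoomed.shape, zoomed.dtype)                     # shape unchanged, float32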
import numpy as np
from skimage import transform as tf


def affine_transformation(z, order, **kwargs):
    """Apply an affine transformation to a 2-dimensional array.

    Parameters
    ----------
    z : np.ndarray
        2-dimensional array (e.g. a diffraction pattern) to be transformed.
    order : int
        Interpolation order.
    **kwargs
        Keyword arguments passed to skimage.transform.AffineTransform, e.g. a
        3x3 `matrix` specifying the affine transformation to be applied.

    Returns
    -------
    trans : np.ndarray
        Affine transformed diffraction pattern.
    """
    # Shift the origin to the image centre, apply the affine transform, then
    # shift back, so the transformation acts about the centre of the array.
    shift_y, shift_x = np.array(z.shape[:2]) / 2.
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])
    transformation = tf.AffineTransform(**kwargs)
    trans = tf.warp(z, (tf_shift + (transformation + tf_shift_inv)).inverse,
                    order=order)
    return trans
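A hedged usage sketch for affine_transformation: any skimage.transform.AffineTransform keyword (scale, rotation, shear, or an explicit 3x3 matrix) can be forwarded through **kwargs. The random test array is only an assumption standing in for a diffraction pattern.

import numpy as np

pattern = np.random.rand(128, 128)                    # stand-in for a diffraction pattern
stretched = affine_transformation(pattern, order=1, scale=(1.0, 1.1))
print(stretched.shape)                                # (128, 128)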
# Method of a model/component class; requires `import numpy as np` and
# `from skimage import transform as tf`.
def function(self, x, y):
    signal2D = self.signal.data
    order = self.order
    # Assemble the 3x3 affine matrix from the model's free parameters.
    d11 = self.d11.value
    d12 = self.d12.value
    d21 = self.d21.value
    d22 = self.d22.value
    t1 = self.t1.value
    t2 = self.t2.value
    D = np.array([[d11, d12, t1],
                  [d21, d22, t2],
                  [0., 0., 1.]])
    # Apply the transform about the image centre: shift, transform, shift back.
    shifty, shiftx = np.array(signal2D.shape[:2]) / 2
    shift = tf.SimilarityTransform(translation=[-shiftx, -shifty])
    tform = tf.AffineTransform(matrix=D)
    shift_inv = tf.SimilarityTransform(translation=[shiftx, shifty])
    transformed = tf.warp(signal2D, (shift + (tform + shift_inv)).inverse,
                          order=order)
    return transformed
import numpy as np
from skimage import transform


def distort_affine_skimage(image, rotation=10.0, shear=5.0, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    # Draw random rotation and shear angles (degrees) and convert to radians.
    # Note: the draws use the supplied random_state so results are reproducible.
    rot = np.deg2rad(random_state.uniform(-rotation, rotation))
    shear_rad = np.deg2rad(random_state.uniform(-shear, shear))

    shape = image.shape
    shape_size = shape[:2]
    center = np.float32(shape_size) / 2. - 0.5

    # Shift the image centre to the origin, then rotate/shear and shift back.
    pre = transform.SimilarityTransform(translation=-center)
    affine = transform.AffineTransform(rotation=rot, shear=shear_rad,
                                       translation=center)
    tform = pre + affine

    # tform.params is the 3x3 homogeneous matrix used as warp's coordinate map.
    distorted_image = transform.warp(image, tform.params, mode='reflect')
    return distorted_image.astype(np.float32)
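A usage sketch for the augmentation above (the input image from skimage.data is an assumption; passing a seeded RandomState makes the random rotation and shear reproducible):

import numpy as np
from skimage import data

rng = np.random.RandomState(0)
img = data.astronaut() / 255.0                        # assumed float RGB input
aug = distort_affine_skimage(img, rotation=10.0, shear=5.0, random_state=rng)
print(aug.shape, aug.dtype)                           # shape unchanged, float32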
import math

from skimage.transform import SimilarityTransform, rotate, warp


def get_head_crop(img, pt1, pt2):
    im = img.copy()
    minh = 10
    minw = 20

    # Distance between the two reference points.
    x = pt1[0] - pt2[0]
    y = pt1[1] - pt2[1]
    dist = math.hypot(x, y)

    # Crop margins derived from that distance.
    croph = int((im.shape[0] - 1.0 * dist) // 2)
    cropw = int((im.shape[1] - 2.0 * dist) // 2)
    newh = im.shape[0] - 2 * croph
    neww = im.shape[1] - 2 * cropw

    if croph <= 0 or cropw <= 0 or newh < minh or neww < minw:
        return im
    else:
        # Rotate so the two points lie horizontally, about a point between them.
        angle = math.atan2(y, x) * 180 / math.pi
        centery = 0.4 * pt1[1] + 0.6 * pt2[1]
        centerx = 0.4 * pt1[0] + 0.6 * pt2[0]
        center = (centerx, centery)
        im = rotate(im, angle, resize=False, center=center)

        # Translate so the rotation centre lands on the image centre,
        # then crop symmetrically.
        imcenter = (im.shape[1] / 2, im.shape[0] / 2)
        trans = (center[0] - imcenter[0], center[1] - imcenter[1])
        tform = SimilarityTransform(translation=trans)
        im = warp(im, tform)
        im = im[croph:-croph, cropw:-cropw]
        return im
import numpy as np


def add_jitter(prj, low=0, high=1):
    """Simulates jitter in projection images. The jitter is simulated by
    drawing random samples from a uniform distribution over the half-open
    interval [low, high).

    Parameters
    ----------
    prj : ndarray
        3D stack of projection images. The first dimension is the projection
        axis; the second and third dimensions are the x- and y-axes of the
        projection image, respectively.
    low : float, optional
        Lower boundary of the output interval. All values generated will be
        greater than or equal to low. The default value is 0.
    high : float
        Upper boundary of the output interval. All values generated will be
        less than high. The default value is 1.0.

    Returns
    -------
    ndarray
        3D stack of projection images with jitter.
    """
    from xcor.utils import scale
    from skimage import transform as tf

    # Needs scaling for skimage float operations.
    prj, scl = scale(prj)

    # Random jitter parameters are drawn from a uniform distribution.
    ind = np.random.uniform(low, high, size=(prj.shape[0], 2))
    for m in range(prj.shape[0]):
        tform = tf.SimilarityTransform(translation=ind[m])
        prj[m] = tf.warp(prj[m], tform, order=0)

    # Re-scale back to original values.
    prj *= scl
    return prj
# Method of an extractor class; requires `import numpy as np` and
# `from skimage import transform`.
def _compensate_rotation_shift(self, img, scale):
    """This is an auxiliary method used by extract_from_image.

    It is needed due to particular specifics of the skimage.transform.rotate
    implementation. Namely, when you use rotate(..., resize=True), the rotated
    image is rotated and shifted by a certain amount. Thus when we need to cut
    out the box from the image, we need to account for this shift. We do this
    by repeating the computation from skimage.transform.rotate here.

    TODO: This makes the code uncomfortably coupled to SKImage (e.g. this
    logic is appropriate for skimage 0.12.1, but not for 0.11, and no one
    knows what happens in later versions). A solution would be to use
    skimage.transform.warp with custom settings, but we can think of it later.
    """
    ctr = np.asarray([self.center[1] * scale, self.center[0] * scale])
    tform1 = transform.SimilarityTransform(translation=ctr)
    tform2 = transform.SimilarityTransform(rotation=np.pi / 2 - self.angle)
    tform3 = transform.SimilarityTransform(translation=-ctr)
    tform = tform3 + tform2 + tform1

    rows, cols = img.shape[0], img.shape[1]
    corners = np.array([
        [0, 0],
        [0, rows - 1],
        [cols - 1, rows - 1],
        [cols - 1, 0]
    ])
    corners = tform.inverse(corners)
    minc = corners[:, 0].min()
    minr = corners[:, 1].min()
    maxc = corners[:, 0].max()
    maxr = corners[:, 1].max()

    # SKImage 0.11 version
    out_rows = maxr - minr + 1
    out_cols = maxc - minc + 1

    # fit output image in new shape
    return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
import math

import numpy as np
import progressbar
from skimage.io import imread
from skimage import transform as tf


def test(test_func):
    lena = imread('lena512.png')
    n = 100
    error_all = np.zeros([n])
    pbar = progressbar.ProgressBar(max_value=n)

    for i in range(n):
        pbar.update(i + 1)

        # Random non-integer shift to recover.
        x_true = np.random.random() * 6 - 5
        y_true = np.random.random() * 6 - 5

        # ex) left:5, up:30 => translation=(5, 30)
        t_form = tf.SimilarityTransform(translation=(x_true, y_true))
        lena_shift = tf.warp(lena, t_form)

        # Crop both images by random margins so the borders differ.
        a1 = np.random.randint(10, 50)
        a2 = np.random.randint(10, 50)
        a3 = np.random.randint(10, 50)
        a4 = np.random.randint(10, 50)
        img1 = lena[a1:-a2, a3:-a4]
        img2 = lena_shift[a1:-a2, a3:-a4]

        x_est, y_est = test_func(img1, img2)
        # print("x: {0:.3f}, y: {1:.3f}".format(x_true, y_true))
        # print("x: {0:.3f}, y: {1:.3f}".format(x_est, y_est))

        value = math.sqrt((x_true - x_est) ** 2 + (y_true - y_est) ** 2)
        error_all[i] = value

    ave = np.average(error_all)
    std = np.std(error_all)
    print("\terror: {0:.3f} +- {1:.3f}".format(ave, std))


# ------------------------------
# main
# ------------------------------
import numpy as np
from skimage import transform as tf


def _align_two_rasters(img1, img2):
    # `normalize` and `poc` (phase-only correlation) are project-specific
    # helpers; `poc` estimates the (x, y) shift between the two patches.
    p1 = normalize(img1[10:-10, 10:-10, 0].astype(np.float32))
    p2 = normalize(img2[10:-10, 10:-10, 7].astype(np.float32))

    x, y = poc(p2, p1)
    print('x: {0:.5f} y: {1:.5f}'.format(x, y))

    # Shift img2 by the estimated offset to align it with img1.
    t_form = tf.SimilarityTransform(translation=(x, y))
    img3 = tf.warp(img2, t_form)

    return img3