Python skimage.transform module: rotate() examples from source code

We extracted the following 34 code examples from open-source Python projects to illustrate how to use skimage.transform.rotate().
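
A minimal, self-contained usage sketch (not taken from any of the projects below; the array values are illustrative only):

import numpy as np
from skimage import transform

# A small test image: a bright diagonal on a black background.
image = np.eye(8, dtype=np.float64)

# Rotate 45 degrees counter-clockwise about the image center.
# resize=True enlarges the output so no corners are cut off;
# preserve_range=True keeps the original value range instead of rescaling to [0, 1].
rotated = transform.rotate(image, angle=45, resize=True, preserve_range=True)
print(rotated.shape)  # larger than (8, 8) because resize=True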

Project: chainer-spatial-transformer-networks    Author: hvy    | Project source | File source
def transform_mnist_rts(in_data):
    img, label = in_data
    img = img[0]  # Remove channel axis for skimage manipulation

    # Rotate
    img = transform.rotate(img, angle=np.random.uniform(-45, 45),
                           resize=True, mode='constant')
    #  Scale
    img = transform.rescale(img, scale=np.random.uniform(0.7, 1.2),
                            mode='constant')

    # Translate
    h, w = img.shape
    if h >= img_size[0] or w >= img_size[1]:
        img = transform.resize(img, output_shape=img_size, mode='constant')
        img = img.astype(np.float32)
    else:
        img_canvas = np.zeros(img_size, dtype=np.float32)
        ymin = np.random.randint(0, img_size[0] - h)
        xmin = np.random.randint(0, img_size[1] - w)
        img_canvas[ymin:ymin+h, xmin:xmin+w] = img
        img = img_canvas

    img = img[np.newaxis, :]  # Add the channel axis back
    return img, label
Project: nuts-ml    Author: maet3608    | Project source | File source
def translate(image, dx, dy, **kwargs):
    """
    Shift image horizontally and vertically

    >>> image = np.eye(3, dtype='uint8') * 255
    >>> translate(image, 2, 1)
    array([[  0,   0,   0],
           [  0,   0, 255],
           [  0,   0,   0]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param dx: horizontal translation in pixels
    :param dy: vertical translation in pixels
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
       warp function, e.g. order=1 for linear interpolation.
    :return: translated image
    :rtype:  numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    transmat = skt.AffineTransform(translation=(-dx, -dy))
    return skt.warp(image, transmat, preserve_range=True,
                    **kwargs).astype('uint8')
Project: nuts-ml    Author: maet3608    | Project source | File source
def shear(image, shear_factor, **kwargs):
    """
    Shear image.

    For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.AffineTransform

    >>> image = np.eye(3, dtype='uint8')
    >>> sheared = shear(image, 0.5)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float shear_factor: Shear factor [0, 1]
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
       warp function, e.g. order=1 for linear interpolation.
    :return: Sheared image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    transform = skt.AffineTransform(shear=shear_factor)
    return skt.warp(image, transform, preserve_range=True,
                    **kwargs).astype('uint8')
Project: segmentation    Author: zengyu714    | Project source | File source
def _rotate_and_rescale(xs, ys):
    """Rotate images and labels and scale image and labels by a certain factor.
    Both need to swap axis from [depth, height, width] to [height, width, depth]
    required by skimage.transform library.
    """

    degree = np.int(np.random.uniform(low=-3, high=5))
    factor = np.random.uniform(low=0.9, high=1.1)
    # swap axis
    HWC_xs, HWC_ys = [np.transpose(item, [1, 2, 0]) for item in [xs, ys]]
    # rotate and rescale
    HWC_xs, HWC_ys = [rotate(item, degree, mode='symmetric', preserve_range=True) for item in [HWC_xs, HWC_ys]]
    HWC_xs, HWC_ys = [rescale(item, factor, mode='symmetric', preserve_range=True) for item in [HWC_xs, HWC_ys]]
    # swap back
    xs, ys = [np.transpose(item, [2, 0, 1]) for item in [HWC_xs, HWC_ys]]
    return xs, ys
Project: ml-traffic    Author: Zepheus    | Project source | File source
def process(self, im):
        if self.crop:
            (h, w, _) = im.shape
            nw, nh = self.rotatedRectWithMaxArea(w, h, math.radians(self.degrees))
            rotated = transform.rotate(im, self.degrees, resize=True)
            (rh, rw, _) = rotated.shape

            image_size = (rw, rh)
            image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))

            x1 = int(image_center[0] - nw * 0.5)
            x2 = int(image_center[0] + nw * 0.5)
            y1 = int(image_center[1] - nh * 0.5)
            y2 = int(image_center[1] + nh * 0.5)

            rotated_cropped = rotated[y1:y2, x1:x2, :]
            return rotated_cropped
        else:
            return transform.rotate(im, self.degrees, resize=True)
Project: deepjets    Author: deepjets    | Project source | File source
def preprocess(subjets, constits, edges,
               cutoff=0.1,
               rotate=True,
               reflect=True,
               zoom=False,
               out_width=25,
               normalize=True):
    translate(constits, subjets)
    image = pixelize(constits, edges)
    if rotate:
        image = rotate_image(image, subjets)
    if reflect:
        image = reflect_image(image, subjets)
    image = zoom_image(image, zoom if zoom is not False else 1., out_width)
    if normalize:
        image = normalize_image(image)
    return image
Project: u-net    Author: yihui-he    | Project source | File source
def augmentation(image, org_width=160,org_height=224, width=190, height=262):
    max_angle=20
    image=resize(image,(width,height))

    angle=np.random.randint(max_angle)
    if np.random.randint(2):
        angle=-angle
    image=rotate(image,angle,resize=True)

    xstart=np.random.randint(width-org_width)
    ystart=np.random.randint(height-org_height)
    image=image[xstart:xstart+org_width,ystart:ystart+org_height]

    if np.random.randint(2):
        image=cv2.flip(image,1)

    if np.random.randint(2):
        image=cv2.flip(image,0)
    # image=resize(image,(org_width,org_height))

    print(image.shape)
    plt.imshow(image)
    plt.show()
Project: Ultras-Sound-Nerve-Segmentation---Kaggle    Author: Simoncarbo    | Project source | File source
def transform(image): #translate, shear, stretch, flips?
    rows,cols = image.shape

    angle = random.uniform(-1.5,1.5)
    center = (rows / 2 - 0.5+random.uniform(-50,50), cols / 2 - 0.5+random.uniform(-50,50))
    def_image = tf.rotate(image, angle = angle, center = center,clip = True, preserve_range = True,order = 5)

    alpha = random.uniform(0,5)
    sigma = random.exponential(scale = 5)+2+alpha**2
    def_image = elastic_transform(def_image, alpha, sigma)

    def_image = def_image[10:-10,10:-10]

    return def_image

# sigma: variance of filter, fixes homogeneity of transformation 
#    (close to zero : random, big: translation)
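
The snippet above calls an elastic_transform helper that is not included in this listing. A minimal sketch of the usual approach (Gaussian-smoothed random displacement fields, in the spirit of Simard et al.; the project's actual implementation may differ) could look like this:

import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

def elastic_transform(image, alpha, sigma):
    """Elastically deform a 2D image.

    alpha scales the displacement magnitude; sigma smooths the random
    displacement field (sigma close to zero gives noisy local warps,
    a large sigma approaches a pure translation).
    """
    shape = image.shape
    # Random displacement fields in [-1, 1], smoothed and scaled.
    dx = gaussian_filter(np.random.rand(*shape) * 2 - 1, sigma) * alpha
    dy = gaussian_filter(np.random.rand(*shape) * 2 - 1, sigma) * alpha
    # Sample the input image at the displaced coordinates.
    y, x = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    coords = (y + dy).reshape(-1, 1), (x + dx).reshape(-1, 1)
    return map_coordinates(image, coords, order=1, mode='reflect').reshape(shape)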
Project: kaggle-right-whale    Author: felixlaumon    | Project source | File source
def get_head_crop(img, pt1, pt2):
    im = img.copy()
    minh = 10
    minw = 20

    x = pt1[0] - pt2[0]
    y = pt1[1] - pt2[1]
    dist = math.hypot(x, y)
    croph = int((im.shape[0] - 1.0 * dist) // 2)
    cropw = int((im.shape[1] - 2.0 * dist) // 2)
    newh = im.shape[0] - 2 * croph
    neww = im.shape[1] - 2 * cropw

    if croph <= 0 or cropw <= 0 or newh < minh or neww < minw:
        return im
    else:
        angle = math.atan2(y, x) * 180 / math.pi
        centery = 0.4 * pt1[1] + 0.6 * pt2[1]
        centerx = 0.4 * pt1[0] + 0.6 * pt2[0]
        center = (centerx, centery)
        im = rotate(im, angle, resize=False, center=center)
        imcenter = (im.shape[1] / 2, im.shape[0] / 2)
        trans = (center[0] - imcenter[0], center[1] - imcenter[1])
        tform = SimilarityTransform(translation=trans)
        im = warp(im, tform)
        im = im[croph:-croph, cropw:-cropw]
        return im
Project: gconv_experiments    Author: tscohen    | Project source | File source
def rotate_transform_batch(x, rotation=None):

    r = np.random.uniform(-0.5, 0.5, size=x.shape[0]) * rotation

    # hack; skimage.transform wants float images to be in [-1, 1]
    factor = np.maximum(np.max(x), np.abs(np.min(x)))
    x = x / factor

    x_out = np.empty_like(x)
    for i in range(x.shape[0]):
        x_out[i, 0] = tf.rotate(x[i, 0], r[i])

    x_out *= factor

    return x_out
Project: hourglasstensorlfow    Author: wbenbihi    | Project source | File source
def _augment(self,img, hm, max_rotation = 30):
        """ # TODO : IMPLEMENT DATA AUGMENTATION 
        """
        if random.choice([0,1]): 
            r_angle = np.random.randint(-1*max_rotation, max_rotation)
            img =   transform.rotate(img, r_angle, preserve_range = True)
            hm = transform.rotate(hm, r_angle)
        return img, hm

    # ----------------------- Batch Generator ----------------------------------
Project: hourglasstensorlfow    Author: wbenbihi    | Project source | File source
def rotatehm(hm, angle):
    """
        Given a heatMap, returns a rotated heatMap
        args : 
            hm      : (numpy.array) heatMap
            angle : (int) Angle
    """
    rot_hm = np.zeros((16,64,64))
    for i in range(16):
        rot_hm[i] = transform.rotate(hm[i],angle)
    return rot_hm
Project: Nature-Conservancy-Fish-Image-Prediction    Author: Brok-Bucholtz    | Project source | File source
def rand_rotate(image):
    angle = randint(1, 360)
    return rotate(image, angle, preserve_range=True).astype(np.uint8)
Project: Nature-Conservancy-Fish-Image-Prediction    Author: Brok-Bucholtz    | Project source | File source
def run():
    augments = {
        'rotate': (rand_rotate, './data/augmentation/rotation/'),
        'scale': (rand_scale, './data/augmentation/scale/')
    }

    for name, arguments in augments.items():
        print('Augmenting {} images:'.format(name))
        augment(*arguments)
Project: nuts-ml    Author: maet3608    | Project source | File source
def rotate(image, angle=0, **kwargs):
    """
    Rotate image.

    For details see:
    http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.rotate

    For a smooth interpolation of images set 'order=1'. To rotate masks use
    the default 'order=0'.

    >>> image = np.eye(3, dtype='uint8')
    >>> rotate(image, 90)
    array([[0, 0, 1],
           [0, 1, 0],
           [1, 0, 0]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'.
    :param float angle: Angle in degrees in counter-clockwise direction
    :param kwargs kwargs: Keyword arguments for the underlying scikit-image
       rotate function, e.g. order=1 for linear interpolation.
    :return: Rotated image
    :rtype: numpy array with range [0,255] and dtype 'uint8'
    """
    set_default_order(kwargs)
    return skt.rotate(image, angle, preserve_range=True,
                      **kwargs).astype('uint8')
Project: convolutional-pose-machines-chainer    Author: tomoyukun    | Project source | File source
def rotate_image(self, image, joint_x, joint_y, bbox):
        joints = np.transpose(np.array((joint_x, joint_y), dtype=(np.float32)), (1,0))
        angle = np.random.randint(-self.rotate_range, self.rotate_range)
        theta = -np.radians(angle)
        image = transform.rotate(image, angle, center = (bbox[0], bbox[1]))
        c, s = np.cos(theta), np.sin(theta)
        rot_mat = np.array([[c, -s], [s, c]])
        joints = rot_mat.dot((joints - [bbox[0], bbox[1]]).T).T + [bbox[0], bbox[1]]
        joint_x = joints[:,0]
        joint_y = joints[:,1]
        bbox[2:4] = [(max(joint_x) - min(joint_x)) * 1.2,
                     (max(joint_y) - min(joint_y)) * 1.2]
        return image, joint_x, joint_y, bbox
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def rotate_3d_ski(im, gt):
    im = np.transpose(im, (1, 2, 0))
    gt = np.transpose(gt, (1, 2, 0))

    ang = np.random.uniform(0, 360)
    r_im = rotate(im , ang, order=3)
    r_gt = rotate(gt, ang, order=3)

    return np.transpose(r_im, (2, 0, 1)), np.transpose(r_gt, (2, 0, 1))
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def frame_rotate(frame,theta = 45):
        (h, w) = frame.shape[:2]
        center = (w / 2, h / 2)

        # rotate the image by theta degrees about its center
        M = cv2.getRotationMatrix2D(center, theta, 1.0)
        rotated = cv2.warpAffine(frame, M, (w, h))
        return rotated
Project: ml-traffic    Author: Zepheus    | Project source | File source
def __init__(self, degrees, cropImage=True):
        self.degrees = degrees
        self.crop = cropImage

# Reference: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
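
The process() method earlier in this listing calls self.rotatedRectWithMaxArea, which is not shown here. A sketch based on the referenced Stack Overflow answer (largest axis-aligned rectangle that fits inside a w x h rectangle rotated by a given angle); in the project this is presumably a method of the same class:

import math

def rotatedRectWithMaxArea(w, h, angle):
    """Return (width, height) of the largest axis-aligned rectangle that fits
    entirely inside a w x h rectangle rotated by `angle` radians."""
    if w <= 0 or h <= 0:
        return 0, 0
    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)
    # Only the absolute values of sin and cos matter.
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
        # Half-constrained case: two crop corners touch the longer side,
        # the other two lie on the mid-line parallel to it.
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # Fully constrained case: the crop touches all four sides.
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr = (w * cos_a - h * sin_a) / cos_2a
        hr = (h * cos_a - w * sin_a) / cos_2a
    return wr, hr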
Project: crass    Author: UB-Mannheim    | Project source | File source
def deskew(args,image, image_param):
    # Deskew the given image based on the horizontal line
    # Calculate the angle of the points between 20% and 80% of the line
    uintimage = get_uintimg(image)
    binary = get_binary(args, uintimage)
    labels, numl = measurements.label(binary)
    objects = measurements.find_objects(labels)
    deskew_path = None
    for i, b in enumerate(objects):
        linecoords = Linecoords(image, i, b)
        # The line has to be wider than minwidth, narrower than maxwidth, stay in the top (30%) of the image,
        # only one object is allowed, and the line must not touch the top border of the image
        if int(args.minwidthhor * image_param.width) < get_width(b) < int(args.maxwidthhor * image_param.width) \
                and int(image_param.height * args.minheighthor) < get_height(b) < int(image_param.height * args.maxheighthor) \
                and int(image_param.height * args.minheighthormask) < (linecoords.height_start+linecoords.height_stop)/2 < int(image_param.height * args.maxheighthormask) \
                and linecoords.height_start != 0:

            pixelwidth = set_pixelground(binary[b].shape[1])
            arr = np.arange(1, pixelwidth(args.deskewlinesize) + 1)
            mean_y = []
            # Calculate the mean y-value for every sampled column
            for idx in range(pixelwidth(args.deskewlinesize)):
                value_y = measurements.find_objects(labels[b][:, idx + pixelwidth((1.0-args.deskewlinesize)/2)] == i + 1)[0]
                mean_y.append((value_y[0].stop + value_y[0].start) / 2)
            polyfit_value = np.polyfit(arr, mean_y, 1)
            deskewangle = np.arctan(polyfit_value[0]) * (360 / (2 * np.pi))
            args.ramp = True
            deskew_image = transform.rotate(image, deskewangle)
            create_dir(image_param.pathout+os.path.normcase("/deskew/"))
            deskew_path = "%s_deskew.%s" % (image_param.pathout+os.path.normcase("/deskew/")+image_param.name, args.extension)
            deskewinfo = open(image_param.pathout+os.path.normcase("/deskew/")+image_param.name + "_deskewangle.txt", "w")
            deskewinfo.write("Deskewangle:\t%d" % deskewangle)
            deskewinfo.close()
            image_param.deskewpath = deskew_path
            with warnings.catch_warnings():
                # transform.rotate converts the image to float; imsave converts it back (ignore the precision warning)
                warnings.simplefilter("ignore")
                misc.imsave(deskew_path, deskew_image)
            break

    return deskew_path
Project: crass    Author: UB-Mannheim    | Project source | File source
def get_uintimg(image):
    if len(image.shape) > 2:
        uintimage = color.rgb2gray(copy.deepcopy(image))
    else:
        uintimage = copy.deepcopy(image)
    if uintimage.dtype == "float64":
        with warnings.catch_warnings():
            # Convert the float image back to uint (ignore the precision loss warning)
            warnings.simplefilter("ignore")
            uintimage = ski.img_as_uint(uintimage, force_copy=True)
    return uintimage
Project: ramp-workflow    Author: paris-saclay-cds    | Project source | File source
def _image_transform(x, transforms):
    from skimage.transform import rotate
    for t in transforms:
        if t['name'] == 'rotate':
            angle = np.random.random() * (
                t['u_angle'] - t['l_angle']) + t['l_angle']
            x = rotate(x, angle, preserve_range=True)
    return x
Project: deepjets    Author: deepjets    | Project source | File source
def rotate_image(image, subjets):
    """Return rotated and repixelised image array.

    Rotation puts the subleading subjet or first principal component at -pi/2.
    Repixelisation interpolates with a cubic spline.
    """
    # Use subleading subjet information to rotate
    if len(subjets) > 1:
        theta = np.arctan2(subjets['phi'][1], subjets['eta'][1])
        theta = -90.0-(theta*180.0/np.pi)
        return transform.rotate(image, theta, order=3)

    # Use principal component of image intensity to rotate
    width, height = image.shape
    pix_coords = np.array([[i, j] for i in range(-width+1, width, 2)
                           for j in range(-height+1, height, 2)])
    covX = np.cov(pix_coords, aweights=np.reshape(image, (width*height)),
                  rowvar=0, bias=1)
    e_vals, e_vecs = np.linalg.eigh(covX)
    pc = e_vecs[:,-1]
    theta = np.arctan2(pc[1], pc[0])
    theta = -90.0-(theta*180.0/np.pi)
    t_image = transform.rotate(image, theta, order=3)
    # Check orientation of principal component
    pix_bot = np.sum(t_image[:, :-(-height//2)])
    pix_top = np.sum(t_image[:, (height//2):])
    if pix_top > pix_bot:
        t_image = transform.rotate(t_image, 180.0, order=3)
        theta += 180.0
    return t_image
Project: PassportEye    Author: konstantint    | Project source | File source
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
        """Extracts the contents of this box from a given image.
        For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.

        Returns an image with dimensions height*scale x width*scale.
        Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
        and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")

        :param img: a numpy ndarray suitable for image processing via skimage.
        :param scale: the RotatedBox is scaled by this value before performing the extraction.
            This is necessary when, for example, the location of a particular feature is determined using a smaller image,
            yet then the corresponding area needs to be extracted from the original, larger image.
            The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
        :param margin_width: The margin that should be added to the width dimension of the box from each side.
            This value is given wrt actual box dimensions (i.e. not scaled).
        :param margin_height: The margin that should be added to the height dimension of the box from each side.
        :return: a numpy ndarray, corresponding to the extracted region (aligned straight).

        TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
        """
        rotate_by = (np.pi/2 - self.angle)*180/np.pi
        img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
        # Rotating with resize=True shifts the resulting image somewhat w.r.t. the original coordinates.
        # When we cut out the box we will compensate for this shift.
        shift_c, shift_r = self._compensate_rotation_shift(img, scale)

        r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
        r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
        c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
        c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
        return img_rotated[r1:r2, c1:c2]
Project: PassportEye    Author: konstantint    | Project source | File source
def _compensate_rotation_shift(self, img, scale):
        """This is an auxiliary method used by extract_from_image.
        It is needed due to particular specifics of the skimage.transform.rotate implementation.
        Namely, when you use rotate(..., resize=True), the rotated image is also shifted by a certain amount.
        Thus when we need to cut out the box from the image, we need to account for this shift.
        We do this by repeating the computation from skimage.transform.rotate here.

        TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
        and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later.
        """
        ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
        tform1 = transform.SimilarityTransform(translation=ctr)
        tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
        tform3 = transform.SimilarityTransform(translation=-ctr)
        tform = tform3 + tform2 + tform1

        rows, cols = img.shape[0], img.shape[1]
        corners = np.array([
            [0, 0],
            [0, rows - 1],
            [cols - 1, rows - 1],
            [cols - 1, 0]
        ])
        corners = tform.inverse(corners)
        minc = corners[:, 0].min()
        minr = corners[:, 1].min()
        maxc = corners[:, 0].max()
        maxr = corners[:, 1].max()

        # SKImage 0.11 version
        out_rows = maxr - minr + 1
        out_cols = maxc - minc + 1

        # fit output image in new shape
        return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
Project: nn-segmentation-for-lar    Author: cvdlab    | Project source | File source
def rotate_patches(patch, edge_1, edge_2, rotating_angle):
    return np.array( [rotate( patch, rotating_angle, resize=False ),
                      rotate( edge_1, rotating_angle, resize=False ),
                      rotate( edge_2, rotating_angle, resize=False )] )
Project: nn-segmentation-for-lar    Author: cvdlab    | Project source | File source
def rotate_patch(patch, angle):
    """

    :param patch: patch of size (4, 33, 33)
    :param angle: says how much rotation must be applied
    :return: rotate_patch
    """

    return np.array([rotate(patch[0], angle, resize=False),
                     rotate(patch[1], angle, resize=False),
                     rotate(patch[2], angle, resize=False),
                     rotate(patch[3], angle, resize=False)])
Project: u-net    Author: yihui-he    | Project source | File source
def augmentation(image, imageB, org_width=160,org_height=224, width=190, height=262):
    max_angle=20
    image=resize(image,(width,height))
    imageB=resize(imageB,(width,height))

    angle=np.random.randint(max_angle)
    if np.random.randint(2):
        angle=-angle
    image=rotate(image,angle,resize=True)
    imageB=rotate(imageB,angle,resize=True)

    xstart=np.random.randint(width-org_width)
    ystart=np.random.randint(height-org_height)
    image=image[xstart:xstart+org_width,ystart:ystart+org_height]
    imageB=imageB[xstart:xstart+org_width,ystart:ystart+org_height]

    if np.random.randint(2):
        image=cv2.flip(image,1)
        imageB=cv2.flip(imageB,1)

    if np.random.randint(2):
        imageB=cv2.flip(imageB,0)
    # image=resize(image,(org_width,org_height))

    return image,imageB
    # print(image.shape)
    # plt.imshow(image)
    # plt.show()

# Helper to build a conv -> BN -> relu block
Project: u-net    Author: yihui-he    | Project source | File source
def augmentation(image, imageB, org_width=160,org_height=224, width=190, height=262):
    max_angle=20
    image=cv2.resize(image,(height,width))
    imageB=cv2.resize(imageB,(height,width))

    angle=np.random.randint(max_angle)
    if np.random.randint(2):
        angle=-angle
    image=rotate(image,angle,resize=True)
    imageB=rotate(imageB,angle,resize=True)

    xstart=np.random.randint(width-org_width)
    ystart=np.random.randint(height-org_height)
    image=image[xstart:xstart+org_width,ystart:ystart+org_height]
    imageB=imageB[xstart:xstart+org_width,ystart:ystart+org_height]

    if np.random.randint(2):
        image=cv2.flip(image,1)
        imageB=cv2.flip(imageB,1)

    if np.random.randint(2):
        image=cv2.flip(image,0)
        imageB=cv2.flip(imageB,0)

    image=cv2.resize(image,(org_height,org_width))
    imageB=cv2.resize(imageB,(org_height,org_width))

    return image,imageB
    # print(image.shape)
    # plt.imshow(image)
    # plt.show()
Project: PyDatSet    Author: dnlcrl    | Project source | File source
def random_rotate(X, max_angle=10):
    N, C, H, W = X.shape
    out = np.zeros_like(X)
    high = np.abs(max_angle) + 1
    low = - np.abs(max_angle)
    for i, x in enumerate(X):
        t = x.transpose(1, 2, 0)
        t = rotate(t, np.random.randint(low, high), resize=False)
        t = t.transpose(2, 0, 1)

        out[i] = t
    return out
Project: tanda    Author: HazyResearch    | Project source | File source
def TF_rotate(x, angle=0.0, target=None):
    assert len(x.shape) == 3
    h, w, nc = x.shape

    # Rotate using edge fill mode
    return rotate(x, angle, mode='edge', order=1)
Project: dm-caffe-example    Author: aur-dream    | Project source | File source
def rotate_image(inImgFilename, outImgFilename, rotAngle):
    """
    Rotates a copy of inImgFilename by angle rotAngle (in degrees)

    Args:
        inImgFilename (str): path to the input mammogram in PNG format
        outImgFilename (str): path where the output image will be saved in PNG format
        rotAngle (float): angle to rotate image (in degrees)
    """

    raw = misc.imread(inImgFilename)
    aug = rotate(raw, rotAngle)
    imsave(outImgFilename, aug)
Project: DRPN    Author: w7829    | Project source | File source
def Data_iterate_minibatches(inputs, targets, batchsize, arg=False, genSetting=None, shuffle=False, warpMode=None):
    # assert len(inputs[0]) == len(targets[0])
    if shuffle:
        rinputs = copy.deepcopy(inputs)
        rtargets = copy.deepcopy(targets)
        indices = np.random.permutation(len(inputs[0]))
        for i in range(len(inputs[0])):
            for idx in range(len(inputs)):
                rinputs[idx][i] = inputs[idx][indices[i]]
            for idx in range(len(targets)):
                rtargets[idx][i] = targets[idx][indices[i]]
        inputs = rinputs
        targets = rtargets
        # inputs[:] = inputs[indices]
        # targets[:] = targets[indices]

    init = True
    global input_tmp
    global target_tmp
    global isOK
    for start_idx in range(0, len(inputs[0]) - batchsize*2 + 1, batchsize):
        # if (isOK == False) and (two == False):
        #     inputsbatch, targetsbatch = read_pics(inputs[start_idx:start_idx + batchsize], targets[start_idx:start_idx + batchsize], batchsize, crop, mirror, flip, rotate)
        # else:
        while isOK == False:
            if init:
                sl = range(start_idx,start_idx + batchsize)
                thread.start_new_thread(Data_readPics_thread, ([itemgetter(*sl)(i) for i in inputs], [itemgetter(*sl)(i) for i in targets], batchsize, genSetting, arg, warpMode))
                init = False
                # inputsbatch, targetsbatch = read_pics(inputs[start_idx:start_idx + batchsize], targets[start_idx:start_idx + batchsize], batchsize, crop, mirror, flip, rotate)
            time.sleep(0.01)
        inputsbatch, targetsbatch = input_tmp, target_tmp
        isOK = False
        sl = range(start_idx  + batchsize,start_idx + 2 * batchsize)
        thread.start_new_thread(Data_readPics_thread, ([itemgetter(*sl)(i) for i in inputs], [itemgetter(*sl)(i) for i in targets], batchsize, genSetting, arg, warpMode))
        # yield itertools.chain(inputsbatch, targetsbatch)
        yield inputsbatch + targetsbatch
    while isOK == False:
        time.sleep(0.01)
    inputsbatch, targetsbatch = input_tmp, target_tmp
    isOK = False
    # yield itertools.chain(inputsbatch, targetsbatch)
    yield inputsbatch + targetsbatch
    # len(inputs) - batchsize*2 + 1
Project: nature_methods_multicut_pipeline    Author: ilastik    | Project source | File source
def randomrotate(angle=90, randomstate=None, invert=False, padding=0, extrapadding=0):
    ignorechannels=True

    if isinstance(randomstate, int):
        rng = np.random.RandomState(randomstate)
    elif isinstance(randomstate, np.random.RandomState):
        rng = randomstate
    else:
        rng = np.random.RandomState(None)

    def _randomrot90(im):
        k = rng.randint(0, 4)
        if not invert:
            return np.rot90(im, k=k)
        else:
            return np.rot90(im, k=(4-k))

    def _randomrot45(im):
        assert im.shape[0] == im.shape[1], "45 degree rotations are tested only for square images."

        k = int(rng.choice(a=[0, 1, 3, 5, 7, 8], size=1))
        # Rotation angle (in degrees)
        rotangle = 45 * k

        if not invert:
            if k == 0 or k == 8:
                im = np.pad(im, (padding + extrapadding), mode='reflect') if padding > 0 else im
                return im
            else:
                im = _rotate(im, angle=rotangle, resize=True, mode='reflect')
                im = np.pad(im, padding, mode='reflect') if padding > 0 else im
                return im
        else:
            if k == 0 or k == 8:
                im = im[(padding + extrapadding):-(padding + extrapadding),
                     (padding + extrapadding):-(padding + extrapadding)] if padding > 0 else im
                return im
            else:
                im = im[padding:-padding, padding:-padding] if padding > 0 else im
                # For some reason, _rotate doesn't like it if its values are larger than +1 or smaller than -1.
                # Scale
                scale = np.max(np.abs(im))
                im *= (1./scale)
                # Process
                im = _rotate(im, angle=(360 - rotangle), resize=True, mode='reflect')
                # Rescale
                im *= scale
                # Edges of im are now twice as large as they were in the original image. Crop.
                cropstart = im.shape[0] // 4  # integer division so the result can be used as an index
                cropstop = cropstart * 3
                im = im[cropstart:cropstop, cropstart:cropstop]
                return im

    if angle == 45:
        return image2batchfunc(_randomrot45, ignorechannels=ignorechannels)
    elif angle == 90:
        return image2batchfunc(_randomrot90, ignorechannels=ignorechannels)
    else:
        raise NotImplementedError("Currently implemented rotation angles are 45 and 90 degrees.")

# Function for random flips of the image