Python skimage.io module: imshow() example source code

We have extracted the following 14 code examples from open-source Python projects to show how to use skimage.io.imshow().
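
In its simplest form, io.imshow() renders an image array through the active matplotlib backend, and io.show() blocks until the display window is closed. A minimal sketch (the file name is hypothetical), including the imports the snippets below generally assume:

import numpy as np
import matplotlib.pyplot as plt
from skimage import io, transform

img = io.imread('example.png')  # hypothetical file
io.imshow(img)                  # display the array
io.show()                       # block until the window is closed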

Project: stegasawus    Author: rokkuran    | Project source | File source
def plot_wavelet_decomposition(image, level=3):
    """
    Plot of 2D wavelet decompositions for given number of levels.

    image needs to be either a colour channel or greyscale image:
        rgb: self.I[:, :, n], where n = {0, 1, 2}
        greyscale: use rgb_to_grey(self.I)

    """
    coeffs = pywt.wavedec2(image, wavelet='haar', level=level)
    # build the standard wavelet mosaic: approximation in the top-left,
    # detail coefficients tiled around it, finest level outermost
    for i, (cH, cV, cD) in enumerate(coeffs[1:]):
        if i == 0:
            cAcH = np.concatenate((coeffs[0], cH), axis=1)
            cVcD = np.concatenate((cV, cD), axis=1)
            plot_image = np.concatenate((cAcH, cVcD), axis=0)
        else:
            plot_image = np.concatenate((plot_image, cH), axis=1)
            cVcD = np.concatenate((cV, cD), axis=1)
            plot_image = np.concatenate((plot_image, cVcD), axis=0)

    plt.grid(False)
    io.imshow(np.abs(plot_image), cmap='gray_r')
    plt.show()
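A hypothetical call of the function above, assuming PyWavelets (pywt) is installed and using skimage's rgb2gray in place of the project's rgb_to_grey helper; note that the mosaic concatenation assumes image dimensions that halve cleanly at each decomposition level:

import pywt  # PyWavelets, required by plot_wavelet_decomposition
from skimage import io, color

grey = color.rgb2gray(io.imread('cover.png'))  # hypothetical file
plot_wavelet_decomposition(grey, level=3)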
Project: kaggle-yelp-restaurant-photo-classification    Author: u1234x1234    | Project source | File source
def PreprocessImage(path, show_img=True):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # crop the central square from the image
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 299, 299
    resized_img = transform.resize(crop_img, (299, 299))
    if show_img:
        io.imshow(resized_img)
    # scale the float image from [0, 1] back to [0, 256)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (299, 299, 3) to (3, 299, 299)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # shift and scale to approximately [-1, 1]
    normed_img = sample - 128.
    normed_img /= 128.

    return np.reshape(normed_img, (1, 3, 299, 299))
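The result is a (1, 3, 299, 299) float array scaled to roughly [-1, 1], ready for an Inception-style network. A hypothetical call:

batch = PreprocessImage('photo.jpg', show_img=True)  # hypothetical path
print(batch.shape)  # (1, 3, 299, 299)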
Project: kaggle-yelp-restaurant-photo-classification    Author: u1234x1234    | Project source | File source
def PreprocessImage(path, show_img=True):
    # load image
    img = io.imread(path)
#    print("Original Image Shape: ", img.shape)
    # crop the central square from the image
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 299, 299
    resized_img = transform.resize(crop_img, (299, 299))
    if show_img:
        io.imshow(resized_img)
    # scale the float image from [0, 1] back to [0, 256)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (299, 299, 3) to (3, 299, 299)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # shift and scale to approximately [-1, 1]
    normed_img = sample - 128.
    normed_img /= 128.

    return np.reshape(normed_img, (1, 3, 299, 299))
Project: mxnet_tk1    Author: starimpact    | Project source | File source
def PreprocessImage(path, show_img=False):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # crop the central square from the image
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    if show_img:
        io.imshow(resized_img)
    # scale the float image from [0, 1] back to [0, 255]
    sample = np.asarray(resized_img) * 255
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)

    # subtract the dataset mean image (mean_img is defined elsewhere in the project)
    normed_img = sample - mean_img
    # add a batch axis in place: (3, 224, 224) -> (1, 3, 224, 224)
    normed_img.resize(1, 3, 224, 224)
    return normed_img

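mean_img is not defined in this snippet; it is the dataset mean image loaded elsewhere in the project. A hypothetical stand-in with the expected (3, 224, 224) layout:

import numpy as np

# hypothetical placeholder for the project's mean image
mean_img = np.load('mean_224.npy')  # or np.zeros((3, 224, 224)) to skip mean subtraction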
Project: Yelp_Restaurant_Photo_Classification    Author: prith189    | Project source | File source
def PreprocessImage(path, show_img=False, invert_img=False):
    img = io.imread(path)
    if(invert_img):
        img = np.fliplr(img)
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 299, 299
    resized_img = transform.resize(crop_img, (299, 299))
    if show_img:
        io.imshow(resized_img)
    # scale the float image from [0, 1] back to [0, 256)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (299, 299, 3) to (3, 299, 299)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # shift and scale to approximately [-1, 1]
    normed_img = sample - 128.
    normed_img /= 128.
    return np.reshape(normed_img, (1, 3, 299, 299))
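Compared with the variants above, this one adds invert_img, which mirrors the image left-right before cropping, e.g. for flip-based test-time augmentation. A hypothetical call:

batch_flipped = PreprocessImage('photo.jpg', invert_img=True)  # hypothetical path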
Project: c3d-pytorch    Author: DavideA    | Project source | File source
def get_sport_clip(clip_name, verbose=True):
    """
    Loads a clip to be fed to C3D for classification.
    TODO: should I remove mean here?

    Parameters
    ----------
    clip_name: str
        the name of the clip (subfolder in 'data').
    verbose: bool
        if True, shows the unrolled clip (default is True).

    Returns
    -------
    Tensor
        a pytorch batch (n, ch, fr, h, w).
    """

    clip = sorted(glob(join('data', clip_name, '*.png')))
    clip = np.array([resize(io.imread(frame), output_shape=(112, 200), preserve_range=True) for frame in clip])
    clip = clip[:, :, 44:44+112, :]  # crop centrally

    if verbose:
        # unroll the clip into one wide image (assumes exactly 16 frames)
        clip_img = np.reshape(clip.transpose(1, 0, 2, 3), (112, 16 * 112, 3))
        io.imshow(clip_img.astype(np.uint8))
        io.show()

    clip = clip.transpose(3, 0, 1, 2)  # ch, fr, h, w
    clip = np.expand_dims(clip, axis=0)  # batch axis
    clip = np.float32(clip)

    return torch.from_numpy(clip)
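A hypothetical call, assuming 16 PNG frames live under data/roger/ (the clip name is made up):

clip = get_sport_clip('roger', verbose=False)  # hypothetical subfolder of data/
print(clip.shape)  # torch.Size([1, 3, 16, 112, 112]) for a 16-frame clip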
Project: grad-cam.tensorflow    Author: Ankush96    | Project source | File source
def main(_):
    x, img = load_image(FLAGS.input)

    sess = tf.Session()

    print("\nLoading Vgg")
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess)

    print("\nFeedforwarding")
    prob = sess.run(vgg.probs, feed_dict={vgg.imgs: x})[0]
    preds = (np.argsort(prob)[::-1])[0:5]
    print('\nTop 5 classes are')
    for p in preds:
        print(class_names[p], prob[p])

    # Target class
    predicted_class = preds[0]
    # Target layer for visualization
    layer_name = FLAGS.layer_name
    # Number of output classes of model being used
    nb_classes = 1000

    cam3 = grad_cam(x, vgg, sess, predicted_class, layer_name, nb_classes)

    img = img.astype(float)
    img /= img.max()

    # superimpose the Grad-CAM heatmap on the image and renormalise to [0, 1]
    new_img = img + 3 * cam3
    new_img /= new_img.max()

    # Display and save
    io.imshow(new_img)
    plt.show()
    io.imsave(FLAGS.output, new_img)
Project: stegasawus    Author: rokkuran    | Project source | File source
def plot_images(self):
        """
        Plot cover and steganographic RGB images side by side.
        """
        io.imshow(np.concatenate((self.I, self.S), axis=1))
        plt.title('Left: original cover image. Right: steganographic image.')
        plt.grid(False)
        plt.show()
Project: stegasawus    Author: rokkuran    | Project source | File source
def plot_rgb_components(self):
        """
        Plot RGB colour channels for both cover and steganographic images.
        """
        f, axarr = plt.subplots(nrows=2, ncols=3)
        for i, image_type in enumerate(['Cover', 'Stego']):
            for j, colour in enumerate(['Red', 'Green', 'Blue']):
                axarr[i, j].imshow(self.I[:, :, j], cmap='{}s'.format(colour))
                axarr[i, j].set_title('{} {}'.format(image_type, colour))
                axarr[i, j].set_xticklabels([])
                axarr[i, j].set_yticklabels([])
        plt.show()
Project: stegasawus    Author: rokkuran    | Project source | File source
def plot_rgb_difference(self):
        """
        Plots difference between cover and steganographic images for each RGB
        colour channel.
        """
        f, axarr = plt.subplots(1, 3, figsize=(12, 4))
        for j, colour in enumerate(['Red', 'Green', 'Blue']):
            diff = self.I[:, :, j] - self.S[:, :, j]
            axarr[j].imshow(diff, cmap='{}s_r'.format(colour))
            axarr[j].set_title('{}'.format(colour))
            axarr[j].set_xticklabels([])
            axarr[j].set_yticklabels([])
        plt.show()
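Note that if self.I and self.S are uint8 arrays, the subtraction above (and in plot_difference below) wraps around wherever the stego value exceeds the cover value. A sketch of a signed difference that avoids the wrap-around:

import numpy as np

def signed_channel_diff(cover, stego, j):
    # cast to a signed type before subtracting to avoid uint8 wrap-around
    return cover[:, :, j].astype(np.int16) - stego[:, :, j].astype(np.int16)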
Project: stegasawus    Author: rokkuran    | Project source | File source
def plot_difference(self):
        """
        Plot difference between cover and steganographic image.
        """
        io.imshow(self.I - self.S)
        plt.grid(False)
        plt.show()
Project: r-cnn    Author: ericxian1997    | Project source | File source
def main():

    # loading image
    from skimage import io, transform
    img = io.imread('example/dog (1).JPEG')
    # img = skimage.data.astronaut()
    io.imshow(img)
    endpoint = np.arange(4)        # placeholder first row for the (x, y, w, h) stack
    cut_photo = np.arange(154587)  # placeholder first row; 154587 = 227 * 227 * 3

    # perform selective search
    img_lbl, regions = selectivesearch.selective_search(
        img, scale=500, sigma=0.9, min_size=10)

    candidates = set()
    for r in regions:
        repeated = False
        # excluding same rectangle (appearing with different segments)
        if r['rect'] in candidates:
            continue
        # excluding regions smaller than 2000 pixels
        if r['size'] < 2000:
            continue
        # excluding distorted rects (aspect ratio above 1.2)
        x, y, w, h = r['rect']
        a1, a2, a3, a4 = x, y, w, h
        if w / h > 1.2 or h / w > 1.2:
            continue
        # remove candidates that heavily overlap an already accepted region
        for x, y, w, h in candidates:
            if overlap(a1, a2, a3, a4, x, y, w, h) > 0.9:
                repeated = True
                break
        if not repeated:
            candidates.add(r['rect'])

    # save each candidate crop and collect the resized, flattened versions
    i = 1
    for x, y, w, h in candidates:
        print(x, y, w, h)
        cut_area = img[y:y+h, x:x+w, :]
        io.imsave('C:\\Users\\eric\\selectivesearch\\segements\\' + str(i) + '.jpg', cut_area)
        i += 1
        out = transform.resize(cut_area, (227, 227))
        temp1 = np.array([x, y, w, h])
        temp2 = np.array(out, dtype=np.float32).reshape(1, 154587)
        endpoint = np.vstack((endpoint, temp1))
        cut_photo = np.vstack((cut_photo, temp2))

    # save the np.array
    np.save("cut_photo.npy", cut_photo)
    np.save("endpoint_4.npy", endpoint)
Project: nn-segmentation-for-lar    Author: cvdlab    | Project source | File source
def predict_image(self, filepath_image, show=False):
        '''
        predicts classes of input image
        INPUT   (1) str 'filepath_image': filepath to image to predict on
                (2) bool 'show': True to show the results of prediction, False to return prediction
        OUTPUT  (1) if show == False: array of predicted pixel classes for the center 208 x 208 pixels
                (2) if show == True: displays segmentation results
        '''
        print('Starting prediction...')
        if self.cascade_model:
            images = io.imread(filepath_image).astype('float').reshape(5, 216, 160)
            p33list = []
            p65list = []
            # create patches from an entire slice
            for image in images[:-1]:
                if np.max(image) != 0:
                    image /= np.max(image)
                patch65 = extract_patches_2d(image, (65, 65))
                p65list.append(patch65)
                p33list.append(self.center_n(33, patch65))
                print(len(p33list))
            patches33 = np.array(list(zip(p33list[0], p33list[1], p33list[2], p33list[3])))
            patches65 = np.array(list(zip(p65list[0], p65list[1], p65list[2], p65list[3])))
            # predict classes of each pixel based on model
            prediction = self.model.predict([patches65, patches33])
            print('Predicted')
            prediction = prediction.reshape(208, 208)
            if show:
                io.imshow(prediction)
                plt.show()
            else:
                return prediction
        else:
            images = io.imread(filepath_image).astype('float').reshape(5, 216, 160)
            p33list = []
            # create patches from an entire slice
            for image in images[:-1]:
                if np.max(image) != 0:
                    image /= np.max(image)
                patch33 = extract_patches_2d(image, (33, 33))
                p33list.append(patch33)
            patches33 = np.array(list(zip(p33list[0], p33list[1], p33list[2], p33list[3])))
            # predict classes of each pixel based on model
            prediction = self.cnn1.predict(patches33)
            print('Predicted')
            prediction = prediction.reshape(5, 184, 128)
            predicted_classes = np.argmax(prediction, axis=0)
            if show:
                print("Let's show")
                for i in range(5):
                    io.imshow(prediction[i])
                    plt.show()
                    print('Showed')
                return prediction
            else:
                return predicted_classes
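A hypothetical call, assuming an instance of the segmentation class and an input image that reshapes to (5, 216, 160):

prediction = brain_cnn.predict_image('slice_042.png', show=False)  # hypothetical instance and path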
Project: nn-segmentation-for-lar    Author: cvdlab    | Project source | File source
def save_segmented_image(self, filepath_image, modality='t1c', show=False):
        '''
        Creates an image of the original brain with the segmentation overlay and saves it in ./predictions
        INPUT   (1) str 'filepath_image': filepath to test image for segmentation, including file extension
                (2) str 'modality': imaging modality to use as background; defaults to t1c. options: (flair, t1, t1c, t2)
                (3) bool 'show': if True, shows the output image; defaults to False.
        OUTPUT  (1) if show is True, shows image of segmentation results
                (2) if show is False, returns segmented image.
        '''
        modes = {'flair': 0, 't1': 1, 't1c': 2, 't2': 3}

        segmentation = self.predict_image(filepath_image, show=False)
        print('segmentation = ' + str(segmentation))
        img_mask = np.pad(segmentation, (16, 16), mode='edge')
        ones = np.argwhere(img_mask == 1)
        twos = np.argwhere(img_mask == 2)
        threes = np.argwhere(img_mask == 3)
        fours = np.argwhere(img_mask == 4)

        test_im = io.imread(filepath_image)
        test_back = test_im.reshape(5, 216, 160)[modes[modality]]
        # overlay = mark_boundaries(test_back, img_mask)
        gray_img = img_as_float(test_back)

        # adjust gamma of image
        image = adjust_gamma(color.gray2rgb(gray_img), 0.65)
        sliced_image = image.copy()
        red_multiplier = [1, 0.2, 0.2]
        yellow_multiplier = [1, 1, 0.25]
        green_multiplier = [0.35, 0.75, 0.25]
        blue_multiplier = [0, 0.25, 0.9]

        print(len(ones))
        print(len(twos))
        print(len(threes))
        print(len(fours))

        # change colors of segmented classes
        for i in range(len(ones)):
            sliced_image[ones[i][0]][ones[i][1]] = red_multiplier
        for i in range(len(twos)):
            sliced_image[twos[i][0]][twos[i][1]] = green_multiplier
        for i in range(len(threes)):
            sliced_image[threes[i][0]][threes[i][1]] = blue_multiplier
        for i in range(len(fours)):
            sliced_image[fours[i][0]][fours[i][1]] = yellow_multiplier
        # if show is True, display the prediction
        if show:
            print('Showing...')
            io.imshow(sliced_image)
            plt.show()
        # save the prediction, creating ./predictions/ if it does not exist
        print('Saving...')
        try:
            mkdir_p('./predictions/')
        except OSError:
            pass  # directory may already exist
        io.imsave('./predictions/' + os.path.basename(filepath_image) + '.png', sliced_image)
        print('prediction saved.')
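mkdir_p is a project helper not shown here; a minimal stand-in mirroring "mkdir -p":

import os

def mkdir_p(path):
    # hypothetical sketch: create the directory, ignoring it if it already exists
    os.makedirs(path, exist_ok=True)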