Python skimage module: img_as_ubyte() example source code

The following 26 code examples, extracted from open-source Python projects, illustrate how to use skimage.img_as_ubyte().
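
A minimal usage sketch before the project snippets (the array and variable names below are invented for illustration): img_as_ubyte converts an image to uint8, rescaling float input in the range [0, 1] to [0, 255]; older scikit-image releases also emit a "precision loss" warning for such conversions, which is what the FCN_train tests below wrap with expected_warnings.

import numpy as np
from skimage import img_as_ubyte

float_img = np.linspace(0.0, 1.0, 16).reshape(4, 4)       # float64 image with values in [0, 1]
ubyte_img = img_as_ubyte(float_img)                        # rescaled to uint8 in [0, 255]
print(ubyte_img.dtype, ubyte_img.min(), ubyte_img.max())   # uint8 0 255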

Project: BlurDetection    Author: whdcumt
def get_masks(img, n_seg=250):
    logger.debug('SLIC segmentation initialised')
    segments = skimage.segmentation.slic(img, n_segments=n_seg, compactness=10, sigma=1)
    logger.debug('SLIC segmentation complete')
    logger.debug('contour extraction...')
    masks = [[numpy.zeros((img.shape[0], img.shape[1]), dtype=numpy.uint8), None]]
    for region in skimage.measure.regionprops(segments):
        masks.append([masks[0][0].copy(), region.bbox])
        x_min, y_min, x_max, y_max = region.bbox
        masks[-1][0][x_min:x_max, y_min:y_max] = skimage.img_as_ubyte(region.convex_image)
    logger.debug('contours extracted')
    return masks[1:]
Project: nn-segmentation-for-lar    Author: cvdlab
def predict_image(self, test_img):
        """
        predicts classes of input image
        :param test_img: filepath to image to predict on
        :param show: displays segmentation results
        :return: segmented result
        """
        img = np.array( rgb2gray( imread( test_img ).astype( 'float' ) ).reshape( 5, 216, 160 )[-2] ) / 256

        plist = []

        # create patches from an entire slice
        img_1 = adjust_sigmoid( img ).astype( float )
        edges_1 = adjust_sigmoid( img, inv=True ).astype( float )
        edges_2 = img_1
        edges_5_n = normalize( laplace( img_1 ) )
        edges_5_n = img_as_float( img_as_ubyte( edges_5_n ) )

        plist.append( extract_patches_2d( edges_1, (23, 23) ) )
        plist.append( extract_patches_2d( edges_2, (23, 23) ) )
        plist.append( extract_patches_2d( edges_5_n, (23, 23) ) )
        patches = np.array( list( zip( np.array( plist[0] ), np.array( plist[1] ), np.array( plist[2] ) ) ) )  # wrap zip in list() so this also works under Python 3

        # predict classes of each pixel based on model
        full_pred = self.model.predict_classes( patches )
        fp1 = full_pred.reshape( 194, 138 )
        return fp1
Project: FCN_train    Author: 315386775
def test_hed_rgb_roundtrip(self):
        img_rgb = img_as_ubyte(self.img_rgb)
        with expected_warnings(['precision loss']):
            new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb)))
        assert_equal(new, img_rgb)

    # RGB<->HED roundtrip with float image
Project: FCN_train    Author: 315386775
def test_hdx_rgb_roundtrip(self):
        from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
        img_rgb = self.img_rgb
        conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
                              rgb_from_hdx)
        assert_equal(img_as_ubyte(conv), img_rgb)

    # RGB<->HDX roundtrip with ubyte image
Project: vi_vae_gmm    Author: wangg12
def save_image_with_clusters(x, clusters, filename, shape=(10, 10), scale_each=False,
                           transpose=False):
    '''single image, each row is a cluster'''
    makedirs(filename)
    n = x.shape[0]

    images = np.zeros_like(x)
    curr_len = 0
    for i in range(10):
        images_i = x[clusters==i, :]
        n_i = images_i.shape[0]
        images[curr_len : curr_len+n_i, :] = images_i
        curr_len += n_i

    x = images

    if transpose:
        x = x.transpose(0, 2, 3, 1)
    if scale_each is True:
        for i in range(n):
            x[i] = rescale_intensity(x[i], out_range=(0, 1))

    n_channels = x.shape[3]
    x = img_as_ubyte(x)
    r, c = shape
    if r * c < n:
        print('Shape too small to contain all images')
    h, w = x.shape[1:3]
    ret = np.zeros((h * r, w * c, n_channels), dtype='uint8')
    for i in range(r):
        for j in range(c):
            if i * c + j < n:
                ret[i * h:(i + 1) * h, j * w:(j + 1) * w, :] = x[i * c + j]
    ret = ret.squeeze()
    io.imsave(filename, ret)
Project: logsolve    Author: twinone
def displ():
    rows = int((len(steps)-1)/COLS)+1
    fig, ax = plt.subplots(
        ncols=min(COLS,len(steps)),
        nrows=rows,
        sharex=True,
        sharey=True,
        squeeze=False,
        figsize=(12,7))


    for i, (tit, im) in enumerate(steps):
        r = int(i/COLS)
        c = i%COLS
        ax[r][c].imshow(im, cmap=plt.cm.gray)
        ax[r][c].set_title(tit)

        if (SAVE_OUTPUTS):
            imsave('out-'+tit+'.jpg', img_as_ubyte(im))

        ax[r][c].axis('off')
        ax[r][c].set_xticklabels([])
        ax[r][c].set_yticklabels([])
    plt.subplots_adjust(wspace=0, hspace=.1, left=0, bottom=0, right=1, top=.95)
    #plt.tight_layout()
    plt.show()





# Process an image
Project: logsolve    Author: twinone
def displ():
    rows = int((len(steps)-1)/COLS)+1
    fig, ax = plt.subplots(
        ncols=min(COLS,len(steps)),
        nrows=rows,
        sharex=True,
        sharey=True,
        squeeze=False,
        figsize=(12,7))


    for i, (tit, im) in enumerate(steps):
        r = int(i/COLS)
        c = i%COLS
        ax[r][c].imshow(im, cmap=plt.cm.gray)
        ax[r][c].set_title(tit)

        if (SAVE_OUTPUTS):
            imsave(tit+'-out.jpg', img_as_ubyte(im))



        ax[r][c].axis('off')
        ax[r][c].set_xticklabels([])
        ax[r][c].set_yticklabels([])
    plt.subplots_adjust(wspace=0, hspace=.1, left=0, bottom=0, right=1, top=.95)
    #plt.tight_layout()
    plt.show()

# return the XxY of the image
Project: logsolve    Author: twinone
def __init__(self, im, x, y):
        self.im = img_as_ubyte(im)
        self.imrgb = grey2rgb(self.im)
        self.w, self.h  = im.shape
        self.x, self.y = x, y
        self.cellw = self.w / x
        self.cellh = self.h / y
        self._cell_colors()
Project: zhusuan    Author: thu-ml
def save_image_collections(x, filename, shape=(10, 10), scale_each=False,
                           transpose=False):
    """
    :param shape: tuple
        The shape of final big images.
    :param x: numpy array
        Input image collections. (number_of_images, rows, columns, channels) or
        (number_of_images, channels, rows, columns)
    :param scale_each: bool
        If true, rescale intensity for each image.
    :param transpose: bool
        If true, transpose x to (number_of_images, rows, columns, channels),
        i.e., put channels behind.
    :return: `uint8` numpy array
        The output image.
    """
    makedirs(filename)
    n = x.shape[0]
    if transpose:
        x = x.transpose(0, 2, 3, 1)
    if scale_each is True:
        for i in range(n):
            x[i] = rescale_intensity(x[i], out_range=(0, 1))
    n_channels = x.shape[3]
    x = img_as_ubyte(x)
    r, c = shape
    if r * c < n:
        print('Shape too small to contain all images')
    h, w = x.shape[1:3]
    ret = np.zeros((h * r, w * c, n_channels), dtype='uint8')
    for i in range(r):
        for j in range(c):
            if i * c + j < n:
                ret[i * h:(i + 1) * h, j * w:(j + 1) * w, :] = x[i * c + j]
    ret = ret.squeeze()
    io.imsave(filename, ret)
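
As the docstring above describes, save_image_collections expects a 4-D collection of images; a hypothetical call might look like this (the array and output path are invented for illustration):

import numpy as np

fake_batch = np.random.rand(100, 28, 28, 3)   # 100 RGB images, 28x28, float values in [0, 1]
save_image_collections(fake_batch, '/tmp/collection.png', shape=(10, 10))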
Project: reseg    Author: fvisin
def save_image(outpath, img):
    import errno
    try:
        os.makedirs(os.path.dirname(outpath))
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise e
        pass
    imsave(outpath, img_as_ubyte(img))
Project: tefla    Author: litan
def save_images(images, dir):
    save_path = base_dir + dir + '_28'
    os.makedirs(save_path)
    prefix = dir
    for i, img_data in enumerate(images):
        img_data2 = skimage.img_as_ubyte(img_data)
        img = Image.fromarray(img_data2)
        img.save('%s/%s_%d.tiff' % (save_path, prefix, i))
Project: tefla    Author: litan
def save_images(images, dir):
    save_path = base_dir + dir + '_28'
    os.makedirs(save_path)
    prefix = dir
    for i, img_data in enumerate(images):
        img_data2 = skimage.img_as_ubyte(img_data)
        img = Image.fromarray(img_data2)
        img.save('%s/%s_%d.tiff' % (save_path, prefix, i))
Project: mrflow    Author: jswulff
def plot_figure_1(images, rigidity_refined, structure_refined, flow_estimated, flow_gt):
    """ Plot teaser image:
    - Triplet of frames
    - Segmentation
    - Structure
    - Flow
    """
    if not os.path.isdir('./teaser'):
        os.makedirs('teaser')

    I1 = img_as_ubyte(images[1])

    cm_bwr = plt.get_cmap('bwr')
    Irigidity = cm_bwr(rigidity_refined.astype('float32'))

    Istructure = structure2image(structure_refined, rigidity_refined)
    #Istructure_gray = structure2image(structure_refined, rigidity_refined)
    #Istructure_plasma = structure2image(structure_refined, rigidity_refined,cmap='plasma')
    #Istructure_inferno = structure2image(structure_refined, rigidity_refined,cmap='inferno')
    #Istructure_hot = structure2image(structure_refined, rigidity_refined,cmap='hot')
    #Istructure_magma =structure2image(structure_refined, rigidity_refined,cmap='magma') 
    #Istructure_viridis =structure2image(structure_refined, rigidity_refined,cmap='viridis') 
    #Istructure_jet =structure2image(structure_refined, rigidity_refined,cmap='jet') 
    #Istructure_rainbow =structure2image(structure_refined, rigidity_refined,cmap='rainbow') 

    Iflow_estimated = flow_viz.computeFlowImage(flow_estimated[0], flow_estimated[1])
    Iflow_gt = flow_viz.computeFlowImage(flow_gt[0],flow_gt[1])

    io.imsave('./teaser/01_images.png', I1)
    io.imsave('./teaser/02_rigidity.png', Irigidity)
    io.imsave('./teaser/03_structure.png', Istructure)
    #io.imsave('./teaser/03_structure_gray.png', Istructure_gray)
    #io.imsave('./teaser/03_structure_plasma.png', Istructure_plasma)
    #io.imsave('./teaser/03_structure_inferno.png', Istructure_inferno)
    #io.imsave('./teaser/03_structure_hot.png', Istructure_hot)
    #io.imsave('./teaser/03_structure_magma.png', Istructure_magma)
    #io.imsave('./teaser/03_structure_viridis.png', Istructure_viridis)
    #io.imsave('./teaser/03_structure_jet.png', Istructure_jet)
    #io.imsave('./teaser/03_structure_rainbow.png', Istructure_rainbow)
    io.imsave('./teaser/04_flowest.png', Iflow_estimated)
    io.imsave('./teaser/05_flowgt.png', Iflow_gt)
Project: mrflow    Author: jswulff
def plot_figure_3(image, rigidity_cnn, rigidity_motion, rigidity_structure, rigidity_refined):
    if not os.path.isdir('./rigidityestimation'):
        os.makedirs('./rigidityestimation')

    cm_bwr = plt.get_cmap('bwr')
    Irigidity_cnn = cm_bwr(rigidity_cnn.astype('float32'))
    Irigidity_motion = cm_bwr(rigidity_motion.astype('float32'))
    Irigidity_structure = cm_bwr(rigidity_structure.astype('float32'))
    Irigidity_refined = cm_bwr(rigidity_refined.astype('float32'))

    io.imsave('./rigidityestimation/01_image.png', img_as_ubyte(image))
    io.imsave('./rigidityestimation/02_rigidity_cnn.png', Irigidity_cnn)
    io.imsave('./rigidityestimation/03_rigidity_motion.png', Irigidity_motion)
    io.imsave('./rigidityestimation/04_rigidity_structure.png', Irigidity_structure)
    io.imsave('./rigidityestimation/05_rigidity_refined.png', Irigidity_refined)
Project: mrflow    Author: jswulff
def plot_figure_6(images, rigidity_refined, structure_refined, flow_estimated, flow_init, flow_gt, flow_gt_valid):
    if not os.path.isdir('./results_supmat/temp'):
        os.makedirs('results_supmat/temp')

    I = img_as_ubyte((images[0]+images[1]+images[2])/3.0)
    io.imsave('./results_supmat/temp/01_image.png',I)

    Iuv_gt = flow_viz.computeFlowImage(flow_gt[0], flow_gt[1])
    io.imsave('./results_supmat/temp/02_gt_flow.png', Iuv_gt)

    cm_bwr = plt.get_cmap('bwr')
    Irigidity = cm_bwr(rigidity_refined.astype('float32'))
    io.imsave('./results_supmat/temp/03_rigidity.png',Irigidity)

    Istructure = structure2image(structure_refined, rigidity_refined)
    io.imsave('./results_supmat/temp/04_structure.png',Istructure)

    Iuv_est = flow_viz.computeFlowImage(flow_estimated[0],flow_estimated[1])
    io.imsave('./results_supmat/temp/05_flow.png',Iuv_est)

    epe_est = np.sqrt((flow_estimated[0]-flow_gt[0])**2 + (flow_estimated[1]-flow_gt[1])**2)
    epe_init = np.sqrt((flow_init[0]-flow_gt[0])**2 + (flow_init[1]-flow_gt[1])**2)

    #import ipdb; ipdb.set_trace()

    epe_est[flow_gt_valid==0] = 0
    epe_init[flow_gt_valid==0] = 0

    epe_diff = epe_init - epe_est
    epe_green = np.clip(epe_diff, 0, 3)/3.0
    epe_red = np.clip(-epe_diff, 0, 3)/3.0

    Icomparison = np.zeros((rigidity_refined.shape[0],rigidity_refined.shape[1],3))

    Icomparison[:,:,0] = epe_red
    Icomparison[:,:,1] = epe_green
    Icomparison = img_as_ubyte(Icomparison)
    io.imsave('./results_supmat/temp/06_comparison.png',Icomparison)
Project: FaceAnalysis    Author: ElliotSalisbury
def warpFace(im, oldLandmarks, newLandmarks, justFace=False, output_shape=None):
    print("warping face")
    if not justFace:
        cornerPts = np.array([(0, 0), (im.shape[1], 0), (im.shape[1], im.shape[0]), (0, im.shape[0])])

        oldLandmarks = np.append(oldLandmarks, cornerPts, axis=0)
        newLandmarks = np.append(newLandmarks, cornerPts, axis=0)

    tform = PiecewiseAffineTransform()
    tform.estimate(newLandmarks,oldLandmarks)

    warped = warp(im, tform, output_shape=output_shape)
    warped = skimage.img_as_ubyte(warped)
    return warped
Project: ssta-captioning    Author: Yugnaynehc
def preprocess_frame(image, target_height=224, target_width=224):
    image = resize_frame(image, target_height, target_width)
    image = skimage.img_as_ubyte(image).astype(np.float32)
    # Subtract the ILSVRC (ImageNet) per-channel mean values, which are given in BGR order
    image -= np.array([103.939, 116.779, 123.68])
    # Swap the channel order between BGR and RGB to match what the downstream caffe model expects
    image = image[:, :, ::-1]
    return image
Project: RFCN    Author: zengxianyu
def tensor2image(image):
    """
    convert a mean-0 tensor to float numpy image
    :param image: 
    :return: image
    """
    image = image.clone()
    image[0] = image[0] + 122.67891434
    image[1] = image[1] + 116.66876762
    image[2] = image[2] + 104.00698793
    image = image.numpy() / 255.0
    image = image.transpose((1, 2, 0))
    image = img_as_ubyte(image)
    return image
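
A hypothetical round-trip sketch for tensor2image (it assumes PyTorch is available; the tensor contents are invented): build a C x H x W tensor that already had the same per-channel means subtracted, then convert it back to a uint8 H x W x C image.

import torch

chw = torch.rand(3, 64, 64) * 254.0   # fake network input, original values in [0, 254]
chw[0] -= 122.67891434                # subtract the same per-channel means the model adds back
chw[1] -= 116.66876762
chw[2] -= 104.00698793
rgb_uint8 = tensor2image(chw)         # back to an H x W x C uint8 image
print(rgb_uint8.shape, rgb_uint8.dtype)   # (64, 64, 3) uint8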


# def prior_map(img):
#     """
#     get RFCN prior map
#     :param img: numpy array (H*W*C, RGB), [0, 1], float
#     :return: pmap
#     """
#     # step 1 over segmentation into superpixels
#     sp = slic(img, n_segments=200, sigma=5)
#     sp_num = sp.max() + 1
#     sp = sp.astype(float)
#
#     # step 2 the mean lab color of the sps
#     mean_lab_color = np.zeros((sp_num, 3))
#     lab_img = color.rgb2lab(img)
#     for c in range(3):
#         for i in range(sp_num):
#             mean_lab_color[i, c] = lab_img[sp == i, c].mean()
#
#     # step 3, element uniqueness




#     # (implementation of the remaining steps is omitted in the original source)
#     return pimg
Project: tanda    Author: HazyResearch
def np_to_pil(x):
    """Converts from np image in skimage float format to PIL.Image"""
    x = np.squeeze(img_as_ubyte(x))  # img_as_ubyte already returns uint8
    return Image.fromarray(x)
Project: logsolve    Author: twinone
def thresh_local_mean(im, gridsize, **kwargs):
    # for each im, inv, we want to do local thresholding
    # we need a big block size in order to get a good mean value
    # to compare to, but if it gets too big it's just like a global
    # thresholding

    # process kwargs
    opts = { 'offset': 0.1, 'blocksize': 501, 'method': 'mean', 'debug': False }
    for k in kwargs: opts[k] = kwargs[k]

    x, y = gridsize

    thresh = threshold_local(im, opts['blocksize'], method=opts['method'])
    #b = threshold_local(im, 501, method='mean', offset=0.1)

    black, white = im > thresh-opts['offset'], im < thresh+opts['offset']

    cpwhite = CellProcessor(white, x, y)
    cpblack = CellProcessor(black, x, y)

    areas = grey2rgb(img_as_ubyte(im.copy()))
    colors = grey2rgb(img_as_ubyte(im.copy()))

    out = np.zeros((x, y))

    # grey out the images
    for yy in range(y):
        for xx in range(x):
            rr, cc = cpwhite.cell_ellipse(xx, yy)
            areas[rr, cc] = np.array([255,255,255])
            iswhite = cpwhite.get_color(xx, yy) < 30
            isblack = cpblack.get_color(xx, yy) < 30
            rr, cc = cpwhite.cell_rect(xx, yy)
            if isblack:
                colors[rr, cc] = np.array([0,0,0])
                out[yy, xx] = 0
            elif iswhite:
                colors[rr, cc] = np.array([255,255,255])
                out[yy, xx] = 1
            else:
                colors[rr, cc] = np.array([127,127,127])
                out[yy, xx] = -1

    if (opts['debug']):
        fig, axes = plt.subplots(ncols=4, nrows=1, figsize=(10, 3))
        ax = axes.ravel()
        plt.gray()

        ax[0].imshow(im)
        ax[0].set_title('Original Image')
        ax[1].imshow(white)
        ax[1].set_title('white')
        ax[2].imshow(black)
        ax[2].set_title('black')
        ax[3].imshow(colors)
        ax[3].set_title('Detected colors')
        for a in ax:
            a.axis('off')
        plt.show()
    return out.astype(int)
Project: TF-phrasecut-public    Author: chenxi116
def build_referit_batches(setname, T, input_H, input_W):
    # data directory
    im_dir = './data/referit/images/'
    mask_dir = './data/referit/mask/'
    query_file = './data/referit/referit_query_' + setname + '.json'
    vocab_file = './data/vocabulary_referit.txt'

    # saving directory
    data_folder = './referit/' + setname + '_batch/'
    data_prefix = 'referit_' + setname
    if not os.path.isdir(data_folder):
        os.makedirs(data_folder)

    # load annotations
    query_dict = json.load(open(query_file))
    im_list = query_dict.keys()
    vocab_dict = text_processing.load_vocab_dict_from_file(vocab_file)

    # collect training samples
    samples = []
    for n_im, name in enumerate(im_list):
        im_name = name.split('_', 1)[0] + '.jpg'
        mask_name = name + '.mat'
        for sent in query_dict[name]:
            samples.append((im_name, mask_name, sent))

    # save batches to disk
    num_batch = len(samples)
    for n_batch in range(num_batch):
        print('saving batch %d / %d' % (n_batch + 1, num_batch))
        im_name, mask_name, sent = samples[n_batch]
        im = skimage.io.imread(im_dir + im_name)
        mask = load_gt_mask(mask_dir + mask_name).astype(np.float32)

        if 'train' in setname:
            im = skimage.img_as_ubyte(im_processing.resize_and_pad(im, input_H, input_W))
            mask = im_processing.resize_and_pad(mask, input_H, input_W)
        if im.ndim == 2:
            im = np.tile(im[:, :, np.newaxis], (1, 1, 3))

        text = text_processing.preprocess_sentence(sent, vocab_dict, T)

        np.savez(file = data_folder + data_prefix + '_' + str(n_batch) + '.npz',
            text_batch = text,
            im_batch = im,
            mask_batch = (mask > 0),
            sent_batch = [sent])
Project: TF-phrasecut-public    Author: chenxi116
def build_coco_batches(dataset, setname, T, input_H, input_W):
    im_dir = './data/coco/images'
    im_type = 'train2014'
    vocab_file = './data/vocabulary_Gref.txt'

    data_folder = './' + dataset + '/' + setname + '_batch/'
    data_prefix = dataset + '_' + setname
    if not os.path.isdir(data_folder):
        os.makedirs(data_folder)

    if dataset == 'Gref':
        refer = REFER('./external/refer/data', dataset = 'refcocog', splitBy = 'google')
    elif dataset == 'unc':
        refer = REFER('./external/refer/data', dataset = 'refcoco', splitBy = 'unc')
    elif dataset == 'unc+':
        refer = REFER('./external/refer/data', dataset = 'refcoco+', splitBy = 'unc')
    else:
        raise ValueError('Unknown dataset %s' % dataset)
    refs = [refer.Refs[ref_id] for ref_id in refer.Refs if refer.Refs[ref_id]['split'] == setname]
    vocab_dict = text_processing.load_vocab_dict_from_file(vocab_file)

    n_batch = 0
    for ref in refs:
        im_name = 'COCO_' + im_type + '_' + str(ref['image_id']).zfill(12)
        im = skimage.io.imread('%s/%s/%s.jpg' % (im_dir, im_type, im_name))
        seg = refer.Anns[ref['ann_id']]['segmentation']
        rle = cocomask.frPyObjects(seg, im.shape[0], im.shape[1])
        mask = np.max(cocomask.decode(rle), axis = 2).astype(np.float32)

        if 'train' in setname:
            im = skimage.img_as_ubyte(im_processing.resize_and_pad(im, input_H, input_W))
            mask = im_processing.resize_and_pad(mask, input_H, input_W)
        if im.ndim == 2:
            im = np.tile(im[:, :, np.newaxis], (1, 1, 3))

        for sentence in ref['sentences']:
            print('saving batch %d' % (n_batch + 1))
            sent = sentence['sent']
            text = text_processing.preprocess_sentence(sent, vocab_dict, T)

            np.savez(file = data_folder + data_prefix + '_' + str(n_batch) + '.npz',
                text_batch = text,
                im_batch = im,
                mask_batch = (mask > 0),
                sent_batch = [sent])
            n_batch += 1
Project: deepdream-neural-style-transfer    Author: rdcolema
def main(style_img, gpu_id, content_img, verbose, model, init, num_iters, ratio, length, output):
    """
        Entry point.
    """

    # logging
    level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(format=LOG_FORMAT, datefmt="%H:%M:%S", level=level)
    logging.info("Starting style transfer.")

    # set GPU/CPU mode
    if gpu_id == -1:
        caffe.set_mode_cpu()
        logging.info("Running net on CPU.")
    else:
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
        logging.info("Running net on GPU {0}.".format(gpu_id))

    # load images
    img_style = caffe.io.load_image(style_img)
    img_content = caffe.io.load_image(content_img)
    logging.info("Successfully loaded images.")

    # artistic style class
    use_pbar = not verbose
    st = StyleTransfer(model.lower(), use_pbar=use_pbar)
    logging.info("Successfully loaded model {0}.".format(model))

    # perform style transfer
    start = timeit.default_timer()
    n_iters = st.transfer_style(img_style, img_content, length=length,
                                init=init, ratio=float(ratio),
                                n_iter=num_iters, verbose=verbose)
    end = timeit.default_timer()
    logging.info("Ran {0} iterations in {1:.0f}s.".format(n_iters, end - start))
    img_out = st.get_generated()


    # DONE!
    imsave(output, img_as_ubyte(img_out))
    logging.info("Output saved to {0}.".format(output))
Project: mrflow    Author: jswulff
def plot_figure_2(images,
        flow_init,
        rigidity_init,
        structure_init,
        occlusions,
        rigidity_refined,
        structure_refined,
        flow_estimated):
    if not os.path.isdir('./diagram'):
        os.makedirs('diagram')

    io.imsave('./diagram/inputframe0.png', img_as_ubyte(images[0]))
    io.imsave('./diagram/inputframe1.png', img_as_ubyte(images[1]))
    io.imsave('./diagram/inputframe2.png', img_as_ubyte(images[2]))

    Iuvinit_bwd = flow_viz.computeFlowImage(flow_init[0][0],flow_init[0][1])
    Iuvinit_fwd = flow_viz.computeFlowImage(flow_init[1][0],flow_init[1][1])
    io.imsave('./diagram/inputflow0.png', Iuvinit_bwd)
    io.imsave('./diagram/inputflow1.png', Iuvinit_fwd)

    cm_bwr = plt.get_cmap('bwr')
    Irigidity_init = cm_bwr(rigidity_init.astype('float32'))
    Irigidity_refined = cm_bwr(rigidity_refined.astype('float32'))
    io.imsave('./diagram/rigidity_init.png', Irigidity_init)
    io.imsave('./diagram/rigidity_refined.png', Irigidity_refined)

    Istructure_init = structure2image(structure_init, rigidity_refined)
    Istructure_refined = structure2image(structure_refined, rigidity_refined)
    io.imsave('./diagram/structure_init.png', Istructure_init)
    io.imsave('./diagram/structure_refined.png', Istructure_refined)

    occ_bwd, occ_fwd = occlusions
    Iocclusions = np.ones_like(Istructure_init) * 255
    Iocclusions[:,:,0][occ_bwd>0] = 255
    Iocclusions[:,:,1][occ_bwd>0] = 0
    Iocclusions[:,:,2][occ_bwd>0] = 0
    Iocclusions[:,:,0][occ_fwd>0] = 0
    Iocclusions[:,:,1][occ_fwd>0] = 0
    Iocclusions[:,:,2][occ_fwd>0] = 255
    io.imsave('./diagram/occlusions.png', Iocclusions)

    Iuvest = flow_viz.computeFlowImage(flow_estimated[0],flow_estimated[1])
    io.imsave('./diagram/outputflow.png', Iuvest)
Project: mrflow    Author: jswulff
def plot_figure_5(images, rigidity_refined, structure_refined, flow_estimated, flow_init, flow_gt, flow_gt_valid):
    if not os.path.isdir('./results'):
        os.makedirs('results')

    I = img_as_ubyte((images[0]+images[1]+images[2])/3.0)
    io.imsave('./results/01_image.png',I)

    cm_bwr = plt.get_cmap('bwr')
    Irigidity = cm_bwr(rigidity_refined.astype('float32'))
    io.imsave('./results/02_rigidity.png',Irigidity)

    Istructure = structure2image(structure_refined, rigidity_refined)
    io.imsave('./results/03_structure.png',Istructure)

    Iuv_est = flow_viz.computeFlowImage(flow_estimated[0],flow_estimated[1])
    io.imsave('./results/04_flow.png',Iuv_est)

    epe_est = np.sqrt((flow_estimated[0]-flow_gt[0])**2 + (flow_estimated[1]-flow_gt[1])**2)
    epe_init = np.sqrt((flow_init[0]-flow_gt[0])**2 + (flow_init[1]-flow_gt[1])**2)

    #import ipdb; ipdb.set_trace()

    epe_est[flow_gt_valid==0] = 0
    epe_init[flow_gt_valid==0] = 0

    epe_diff = epe_init - epe_est
    epe_green = np.clip(epe_diff, 0, 3)/3.0
    epe_red = np.clip(-epe_diff, 0, 3)/3.0

    Icomparison = np.zeros((rigidity_refined.shape[0],rigidity_refined.shape[1],3))

    print(Icomparison.shape)
    print(epe_green.shape)
    print(epe_red.shape)

    Icomparison[:,:,0] = epe_red
    Icomparison[:,:,1] = epe_green
    Icomparison = img_as_ubyte(Icomparison)
    io.imsave('./results/05_comparison.png',Icomparison)



#
# Supmat figures
#
Project: file-metadata    Author: pywikibot-catfiles
def fetch(self, key=''):
        if key == 'filename_raster':
            # A raster filename holds the file in a raster graphic format
            return self.fetch('filename')
        elif key == 'filename_zxing':
            return pathlib2.Path(self.fetch('filename_raster')).as_uri()
        elif key == 'ndarray':
            Image.MAX_IMAGE_PIXELS = self.config('max_decompressed_size')
            try:
                image_array = skimage.io.imread(self.fetch('filename_raster'))
                if image_array.shape == (2,):
                    # Assume this is related to
                    # https://github.com/scikit-image/scikit-image/issues/2154
                    return image_array[0]
                return image_array
            except Image.DecompressionBombWarning:
                logging.warn('The file "{0}" contains a lot of pixels and '
                             'can take a lot of memory when decompressed. '
                             'To allow larger images, modify the '
                             '"max_decompressed_size" config.'
                             .format(self.fetch('filename')))
                # Use empty array as the file cannot be read.
                return numpy.ndarray(0)
        elif key == 'ndarray_grey':
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                return skimage.img_as_ubyte(
                    skimage.color.rgb2grey(self.fetch('ndarray')))
        elif key == 'ndarray_hsv':
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                return skimage.img_as_ubyte(
                    skimage.color.rgb2hsv(self.fetch('ndarray_noalpha')))
        elif key == 'ndarray_noalpha':
            if self.is_type('alpha'):
                return self.alpha_blend(self.fetch('ndarray'))
            return self.fetch('ndarray')
        elif key == 'pillow':
            pillow_img = Image.open(self.fetch('filename_raster'))
            self.closables.append(pillow_img)
            return pillow_img
        return super(ImageFile, self).fetch(key)