Python scipy.ndimage module: zoom() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use scipy.ndimage.zoom().
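
For reference, a minimal standalone sketch of the basic call (illustrative only; the arrays here are made up):

import numpy as np
from scipy import ndimage

image = np.random.rand(64, 64).astype(np.float32)

# Uniform 2x upscale; order=3 (cubic spline) is the default.
upscaled = ndimage.zoom(image, 2.0)
assert upscaled.shape == (128, 128)

# Per-axis factors: resize height/width of an RGB image, keep the channel axis.
rgb = np.random.rand(48, 64, 3)
resized = ndimage.zoom(rgb, (2.0, 0.5, 1.0), order=1)
assert resized.shape == (96, 32, 3)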

Project: tf-image-interpreter    Author: ThoughtWorksInc    | Project source | File source
def _generate_batch(self, meta):
    image = ndimage.imread(meta.image_path)
    height, width, _ = meta.shape
    if height > width:
      scale = self._image_scale_size / width
    else:
      scale = self._image_scale_size / height

    # TODO: the dimensions in caffe are (batch elem, channel, height, width)
    resized_image = ndimage.zoom(image, (scale, scale, 1))
    bboxes = np.empty((len(meta.objects), 5))
    for i, obj in enumerate(meta.objects):
      bboxes[i][:4] = obj['bbox']
      bboxes[i][4] = obj['class_index']

    return np.expand_dims(resized_image, 0), scale, bboxes
Project: atoolbox    Author: liweitianux    | Project source | File source
def cmd_zoom(args):
    """
    Sub-command: "zoom", zoom the image to a new size with FoV coverage
    preserved.
    """
    fimage = FITSImage(args.infile)
    print("Image size: %dx%d" % (fimage.Nx, fimage.Ny))
    pixelsize = fimage.pixelsize
    if pixelsize is None:
        raise RuntimeError("--pixelsize required")
    else:
        print("Pixel size: %.1f [arcsec]" % pixelsize)
        print("Field of view: (%.2f, %.2f) [deg]" % fimage.fov)

    print("Zooming image ...")
    print("Interpolation order: %d" % args.order)
    print("Zoomed image size: %dx%d" % (args.size, args.size))
    fimage.zoom(newsize=args.size, order=args.order)
    print("Zoomed image pixel size: %.1f [arcsec]" % fimage.pixelsize)
    fimage.write(args.outfile, clobber=args.clobber)
    print("Saved zoomed FITS image to: %s" % args.outfile)
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko    | Project source | File source
def predict_multi_scale(full_image, net, scales, sliding_evaluation, flip_evaluation):
    """Predict an image by looking at it with different scales."""
    classes = net.model.outputs[0].shape[3]
    full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    h_ori, w_ori = full_image.shape[:2]
    for scale in scales:
        print("Predicting image scaled by %f" % scale)
        scaled_img = misc.imresize(full_image, size=scale, interp="bilinear")
        if sliding_evaluation:
            scaled_probs = predict_sliding(scaled_img, net, flip_evaluation)
        else:
            scaled_probs = net.predict(scaled_img, flip_evaluation)
        # scale probs up to full size
        h, w = scaled_probs.shape[:2]
        probs = ndimage.zoom(scaled_probs, (1.*h_ori/h, 1.*w_ori/w, 1.),
                             order=1, prefilter=False)
        # visualize_prediction(probs)
        # integrate probs over all scales
        full_probs += probs
    full_probs /= len(scales)
    return full_probs
Project: inferno    Author: inferno-pytorch    | Project source | File source
def __init__(self, output_image_shape, interpolation_order=3, zoom_kwargs=None, **super_kwargs):
        """
        Parameters
        ----------
        output_image_shape : list or tuple or int
            Target size of the output image. Aspect ratio may not be preserved.
        interpolation_order : int
            Interpolation order for the spline interpolation.
        zoom_kwargs : dict
            Keyword arguments for `scipy.ndimage.zoom`.
        super_kwargs : dict
            Keyword arguments for the superclass.
        """
        super(Scale, self).__init__(**super_kwargs)
        output_image_shape = (output_image_shape, output_image_shape) \
            if isinstance(output_image_shape, int) else tuple(output_image_shape)
        assert_(len(output_image_shape) == 2,
                "`output_image_shape` must be an integer or a tuple of length 2.",
                ValueError)
        self.output_image_shape = output_image_shape
        self.interpolation_order = interpolation_order
        self.zoom_kwargs = {} if zoom_kwargs is None else dict(zoom_kwargs)
Project: inferno    Author: inferno-pytorch    | Project source | File source
def image_function(self, image):
        source_height, source_width = image.shape
        target_height, target_width = self.output_image_shape
        # We're on Python 3 - take a deep breath and relax.
        zoom_height, zoom_width = (target_height / source_height), (target_width / source_width)
        with catch_warnings():
            # Ignore warning that scipy should be > 0.13 (it's 0.19 these days)
            simplefilter('ignore')
            rescaled_image = zoom(image, (zoom_height, zoom_width),
                                  order=self.interpolation_order, **self.zoom_kwargs)
        # This should never happen
        assert_(rescaled_image.shape == (target_height, target_width),
                "Shape mismatch that shouldn't have happened if you were on scipy > 0.13.0. "
                "Are you on scipy > 0.13.0?",
                ShapeError)
        return rescaled_image
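
The warning-suppression pattern above can be reproduced standalone; a minimal sketch (array sizes are made up):

import numpy as np
from warnings import catch_warnings, simplefilter
from scipy.ndimage import zoom

img = np.random.rand(50, 75)
with catch_warnings():
    simplefilter('ignore')  # silence any zoom-related UserWarning
    out = zoom(img, (100 / 50, 150 / 75), order=3)
print(out.shape)  # (100, 150)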
Project: trappist1    Author: rodluger    | Project source | File source
def update(self):
    '''Redraw the aperture contour overlay and refresh the plots.'''

    # Update plot
    contour = np.zeros((self.ny,self.nx))
    contour[np.where(self.aperture)] = 1
    contour = np.lib.pad(contour, 1, self.PadWithZeros)
    highres = zoom(contour, 100, order = 0, mode='nearest') 
    extent = np.array([-1, self.nx, -1, self.ny])
    if self.contour is not None:
      for coll in self.contour.collections: 
        self.ax.collections.remove(coll) 
    self.contour = self.ax.contour(highres, levels=[0.5], extent=extent, origin='lower', colors='r', linewidths=2) 
    self.update_bkg()
    self.update_lc()
    self.update_lcbkg()
    self.fig.canvas.draw()
Project: monodepth360    Author: srijanparmeshwar    | Project source | File source
def read_file(filename, shape = None):
    if filename.lower().endswith(".exr"):
        depth_map = read_depth(filename)
        return depth_map, depth_map < 1000.0

    elif filename.lower().endswith(".png"):
        depth_map = mpimg.imread(filename)

        if shape is not None:
            ih, iw = depth_map.shape
            h, w = shape

            if ih > 1024:
                depth_map = depth_map[::2, ::2]

            depth_map = zoom(depth_map, [float(h) / float(ih), w / float(iw)], order = 1)

        mask = depth_map < 0.99
        depth_map = depth_map * 65536 / 1000
        return depth_map, mask

    elif filename.lower().endswith(".npy"):
        return np.load(filename), None
Project: FingerNet    Author: felixTY    | Project source | File source
def draw_ori_on_img(img, ori, mask, fname, coh=None, stride=16):
    ori = np.squeeze(ori)
    mask = np.squeeze(np.round(mask))
    img = np.squeeze(img)
    ori = ndimage.zoom(ori, np.array(img.shape)/np.array(ori.shape, dtype=float), order=0)
    if mask.shape != img.shape:
        mask = ndimage.zoom(mask, np.array(img.shape)/np.array(mask.shape, dtype=float), order=0)
    if coh is None:
        coh = np.ones_like(img)
    fig = plt.figure()
    plt.imshow(img,cmap='gray')
    plt.hold(True)  
    for i in xrange(stride,img.shape[0],stride):
        for j in xrange(stride,img.shape[1],stride):
            if mask[i, j] == 0:
                continue
            x, y, o, r = j, i, ori[i,j], coh[i,j]*(stride*0.9)
            plt.plot([x, x+r*np.cos(o)], [y, y+r*np.sin(o)], 'r-')
    plt.axis([0,img.shape[1],img.shape[0],0])
    plt.axis('off')
    plt.savefig(fname, bbox_inches='tight', pad_inches = 0)
    plt.close(fig)            
    return
Project: Panacea    Author: grzeimann    | Project source | File source
def subsample(a): # this is more a generic function than a method ...
    """
    Returns a 2x2-subsampled version of array a (no interpolation, just cutting pixels in 4).
    The version below is directly from the scipy cookbook on rebinning:
    U{http://www.scipy.org/Cookbook/Rebinning}
    There is ndimage.zoom(cutout.array, 2, order=0, prefilter=False), but it makes funny borders.

    """
    """
    # Ouuwww this is slow ...
    outarray = np.zeros((a.shape[0]*2, a.shape[1]*2), dtype=np.float64)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]): 
            outarray[2*i,2*j] = a[i,j]
            outarray[2*i+1,2*j] = a[i,j]
            outarray[2*i,2*j+1] = a[i,j]
            outarray[2*i+1,2*j+1] = a[i,j]
    return outarray
    """
    # much better :
    newshape = (2*a.shape[0], 2*a.shape[1])
    slices = [slice(0,old, float(old)/new) for old,new in zip(a.shape,newshape) ]
    coordinates = np.mgrid[slices]
    indices = coordinates.astype('i')   #choose the biggest smaller integer index
    return a[tuple(indices)]
Project: VIRUS    Author: grzeimann    | Project source | File source
def subsample(a): # this is more a generic function than a method ...
    """
    Returns a 2x2-subsampled version of array a (no interpolation, just cutting pixels in 4).
    The version below is directly from the scipy cookbook on rebinning:
    U{http://www.scipy.org/Cookbook/Rebinning}
    There is ndimage.zoom(cutout.array, 2, order=0, prefilter=False), but it makes funny borders.

    """
    """
    # Ouuwww this is slow ...
    outarray = np.zeros((a.shape[0]*2, a.shape[1]*2), dtype=np.float64)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]): 
            outarray[2*i,2*j] = a[i,j]
            outarray[2*i+1,2*j] = a[i,j]
            outarray[2*i,2*j+1] = a[i,j]
            outarray[2*i+1,2*j+1] = a[i,j]
    return outarray
    """
    # much better :
    newshape = (2*a.shape[0], 2*a.shape[1])
    slices = [slice(0,old, float(old)/new) for old,new in zip(a.shape,newshape) ]
    coordinates = np.mgrid[slices]
    indices = coordinates.astype('i')   #choose the biggest smaller integer index
    return a[tuple(indices)]
Project: atoolbox    Author: liweitianux    | Project source | File source
def zoom(self, newsize, order=1):
        """
        Zoom the image to the specified ``newsize``, meanwhile the header
        information will be updated accordingly to preserve the FoV coverage.

        NOTE
        ----
        The image aspect ratio cannot be changed.

        Parameters
        ----------
        newsize : (Nx, Ny) or N
            The size of the zoomed image.
        order : int, optional
            The interpolation order, default: 1
        """
        try:
            Nx2, Ny2 = newsize
        except TypeError:
            Nx2 = Ny2 = newsize
        zoom = ((Ny2+0.1)/self.Ny, (Nx2+0.1)/self.Nx)
        if abs(zoom[0] - zoom[1]) > 1e-3:
            raise RuntimeError("image aspect ratio cannot be changed")

        pixelsize_old = self.pixelsize
        self.image = ndimage.zoom(self.image, zoom=zoom, order=order)
        self.pixelsize = pixelsize_old * (self.Nx / Nx2)
        return self.image
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko    | Project source | File source
def predict(self, img, flip_evaluation):
        """
        Predict segmentation for an image.

        Arguments:
            img: must be rowsxcolsx3
        """
        h_ori, w_ori = img.shape[:2]
        if img.shape[0:2] != self.input_shape:
            print("Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (img.shape[0:2], self.input_shape))
            img = misc.imresize(img, self.input_shape)
        input_data = self.preprocess_image(img)
        # utils.debug(self.model, input_data)

        regular_prediction = self.model.predict(input_data)[0]
        if flip_evaluation:
            print("Predict flipped")
            flipped_prediction = np.fliplr(self.model.predict(np.flip(input_data, axis=2))[0])
            prediction = (regular_prediction + flipped_prediction) / 2.0
        else:
            prediction = regular_prediction

        if (h_ori, w_ori) != self.input_shape:  # upscale prediction if necessary
            h, w = prediction.shape[:2]
            prediction = ndimage.zoom(prediction, (1.*h_ori/h, 1.*w_ori/w, 1.),
                                      order=1, prefilter=False)
        return prediction
Project: deepdream    Author: martinkaptein    | Project source | File source
def deepdream(net, base_img, iter_n=11, octave_n=4, octave_scale=1.4, 
              end='inception_4c/output', clip=True, **step_params):
    #BACKUP high detail: def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,end='inception_5b/pool_proj', clip=True, **step_params):
    #deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6,end='prob', clip=False, **step_params):
    #function params>>net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])



#///////////////////////////////////////////////////////////////
Project: deepdream    Author: martinkaptein    | Project source | File source
def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6, end='inception_5b/output', clip=True, **step_params):
    #BACKUP high detail: def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,end='inception_5b/pool_proj', clip=True, **step_params):
    #deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6,end='prob', clip=False, **step_params):
    #function params>>net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])




#SELECT HERE THE PICTURE YOU WANT TO DRAW THE DREAM ON:
Project: deepdream    Author: martinkaptein    | Project source | File source
def deepdream(net, base_img, iter_n=5, octave_n=4, octave_scale=1.4, 
              end='inception_4d/output', clip=True, **step_params):
    #BACKUP high detail: def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,end='inception_5b/pool_proj', clip=True, **step_params):
    #deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6,end='prob', clip=False, **step_params):
    #function params>>net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0]) 

#////////////////////////////////////////////////////////////////////////////////////
#SELECT SOURCE PICTURE & SET FRAME SUM
#////////////////////////////////////////////////////////////////////////////////////
Project: deepdream    Author: martinkaptein    | Project source | File source
def deepdream(net, base_img, iter_n=11, octave_n=4, octave_scale=1.4, 
              end='inception_5a/output', clip=False, **step_params):
    #BACKUP high detail: def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,end='inception_5b/pool_proj', clip=True, **step_params):
    #deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6,end='prob', clip=False, **step_params):
    #function params>>net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 100))
                #vis = vis*(255.0/np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])



#///////////////////////////////////////////////////////////////
Project: deepdream    Author: martinkaptein    | Project source | File source
def deepdream(net, base_img, iter_n=11, octave_n=4, octave_scale=1.3, end='inception_4c/output', clip=True, **step_params):
    #BACKUP high detail: def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,end='inception_5b/pool_proj', clip=True, **step_params):
    #deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6,end='prob', clip=False, **step_params):
    #function params>>net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])




#SELECT HERE THE PICTURE YOU WANT TO DRAW THE DREAM ON:
#///////////////////////////////////////////////////////////////
Project: deepdream    Author: martinkaptein    | Project source | File source
def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6, end='inception_4b/output', clip=True, **step_params):
    #BACKUP high detail: def deepdream(net, base_img, iter_n=12, octave_n=6, octave_scale=1.6,end='inception_5b/pool_proj', clip=True, **step_params):
    #deepdream(net, base_img, iter_n=10, octave_n=7, octave_scale=1.6,end='prob', clip=False, **step_params):
    #function params>>net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_5b/5x5', clip=True, **step_params
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            showarray(vis)
            #silent print
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])




#SELECT HERE THE PICTURE YOU WANT TO DRAW THE DREAM ON:
Project: VLTPF    Author: avigan    | Project source | File source
def _scale_interp_builtin(array, scale_value, mode='constant', cval=0):
    scaled = ndimage.zoom(array, scale_value, order=3, mode=mode, cval=cval)

    return scaled
Project: PyMieScatt    Author: bsumlin    | Project source | File source
def Inversion(Qsca,Qabs,wavelength,diameter,nMin=1,nMax=3,kMin=0.001,kMax=1,scatteringPrecision=0.010,absorptionPrecision=0.010,spaceSize=120,interp=2):
  error = lambda measured,calculated: np.abs((calculated-measured)/measured)

  nRange = np.linspace(nMin,nMax,spaceSize)
  kRange = np.logspace(np.log10(kMin),np.log10(kMax),spaceSize)
  scaSpace = np.zeros((spaceSize,spaceSize))
  absSpace = np.zeros((spaceSize,spaceSize))

  for ni,n in enumerate(nRange):
    for ki,k in enumerate(kRange):
      _derp = fastMieQ(n+(1j*k),wavelength,diameter)
      scaSpace[ni][ki] = _derp[0]
      absSpace[ni][ki] = _derp[1]
  if interp is not None:
    nRange = zoom(nRange,interp)
    kRange = zoom(kRange,interp)
    scaSpace = zoom(scaSpace,interp)
    absSpace = zoom(absSpace,interp)

  scaSolutions = np.where(np.logical_and(Qsca*(1-scatteringPrecision)<scaSpace, scaSpace<Qsca*(1+scatteringPrecision)))
  absSolutions = np.where(np.logical_and(Qabs*(1-absorptionPrecision)<absSpace, absSpace<Qabs*(1+absorptionPrecision)))

  validScattering = nRange[scaSolutions[0]]+1j*kRange[scaSolutions[1]]
  validAbsorption = nRange[absSolutions[0]]+1j*kRange[absSolutions[1]]

  solution = np.intersect1d(validScattering,validAbsorption)
#  errors = [error()]

  return solution
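
Note that zoom() also accepts 1-D arrays, which is how the n/k grids above are refined; a minimal sketch:

import numpy as np
from scipy.ndimage import zoom

x = np.linspace(0, 1, 5)
x_fine = zoom(x, 2)  # length round(5 * 2) = 10, spanning the same [0, 1] range
print(x_fine.shape)  # (10,)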
Project: PyMieScatt    Author: bsumlin    | Project source | File source
def Inversion_SD(Bsca,Babs,wavelength,dp,ndp,nMin=1,nMax=3,kMin=0,kMax=1,scatteringPrecision=0.001,absorptionPrecision=0.001,spaceSize=40,interp=2):
  dp = coerceDType(dp)
  ndp = coerceDType(ndp)

  nRange = np.linspace(nMin,nMax,spaceSize)
  kRange = np.linspace(kMin,kMax,spaceSize)
  scaSpace = np.zeros((spaceSize,spaceSize))
  absSpace = np.zeros((spaceSize,spaceSize))

  for ni,n in enumerate(nRange):
    for ki,k in enumerate(kRange):
      _derp = fastMie_SD(n+(1j*k),wavelength,dp,ndp)
      scaSpace[ni][ki] = _derp[0]
      absSpace[ni][ki] = _derp[1]
  if interp is not None:
    nRange = zoom(nRange,interp)
    kRange = zoom(kRange,interp)
    scaSpace = zoom(scaSpace,interp)
    absSpace = zoom(absSpace,interp)

  scaSolutions = np.where(np.logical_and(Bsca*(1-scatteringPrecision)<scaSpace, scaSpace<Bsca*(1+scatteringPrecision)))
  absSolutions = np.where(np.logical_and(Babs*(1-absorptionPrecision)<absSpace, absSpace<Babs*(1+absorptionPrecision)))

  validScattering = nRange[scaSolutions[0]]+1j*kRange[scaSolutions[1]]
  validAbsorption = nRange[absSolutions[0]]+1j*kRange[absSolutions[1]]

  return np.intersect1d(validScattering,validAbsorption)
Project: 3DGAN-Pytorch    Author: rimchang    | Project source | File source
def getVoxelFromMat(path, cube_len=64):
    """Mat ???? ?? Voxel ? ???? ??"""
    voxels = io.loadmat(path)['instance']
    voxels = np.pad(voxels, (1, 1), 'constant', constant_values=(0, 0))
    if cube_len != 32 and cube_len == 64:
        voxels = nd.zoom(voxels, (2, 2, 2), mode='constant', order=0)
    return voxels
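
The order=0 call above is nearest-neighbour upsampling, so binary voxel values stay binary; a minimal sketch:

import numpy as np
from scipy import ndimage as nd

v = np.array([[0, 1],
              [1, 0]], dtype=np.uint8)
print(nd.zoom(v, 2, order=0))
# [[0 0 1 1]
#  [0 0 1 1]
#  [1 1 0 0]
#  [1 1 0 0]]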
Project: luna16    Author: gzuidhof    | Project source | File source
def load_itk_image_rescaled(filename, slice_mm):
    im, origin, spacing = load_itk_image(filename)

    new_im = zoom(im, [spacing[0]/slice_mm,1.0,1.0])
    return new_im
Project: fg21sim    Author: liweitianux    | Project source | File source
def load(self, infile, frequency=None):
        """
        Load input sky image from file into this instance.

        Parameters
        ----------
        infile : str
            The path to the input sky patch
        frequency : float, optional
            The frequency of the sky patch;
            Unit: [MHz]
        """
        self.infile = infile
        if frequency is not None:
            self.frequency = frequency
        with fits.open(infile) as f:
            self.data = f[0].data
            header = f[0].header.copy(strip=True)
            self.header_.extend(header, update=True)
        self.ysize_in, self.xsize_in = self.data.shape
        logger.info("Loaded sky patch from: %s (%dx%d)" %
                    (infile, self.xsize_in, self.ysize_in))

        if (self.xsize_in != self.xsize) or (self.ysize_in != self.ysize):
            logger.warning("Scale input sky patch to size %dx%d" %
                           (self.xsize, self.ysize))
            zoom = ((self.ysize+0.1)/self.ysize_in,
                    (self.xsize+0.1)/self.xsize_in)
            self.data = ndimage.zoom(self.data, zoom=zoom, order=1)
Project: fg21sim    Author: liweitianux    | Project source | File source
def circle2ellipse(imgcirc, bfraction, rotation=None):
    """
    Shrink the input circle image with respect to the center along the
    column (axis) to transform the circle to an ellipse, and then rotate
    around the image center.

    Parameters
    ----------
    imgcirc : 2D `~numpy.ndarray`
        Input image grid containing a circle at the center
    bfraction : float
        The fraction of the semi-minor axis w.r.t. the semi-major axis
        (i.e., the half width of the input image), to determine the
        shrunk size (height) of the output image.
        Should be a fraction within [0, 1]
    rotation : float, optional
        Rotation angle (unit: [deg])
        Default: ``None`` (i.e., no rotation)

    Returns
    -------
    imgout : 2D `~numpy.ndarray`
        Image of the same size as the input circle image.
    """
    nrow, ncol = imgcirc.shape
    # Shrink the circle to be elliptical
    nrow2 = nrow * bfraction
    nrow2 = int(nrow2 / 2) * 2 + 1  # make it odd
    # NOTE: zoom() calculates the output shape with round() instead of int();
    #       the +0.1 nudge below avoids a warning that the shapes may differ.
    zoom = ((nrow2+0.1)/nrow, 1)
    img2 = ndimage.zoom(imgcirc, zoom=zoom, order=1)
    # Pad the shrunk image to have the same size as input
    imgout = np.zeros(shape=(nrow, ncol))
    r1 = int((nrow - nrow2) / 2)
    imgout[r1:(r1+nrow2), :] = img2
    if rotation:
        imgout = ndimage.rotate(imgout, angle=rotation, reshape=False, order=1)
    return imgout
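
A standalone sketch of the rounding behaviour that the +0.1 nudge works around (sizes are hypothetical):

import numpy as np
from scipy import ndimage

a = np.zeros((10, 10))
# The output length is round(size * zoom), so a factor computed as
# newsize/size can land on x.5 and round the "wrong" way; nudging the
# target size, e.g. (4 + 0.1) / 10, pins the output to exactly 4 rows.
out = ndimage.zoom(a, ((4 + 0.1) / 10, 1), order=1)
print(out.shape)  # (4, 10)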
Project: GuidedLabelling    Author: coallaoh    | Project source | File source
def heatmap2segconf(heat_maps, imshape_original, gt_cls):
    heat_maps_os = nd.zoom(heat_maps,
                           [1,
                            float(imshape_original[0]) / heat_maps.shape[1],
                            float(imshape_original[1]) / heat_maps.shape[2]],
                           order=1)
    heat_maps_norm = heat_maps_os / heat_maps_os.max(axis=1).max(axis=1).reshape((-1, 1, 1))
    confidence = heat_maps_norm.max(0)
    seg = gt_cls[heat_maps_norm.argmax(axis=0)]

    return seg, confidence
Project: deepdream-neural-style-transfer    Author: rdcolema    | Project source | File source
def deepdream(net, base_img, iter_n=5, octave_n=11, octave_scale=1.4, end='inception_3b/5x5_reduce', clip=True,
              **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)

        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Project: deepdream-neural-style-transfer    Author: rdcolema    | Project source | File source
def deepdream(net, base_img, iter_n=70, octave_n=7, octave_scale=1.4, end='inception_5a/pool_proj', clip=True,
              **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))

    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)

        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)

            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)

        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Project: pypher    Author: aboucaud    | Project source | File source
def imresample(image, source_pscale, target_pscale, interp_order=1):
    """
    Resample data array from one pixel scale to another

    The resampling ensures the parity of the image is conserved
    to preserve the centering.

    Parameters
    ----------
    image : `numpy.ndarray`
        Input data array
    source_pscale : float
        Pixel scale of ``image`` in arcseconds
    target_pscale : float
        Pixel scale of output array in arcseconds
    interp_order : int, optional
        Spline interpolation order [0, 5] (default 1: linear)

    Returns
    -------
    output : `numpy.ndarray`
        Resampled data array

    """
    old_size = image.shape[0]
    new_size_raw = old_size * source_pscale / target_pscale
    new_size = int(np.ceil(new_size_raw))

    if new_size > 10000:
        raise MemoryError("The resampling will yield a too large image. "
                          "Please resize the input PSF image.")

    # Check for parity
    if (old_size - new_size) % 2 == 1:
        new_size += 1

    ratio = new_size / old_size

    return zoom(image, ratio, order=interp_order) / ratio**2
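
A standalone sketch of the 1/ratio**2 normalization above: upsampling by ratio r multiplies the pixel count by roughly r**2, so dividing by r**2 keeps the array sum (the total flux) approximately constant.

import numpy as np
from scipy.ndimage import zoom

psf = np.random.rand(32, 32)
ratio = 2.0
resampled = zoom(psf, ratio, order=1) / ratio**2
print(psf.sum(), resampled.sum())  # approximately equal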
Project: backtrackbb    Author: BackTrackBB    | Project source | File source
def resample(self, dx, dy, dz):
        zoom_x = self.dx / dx
        zoom_y = self.dy / dy
        zoom_z = self.dz / dz
        self.array = zoom(self.array, (zoom_x, zoom_y, zoom_z))
        self.nx, self.ny, self.nz = self.array.shape
        if self.type == 'SLOW_LEN':
            self.array *= dx / self.dx
        self.dx = dx
        self.dy = dy
        self.dz = dz
Project: tf-3dgan    Author: meetshah1995    | Project source | File source
def getVolumeFromOFF(path, sideLen=32):
    mesh = trimesh.load(path)
    volume = trimesh.voxel.Voxel(mesh, 0.5).raw
    (x, y, z) = map(float, volume.shape)
    volume = nd.zoom(volume.astype(float), 
                     (sideLen/x, sideLen/y, sideLen/z),
                     order=1, 
                     mode='nearest')
    volume[np.nonzero(volume)] = 1.0
    return volume.astype(np.bool)
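
The per-axis target/size factors above generalize to any fixed output shape; a minimal 3-D sketch (Python 3 division assumed):

import numpy as np
from scipy import ndimage as nd

vol = np.random.rand(20, 30, 40)
side = 32
factors = tuple(side / s for s in vol.shape)
out = nd.zoom(vol, factors, order=1, mode='nearest')
print(out.shape)  # (32, 32, 32)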
Project: tf-3dgan    Author: meetshah1995    | Project source | File source
def getVoxelFromMat(path, cube_len=64):
    voxels = io.loadmat(path)['instance']
    voxels = np.pad(voxels,(1,1),'constant',constant_values=(0,0))
    if cube_len != 32 and cube_len == 64:
        voxels = nd.zoom(voxels, (2,2,2), mode='constant', order=0)
    return voxels
Project: webCamEmocognizer    Author: DeepInEvil    | Project source | File source
def extract_face_features(gray, detected_face, offset_coefficients):
        (x, y, w, h) = detected_face
        #print x , y, w ,h
        horizontal_offset = np.int(np.floor(offset_coefficients[0] * w))
        vertical_offset = np.int(np.floor(offset_coefficients[1] * h))


        extracted_face = gray[y+vertical_offset:y+h, 
                          x+horizontal_offset:x-horizontal_offset+w]
        #print extracted_face.shape
        new_extracted_face = zoom(extracted_face, (48. / extracted_face.shape[0], 
                                               48. / extracted_face.shape[1]))
        new_extracted_face = new_extracted_face.astype(np.float32)
        new_extracted_face /= float(new_extracted_face.max())
        return new_extracted_face
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def re_rescale(im):
    d_im = zoom(im, (1, 0.5, 0.8), order=3)
    d_im = zoom(d_im, (1, 2, (1/0.8)), order=3)

    return d_im
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def show_downsize():
    for im in gen_images(n=-1, crop=True):
        t_im = im['T1c']
        gt = im['gt']

        t_im = np.asarray(t_im, dtype='float32')
        gt = np.asarray(gt, dtype='float32')

        d_im = zoom(t_im, 0.5, order=3)
        d_gt = zoom(gt, 0.5, order=0)
        print 'New shape: ', d_im.shape

        slices1 = np.arange(0, d_im.shape[0], d_im.shape[0]/20)
        slices2 = np.arange(0, t_im.shape[0], t_im.shape[0]/20)

        for s1, s2 in zip(slices1, slices2):
            d_im_slice = d_im[s1]
            d_gt_slice = d_gt[s1]

            im_slice = t_im[s2]
            gt_slice = gt[s2]

            title0= 'Original'
            title1= 'Downsized'
            vis_ims(im0=im_slice, gt0=gt_slice, im1=d_im_slice, 
                gt1=d_gt_slice, title0=title0, title1=title1)
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def get_im_as_ndarray(image, downsize=False):
    ims = [image['Flair'], image['T1'], image['T1c'], image['T2']]
    if downsize:
        ims = [zoom(x, 0.5, order=1) for x in ims]
    im = np.array(ims, dtype='int16')

    return im
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def get_gt(gt, n_classes, downsize=False):
    if not downsize:
        return gt
    original_shape = gt.shape
    gt_onehot = np.reshape(gt, (-1,))
    gt_onehot = np.reshape(one_hot(gt_onehot, n_classes), original_shape + (n_classes,))
    gt_onehot = np.transpose(gt_onehot, (3, 0, 1, 2))

    zoom_gt = np.array([zoom(class_map, 0.5, order=1) for class_map in gt_onehot])
    zoom_gt = zoom_gt.argmax(axis=0)
    zoom_gt = np.asarray(zoom_gt, dtype='int8')

    return zoom_gt
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def process_gt(gt, n_classes, downsize=False):
    if downsize:
        gt = zoom(gt, 0.5, order=0)
        gt = np.asarray(gt, dtype='int8')
    gt = np.transpose(gt, (1, 2, 0))
    l = np.reshape(gt, (-1,))
    l = np.reshape(one_hot(l, n_classes), (-1, n_classes))
    return l
Project: kaggle_dsb    Author: syagev    | Project source | File source
def load_itk_image_rescaled(filename, slice_mm):
    im, origin, spacing = load_itk_image(filename)

    new_im = zoom(im, [spacing[0]/slice_mm,1.0,1.0])
    return new_im
Project: DeepArt    Author: jiriroz    | Project source | File source
def dream(self, base_img, iter_n=10, octave_n=4, octave_scale=1.4, 
                end='inception_4c/output', clip=True, guide_features=None, name="dream", **step_params):
        # prepare base images for all octaves
        octaves = [self.preprocess(base_img)]
        for i in xrange(octave_n-1):
            octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

        src = self.net.blobs['data']
        detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
        for octave, octave_base in enumerate(octaves[::-1]):
            h, w = octave_base.shape[-2:]
            if octave > 0:
                # upscale details from the previous octave
                h1, w1 = detail.shape[-2:]
                detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)

            src.reshape(1,3,h,w) # resize the network's input image size
            src.data[0] = octave_base+detail
            for i in xrange(iter_n):
                self.make_step(end=end, clip=clip, guide_features=guide_features, **step_params)

                # visualization
                vis = self.deprocess(src.data[0])
                # adjust image contrast if clipping is disabled
                if not clip:
                    vis = vis*(255.0/np.percentile(vis, 99.98))
                print octave, i, end, vis.shape
                clear_output(wait=True)

            # extract details produced on the current octave
            detail = src.data[0]-octave_base
        self.showarray(vis, name)
        # returning the resulting image
        return self.deprocess(src.data[0])
Project: sparsecnn    Author: fkiaee    | Project source | File source
def read_image(path):
    img = imread(path,mode="RGB")
    h, w, c = np.shape(img)
    scale_size = 256
    crop_size = 224
    assert c == 3
    img = zoom(img, (scale_size/h, scale_size/w,1))
    img = img.astype(np.float32)
    img -= np.array([104., 117., 124.])
    h, w, c = img.shape
    ho, wo = ((h - crop_size) // 2, (w - crop_size) // 2)
    img = img[ho:ho + crop_size, wo:wo + crop_size, :]
    #print(np.shape(img))
    img = img[None, ...]
    return img
Project: nllgrid    Author: claudiodsf    | Project source | File source
def resample(self, dx, dy, dz):
        if self.type in ['ANGLE', 'ANGLE2D']:
            raise NotImplementedError(
                'Resample not implemented for ANGLE grid.')
        zoom_x = self.dx / dx
        zoom_y = self.dy / dy
        zoom_z = self.dz / dz
        self.array = zoom(self.array, (zoom_x, zoom_y, zoom_z))
        self.nx, self.ny, self.nz = self.array.shape
        if self.type == 'SLOW_LEN':
            self.array *= dx / self.dx
        self.dx = dx
        self.dy = dy
        self.dz = dz
Project: QScode    Author: PierreHao    | Project source | File source
def Deepdream(self, base_img, iter_n=10, octave_n=4, octave_scale=1.4, clip=True):
        # prepare base images for all octaves
        octaves = [self.Preprocess(base_img)]
        for i in xrange(octave_n-1):
            # shrink the image octave[0] so that function always return image size as octave[0]
            octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))

        src = self.net.blobs['data']
        detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
        for octave, octave_base in enumerate(octaves[::-1]):# from end to 0
            h, w = octave_base.shape[-2:]
            if octave > 0:
                # upscale details from the previous octave
                h1, w1 = detail.shape[-2:]
                detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)
            src.reshape(1,3,h,w) # resize the network's input image size
            src.data[0] = octave_base+detail
            for i in xrange(iter_n):
                self.Make_step()
                # visualization
                '''
                vis = self.deprocess(net, src.data[0])
                if not clip: # adjust image contrast if clipping is disabled
                    vis = vis*(255.0/np.percentile(vis, 99.98))
                showarray(vis)
                print octave, i, end, vis.shape
                clear_output(wait=True)
                '''
            # extract details produced on the current octave
            #print octave, self.end, src.data[0].shape
            detail = src.data[0]-octave_base
        # returning the resulting image
        return self.Deprocess(src.data[0])
Project: pytorch-maddpg    Author: xuehy    | Project source | File source
def resize(scale, old_mats):
    new_mats = []
    for mat in old_mats:
        new_mats.append(zoom(mat, scale, order=0))
    return np.array(new_mats)
Project: ZOGY    Author: pmvreeswijk    | Project source | File source
def ds9_arrays(**kwargs):

    cmd = ['ds9', '-zscale', '-zoom', '4', '-cmap', 'heat']
    for name, array in kwargs.items():
        # write array to fits
        fitsfile = 'ds9_'+name+'.fits'
        fits.writeto(fitsfile, np.array(array).astype(np.float32), clobber=True)            
        # append to command
        cmd.append(fitsfile)

    #print 'cmd', cmd
    result = subprocess.call(cmd)

################################################################################
Project: xdesign    Author: tomography    | Project source | File source
def _compute_msssim(imQual, nlevels=5, sigma=1.2, L=1, K=(0.01, 0.03)):
    '''
    An implementation of the Multi-Scale Structural SIMilarity index (MS-SSIM).

    References
    ----------
    Multi-scale Structural Similarity Index (MS-SSIM)
    Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multi-scale structural
    similarity for image quality assessment," Invited Paper, IEEE Asilomar
    Conference on Signals, Systems and Computers, Nov. 2003

    Parameters
    ----------
    imQual : ImageQuality
    nlevels : int
        The max number of levels to analyze
    sigma : float
        Sets the standard deviation of the gaussian filter. This setting
        determines the minimum scale at which quality is assessed.
    L : scalar
        The dynamic range of the data. This value is 1 for float
        representations and 2^bitdepth for integer representations.
    K : 2-tuple
        A list of two constants which help prevent division by zero.

    Returns
    -------
    imQual : ImageQuality
        A struct used to organize image quality information. NOTE: the valid
        range for SSIM is [-1, 1].
    '''
    _full_reference_input_check(imQual, sigma, nlevels, L)

    img1 = imQual.orig
    img2 = imQual.recon

    # The relative importance of each level as determined by human experiment
    # weight = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]

    for level in range(0, nlevels):
        imQual += _compute_ssim(ImageQuality(img1, img2), sigma=sigma, L=L,
                                K=K, scale=sigma * 2**level)
        if level == nlevels - 1:
            break

        # Downsample (using ndimage.zoom to prevent sampling bias)
        img1 = scipy.ndimage.zoom(img1, 1/2)
        img2 = scipy.ndimage.zoom(img2, 1/2)

    return imQual
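
A standalone sketch of the repeated halving used above (Python 3 division assumed):

import numpy as np
import scipy.ndimage

img = np.random.rand(64, 64)
for level in range(3):
    img = scipy.ndimage.zoom(img, 1 / 2)  # 64 -> 32 -> 16 -> 8
print(img.shape)  # (8, 8)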