Python scipy.ndimage module: convolve() usage examples

The following 18 code examples, extracted from open-source Python projects, illustrate how to use scipy.ndimage.convolve().
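
A minimal, self-contained sketch of the basic call before the project examples; the array and kernel below are made up purely for illustration. ndimage.convolve performs a true convolution (the kernel is flipped), unlike ndimage.correlate, and the output always has the same shape as the input, with mode/cval controlling how the borders are padded.

import numpy as np
from scipy import ndimage

a = np.arange(25, dtype=float).reshape(5, 5)      # toy input array
k = np.array([[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]], dtype=float)            # cross-shaped kernel

# Same-shape output; borders are padded with cval before convolving.
out = ndimage.convolve(a, k, mode='constant', cval=0.0)
print(out.shape)   # (5, 5)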

Project: Vessel3DDL    Author: konopczynski    | project source | file source
def ApplyAtoms(V,D,scale):
    out=[]
    for s in xrange(scale):
        if s!=0:
            print('scale='+str(s))
            V = pyr.pyramid_reduce_3d(V,downscale=2) # reduce the volume. e.g. from 512^3 to 256^3
        else: print('scale=0')
        for i in xrange(len(D)):
            print('s:'+str(s)+' i:'+str(i))
            conv = nd.convolve(V, D[i], mode='constant', cval=0.0)
            if s==0:
                out.append(conv)
            else:
                upscaled = pyr.pyramid_expand_3d(conv, upscale=2**s)
                out.append(upscaled)
    out=np.array(out)
    return out
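
The pyramid helpers (pyr.pyramid_reduce_3d / pyr.pyramid_expand_3d) are Vessel3DDL-internal; the part that uses scipy.ndimage is the 3D convolution of the volume with each dictionary atom. A self-contained sketch of just that step, with invented volume and atom sizes:

import numpy as np
from scipy import ndimage as nd

V = np.random.rand(64, 64, 64)                     # toy volume (size made up)
D = [np.random.rand(5, 5, 5) for _ in range(3)]    # toy dictionary of 3D atoms

# One filter response per atom, each the same shape as the volume.
out = np.array([nd.convolve(V, atom, mode='constant', cval=0.0) for atom in D])
print(out.shape)   # (3, 64, 64, 64)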
Project: Splipy    Author: sintefmath    | project source | file source
def smooth(obj):
    """Smooth an object by setting the interior control points to the average of
    itself and all neighbours (e.g. 9 for surfaces, 27 for volumes). The edges
    are kept unchanged, and any rational weights are kept unchanged.

    :param obj: The object to smooth
    :type obj: :class:`splipy.SplineObject`
    """
    n = obj.shape
    averaging_mask  = np.ones([3]*len(n)+[1])
    averaging_mask /= averaging_mask.size

    new_controlpoints = ndimage.convolve(obj.controlpoints, averaging_mask)

    if obj.rational:
        interior = tuple([slice(1,-1,None)]*len(n) + [slice(0,-1,None)])
    else:
        interior = tuple([slice(1,-1,None)]*len(n) + [slice(None,None,None)])

    obj.controlpoints[interior] =  new_controlpoints[interior]
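
As a standalone illustration of the averaging step (without Splipy objects), the same kind of mask can be applied directly to an array of control points; the grid size below is invented:

import numpy as np
from scipy import ndimage

cps = np.random.rand(10, 12, 3)        # hypothetical surface: a 10x12 grid of 3D points

# 3x3 averaging mask over the two parametric directions, broadcast over the
# coordinate axis, as in smooth() above.
mask = np.ones((3, 3, 1)) / 9.0
averaged = ndimage.convolve(cps, mask)

# Keep the boundary control points unchanged.
new_cps = cps.copy()
new_cps[1:-1, 1:-1, :] = averaged[1:-1, 1:-1, :]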
Project: learning-blind-motion-deblurring    Author: cgtuebingen    | project source | file source
def get_data(self):

        image_iter = self.ds_images.get_data()
        psf_iter = self.ds_psf.get_data()

        for dp_image in image_iter:

            # sample camera shake kernel
            dp_psf = next(psf_iter)

            # synthesize ego-motion
            for t, k in enumerate(dp_psf):
                blurry = dp_image[t]
                for c in range(3):
                    blurry[:, :, c] = ndimage.convolve(blurry[:, :, c], k, mode='constant', cval=0.0)
                dp_image[t] = blurry

            yield dp_image
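
The surrounding iterator plumbing comes from the project's data pipeline; the blur itself is a per-channel 2D convolution of the frame with the sampled PSF. A self-contained sketch with an invented image and kernel:

import numpy as np
from scipy import ndimage

image = np.random.rand(120, 160, 3)    # toy RGB frame (H, W, C)
psf = np.random.rand(17, 17)
psf /= psf.sum()                       # normalized blur kernel

blurry = image.copy()
for c in range(3):
    blurry[:, :, c] = ndimage.convolve(image[:, :, c], psf,
                                       mode='constant', cval=0.0)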
Project: oocgcm    Author: lesommer    | project source | file source
def apply(self, mode='reflect', weights=None, compute=True):
        """
        Convolve the current window with the data
        """
        # Check if the data has more dimensions than the window and add
        # extra-dimensions to the window if it is the case
        mask = self.obj.notnull()
        if weights is None:
            weights = im.convolve(mask.astype(float), self.coefficients, mode=mode)
        filled_data = self.obj.fillna(0.).data

        def convolve(x):
            xf = im.convolve(x, self.coefficients, mode=mode)
            return xf

        data = filled_data.map_overlap(convolve, depth=self._depth,
                                       boundary=mode, trim=True)
        if compute:
            with ProgressBar():
                out = data.compute()
        else:
            out = data
        res = xr.DataArray(out, dims=self.obj.dims, coords=self.coords, name=self.obj.name) / weights

        return res.where(mask == 1)
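
Stripped of the xarray/dask machinery, apply() performs a normalized (masked) convolution: missing values are filled with zeros, convolved, and the result is divided by the convolved mask so the gaps do not bias the filter. A plain-NumPy sketch of that idea (array size, window and gap are all made up):

import numpy as np
from scipy import ndimage as im

data = np.random.rand(50, 60)
data[10:13, 15:18] = np.nan            # pretend a few values are missing
coefficients = np.ones((5, 5)) / 25.0  # simple boxcar window

mask = np.isfinite(data)
weights = im.convolve(mask.astype(float), coefficients, mode='reflect')
filled = np.where(mask, data, 0.0)
smoothed = im.convolve(filled, coefficients, mode='reflect') / weights
smoothed[~mask] = np.nan               # keep the original gaps masked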
Project: oocgcm    Author: lesommer    | project source | file source
def boundary_weights(self, mode='reflect', drop_dims=None):
        """
        Compute the boundary weights

        Parameters
        ----------
            mode:

            drop_dims:
                Specify dimensions along which the mask is constant

        Returns
        -------
        """
        mask = self.obj.notnull()
        new_dims = copy.copy(self.obj.dims)
        new_coords = copy.copy(self.coords)
        for dim in drop_dims:
            #TODO: Make the function work
            mask = mask.isel({dim:0})
            del(new_dims[dim])
            del(new_coords[dim])
        weights = im.convolve(mask.astype(float), self.coefficients, mode=mode)
        res = xr.DataArray(weights, dims=new_dims, coords=new_coords, name='boundary weights')
        return res.where(mask == 1)
Project: deep-learning-models    Author: kuleshov    | project source | file source
def nudge_dataset(X, Y):
    """
    This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to left, right, down, up
    """
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
                                  weights=w).ravel()
    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y
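
This helper is typically applied to the 8x8 digit images from scikit-learn; a usage sketch, assuming numpy, scipy.ndimage.convolve and scikit-learn are importable alongside the function above:

import numpy as np
from scipy.ndimage import convolve
from sklearn.datasets import load_digits

digits = load_digits()                       # each row of .data is a flattened 8x8 image
X, Y = nudge_dataset(digits.data, digits.target)
print(X.shape)                               # five times as many rows as digits.data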
Project: imgProcessor    Author: radjkarl    | project source | file source
def maskedConvolve(arr, kernel, mask, mode='reflect'):
    '''
    same as scipy.ndimage.convolve but is only executed on mask==True
    ... which should speed up everything
    '''
    arr2 = extendArrayForConvolution(arr, kernel.shape, modex=mode, modey=mode)
    print(arr2.shape)
    out = np.zeros_like(arr)
    return _calc(arr2, kernel, mask, out)
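
extendArrayForConvolution and _calc are imgProcessor-internal (the latter is presumably a compiled loop), so the snippet above is not self-contained. A slow but self-contained NumPy sketch of the same idea, evaluating the convolution only where the mask is True:

import numpy as np

def masked_convolve_naive(arr, kernel, mask):
    # Illustration only, not fast: positions where mask is False keep their
    # original value, positions where mask is True get the convolution result.
    ky, kx = kernel.shape
    py, px = ky // 2, kx // 2
    padded = np.pad(arr, ((py, py), (px, px)), mode='reflect')
    flipped = kernel[::-1, ::-1]              # ndimage.convolve flips the kernel
    out = arr.astype(float).copy()
    for y, x in np.argwhere(mask):
        out[y, x] = np.sum(padded[y:y + ky, x:x + kx] * flipped)
    return out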
Project: Numpy_Guider_Code    Author: skychan    | project source | file source
def get_pixar(arr, weights):
  states = ndimage.convolve(arr, weights, mode='wrap')

  bools = (states == 13) | (states == 12 ) | (states == 3)

  return bools.astype(int)
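
The thresholds 13, 12 and 3 suggest Conway's Game of Life implemented with a single convolution in which a live cell counts as 10 and each live neighbour as 1; the 3x3 weights matrix below (ones with a 10 in the centre) is an assumption inferred from those thresholds, not taken from the project's code. One generation on a toy grid:

import numpy as np
from scipy import ndimage

weights = np.array([[1,  1, 1],
                    [1, 10, 1],
                    [1,  1, 1]])

grid = (np.random.rand(20, 20) > 0.7).astype(int)       # random initial state
states = ndimage.convolve(grid, weights, mode='wrap')
# 12/13: live cell with 2 or 3 neighbours survives; 3: dead cell with 3 is born.
next_grid = ((states == 13) | (states == 12) | (states == 3)).astype(int)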
Project: aiw-second-edition    Author: dougmcilwraith    | project source | file source
def nudge_dataset(X, Y):
    """
    This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to left, right, down, up
    """
    direction_vectors = [[[0, 1, 0],[0, 0, 0],[0, 0, 0]],
            [[0, 0, 0],[1, 0, 0],[0, 0, 0]],
            [[0, 0, 0],[0, 0, 1],[0, 0, 0]],
            [[0, 0, 0],[0, 0, 0],[0, 1, 0]]]
    shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()
    X = np.concatenate([X] + [np.apply_along_axis(shift, 1, X, vector) for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y

#6.11
Project: SyConn    Author: StructuralNeurobiologyLab    | project source | file source
def create_hull_voxels(self):
        voxels = np.copy(self.voxels)
        if len(voxels.shape) > 1:
            voxels_array = np.array(voxels, dtype=np.int)
            x_min = np.min(voxels_array[:, 0]) - 2
            x_max = np.max(voxels_array[:, 0]) + 2
            y_min = np.min(voxels_array[:, 1]) - 2
            y_max = np.max(voxels_array[:, 1]) + 2
            z_min = np.min(voxels_array[:, 2]) - 2
            z_max = np.max(voxels_array[:, 2]) + 2

            matrix = np.zeros((x_max-x_min, y_max-y_min, z_max-z_min),
                              dtype=np.uint8)

            lower_boarder = np.array([basics.negative_to_zero(x_min),
                                      basics.negative_to_zero(y_min),
                                      basics.negative_to_zero(z_min)],
                                     dtype=np.int)

            voxels = np.array(voxels, dtype=np.int) - lower_boarder
            matrix[voxels[:, 0], voxels[:, 1], voxels[:, 2]] = 1

            k = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                          [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
                          [[0, 0, 0], [0, 1, 0], [0, 0, 0]]])
            coords = np.argwhere((ndimage.convolve(matrix, k, mode="constant", cval=0.) < 7)*matrix == 1) + lower_boarder
        else:
            coords = voxels
        return coords
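
The cross-shaped kernel sums a voxel and its six face neighbours; an interior voxel of a filled object therefore scores 7, and anything scoring below 7 lies on the hull. A compact sketch of that test on a toy binary volume (the solid ball is invented):

import numpy as np
from scipy import ndimage

z, y, x = np.mgrid[:32, :32, :32]
volume = ((x - 16)**2 + (y - 16)**2 + (z - 16)**2 < 10**2).astype(np.uint8)

k = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
              [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
              [[0, 0, 0], [0, 1, 0], [0, 0, 0]]])

score = ndimage.convolve(volume, k, mode="constant", cval=0)
hull_coords = np.argwhere((score < 7) & (volume == 1))   # surface voxel coordinates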
Project: PaintingToArtists    Author: achintyagopal    | project source | file source
def inverse_convolve(self, img, kernel):
        # x,y in order
        # z in reverse order
        _,_,l = kernel.shape
        new_img = []
        for z in range(l):
            kernel_z = cv2.flip(kernel[:,:,l - z - 1], -1)
            new_img.append(convolve(img, kernel_z, mode="constant"))
        new_img = np.array(new_img)
        new_img = np.swapaxes(new_img, 0,1)
        new_img = np.swapaxes(new_img, 1,2)

        return new_img
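
cv2.flip(a, -1) flips both spatial axes, which in NumPy is a[::-1, ::-1]. A cv2-free sketch of the same slice-wise operation with invented shapes, stacking the per-slice results so the kernel depth becomes the last axis of the output:

import numpy as np
from scipy.ndimage import convolve

img = np.random.rand(32, 32)           # toy 2D image
kernel = np.random.rand(5, 5, 3)       # toy 3D kernel, depth along the last axis
depth = kernel.shape[2]

slices = [convolve(img, kernel[::-1, ::-1, depth - z - 1], mode="constant")
          for z in range(depth)]
out = np.stack(slices, axis=-1)        # shape (32, 32, 3)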
Project: PaintingToArtists    Author: achintyagopal    | project source | file source
def inverse_convolve(self, img, kernel):
        # x,y in order
        # z in reverse order
        _,_,l = kernel.shape
        new_img = []
        for z in range(l):
            kernel_z = cv2.flip(kernel[:,:,l - z - 1], -1)
            new_img.append(convolve(img, kernel_z, mode="constant"))
        new_img = np.array(new_img)
        new_img = np.swapaxes(new_img, 0,1)
        new_img = np.swapaxes(new_img, 1,2)

        return new_img
Project: grocsvs    Author: grocsvs    | project source | file source
def get_svs(mat, bg_mat, sv_region, window_size, rolling=0):
    if rolling > 0:
        weights = numpy.ones((rolling*2+1, rolling*2+1))    
        mat = ndimage.convolve(mat, weights, mode="constant")
        bg_mat = ndimage.convolve(bg_mat, weights, mode="constant")

    norm = mat/bg_mat
    norm[numpy.isnan(norm)] = 0
    norm = numpy.ma.masked_array(norm, mask=False)

    breakpoints = []

    while not norm.mask.all():
        where = numpy.ma.where(norm==norm.max())
        where = (where[0][0], where[1][0])

        is_good = (mat[where] > 25 and norm[where] > 0.05)

        if is_good:
            breakpoint = (where[1]*window_size + sv_region["startx"],
                          where[0]*window_size + sv_region["starty"])

            breakpoints.append(breakpoint)

            # TODO: constant for extend; this determines the closest
            # any two breakpoints can be from one another
            norm.mask[get_matrix_rect(norm, where, 10)] = True
        else:
            break

    return breakpoints
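
The rolling branch is simply a moving-window (box) sum: convolving with a (2*rolling+1) square matrix of ones replaces every entry by the sum over its neighbourhood before the foreground/background ratio is taken. A tiny sketch of that step alone, on a made-up count matrix:

import numpy
from scipy import ndimage

rolling = 2
mat = numpy.random.poisson(1.0, size=(40, 40)).astype(float)   # toy count matrix

weights = numpy.ones((rolling * 2 + 1, rolling * 2 + 1))
windowed = ndimage.convolve(mat, weights, mode="constant")      # 5x5 box sums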
Project: Parallel-SGD    Author: angadgill    | project source | file source
def nudge_dataset(X, Y):
    """
    This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to left, right, down, up
    """
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
                                  weights=w).ravel()
    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    Y = np.concatenate([Y for _ in range(5)], axis=0)
    return X, Y

# Load Data
Project: segmentator    Author: ofgulban    | project source | file source
def compute_gradient_magnitude(ima, method='scharr'):
    """Compute gradient magnitude of images.

    Parameters
    ----------
    ima : np.ndarray
        First image, which is often the intensity image (e.g. T1w).
    method : string
        Gradient computation method. Available options are 'scharr',
        'sobel', 'prewitt', 'numpy'.
    Returns
    -------
    gra_mag : np.ndarray
        Second image, which is often the gradient magnitude image
        derived from the first image

    """
    if method == 'sobel':  # magnitude scale is similar to numpy method
        kernel = create_3D_kernel(operator=method)
        gra = np.zeros(ima.shape + (kernel.shape[0],))
        for d in range(kernel.shape[0]):
            gra[..., d] = convolve(ima, kernel[d, ...])
        # compute generic gradient magnitude with normalization
        gra_mag = np.sqrt(np.sum(np.power(gra, 2.), axis=-1))
        return gra_mag
    elif method == 'prewitt':
        kernel = create_3D_kernel(operator=method)
        gra = np.zeros(ima.shape + (kernel.shape[0],))
        for d in range(kernel.shape[0]):
            gra[..., d] = convolve(ima, kernel[d, ...])
        # compute generic gradient magnitude with normalization
        gra_mag = np.sqrt(np.sum(np.power(gra, 2.), axis=-1))
        return gra_mag
    elif method == 'scharr':
        kernel = create_3D_kernel(operator=method)
        gra = np.zeros(ima.shape + (kernel.shape[0],))
        for d in range(kernel.shape[0]):
            gra[..., d] = convolve(ima, kernel[d, ...])
        # compute generic gradient magnitude with normalization
        gra_mag = np.sqrt(np.sum(np.power(gra, 2.), axis=-1))
        return gra_mag
    elif method == 'numpy':
        gra = np.asarray(np.gradient(ima))
        gra_mag = np.sqrt(np.sum(np.power(gra, 2.), axis=0))
        return gra_mag
    else:
        print('Gradient magnitude method is invalid!')
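
create_3D_kernel is a segmentator helper that returns one 3D derivative kernel per axis, so the snippet above is not self-contained. As a stand-in (not the project's kernels), scipy.ndimage's built-in per-axis Sobel filter gives a comparable gradient magnitude:

import numpy as np
from scipy import ndimage

ima = np.random.rand(40, 40, 40)       # toy 3D intensity image

gra = np.stack([ndimage.sobel(ima, axis=d) for d in range(ima.ndim)], axis=-1)
gra_mag = np.sqrt(np.sum(gra ** 2, axis=-1))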
Project: discretize    Author: simpeg    | project source | file source
def random_model(shape, seed=None, anisotropy=None, its=100, bounds=None):
    """
        Create a random model by convolving a kernel with a
        uniformly distributed model.

        :param tuple shape: shape of the model.
        :param int seed: pick which model to produce, prints the seed if you don't choose.
        :param numpy.ndarray anisotropy: this is the (3 x n) blurring kernel that is used.
        :param int its: number of smoothing iterations
        :param list bounds: bounds on the model, len(list) == 2
        :rtype: numpy.ndarray
        :return: M, the model


        .. plot::

            import matplotlib.pyplot as plt
            import discretize
            plt.colorbar(plt.imshow(discretize.utils.random_model((50, 50), bounds=[-4, 0])))
            plt.title('A very cool, yet completely random model.')
            plt.show()


    """
    if bounds is None:
        bounds = [0, 1]

    if seed is None:
        seed = np.random.randint(1e3)
        print('Using a seed of: ', seed)

    if type(shape) in num_types:
        shape = (shape, ) # make it a tuple for consistency

    np.random.seed(seed)
    mr = np.random.rand(*shape)
    if anisotropy is None:
        if len(shape) is 1:
            smth = np.array([1, 10., 1], dtype=float)
        elif len(shape) is 2:
            smth = np.array([[1, 7, 1], [2, 10, 2], [1, 7, 1]], dtype=float)
        elif len(shape) is 3:
            kernal = np.array([1, 4, 1], dtype=float).reshape((1, 3))
            smth = np.array(sp.kron(sp.kron(kernal, kernal.T).todense()[:], kernal).todense()).reshape((3, 3, 3))
    else:
        assert len(anisotropy.shape) is len(shape), 'Anisotropy must be the same shape.'
        smth = np.array(anisotropy, dtype=float)

    smth = smth/smth.sum() # normalize
    mi = mr
    for i in range(its):
        mi = ndi.convolve(mi, smth)

    # scale the model to live between the bounds.
    mi = (mi - mi.min())/(mi.max()-mi.min()) # scaled between 0 and 1
    mi = mi*(bounds[1]-bounds[0])+bounds[0]

    return mi
Project: VLTPF    Author: avigan    | project source | file source
def sph_ifs_correct_spectral_xtalk(img):
    '''
    Corrects an IFS frame from the spectral crosstalk

    This routine corrects for the SPHERE/IFS spectral crosstalk at
    small scales and (optionally) at large scales. This correction is
    necessary to correct the signal that is "leaking" between
    lenslets. See Antichi et al. (2009ApJ...695.1042A) for a
    theoretical description of the IFS crosstalk. Some information
    regarding its correction are provided in Vigan et al. (2015), but
    this procedure still lacks a rigorous description and performance
    analysis.

    Since the correction of the crosstalk involves a convolution by a
    kernel of size 41x41, the values at the edges of the frame depend
    on how you choose to apply the convolution. Current implementation
    is EDGE_TRUNCATE. In other parts of the image (i.e. far from the
    edges), the result is identical to original routine by Dino
    Mesa. Note that in the original routine, the convolution that was
    coded did not treat the edges in a clean way defined
    mathematically. The scipy.ndimage.convolve() function offers
    different possibilities for the edges that are all documented.

    Parameters
    ----------
    img : array_like
        Input IFS science frame

    Returns
    -------
    img_corr : array_like
        Science frame corrected from the spectral crosstalk

    '''

    # definition of the dimension of the matrix
    sepmax = 20
    dim    = sepmax*2+1
    bfac   = 0.727986/1.8

    # defines a matrix to be used around each pixel
    # (the value of the matrix is lower for greater
    # distances from the center).
    x, y = np.meshgrid(np.arange(dim)-sepmax, np.arange(dim)-sepmax)
    rdist  = np.sqrt(x**2 + y**2)
    kernel = 1 / (1+rdist**3 / bfac**3)
    kernel[(np.abs(x) <= 1) & (np.abs(y) <= 1)] = 0

    # convolution and subtraction
    conv = ndimage.convolve(img, kernel, mode='reflect')
    img_corr = img - conv

    return img_corr
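
The function above needs only numpy and scipy.ndimage; a usage sketch on a small synthetic frame (real IFS detector frames are larger, and the random image here is purely a stand-in):

import numpy as np
from scipy import ndimage

img = np.random.rand(512, 512)                      # synthetic stand-in frame
img_corr = sph_ifs_correct_spectral_xtalk(img)      # subtract the cross-talk estimate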
Project: Theano-Deep-learning    Author: GeekLiB    | project source | file source
def pyconv3d(signals, filters, border_mode='valid'):
    Ns, Ts, C, Hs, Ws = signals.shape
    Nf, Tf, C, Hf, Wf = filters.shape

    # if border_mode is not 'valid', the signals need zero-padding
    if border_mode == 'full':
        Tpad = Tf - 1
        Hpad = Hf - 1
        Wpad = Wf - 1
    elif border_mode == 'half':
        Tpad = Tf // 2
        Hpad = Hf // 2
        Wpad = Wf // 2
    else:
        Tpad = 0
        Hpad = 0
        Wpad = 0

    if Tpad > 0 or Hpad > 0 or Wpad > 0:
        # zero-pad signals
        signals_padded = numpy.zeros((Ns, Ts + 2 * Tpad, C,
                                      Hs + 2 * Hpad, Ws + 2 * Wpad), 'float32')
        signals_padded[:, Tpad:(Ts + Tpad), :, Hpad:(Hs + Hpad),
                       Wpad:(Ws + Wpad)] = signals
        Ns, Ts, C, Hs, Ws = signals_padded.shape
        signals = signals_padded

    Tf2 = Tf // 2
    Hf2 = Hf // 2
    Wf2 = Wf // 2

    rval = numpy.zeros((Ns, Ts - Tf + 1, Nf, Hs - Hf + 1, Ws - Wf + 1))
    for ns in xrange(Ns):
        for nf in xrange(Nf):
            for c in xrange(C):
                s_i = signals[ns, :, c, :, :]
                f_i = filters[nf, :, c, :, :]
                r_i = rval[ns, :, nf, :, :]
                o_i = ndimage.convolve(s_i, f_i, mode='constant', cval=1)
                o_i_sh0 = o_i.shape[0]
                # print s_i.shape, f_i.shape, r_i.shape, o_i.shape
                r_i += o_i[Tf2:o_i_sh0 - Tf2, Hf2:-Hf2, Wf2:-Wf2]
    return rval
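
The inner loop pairs each signal channel with the matching filter channel, takes a full-size ndimage.convolve and then crops it to the 'valid' region. A self-contained sketch of that single per-channel step (shapes are invented; for odd-sized filters the constant border padding never reaches the cropped region, which is why the cval choice above is harmless):

import numpy
from scipy import ndimage

s = numpy.random.rand(8, 16, 16).astype('float32')   # one signal channel (T, H, W)
f = numpy.random.rand(3, 5, 5).astype('float32')     # one filter channel (Tf, Hf, Wf)

full = ndimage.convolve(s, f, mode='constant', cval=0.0)
t2, h2, w2 = f.shape[0] // 2, f.shape[1] // 2, f.shape[2] // 2
valid = full[t2:full.shape[0] - t2, h2:-h2, w2:-w2]   # shape (T-Tf+1, H-Hf+1, W-Wf+1)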