Python scipy.signal module: convolve2d() code examples

The following 49 code examples, extracted from open-source Python projects, illustrate how to use scipy.signal.convolve2d().

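Before the project snippets, here is a minimal standalone example (NumPy + SciPy only) showing the mode and boundary options that recur throughout:

import numpy as np
from scipy.signal import convolve2d

image = np.arange(25, dtype=float).reshape(5, 5)
kernel = np.ones((3, 3)) / 9.0  # 3x3 box-blur kernel

# mode='same' keeps the output the same size as the input;
# boundary='symm' mirrors the image at the borders instead of zero-padding.
blurred = convolve2d(image, kernel, mode='same', boundary='symm')

# mode='valid' keeps only positions where the kernel fully overlaps the image.
inner = convolve2d(image, kernel, mode='valid')
print(blurred.shape, inner.shape)  # (5, 5) (3, 3)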
Project: atoolbox    Author: liweitianux
def calc_filters(self):
        """
        Calculate the convolution filters of each scale.
        Note: the zero-th scale filter (i.e., delta function) is the first
        element, thus the array index is the same as the decomposition scale.
        """
        self.filters = []
        # scale 0: delta function
        h = np.array([[1]])  # NOTE: 2D
        self.filters.append(h)
        # scale 1
        h = self.phi[::-1, ::-1]
        self.filters.append(h)
        for scale in range(2, self.level+1):
            h_up = self.zupsample(self.phi, order=scale-1)
            h2 = signal.convolve2d(h_up[::-1, ::-1], h, mode="same",
                    boundary=self.boundary)
            self.filters.append(h2)
Project: Comictagger    Author: dickloraine
def average_hash2( self ):
        pass
        """
        # Got this one from somewhere on the net.  Not a clue how the 'convolve2d'
        # works! 

        from numpy import array 
        from scipy.signal import convolve2d

        im = self.image.resize((self.width, self.height), Image.ANTIALIAS).convert('L')

        in_data = array((im.getdata())).reshape(self.width, self.height)
        filt = array([[0,1,0],[1,-4,1],[0,1,0]])
        filt_data = convolve2d(in_data,filt,mode='same',boundary='symm').flatten()

        result = reduce(lambda x, (y, z): x | (z << y),
                         enumerate(map(lambda i: 0 if i < 0 else 1, filt_data)),
                         0)
        #print "{0:016x}".format(result)
        return result
        """
Project: rllabplusplus    Author: shaneshixiang
def clear_patch(hfield, box):
    ''' Clears a patch shaped like box, assuming robot is placed in center of hfield
    @param box: rllab.spaces.Box-like
    '''
    if box.flat_dim > 2:
        raise ValueError("Provide 2dim box")

    # clear patch
    h_center = int(0.5 * hfield.shape[0])
    w_center = int(0.5 * hfield.shape[1])
    fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
    fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
    hfield[fromrow:torow, fromcol:tocol] = 0.0

    # convolve to smoothen edges somewhat, in case hills were cut off
    K = np.ones((10,10)) / 100.0
    s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
    hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s

    return hfield
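A self-contained sketch of the smoothing step above (the STEP constant and the Box argument are omitted), showing how the 10x10 averaging kernel with boundary='symm' softens the hard edge left by clearing the patch:

import numpy as np
from scipy.signal import convolve2d

hfield = np.ones((40, 40))
hfield[15:25, 15:25] = 0.0      # cleared patch leaves a hard edge

K = np.ones((10, 10)) / 100.0   # 10x10 averaging kernel
region = hfield[6:34, 6:34]     # patch plus a 9-pixel margin, as in the snippet
hfield[6:34, 6:34] = convolve2d(region, K, mode='same', boundary='symm')
print(hfield[11:20, 20])        # values now ramp smoothly from 1.0 toward 0.0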
Project: chxanalys    Author: yugangzhang
def binImage(self, bin_factor_x=1, bin_factor_y=1, fraction_required_to_keep = 0.5):
        # bin an image by bin_factor_x in X and bin_factor_y in Y by averaging all pixels in an bin_factor_x by bin_factor_y rectangle
        # This is accomplished using convolution followed by downsampling, with the downsampling chosen so that the center
        # of the binned image coincides with the center of the original unbinned one.

        from scipy.signal import convolve2d
        self.data[self.data < 0] = 0 # temporarily remove flags
        numberOfValues = (self.data != 0).astype(int) # record positions that have a value
        binning_kernel = np.ones((bin_factor_x, bin_factor_y), dtype=float) # create binning kernel (all values within this get averaged)
        self.data = convolve2d(self.data, binning_kernel, mode='same') # perform 2D convolution
        numberOfValues = convolve2d(numberOfValues, binning_kernel, mode='same') # do the same with the number of values
        self.data[ numberOfValues > 1 ] = self.data[ numberOfValues > 1 ] / numberOfValues[ numberOfValues > 1 ] # take average, accounting for how many datapoints went into each point
        self.data[ numberOfValues < (bin_factor_x * bin_factor_y * fraction_required_to_keep)] = -1 # if too few values existed for averaging because too many of the pixels were unknown, make the resulting pixel unknown
        dimx, dimy = np.shape(self.data) # get dimensions
        centerX = dimx//2 # get center in X direction
        centerY = dimy//2 # get center in Y direction

        # Now take the smaller array from the smoothed large one to obtain the final binned image. The phrase "centerX % bin_factor_x"
        # is to ensure that the subarray we take includes the exact center of the big array. For example if our original image is
        # 1000x1000 then the central pixel is at position 500 (starting from 0). If we are binning this by 5 we want a 200x200 array
        # where the new central pixel at x=100 corresponds to the old array at x=500, so "centerX % bin_factor_x" ->
        # 500 % 5 = 0, so we would be indexing 0::5 = [0, 5, 10, 15..., 500, 505...] which is what we want. The same scenario with a
        # 1004x1004 image needs the center of the 200x200 array to be at x=502, and 502 % 5 = 2 and we index
        # 2::5 = [2,7,12..., 502, 507 ...]
        self.data = self.data[ centerX % bin_factor_x :: bin_factor_x, centerY % bin_factor_y :: bin_factor_y ]
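A small sketch of the center-preserving downsampling described in the comment (binning a 10x10 array by 2, so the old center index 5 lands on the new center):

import numpy as np
from scipy.signal import convolve2d

data = np.arange(100, dtype=float).reshape(10, 10)
bf = 2                                   # bin factor in both directions
kernel = np.ones((bf, bf))
# interior pixels hold the sum of each 2x2 neighborhood; divide for the mean
smoothed = convolve2d(data, kernel, mode='same') / (bf * bf)

center = data.shape[0] // 2              # old center index: 5
binned = smoothed[center % bf::bf, center % bf::bf]
print(binned.shape)                      # (5, 5); binned[2, 2] covers the old center pixel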
Project: Lyssandra    Author: ektormak
def calculate_sift_grid(self, image, gridH, gridW):

        H, W = image.shape
        Npatches = gridH.size
        feaArr = np.zeros((Npatches, Nsamples * Nangles))

        # calculate gradient
        GH, GW = gen_dgauss(self.sigma)
        IH = signal.convolve2d(image, GH, mode='same')
        IW = signal.convolve2d(image, GW, mode='same')
        Imag = np.sqrt(IH ** 2 + IW ** 2)
        Itheta = np.arctan2(IH, IW)
        Iorient = np.zeros((Nangles, H, W))
        for i in range(Nangles):
            Iorient[i] = Imag * np.maximum(np.cos(Itheta - angles[i]) ** alpha, 0)
        for i in range(Npatches):
            currFeature = np.zeros((Nangles, Nsamples))
            for j in range(Nangles):
                currFeature[j] = np.dot(self.weights, \
                                        Iorient[j, gridH[i]:gridH[i] + self.pS, gridW[i]:gridW[i] + self.pS].flatten())
            feaArr[i] = currFeature.flatten()
        return feaArr
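The module-level names used above (Nangles, Nsamples, angles, alpha, gen_dgauss, self.weights) are defined elsewhere in the file. Plausible values for the constants, following the classic dense-SIFT implementation this code appears to derive from, would be:

import numpy as np

# assumed constants, not shown in the excerpt
Nangles = 8                       # number of gradient orientation bins
Nbins = 4                         # spatial bins per side
Nsamples = Nbins * Nbins          # spatial samples per patch
alpha = 9.0                       # orientation weighting exponent
angles = np.array(range(Nangles)) * 2.0 * np.pi / Nangles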
Project: Lyssandra    Author: ektormak
def extract_sift_patches(self, image, gridH, gridW):
        # extracts the sift descriptor of patches
        # in positions (gridH,gridW) in the image
        H, W = image.shape
        Npatches = gridH.size
        feaArr = np.zeros((Npatches, Nsamples * Nangles))

        # calculate gradient
        GH, GW = gen_dgauss(self.sigma)
        IH = signal.convolve2d(image, GH, mode='same')
        IW = signal.convolve2d(image, GW, mode='same')
        Imag = np.sqrt(IH ** 2 + IW ** 2)
        Itheta = np.arctan2(IH, IW)
        Iorient = np.zeros((Nangles, H, W))
        for i in range(Nangles):
            Iorient[i] = Imag * np.maximum(np.cos(Itheta - angles[i]) ** alpha, 0)
        for i in range(Npatches):
            currFeature = np.zeros((Nangles, Nsamples))
            for j in range(Nangles):
                currFeature[j] = np.dot(self.weights, \
                                        Iorient[j, gridH[i]:gridH[i] + self.pS, gridW[i]:gridW[i] + self.pS].flatten())
            feaArr[i] = currFeature.flatten()
        # feaArr contains each descriptor in a row
        feaArr = self.normalize_sift(feaArr)
        return feaArr
Project: MOOCs    Author: ankitaggarwal011
def run_edges(image):
  ''' This function finds and colors all edges in the given image.
  '''

  # Convert image to gray
  if len(image.shape) > 2:
    grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  else:
    grayimage = image

  # blur so the gradient operation is less noisy.
  # uses a gaussian filter with sigma = 2
  grayimage = gaussian_filter(grayimage, 2).astype(float)

  # Filter with x and y sobel filters
  dx = convolve2d(grayimage, sobel_filter_x())
  dy = convolve2d(grayimage, sobel_filter_y())

  # Convert to orientation and magnitude images
  theta = transform_xy_theta(dx, dy)
  mag = transform_xy_mag(dx, dy)

  outimg = np.zeros((image.shape[0], image.shape[1], 3), dtype = np.uint8)

  # Fill with corresponding color.
  for r in range(outimg.shape[0]):
    for c in range(outimg.shape[1]):
      outimg[r,c,:] = get_color(theta[r,c], mag[r,c])

  return outimg
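The helpers sobel_filter_x() and sobel_filter_y() are not shown in this excerpt; plausible definitions using the standard 3x3 Sobel kernels would be:

import numpy as np

def sobel_filter_x():
    # standard Sobel kernel for gradients along x
    return np.array([[-1, 0, 1],
                     [-2, 0, 2],
                     [-1, 0, 1]], dtype=float)

def sobel_filter_y():
    # standard Sobel kernel for gradients along y
    return np.array([[-1, -2, -1],
                     [ 0,  0,  0],
                     [ 1,  2,  1]], dtype=float)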
Project: Fingerprint-Recognition    Author: zhangzimou
def calcDirection(img,blockSize):
    """calculate ridge directions in an image, using gradient method
    return: ridge directions
    """
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    N,M=np.shape(img)
    Vx=np.zeros((N//blockSize,M//blockSize))
    Vy=np.zeros((N//blockSize,M//blockSize))
    for i in xrange(N//blockSize):
        for j in xrange(M//blockSize):
            a=i*blockSize;b=a+blockSize;c=j*blockSize;d=c+blockSize
            Vy[i,j]=2*np.sum(par_x[a:b,c:d]*par_y[a:b,c:d])
            Vx[i,j]=np.sum(par_y[a:b,c:d]**2-par_x[a:b,c:d]**2)
    gaussianBlurSigma=2;    gaussian_block=5
    Vy=cv2.GaussianBlur(Vy,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    Vx=cv2.GaussianBlur(Vx,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    theta=0.5*np.arctan2(Vy,Vx)            
    return theta
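A rough sanity check of the double-angle orientation estimate on a synthetic image (boundary effects make the value approximate, so treat this as a sketch):

import numpy as np
from scipy.signal import convolve2d

# vertical stripes: intensity varies along x only
img = np.tile(np.sin(np.linspace(0, 6 * np.pi, 64)), (64, 1))

sobel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
sobel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
par_x = convolve2d(img, sobel_x, mode='same')
par_y = convolve2d(img, sobel_y, mode='same')

Vy = 2 * np.sum(par_x * par_y)
Vx = np.sum(par_y ** 2 - par_x ** 2)
theta = 0.5 * np.arctan2(Vy, Vx)
print(theta)  # close to +/- pi/2, i.e. ridges running vertically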
Project: Fingerprint-Recognition    Author: zhangzimou
def calcDirection(img,blockSize):
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    N,M=np.shape(img)
    Vx=np.zeros((N//blockSize,M//blockSize))
    Vy=np.zeros((N//blockSize,M//blockSize))
    for i in xrange(N//blockSize):
        for j in xrange(M//blockSize):
            a=i*blockSize;b=a+blockSize;c=j*blockSize;d=c+blockSize
            Vy[i,j]=2*np.sum(par_x[a:b,c:d]*par_y[a:b,c:d])
            Vx[i,j]=np.sum(par_y[a:b,c:d]**2-par_x[a:b,c:d]**2)
    gaussianBlurSigma=2;    gaussian_block=5
    Vy=cv2.GaussianBlur(Vy,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    Vx=cv2.GaussianBlur(Vx,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    theta=0.5*np.arctan2(Vy,Vx)#+np.pi/2            
    return theta
Project: Learn-to-identify-similar-images    Author: MashiMaroLjc
def _shape(self,img):
        """
        Extract the shape (edge) features of an image.
        :param img: grayscale image as a 2D array
        :return: binarized edge image
        """

        operator1 = np.fromfunction(self._gauss, (5, 5), sigma=self._sigma)
        operator2 = np.array([[1, 1, 1],
                           [1,-8, 1],
                           [1, 1, 1]])
        # smooth first, then detect edges with the Laplacian operator
        image_blur = signal.convolve2d(img, operator1, mode="same")
        image2 = signal.convolve2d(image_blur, operator2, mode="same")
        if image2.max() != 0:
            image2 = (image2 / float(image2.max())) * 255
        else:
            image2 = np.zeros(image2.shape)
        # binarize: pixels brighter than the mean become 255
        image2[image2 > image2.mean()] = 255
        # and the remaining pixels become 0
        image2[image2 <= image2.mean()] = 0
        return image2
Project: Theano-Deep-learning    Author: GeekLiB
def py_conv_scipy(img, kern, mode, subsample):
    assert img.shape[1] == kern.shape[1]
    if mode == 'valid':
        outshp = (img.shape[0], kern.shape[0],
                  img.shape[2] - kern.shape[2] + 1,
                  img.shape[3] - kern.shape[3] + 1)
    else:
        outshp = (img.shape[0], kern.shape[0],
                  img.shape[2] + kern.shape[2] - 1,
                  img.shape[3] + kern.shape[3] - 1)
    out = numpy.zeros(outshp, dtype='float32')
    for b in xrange(out.shape[0]):
        for k in xrange(out.shape[1]):
            for s in xrange(img.shape[1]):
                # convolve2d or correlate
                out[b, k, :, :] += convolve2d(img[b, s, :, :],
                                              kern[k, s, :, :],
                                              mode)
    return out[:, :, ::subsample[0], ::subsample[1]]
Project: hco-experiments    Author: zooniverse
def convolve(patchDim, numFeatures, images, W):
    m = np.shape(images)[3]
    imageDim = np.shape(images)[0]
    imageChannels = np.shape(images)[2]
    convolvedFeatures = np.zeros((numFeatures, m, \
                                  imageDim - patchDim + 1, imageDim - patchDim + 1))
    for imageNum in range(m):
        for featureNum in range(numFeatures):
            convolvedImage = np.zeros((imageDim - patchDim + 1, imageDim - patchDim + 1))
            for channel in range(imageChannels):
                feature = np.zeros((patchDim, patchDim))
                start = channel * patchDim*patchDim
                stop = start + patchDim*patchDim
                feature += np.reshape(W[featureNum, start:stop], (patchDim, patchDim), order="F")
                feature = np.flipud(np.fliplr(feature))
                im = images[:, :, channel, imageNum]
                convolvedImage += sig.convolve2d(im, feature, "valid")
            # sparse filtering activation function
            convolvedImage = np.sqrt(1e-8 + np.multiply(convolvedImage, convolvedImage))
            convolvedFeatures[featureNum, imageNum, :, :] = convolvedImage
    return convolvedFeatures
Project: hco-experiments    Author: zooniverse
def convolve(patchDim, numFeatures, images, W):
    m = np.shape(images)[3]
    imageDim = np.shape(images)[0]
    imageChannels = np.shape(images)[2]
    convolvedFeatures = np.zeros((numFeatures, m, \
                        imageDim - patchDim + 1, imageDim - patchDim + 1))
    for imageNum in range(m):
        for featureNum in range(numFeatures):
            convolvedImage = np.zeros((imageDim - patchDim + 1, imageDim - patchDim + 1))
            for channel in range(imageChannels):
                feature = np.zeros((patchDim, patchDim))
                start = channel * patchDim*patchDim
                stop = start + patchDim*patchDim
                feature += np.reshape(W[featureNum, start:stop], (patchDim, patchDim), order="F")
                feature = np.flipud(np.fliplr(feature))
                im = images[:, :, channel, imageNum]
                convolvedImage += sig.convolve2d(im, feature, "valid")
            # sparse filtering activation function
            convolvedImage = np.sqrt(1e-8 + np.multiply(convolvedImage, convolvedImage))
            convolvedFeatures[featureNum, imageNum, :, :] = convolvedImage
    return convolvedFeatures
Project: hco-experiments    Author: zooniverse
def convolve(patchDim, numFeatures, images, W):
    m = np.shape(images)[3]
    imageDim = np.shape(images)[0]
    imageChannels = np.shape(images)[2]
    convolvedFeatures = np.zeros((numFeatures, m, \
                        imageDim - patchDim + 1, imageDim - patchDim + 1))
    for imageNum in range(m):
        for featureNum in range(numFeatures):
            convolvedImage = np.zeros((imageDim - patchDim + 1, imageDim - patchDim + 1))
            for channel in range(imageChannels):
                feature = np.zeros((patchDim, patchDim))
                start = channel * patchDim*patchDim
                stop = start + patchDim*patchDim
                feature += np.reshape(W[featureNum, start:stop], (patchDim, patchDim), order="F")
                feature = np.flipud(np.fliplr(feature))
                im = images[:, :, channel, imageNum]
                convolvedImage += sig.convolve2d(im, feature, "valid")
            # sparse filtering activation function
            convolvedImage = np.sqrt(1e-8 + np.multiply(convolvedImage, convolvedImage))
            convolvedFeatures[featureNum, imageNum, :, :] = convolvedImage
    return convolvedFeatures
Project: hco-experiments    Author: zooniverse
def convolve(patchDim, numFeatures, images, W):
    m = np.shape(images)[3]
    imageDim = np.shape(images)[0]
    imageChannels = np.shape(images)[2]
    convolvedFeatures = np.zeros((numFeatures, m, \
                                  imageDim - patchDim + 1, imageDim - patchDim + 1))
    for imageNum in range(m):
        for featureNum in range(numFeatures):
            convolvedImage = np.zeros((imageDim - patchDim + 1, imageDim - patchDim + 1))
            for channel in range(imageChannels):
                feature = np.zeros((patchDim, patchDim))
                start = channel * patchDim*patchDim
                stop = start + patchDim*patchDim
                feature += np.reshape(W[featureNum, start:stop], (patchDim, patchDim), order="F")
                feature = np.flipud(np.fliplr(feature))
                im = images[:, :, channel, imageNum]
                convolvedImage += sig.convolve2d(im, feature, "valid")
            # sparse filtering activation function
            convolvedImage = np.sqrt(1e-8 + np.multiply(convolvedImage, convolvedImage))
            convolvedFeatures[featureNum, imageNum, :, :] = convolvedImage
    return convolvedFeatures
Project: hco-experiments    Author: zooniverse
def convolve(patchDim, numFeatures, images, W):
    m = np.shape(images)[3]
    imageDim = np.shape(images)[0]
    imageChannels = np.shape(images)[2]
    convolvedFeatures = np.zeros((numFeatures, m, \
                        imageDim - patchDim + 1, imageDim - patchDim + 1))
    for imageNum in range(m):
        for featureNum in range(numFeatures):
            convolvedImage = np.zeros((imageDim - patchDim + 1, imageDim - patchDim + 1))
            for channel in range(imageChannels):
                feature = np.zeros((patchDim, patchDim))
                start = channel * patchDim*patchDim
                stop = start + patchDim*patchDim
                feature += np.reshape(W[featureNum, start:stop], (patchDim, patchDim), order="F")
                feature = np.flipud(np.fliplr(feature))
                im = images[:, :, channel, imageNum]
                convolvedImage += sig.convolve2d(im, feature, "valid")
            # sparse filtering activation function
            convolvedImage = np.sqrt(1e-8 + np.multiply(convolvedImage, convolvedImage))
            convolvedFeatures[featureNum, imageNum, :, :] = convolvedImage
    return convolvedFeatures
Project: perception    Author: BerkeleyAutomation
def inpaint(self, rescale_factor=1.0):
        """ Fills in the zero pixels in the image.

        Parameters
        ----------
        rescale_factor : float
            amount to rescale the image for inpainting, smaller numbers increase speed

        Returns
        -------
        :obj:`DepthImage`
            depth image with zero pixels filled in
        """
        # form inpaint kernel
        inpaint_kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])

        # resize the image
        resized_data = self.resize(rescale_factor, interp='nearest').data

        # inpaint the smaller image
        cur_data = resized_data.copy()
        zeros = (cur_data == 0)
        while np.any(zeros):
            neighbors = ssg.convolve2d((cur_data != 0), inpaint_kernel,
                                       mode='same', boundary='symm')
            avg_depth = ssg.convolve2d(cur_data, inpaint_kernel,
                                       mode='same', boundary='symm')
            avg_depth[neighbors > 0] = avg_depth[neighbors > 0] / \
                neighbors[neighbors > 0]
            avg_depth[neighbors == 0] = 0
            avg_depth[resized_data > 0] = resized_data[resized_data > 0]
            cur_data = avg_depth
            zeros = (cur_data == 0)

        # fill in zero pixels with inpainted and resized image
        inpainted_im = DepthImage(cur_data, frame=self.frame)
        filled_data = inpainted_im.resize(
            1.0 / rescale_factor, interp='bilinear').data
        new_data = np.copy(self.data)
        new_data[self.data == 0] = filled_data[self.data == 0]
        return DepthImage(new_data, frame=self.frame)
Project: neurotools    Author: michaelerule
def absorbing_gaussian(x,sigma):
    '''
    Applies a gaussian convolution to 2d array `x` with absorbing
    boundary conditions.

    Parameters
    ----------
    x : 2D np.array
        Data to be smoothed
    sigma : positive float
        Standard deviation of the Gaussian kernel

    Returns
    -------
    2D np.array
        Smoothed copy of `x`
    '''
    kernel = gaussian_2D_kernel(sigma)
    return convolve2d(x, kernel, mode='same', boundary='symm')
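gaussian_2D_kernel() is defined elsewhere in neurotools; a minimal stand-in consistent with how it is used here might be:

import numpy as np

def gaussian_2D_kernel(sigma):
    # hypothetical stand-in: normalized 2D Gaussian with ~6*sigma support
    radius = int(np.ceil(3 * sigma))
    x = np.arange(-radius, radius + 1)
    g = np.exp(-x ** 2 / (2.0 * sigma ** 2))
    kernel = np.outer(g, g)
    return kernel / kernel.sum()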
Project: rllab    Author: rll
def clear_patch(hfield, box):
    ''' Clears a patch shaped like box, assuming robot is placed in center of hfield
    @param box: rllab.spaces.Box-like
    '''
    if box.flat_dim > 2:
        raise ValueError("Provide 2dim box")

    # clear patch
    h_center = int(0.5 * hfield.shape[0])
    w_center = int(0.5 * hfield.shape[1])
    fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
    fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
    hfield[fromrow:torow, fromcol:tocol] = 0.0

    # convolve to smoothen edges somewhat, in case hills were cut off
    K = np.ones((10,10)) / 100.0
    s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
    hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s

    return hfield
Project: Machine-Learning    Author: hadikazemi
def convolve2d(image, kernel):
    # This function which takes an image and a kernel
    # and returns the convolution of them
    # Args:
    #   image: a numpy array of size [image_height, image_width].
    #   kernel: a numpy array of size [kernel_height, kernel_width].
    # Returns:
    #   a numpy array of size [image_height, image_width] (convolution output).

    kernel = np.flipud(np.fliplr(kernel))  # Flip the kernel
    output = np.zeros_like(image)  # convolution output
    # Add zero padding to the input image
    image_padded = np.zeros((image.shape[0] + 2, image.shape[1] + 2))
    image_padded[1:-1, 1:-1] = image
    for x in range(image.shape[1]):  # Loop over every pixel of the image
        for y in range(image.shape[0]):
            # element-wise multiplication of the kernel and the image
            output[y, x] = (kernel * image_padded[y:y + 3, x:x + 3]).sum()
    return output


# Convolve a sharpen kernel with an image
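A usage sketch completing that comment (the image here is a stand-in; note this hand-rolled convolve2d shadows the SciPy function and assumes a 3x3 kernel):

import numpy as np

sharpen = np.array([[ 0, -1,  0],
                    [-1,  5, -1],
                    [ 0, -1,  0]], dtype=float)  # standard 3x3 sharpen kernel

image = np.random.rand(64, 64)         # stand-in grayscale image
sharpened = convolve2d(image, sharpen)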
Project: ois    Author: toros-astro
def get_cmatrices(self):
        kh, kw = self.k_shape
        v, u = np.mgrid[:kh, :kw]
        c = []
        for aGauss in self.gausslist:
            n = aGauss['modPolyDeg'] + 1
            allus = [pow(u, i) for i in range(n)]
            allvs = [pow(v, i) for i in range(n)]
            gaussk = self.gauss(center=aGauss['center'],
                                sx=aGauss['sx'], sy=aGauss['sy'])
            newc = [signal.convolve2d(self.refimage, gaussk * aU * aV,
                                      mode='same')
                    for i, aU in enumerate(allus)
                    for aV in allvs[:n - i]
                    ]
            c.extend(newc)
        return c
Project: SeisFlows_tunnel    Author: DmBorisov
def gridsmooth(Z, span):
    """ Smooths values on 2D rectangular grid
    """
    import warnings
    warnings.filterwarnings('ignore')

    x = np.linspace(-2.*span, 2.*span, int(2.*span + 1.))
    y = np.linspace(-2.*span, 2.*span, int(2.*span + 1.))
    (X, Y) = np.meshgrid(x, y)
    mu = np.array([0., 0.])
    sigma = np.diag([span, span])**2.
    F = gauss2(X, Y, mu, sigma)
    F = F/np.sum(F)
    W = np.ones(Z.shape)
    Z = _signal.convolve2d(Z, F, 'same')
    W = _signal.convolve2d(W, F, 'same')
    Z = Z/W
    return Z
Project: US-image-prediction    Author: ChengruiWu008
def add_gauss_noise(image,r=10):
    suanzi = np.fromfunction(func, (r, r), sigma=5)
    image = np.array(image)
    image2 = signal.convolve2d(image, suanzi, mode="same")
    image2 = (image2 / float(image2.max())) * 255
    return np.array(image2)
Project: US-image-prediction    Author: ChengruiWu008
def add_gauss_noise(image,r=10):
    suanzi = np.fromfunction(func, (r, r), sigma=5)
    image = np.array(image)
    image2 = signal.convolve2d(image, suanzi, mode="same")
    image2 = (image2 / float(image2.max())) * 255
    return np.array(image2)
Project: atoolbox    Author: liweitianux
def transform(self, data, scale, boundary="symm"):
        """
        Perform only one scale wavelet transform for the given data.

        return:
            [ approx, detail ]
        """
        self.decomposition = []
        approx = signal.convolve2d(data, self.filters[scale],
                mode="same", boundary=self.boundary)
        detail = data - approx
        return [approx, detail]
Project: atoolbox    Author: liweitianux
def decompose(self, level, boundary="symm"):
        """
        Perform IUWT decomposition in the plain loop way.
        The filters of each scale/level are calculated first, then the
        approximations of each scale/level are calculated by convolving the
        raw/finest image with these filters.

        return:
            [ W_1, W_2, ..., W_n, A_n ]
            n = level
            W: wavelet details
            A: approximation
        """
        self.boundary = boundary
        if self.level != level or self.filters == []:
            self.level = level
            self.calc_filters()
        self.decomposition = []
        approx = self.data
        for scale in range(1, level+1):
            # approximation:
            approx2 = signal.convolve2d(self.data, self.filters[scale],
                    mode="same", boundary=self.boundary)
            # wavelet details:
            w = approx - approx2
            self.decomposition.append(w)
            if scale == level:
                self.decomposition.append(approx2)
            approx = approx2
        return self.decomposition
Project: atoolbox    Author: liweitianux
def __decompose(self, data, phi, level):
        """
        2D IUWT decomposition (or stationary wavelet transform).

        This is a convolution version, where kernel is zero-upsampled
        explicitly. Not fast.

        Parameters:
        - level : level of decomposition
        - phi : low-pass filter kernel
        - boundary : boundary conditions (passed to scipy.signal.convolve2d,
                     'symm' by default)

        Returns:
            list of wavelet details + last approximation. Each element in
            the list is an image of the same size as the input image. 
        """
        if level <= 0:
            return data
        shapecheck = list(map(lambda a, b: a > b, data.shape, phi.shape))
        assert np.all(shapecheck)
        # approximation:
        approx = signal.convolve2d(data, phi[::-1, ::-1], mode="same",
                boundary=self.boundary)
        # wavelet details:
        w = data - approx
        phi_up = self.zupsample(phi, order=1)
        shapecheck = list(map(lambda a, b: a > b, data.shape, phi_up.shape))
        if level == 1:
            return [w, approx]
        elif not np.all(shapecheck):
            print("Maximum allowed decomposition level reached",
                    file=sys.stderr)
            return [w, approx]
        else:
            return [w] + self.__decompose(approx, phi_up, level-1)
Project: atoolbox    Author: liweitianux
def decompose(self, level=5, boundary="symm", verbose=False):
        """
        2D IUWT decomposition with VST.
        """
        self.boundary = boundary
        if self.level != level or self.filters == []:
            self.level = level
            self.calc_filters()
            self.calc_vst_coef()
        self.decomposition = []
        approx = self.data
        if verbose:
            print("IUWT decomposing (%d levels): " % level,
                    end="", flush=True, file=sys.stderr)
        for scale in range(1, level+1):
            if verbose:
                print("%d..." % scale, end="", flush=True, file=sys.stderr)
            # approximation:
            approx2 = signal.convolve2d(self.data, self.filters[scale],
                    mode="same", boundary=self.boundary)
            # wavelet details:
            w = self.vst(approx, scale=scale-1) - self.vst(approx2, scale=scale)
            self.decomposition.append(w)
            if scale == level:
                self.decomposition.append(approx2)
            approx = approx2
        if verbose:
            print("DONE!", flush=True, file=sys.stderr)
        return self.decomposition
Project: ml_capstone    Author: drscott173
def moving_average_2d(self, data, window):
        """Moving average on two-dimensional data.
        """
        # Makes sure that the window function is normalized.
        window /= window.sum()
        # Makes sure data array is a numpy array or masked array.
        if type(data).__name__ not in ['ndarray', 'MaskedArray']:
            data = numpy.asarray(data)

        # The output array has the same dimensions as the input data 
        # (mode='same') and symmetrical boundary conditions are assumed
        # (boundary='symm').
        return convolve2d(data, window, mode='same', boundary='symm')
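A usage sketch; note that window /= window.sum() normalizes the caller's array in place, so pass a copy if the window is reused (`obj` here is a hypothetical instance of the class this method belongs to):

import numpy

window = numpy.ones((5, 5))           # un-normalized 5x5 box window
data = numpy.random.rand(50, 50)
# obj is assumed to be an instance exposing moving_average_2d
smoothed = obj.moving_average_2d(data, window.copy())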
Project: semantic-segmentation    Author: albertbuchard
def isolate(img, size, size_ker):

    Id = -0.5*np.eye(size_ker)
    h = np.zeros((size_ker,size_ker))

    h[size_ker//2,:] = 1
    h[:,size_ker//2] = 1

    h[size_ker//2,size_ker//2] = 1

    h = h/np.float(np.sum(h))
    img = img/np.float(np.max(img))
    newimg = np.copy(img)*0
    n1,n2 = np.shape(img)
    N1,N2 = int(n1/size),int(n2/size)
    grid = np.zeros((N1,N2))
    print(np.shape(img), N1, size)

    for  i in range(N1):
        for j in range(N2):
            grid[i,j] = np.mean(img[i*(size):(i+1)*(size),j*(size):(j+1)*(size)])
    grid = (grid-0.5)*2
    newgrid = np.copy(grid)*0
    newgrid = scp.convolve2d(grid,h,mode = 'same', boundary = 'wrap')
    for  i in range(N1):
        for j in range(N2):
            newimg[i*(size):(i+1)*(size),j*(size):(j+1)*(size)] = newgrid[i,j] 
    t = 0
    newimg2 = np.copy(newimg)*0
    newimg2[newimg<=t]= -1
    newimg2[newimg>t] = 1

    return newimg2
Project: semantic-segmentation    Author: albertbuchard
def iuwt(wave, convol2d =0):
    mode = 'nearest'

    lvl,n1,n2 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
    n = np.size(h)

    cJ = np.copy(wave[lvl-1,:,:])


    for i in np.linspace(1,lvl-1,lvl-1):

        newh = np.zeros((1,n+(n-1)*(2**(lvl-1-i)-1)))
        newh[0,np.int_(np.linspace(0,np.size(newh)-1,len(h)))] = h
        H = np.dot(newh.T,newh)

        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(cJ, newh[0,:], axis=0, mode=mode)
            ###### Column convolution
            cnew = sc.convolve1d(cnew, newh[0,:], axis=1, mode=mode)

        cJ = cnew+wave[lvl-1-i,:,:]

    return np.reshape(cJ,(n1,n2))
Project: catchy    Author: jvbalen
def three_melody_matrices(track_id, win=4.0):

    t, melstm, melmat = two_melody_matrices(track_id)
    dt = t[1] - t[0]
    nkern = np.round(win / dt)
    kern1 = np.ones((nkern, 1))
    kern2 = np.zeros((nkern + 1, 1))
    kern = np.vstack((kern1, kern2))
    kern *= 1.0 / nkern
    melfwd = dsp.convolve2d(melmat, kern, mode='same')
    return t, melstm, melmat, melfwd
Project: catchy    Author: jvbalen
def two_melody_matrices(track_id, win=4.0):

    t, melmat = one_melody_matrix(track_id)
    dt = t[1] - t[0]
    nkern = int(np.round(win / dt))
    kern1 = np.zeros((nkern + 1, 1))
    kern2 = np.ones((nkern, 1))
    kern = np.vstack((kern1, kern2))
    kern *= 1.0 / nkern
    melstm = dsp.convolve2d(melmat, kern, mode='same')
    return t, melstm, melmat
Project: lazyprogrammer    Author: inhwane
def convolve_flatten(X):
    # input will be (32, 32, 3, N)
    # output will be (N, 32*32)
    N = X.shape[-1]
    flat = np.zeros((N, 32*32))
    for i in xrange(N):
        #flat[i] = X[:,:,:,i].reshape(3072)
        bw = X[:,:,:,i].mean(axis=2) # make it grayscale
        Gx = convolve2d(bw, Hx, mode='same')
        Gy = convolve2d(bw, Hy, mode='same')
        G = np.sqrt(Gx*Gx + Gy*Gy)
        G /= G.max() # normalize it
        flat[i] = G.reshape(32*32)
    return flat
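Hx and Hy are module-level Sobel kernels not shown in the excerpt; plausible definitions would be:

import numpy as np

# assumed module-level Sobel kernels (not shown above)
Hx = np.array([[-1, 0, 1],
               [-2, 0, 2],
               [-1, 0, 1]], dtype=float)
Hy = Hx.T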
Project: evaluation-toolkit    Author: lightfield-analysis
def get_depth_normals(self, depth_map):
        h, w = np.shape(depth_map)
        zz = depth_map
        xx, yy = np.meshgrid(range(0, h), range(0, w))
        xx = (xx / (h - 1.0) * 0.5) * self.sensor_mm * zz / self.focal_length_mm
        yy = (yy / (w - 1.0) * 0.5) * self.sensor_mm * zz / self.focal_length_mm

        kernel = np.asarray([[3., 10., 3.], [0., 0., 0.], [-3., -10., -3.]])
        kernel /= 64.

        dxdx = ssig.convolve2d(xx, kernel, mode="same", boundary="wrap")
        dydx = ssig.convolve2d(yy, kernel, mode="same", boundary="wrap")
        dzdx = ssig.convolve2d(zz, kernel, mode="same", boundary="wrap")

        dxdy = ssig.convolve2d(xx, np.transpose(kernel), mode="same", boundary="wrap")
        dydy = ssig.convolve2d(yy, np.transpose(kernel), mode="same", boundary="wrap")
        dzdy = ssig.convolve2d(zz, np.transpose(kernel), mode="same", boundary="wrap")

        normal_map = np.full((h, w, 3), fill_value=np.nan)

        normal_map[:, :, 0] = (dzdx * dxdy - dxdx * dzdy)
        normal_map[:, :, 1] = - (dydx * dzdy - dzdx * dydy)
        normal_map[:, :, 2] = - (dxdx * dydy - dydx * dxdy)

        magnitude = np.sqrt(np.sum(np.square(normal_map), axis=2))
        normal_map = normal_map / np.dstack((magnitude, magnitude, magnitude))

        return normal_map
Project: pyblur    Author: lospooky
def PsfBlur(img, psfid):
    imgarray = np.array(img, dtype="float32")
    kernel = psfDictionary[psfid]
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img
Project: pyblur    Author: lospooky
def BoxBlur(img, dim):
    imgarray = np.array(img, dtype="float32")
    kernel = BoxKernel(dim)
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img
Project: pyblur    Author: lospooky
def LinearMotionBlur(img, dim, angle, linetype):
    imgarray = np.array(img, dtype="float32")
    kernel = LineKernel(dim, angle, linetype)
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img
Project: pyblur    Author: lospooky
def DefocusBlur(img, dim):
    imgarray = np.array(img, dtype="float32")
    kernel = DiskKernel(dim)
    convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype("uint8")
    img = Image.fromarray(convolved)
    return img
Project: Fingerprint-Recognition    Author: zhangzimou
def calcDirection(img):
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    Vy=2*np.sum(par_x*par_y)
    Vx=np.sum(par_y**2-par_x**2)
    theta=0.5*np.arctan2(Vy,Vx)#+np.pi/2
    return theta
Project: Fingerprint-Recognition    Author: zhangzimou
def segmentation(img, blockSize=8, h=352, w=288):
    add0=(16-img.shape[0]%16)//2
    add1=(16-img.shape[1]%16)//2
    img=np.vstack((  255*np.ones((add0,img.shape[1])), img, 255*np.ones((add0,img.shape[1]))  ))
    img=np.hstack((  255*np.ones((img.shape[0],add1)), img, 255*np.ones((img.shape[0],add1))  ))
#    img=np.uint8(img)
    ## reference: IMPROVED FINGERPRINT IMAGE SEGMENTATION USING NEW MODIFIED GRADIENT
    #               BASED TECHNIQUE
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    #img=basic.blockproc(img,cv2.equalizeHist,(blockSize,blockSize))
    stdx=blockproc(par_x,np.std,(16,16),True)
    stdy=blockproc(par_y,np.std,(16,16),True)
    grddev=stdx+stdy
    threshold=90
    index=grddev[1:-1,1:-1].copy()
    index[np.where(index<threshold)]=0
    index[np.where(index>=threshold)]=1
    a=np.zeros(grddev.shape)
    a[1:-1,1:-1]=index
    index=a

    valid=np.zeros(img.shape)
    valid_b=block_view(valid,(16,16))
    valid_b[:]=index[:,:,np.newaxis,np.newaxis]

    kernel = np.ones((8,8),np.uint8)
    # first dilate to delete the invalid value inside the fingerprint region
    valid=cv2.dilate(valid,kernel,iterations = 5)
    # then erode more to delete the valid value outside the fingerprint region
    valid=cv2.erode(valid, kernel, iterations = 12)
    # dilate again to increase the valid value area in compensate for the lose
    # due to erosion in the last step
    valid=cv2.dilate(valid, kernel, iterations=7)

    img[np.where(valid==0)]=255
    # align the image    
    #img=align(img, valid)         
    return cut(img, valid, h, w)
Project: Fingerprint-Recognition    Author: zhangzimou
def calcDirection(img,blockSize,method='block-wise'):
    """calculate ridge directions in an image, using gradient method
    return: ridge directions
    """
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    N,M=np.shape(img)
    if method=='block-wise':
        Vx=np.zeros((N//blockSize,M//blockSize))
        Vy=np.zeros((N//blockSize,M//blockSize))
        for i in xrange(N//blockSize):
            for j in xrange(M//blockSize):
                a=i*blockSize;b=a+blockSize;c=j*blockSize;d=c+blockSize
                Vy[i,j]=2*np.sum(par_x[a:b,c:d]*par_y[a:b,c:d])
                Vx[i,j]=np.sum(par_y[a:b,c:d]**2-par_x[a:b,c:d]**2)

    elif method=='pixel-wise':
        Vx,Vy=np.zeros((N,M)),np.zeros((N,M))
        for i in xrange(blockSize//2,N-blockSize//2):
            a=i-blockSize//2
            b=a+blockSize
            for j in xrange(blockSize//2,M-blockSize//2):
                c=j-blockSize//2
                d=c+blockSize
                Vy[i,j]=2*np.sum(par_x[a:b,c:d]*par_y[a:b,c:d])
                Vx[i,j]=np.sum(par_y[a:b,c:d]**2-par_x[a:b,c:d]**2)

    gaussianBlurSigma=2;
    gaussian_block=5 if method=='block-wise' else 21
    Vy=cv2.GaussianBlur(Vy,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    Vx=cv2.GaussianBlur(Vx,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    theta=0.5*np.arctan2(Vy,Vx)            
    return theta
Project: Fingerprint-Recognition    Author: zhangzimou
def calcDirection(img):
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    Vy=2*np.sum(par_x*par_y)
    Vx=np.sum(par_y**2-par_x**2)
    theta=0.5*np.arctan2(Vy,Vx)#+np.pi/2
    return theta
Project: pyglitch    Author: giofusco
def apply_filter(X, H):
    """convolve image X with 2D matrix H. Returns a new modified matrix"""
    I = X.copy()
    for c in range(0, pgc.num_channels(X)):
        I[:, :, c] = signal.convolve2d(I[:, :, c], H, mode='same')
    return I
Project: SignGlove    Author: papachristoumarios
def windowData(instanceArray, windowSize):
    window = np.ones((windowSize, 1))
    windowed = convolve2d(instanceArray, window, mode="valid")
    return windowed[::windowSize,:]
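With a column of ones and mode='valid', each output row is the sum over a window of rows, and the [::windowSize] stride keeps only non-overlapping windows. A quick check:

import numpy as np
from scipy.signal import convolve2d

data = np.arange(12, dtype=float).reshape(6, 2)   # 6 samples, 2 channels
print(windowData(data, 3))
# [[ 6.  9.]
#  [24. 27.]]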
Project: tutorials    Author: pytorch
def backward(self, grad_output):
        input, filter = self.saved_tensors
        grad_input = convolve2d(grad_output.numpy(), filter.t().numpy(), mode='full')
        grad_filter = convolve2d(input.numpy(), grad_output.numpy(), mode='valid')
        return torch.FloatTensor(grad_input), torch.FloatTensor(grad_filter)
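For context, the matching forward pass in the same old-style autograd API (a sketch along the lines of that tutorial, not a verbatim quote) would be:

import torch
from scipy.signal import convolve2d

def forward(self, input, filter):
    # cross-correlate/convolve the input with the learnable filter
    result = convolve2d(input.numpy(), filter.numpy(), mode='valid')
    self.save_for_backward(input, filter)
    return torch.FloatTensor(result)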
Project: jamespy_py3    Author: jskDr
def fd_conv(Img_xy, h2d, mode ='same'):
    #return convolve2d(Img_xy, h2d, mode=mode)
    return fftconvolve(Img_xy, h2d, mode=mode)
Project: jamespy_py3    Author: jskDr
def fd_conv(Img_xy, h2d, mode ='same'):
    #return convolve2d(Img_xy, h2d, mode=mode)
    return fftconvolve(Img_xy, h2d, mode=mode)
Project: videoseg    Author: pathak22
def compute_flow(impath1, impath2, outdir,
                    fbcodepath=os.getenv("HOME") + '/fbcode'):
    stem = os.path.splitext(os.path.basename(impath1))[0]
    deepmatch_cmd = os.path.join(fbcodepath,
                                    '_bin/experimental/deeplearning/dpathak' +
                                    '/video-processing/deepmatch/deepmatch')
    call([deepmatch_cmd, impath1, impath2, '-out',
                os.path.join(outdir, stem + '_sparse.txt'), '-downscale', '2'])
    img1 = cv2.imread(impath1).astype(float)
    M = np.zeros((img1.shape[0], img1.shape[1]), dtype=np.float32)
    filt = np.array([[1., -1.]]).reshape((1, -1))
    for c in range(3):
        gx = convolve2d(img1[:, :, c], filt, mode='same')
        gy = convolve2d(img1[:, :, c], filt.T, mode='same')
        M = M + gx**2 + gy**2

    M = M / np.max(M)
    with open(os.path.join(outdir, '_edges.bin'), 'wb') as f:
        M.tofile(f)

    epicflow_command = os.path.join(fbcodepath,
                                    '_bin/experimental/deeplearning/dpathak' +
                                    '/video-processing/epicflow/epicflow')
    call([epicflow_command, impath1, impath2,
                os.path.join(outdir, '_edges.bin'),
                os.path.join(outdir, stem + '_sparse.txt'),
                os.path.join(outdir, 'flow.flo')])

    flow = read_flo(os.path.join(outdir, 'flow.flo'))
    hsv = np.zeros_like(img1).astype(np.uint8)
    hsv[..., 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0].astype(float),
                                flow[..., 1].astype(float))
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    hsv[..., 0] = ang * 180 / np.pi / 2
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imwrite(os.path.join(outdir, stem + '_flow.png'), bgr)