The following 10 code examples, extracted from open-source Python projects, illustrate how to use scipy.ndimage.uniform_filter().
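Before the project examples, here is a minimal, self-contained usage sketch (the input array is made up for illustration): uniform_filter replaces each element with the mean of the size x size window centred on it, using the default 'reflect' boundary mode at the edges.

import numpy as np
from scipy import ndimage

# Minimal usage sketch: each output element is the mean of the 3x3 window
# centred on it; the borders are handled by the default 'reflect' mode.
a = np.arange(25, dtype=float).reshape(5, 5)
smoothed = ndimage.uniform_filter(a, size=3)
print(smoothed)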
def run(self, ips, snap, img, para=None):
    # nimg refers to scipy.ndimage; the filtered snapshot is written
    # directly into the preallocated output image `img`.
    nimg.uniform_filter(snap, para['size'], output=img)
def run(self, ips, imgs, para=None):
    # ndimg refers to scipy.ndimage; the whole image stack is filtered
    # and written back in place.
    imgs[:] = ndimg.uniform_filter(imgs, para['size'])
import numpy as np
from scipy import ndimage


def filter_data(data, size, no_data_val=None):
    """
    This does not work with a masked array: ndimage.uniform_filter does not
    respect the mask of a masked array.

    Parameters
    ----------
    data : input array
    size : uniform filter kernel size
    no_data_val : value in the array that is treated as no data

    Returns
    -------
    averaged_data : the uniformly filtered array
    """
    if no_data_val:
        # Replace no-data pixels with NaN before filtering.
        mask = data == no_data_val
        data[mask] = np.nan
    averaged_data = np.zeros_like(data)
    ndimage.uniform_filter(data, output=averaged_data, size=size, mode='nearest')
    return averaged_data
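The warning in the docstring can be checked directly. In this hedged sketch (the 3x3 array and the -9999 no-data value are made up), uniform_filter operates on the underlying data of a masked array and ignores the mask, so the no-data value contaminates the neighbouring means:

import numpy as np
from scipy import ndimage

# Made-up example: the masked -9999 value still leaks into the averages,
# because uniform_filter ignores the mask of the masked array.
raw = np.array([[1., 1., 1.],
                [1., -9999., 1.],
                [1., 1., 1.]])
masked = np.ma.masked_equal(raw, -9999.)
print(ndimage.uniform_filter(masked, size=3))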
import numpy as np
from scipy import ndimage


def add_noise(x):
    """Add random noise to images."""
    sz = x.shape
    mask = (x == 0) * 1.
    noise = np.random.rand(sz[0], sz[1])
    for i in range(x.shape[0]):
        # Smooth each 28x28 noise image with a 3x3 mean filter.
        noise[i] = ndimage.uniform_filter(noise[i].reshape(28, 28), size=3).flatten()
    # Only add noise where the original pixels are zero, then clip to [0, 1].
    noise = np.multiply(noise, mask)
    out = np.clip(x + noise, 0., 1.)
    return out
import numpy as np
from scipy import ndimage
from skimage import morphology


def skel_to_graph(skel):
    """
    Transform a skeleton into its branches and nodes by counting the number
    of neighbors of each pixel in the skeleton.
    """
    # 3x3 (3x3x3 in 3D) neighborhood sum: uniform_filter computes the window
    # mean, so scaling by 3**ndim recovers the window sum.
    convolve_skel = 3**skel.ndim * ndimage.uniform_filter(skel.astype(float))
    neighbors = np.copy(skel.astype(np.uint8))
    skel = skel.astype(bool)
    neighbors[skel] = convolve_skel[skel] - 1  # exclude the pixel itself
    # Branch (edge) pixels have 1 or 2 neighbors; node pixels are endpoints
    # and junctions, i.e. skeleton pixels with a neighbor count other than 2.
    edges = morphology.label(np.logical_or(neighbors == 2, neighbors == 1),
                             background=0)
    nodes = morphology.label(np.logical_and(np.not_equal(neighbors, 2), neighbors > 0),
                             background=0)
    length_edges = np.bincount(edges.ravel())
    return nodes, edges, length_edges
import numpy as np
from scipy import ndimage


def count_neighbors(binary):
    # 3x3 neighborhood sum: uniform_filter gives the window mean, so scaling
    # by 3**ndim recovers the sum of on-pixels in each window.
    convolve = 3**binary.ndim * ndimage.uniform_filter(binary.astype(float))
    neighbors = np.copy(binary.astype(np.uint8))
    binary = binary.astype(bool)
    neighbors[binary] = convolve[binary] - 1  # subtract the pixel itself
    return neighbors
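The two functions above rely on the same trick: uniform_filter computes the window mean, so multiplying by 3**ndim recovers the window sum, i.e. the count of on-pixels around each pixel of a binary image. A small made-up check:

import numpy as np
from scipy import ndimage

# Made-up example: 3**ndim times the 3x3 mean is the 3x3 sum, i.e. the
# number of on-pixels in each window (including the pixel itself).
binary = np.array([[0, 1, 0],
                   [1, 1, 1],
                   [0, 1, 0]], dtype=bool)
window_sum = 3**binary.ndim * ndimage.uniform_filter(binary.astype(float))
print(np.rint(window_sum).astype(int))
# The centre pixel has a window sum of 5, i.e. 4 neighbors once the pixel
# itself is subtracted, matching count_neighbors above.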
from scipy import ndimage as ndi


def _icfmedian(i, stack, weight=None, cfwidth=None):
    ufilt = 3  # set this to help with extreme over/under corrections
    return ndi.median_filter(
        ndi.uniform_filter(stack, (ufilt, 1)), (cfwidth, 1))
import numpy as np
from numpy.lib.stride_tricks import as_strided


def filter_center(data, size=3, no_data_val=None, func=np.nanmean,
                  mask_no_data=False):
    """
    Parameters
    ----------
    data: input data
    size: odd number, uniform filtering kernel size
    no_data_val: value in the matrix that is treated as the no data value
    func: function to use, choose from np.nanmean/nanmedian/nanmax/nanmin etc.
    mask_no_data: bool, if True the original no data pixels are kept intact

    Returns
    -------
    func (np.nanmean by default) of the matrix `data` filtered by a uniform
    kernel of size=size.

    Adapted from:
    http://stackoverflow.com/questions/23829097/python-numpy-fastest-method-for-2d-kernel-rank-filtering-on-masked-arrays-and-o?rq=1

    Notes
    -----
    This function centers the kernel at the target pixel, which is slightly
    different from the scipy.ndimage.uniform_filter application, where a
    convolution approach is implemented. An equivalent
    scipy.ndimage.uniform_filter-like convolution approach with
    no_data_val/nan handling can be found in filter_broadcast_uniform_filter
    in this module.

    Change func to np.nanmedian, np.nanmax, np.nanmin as required.
    """
    assert size % 2 == 1, 'Please supply an odd size'
    rows, cols = data.shape
    # Pad with NaN so border windows simply contain fewer valid values.
    padded_data = np.empty(shape=(rows + size - 1, cols + size - 1),
                           dtype=data.dtype)
    padded_data[:] = np.nan
    rows_pad, cols_pad = padded_data.shape
    if no_data_val is not None:
        mask = data == no_data_val
        data[mask] = np.nan
    padded_data[size//2:rows_pad - size//2, size//2:cols_pad - size//2] = data.copy()
    row, col = data.shape
    # View the padded array as overlapping size x size windows, one per pixel.
    stride_data = as_strided(padded_data, (row, col, size, size),
                             padded_data.strides + padded_data.strides)
    stride_data = stride_data.copy().reshape((row, col, size**2))
    avg = func(stride_data, axis=2)
    avg[np.isnan(avg)] = no_data_val
    if mask_no_data:
        avg[mask] = no_data_val
    return avg
import numpy as np
from numpy.lib.stride_tricks import as_strided


def filter_uniform_filter(data, size=3, no_data_val=None, func=np.nanmean):
    """
    Parameters
    ----------
    data: input data
    size: odd number, uniform filtering kernel size
    no_data_val: value in the matrix that is treated as the no data value

    Returns
    -------
    nanmean (or the supplied func) of the matrix `data` filtered by a uniform
    kernel of size=size.

    Adapted from:
    http://stackoverflow.com/questions/23829097/python-numpy-fastest-method-for-2d-kernel-rank-filtering-on-masked-arrays-and-o?rq=1

    Notes
    -----
    This is equivalent to scipy.ndimage.uniform_filter, but can handle nan's
    and can use the numpy nanmean/nanmedian/nanmax/nanmin functions.
    no_data_val/nan handling can be found in filter_broadcast_uniform_filter
    in this module. Change func to np.nanmedian, np.nanmax, np.nanmin as
    required.
    """
    assert size % 2 == 1, 'Please supply an odd size'
    rows, cols = data.shape
    # Pad with NaN so border windows simply contain fewer valid values.
    padded_A = np.empty(shape=(rows + size - 1, cols + size - 1), dtype=data.dtype)
    padded_A[:] = np.nan
    rows_pad, cols_pad = padded_A.shape
    if no_data_val:
        mask = data == no_data_val
        data[mask] = np.nan
    padded_A[size - 1:rows_pad, size - 1:cols_pad] = data.copy()
    n, m = data.shape
    # View the padded array as overlapping size x size windows, one per pixel.
    strided_data = as_strided(padded_A, (n, m, size, size),
                              padded_A.strides + padded_A.strides)
    strided_data = strided_data.copy().reshape((n, m, size**2))
    return func(strided_data, axis=2)
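The docstrings above describe the underlying technique: pad the array with NaN, view it as overlapping size x size windows, then reduce each window with a NaN-aware function. As a minimal sketch of the same idea (not from the project above; the helper name, test array, and -9999 no-data value are made up), the centered variant can also be written with numpy's sliding_window_view (NumPy >= 1.20) instead of as_strided:

import numpy as np

# Sketch of the padded sliding-window approach with a NaN-aware mean.
def nan_aware_uniform_filter(data, size=3, no_data_val=-9999.):
    assert size % 2 == 1, 'Please supply an odd size'
    data = data.astype(float)               # work on a float copy
    data[data == no_data_val] = np.nan      # treat no-data as NaN
    pad = size // 2
    padded = np.pad(data, pad, mode='constant', constant_values=np.nan)
    # One size x size window centred on each original pixel.
    windows = np.lib.stride_tricks.sliding_window_view(padded, (size, size))
    return np.nanmean(windows, axis=(-2, -1))

example = np.array([[1., 2., -9999.],
                    [4., 5., 6.],
                    [7., 8., 9.]])
print(nan_aware_uniform_filter(example, size=3))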
import numpy as np
from scipy.ndimage import uniform_filter
# rgb2gray is an RGB-to-grayscale helper defined or imported elsewhere in
# the original module.


def hog_feature(im):
    """Compute Histogram of Gradient (HOG) features for an image.

    Modified from skimage.feature.hog
    http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog

    Reference:
        Histograms of Oriented Gradients for Human Detection
        Navneet Dalal and Bill Triggs, CVPR 2005

    Parameters:
        im : an input grayscale or rgb image

    Returns:
        feat : Histogram of Gradient (HOG) feature
    """
    # convert rgb to grayscale if needed
    if im.ndim == 3:
        image = rgb2gray(im)
    else:
        image = np.atleast_2d(im)

    sx, sy = image.shape  # image size
    orientations = 9      # number of gradient bins
    cx, cy = (8, 8)       # pixels per cell

    gx = np.zeros(image.shape)
    gy = np.zeros(image.shape)
    gx[:, :-1] = np.diff(image, n=1, axis=1)  # compute gradient on x-direction
    gy[:-1, :] = np.diff(image, n=1, axis=0)  # compute gradient on y-direction
    grad_mag = np.sqrt(gx ** 2 + gy ** 2)     # gradient magnitude
    grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90  # gradient orientation

    n_cellsx = int(np.floor(sx / cx))  # number of cells in x
    n_cellsy = int(np.floor(sy / cy))  # number of cells in y
    # compute orientations integral images
    orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
    for i in range(orientations):
        # isolate orientations in this range
        temp_ori = np.where(grad_ori < 180 / orientations * (i + 1), grad_ori, 0)
        temp_ori = np.where(grad_ori >= 180 / orientations * i, temp_ori, 0)
        # select magnitudes for those orientations
        cond2 = temp_ori > 0
        temp_mag = np.where(cond2, grad_mag, 0)
        # uniform_filter averages the magnitudes over each cx x cy cell;
        # sampling at the cell centers gives one bin value per cell
        orientation_histogram[:, :, i] = \
            uniform_filter(temp_mag, size=(cx, cy))[cx // 2::cx, cy // 2::cy].T

    return orientation_histogram.ravel()
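As a usage note (the random 64x64 image below is made up): a 64x64 grayscale input yields 8x8 cells of 8x8 pixels and 9 orientation bins, so the returned feature vector has 8 * 8 * 9 = 576 entries.

import numpy as np

# Hypothetical call on a random 64x64 grayscale image.
img = np.random.rand(64, 64)
feat = hog_feature(img)
print(feat.shape)   # expected: (576,)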