The following code examples, extracted from open-source Python projects, illustrate how to use scipy.ndimage.gaussian_filter().
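For orientation before the project code, here is a minimal standalone sketch of the call itself (the array and sigma values are purely illustrative): sigma may be a scalar applied to every axis or a per-axis sequence, and the mode/cval arguments control how borders are padded, which several of the examples below pass explicitly.

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.random.rand(128, 128)                    # illustrative 2D float array

smoothed = gaussian_filter(img, sigma=2)          # same sigma on every axis
aniso = gaussian_filter(img, sigma=(5, 1))        # per-axis sigma: (rows, columns)
padded = gaussian_filter(img, sigma=2, mode='constant', cval=0.0)  # zero-padded borders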
def selfQuotientImage(matrix, sigma=5.0):
    '''
    Compute a self quotient image.

    Based on work by Wang et.al. "Self Quotient Image for Face Recognition" ICIP 2004
    '''
    is_image = False
    if isinstance(matrix, pv.Image):
        matrix = matrix.asMatrix2D()
        is_image = True

    assert matrix.min() >= 0
    matrix = matrix + 0.01*matrix.max()

    denom = ndi.gaussian_filter(matrix, sigma)
    # make sure there are no divide by zeros
    matrix = matrix/denom

    if is_image:
        return pv.Image(matrix)
    return matrix

def highPassFilter(matrix, sigma):
    '''
    This function computes a high and low pass filter.  This can be used
    to reduce the effect of lighting.

    A low pass image is first computed by convolving the image with a
    Gaussian filter of radius sigma.  Second, a high pass image is computed
    by subtracting the low pass image from the original image.  This means
    that the original image can be reconstructed by adding a low pass image
    and a high pass image.

    @returns: high_pass_image
    '''
    is_image = False
    if isinstance(matrix, pv.Image):
        matrix = matrix.asMatrix2D()
        is_image = True

    matrix = matrix - ndi.gaussian_filter(matrix, sigma)

    if is_image:
        return pv.Image(matrix)
    return matrix

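The docstring's reconstruction claim is easy to verify with plain numpy arrays, without the pyvision Image wrapper. A minimal sketch (the array below is illustrative, not from the project): the low-pass and high-pass components produced this way sum back to the original image.

import numpy as np
from scipy import ndimage as ndi

img = np.random.rand(64, 64)          # stand-in for a pyvision matrix
low = ndi.gaussian_filter(img, 3.0)   # low-pass component (as in lowPassFilter)
high = img - low                      # high-pass component (as in highPassFilter)
assert np.allclose(low + high, img)   # the two components reconstruct the original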
def lowPassFilter(matrix, sigma):
    '''
    This function computes a low pass filter.  It basically smoothes the image
    by convolving with a Gaussian.  This is often used to reduce the effect of
    noise in images or to reduce the effect of small registration errors.

    @returns: a pv.Image set from a numpy matrix if the input was an image, or
        a numpy matrix otherwise.
    '''
    is_image = False
    if isinstance(matrix, pv.Image):
        matrix = matrix.asMatrix2D()
        is_image = True

    matrix = ndi.gaussian_filter(matrix, sigma)

    if is_image:
        return pv.Image(matrix)
    return matrix

def preprocess(self, im, leye, reye, ilog=None):
    im = pv.Image(im.asPIL())
    affine = pv.AffineFromPoints(leye, reye, self.leye, self.reye, self.tile_size)
    tile = affine.transformImage(im)

    mat = tile.asMatrix2D()

    # High pass filter the image
    mat = mat - ndimage.gaussian_filter(mat, self.norm_sigma)

    # Value normalize the image.
    mat = mat - mat.mean()
    mat = mat / mat.std()

    tile = pv.Image(mat)

    return tile

def subtract_background_dog(z, sigma_min, sigma_max):
    """Difference of gaussians method for background removal.

    Parameters
    ----------
    sigma_max : float
        Large gaussian blur sigma.
    sigma_min : float
        Small gaussian blur sigma.

    Returns
    -------
        Denoised diffraction pattern as np.array
    """
    blur_max = ndi.gaussian_filter(z, sigma_max)
    blur_min = ndi.gaussian_filter(z, sigma_min)

    return np.maximum(np.where(blur_min > blur_max, z, 0) - blur_max, 0)

def find_beam_position_blur(z, sigma=30):
    """Estimate direct beam position by blurring the image with a large
    Gaussian kernel and finding the maximum.

    Parameters
    ----------
    sigma : float
        Sigma value for Gaussian blurring kernel.

    Returns
    -------
    center : np.array
        np.array containing indices of estimated direct beam position.
    """
    blurred = ndi.gaussian_filter(z, sigma)
    center = np.unravel_index(blurred.argmax(), blurred.shape)

    return np.array(center)

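The same blur-and-argmax idea can be exercised on a synthetic pattern; the array shape, amplitudes, and the beam location near (90, 40) below are purely illustrative, not from the source project.

import numpy as np
from scipy import ndimage as ndi

z = 0.1 * np.random.rand(256, 256)    # weak, noisy background
z[85:95, 35:45] += 10.0               # bright direct-beam region centred near (90, 40)

# mirrors find_beam_position_blur above: heavy blur, then take the maximum
blurred = ndi.gaussian_filter(z, sigma=30)
center = np.array(np.unravel_index(blurred.argmax(), blurred.shape))
print(center)                         # expected to land near [90, 40]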
def grid_density_gaussian_filter(data, size, resolution=None, smoothing_window=None):
    """Smoothing grid values with a Gaussian filter.

    :param [(float, float, float)] data: list of 3-dimensional grid coordinates
    :param int size: grid size
    :param int resolution: desired grid resolution
    :param int smoothing_window: size of the gaussian kernels for smoothing
    :return: smoothed grid values
    :rtype: numpy.ndarray
    """
    resolution = resolution if resolution else size
    k = (resolution - 1) / size
    w = smoothing_window if smoothing_window else int(0.01 * resolution)  # Heuristic

    imgw = (resolution + 2 * w)
    img = np.zeros((imgw, imgw))
    for x, y, z in data:
        ix = int(x * k) + w
        iy = int(y * k) + w
        if 0 <= ix < imgw and 0 <= iy < imgw:
            img[iy][ix] += z

    z = ndi.gaussian_filter(img, (w, w))     # Gaussian convolution
    z[z <= BLANK_THRESH] = np.nan            # Making low values blank
    return z[w:-w, w:-w]

def permissiveMask(self, volumeThres, gaussSigma=5.0, gaussRethres=0.07, smoothSigma=1.5):
    """
    Given a (tight) volumeThres(hold) measured in Chimera or IMS, this function
    generates a Gaussian dilated mask that is then smoothed.  Everything is done
    with Gaussian operations so the Fourier space representation of the mask
    should be relatively smooth as well, and hence ring less.

    Expects self.mrc to be loaded.  Populates self.mask.
    """
    thres = self.mrc > volumeThres
    thres = thres.astype('float32')
    gaussThres = scipy.ndimage.gaussian_filter(thres, gaussSigma)

    rethres = gaussThres > gaussRethres
    rethres = rethres.astype('float32')
    self.mask = scipy.ndimage.gaussian_filter(rethres, smoothSigma)

    print("permissive mask complete, use ioMRC.writeMRC(self.mrc, 'maskname.mrc') to save")
    pass

def strict_local_maximum(prob_map):
    prob_gau = np.zeros(prob_map.shape)
    sn.gaussian_filter(prob_map, 2, output=prob_gau, mode='mirror')

    prob_fil = np.zeros(prob_map.shape)
    sn.rank_filter(prob_gau, -2, output=prob_fil, footprint=np.ones([3, 3]))

    temp = np.logical_and(prob_gau > prob_fil, prob_map > HIGH_PROB) * 1.
    idx = np.where(temp > 0)

    return idx

def plotImage( self ):
    self.fig.clear()
    self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )

    if "lowPass" in self.plotDict:
        self.plotDict['image'] = ni.gaussian_filter( self.plotDict['image'], self.plotDict["lowPass"] )

    clim = zorro.util.histClim( self.plotDict['image'], cutoff=1E-4 )
    self.axes.hold(True)
    mage = self.axes.imshow( self.plotDict['image'], vmin=clim[0], vmax=clim[1],
                             interpolation='nearest', cmap=self.plotDict['image_cmap'] )

    if 'pixelsize' in self.plotDict:
        zorro.util.plotScalebar( mage, self.plotDict['pixelsize'] )
    if bool(self.plotDict['colorbar']):
        self.fig.colorbar( mage, fraction=0.046, pad=0.04 )

    self.axes.set_axis_off()
    self.axes.hold(False)
    return self.printPlot( dpi_key=u'image_dpi' )

def plotFFT( self ):
    self.fig.clear()
    self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
    self.axes.hold(False)

    FFTimage = np.fft.fft2( self.plotDict['image'] )
    FFTimage[0,0] = 1.0  # Clip out zero-frequency pixel
    FFTimage = np.log10( 1.0 + np.abs( np.fft.fftshift( FFTimage )))
    if "lowPass" in self.plotDict:
        FFTimage = ni.gaussian_filter( FFTimage, self.plotDict["lowPass"] )

    FFTclim = zorro.util.ciClim( FFTimage, sigma=2.5 )
    mage = self.axes.imshow( FFTimage, interpolation='bicubic', vmin=FFTclim[0], vmax=FFTclim[1],
                             cmap=self.plotDict['image_cmap'] )
    if 'pixelsize' in self.plotDict:
        inv_ps = 1.0 / (FFTimage.shape[0] * self.plotDict['pixelsize'] )
        zorro.util.plotScalebar( mage, inv_ps, units=u'nm^{-1}' )
    self.axes.set_axis_off()
    if bool(self.plotDict['colorbar']):
        self.fig.colorbar( mage, fraction=0.046, pad=0.04 )

    return self.printPlot( dpi_key=u'image_dpi' )

def plotPolarFFT( self ):
    self.fig.clear()
    self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
    self.axes.hold(False)

    polarFFTimage = zorro.util.img2polar( np.log10( 1.0 + np.abs( np.fft.fftshift( np.fft.fft2( self.plotDict['image'] )))) )
    if "lowPass" in self.plotDict:
        polarFFTimage = ni.gaussian_filter( polarFFTimage, self.plotDict["lowPass"] )

    FFTclim = zorro.util.ciClim( polarFFTimage, sigma=2.0 )
    mage = self.axes.imshow( polarFFTimage, interpolation='bicubic', vmin=FFTclim[0], vmax=FFTclim[1],
                             cmap=self.plotDict['image_cmap'] )
    if 'pixelsize' in self.plotDict:
        # Egh, this scalebar is sort of wrong, maybe I should transpose the plot?
        inv_ps = 1.0 / (polarFFTimage.shape[0] * self.plotDict['pixelsize'] )
        zorro.util.plotScalebar( mage, inv_ps, units=u'nm^{-1}' )
    self.axes.set_axis_off()
    if bool(self.plotDict['colorbar']):
        self.fig.colorbar( mage, fraction=0.046, pad=0.04 )

    return self.printPlot( dpi_key=u'image_dpi' )

# TODO: render Gautoauto outputs? Maybe I should make the Gautomatch boxes separately as a largely
# transparent plot, and just add it on top or not?

def main(input_pic):
    img = cv.imread(input_pic, cv.CV_LOAD_IMAGE_GRAYSCALE)
    img = sp.gaussian_filter(img, sigma=3)
    img = imresize(img, ((len(img)/10), (len(img[0])/10)))
    img_arr = np.asarray(img, dtype="int32")

    LoG_arr = LoG_Filter(img_arr)
    cv.imwrite('LoG_image.jpg', LoG_arr)
    LoG_arr = cv.imread('LoG_image.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
    Hist = genHistogram(LoG_arr)
    #print(Hist)
    for i in range(0, len(LoG_arr)):
        for j in range(0, len(LoG_arr[0])):
            if LoG_arr[i][j] < 200:
                LoG_arr[i][j] = 0
            else:
                LoG_arr[i][j] = 255

    cv.imwrite('LoG_image.jpg', LoG_arr)
    #img_new=cv.imread('LoG_image.jpg',cv.CV_LOAD_IMAGE_GRAYSCALE)

def global_distortions(self, arr):
    # http://scipy-lectures.github.io/advanced/image_processing/#image-filtering
    ds = self.diststate.get_sample()

    blur = ds['blur']
    sharpen = ds['sharpen']
    sharpen_amount = ds['sharpen_amount']
    noise = ds['noise']

    newarr = n.minimum(n.maximum(0, arr + n.random.normal(0, noise, arr.shape)), 255)
    if blur > 0.1:
        newarr = ndimage.gaussian_filter(newarr, blur)
    if sharpen:
        newarr_ = ndimage.gaussian_filter(arr, blur/2)
        newarr = newarr + sharpen_amount*(newarr - newarr_)

    if ds['resample']:
        sh = newarr.shape[0]
        newarr = resize_image(newarr, newh=ds['resample_height'])
        newarr = resize_image(newarr, newh=sh)

    return newarr

def loadslice(self, it, smth=None):
    """
    Load the variables initialized by self.vars2load()
    """
    for i in self.vars2l:
        if i in self.primitives:
            self.__dict__[i] = self.readslice(self.__dict__[i+'f'], it, i)
    for i in self.vars2l:
        if i in self.derived:
            #self.__dict__[i] = self._derivedv(i)
            self._derivedv(i)
    self.mmd = {}
    for i in self.vars2l:
        if smth is not None:
            self.__dict__[i] = gf(self.__dict__[i], sigma=smth)
        self.mmd[i] = [self.__dict__[i].min(), self.__dict__[i].max()]
    self.time = it*self.dtmovie

####
#### Method to add attributes to the object
####

def loadslice(self, it, smth=None):
    """
    Load the variables initialized by self.vars2load()
    """
    if self.data_type in ('b', 'bb'):
        for i in self.vars2l:
            exec('self.'+i+'=self.readslice(self.'+i+'f,'+str(it)+',"'+i+'")')
    else:
        for i in self.vars2l:
            exec('self.'+i+'=self.readslice(self.'+i+'f,'+str(it)+')')
    self.mmd = {}
    for i in self.vars2l:
        if smth is not None:
            exec('self.'+i+'=gf(self.'+i+',sigma='+str(smth)+')')
        exec('self.mmd["'+i+'"]=[self.'+i+'.min(),self.'+i+'.max()]')
    self.time = it*self.dtmovie

####
#### Method to add attributes to the object
####

def loadslice(self, it, smth=None):
    """
    Load the variables initialized by self.vars2load()
    """
    if self.data_type in ('b', 'bb'):
        for i in self.vars2l:
            exec('self.'+i+'=self.readslice(self.'+i+'f,'+str(it)+',"'+i+'")')
    else:
        for i in self.vars2l:
            exec('self.'+i+'=self.readslice(self.'+i+'f,'+str(it)+')')
    self.mmd = {}
    for i in self.vars2l:
        if smth is not None:
            exec('self.'+i+'=gf(self.'+i+',sigma='+str(smth)+')')
        exec('self.mmd["'+i+'"]=[self.'+i+'.min(),self.'+i+'.max()]')
    self.time = it*self.movieout_full

####
#### Method to add attributes to the object
####

def build_surf2d(img, ds=1, sigma=0, k=0.2):
    from skimage.filters import sobel_h, sobel_v
    from scipy.ndimage import gaussian_filter

    start = time()
    img = img[::-ds, ::ds]
    img = gaussian_filter(img, sigma)
    r, c = img.shape

    rs, cs, fs = build_grididx(r, c)
    vs = img[rs, cs]
    vts = np.array([cs*ds, rs*ds, vs*k], dtype=np.float32).T
    cs = (np.ones((3, r*c))*(vs/255)).astype(np.float32).T

    dx, dy = sobel_h(img), sobel_v(img)
    cx, cy = np.zeros((r*c, 3)), np.zeros((r*c, 3))
    cx[:,0], cx[:,2] = 1, dx.ravel()
    cy[:,1], cy[:,2] = 1, dy.ravel()
    ns = np.cross(cx, cy)
    ns = (ns.T/np.linalg.norm(ns, axis=1)).astype(np.float32).T
    #ns = count_ns(vts, fs)
    print(time()-start)
    return vts, fs, ns, cs

def run(self, ips, snap, img, para=None):
    self.ips.lut[:] = self.buflut
    ndimg.gaussian_filter(snap, para['sigma'], output=img)
    mark = img < para['thr'] if para['ud'] else img > para['thr']

    markers, n = ndimg.label(mark, np.ones((3,3)), output=np.uint16)
    if not para['ud']: img[:] = 255-img
    mark = watershed(img, markers, line=True, conn=para['con']+1)
    mark = np.multiply((mark==0), 255, dtype=np.uint8)

    if para['type'] == 'white line':
        img[:] = mark
    if para['type'] == 'gray line':
        np.minimum(snap, mark, out=img)
    if para['type'] == 'white line on ori':
        #img //= 2
        np.maximum(snap, mark, out=img)

def run(self, ips, snap, img, para=None):
    #denoised = rank.median(img, disk(para['sigma']))
    #gradient = rank.gradient(denoised, disk(para['gdt']))
    ndimg.gaussian_filter(snap, para['sigma'], output=img)
    markers, n = ndimg.label(ips.get_msk(), np.ones((3,3)), output=np.uint16)
    if not para['ud']: img[:] = 255-img
    mark = watershed(img, markers, line=True, conn=para['con']+1)
    mark = np.multiply((mark==0), 255, dtype=np.uint8)

    if para['type'] == 'white line':
        img[:] = mark
    if para['type'] == 'gray line':
        np.minimum(snap, mark, out=img)
    if para['type'] == 'white line on ori':
        np.maximum(snap, mark, out=img)

def __init__(self, path):
    blur_radius = 1.0
    threshold = 50

    img = misc.imread(path)
    self.origin = img

    # smooth the image (to remove small objects)
    imgf = ndimage.gaussian_filter(img, blur_radius)
    threshold = 50

    # find connected components
    self.img, self.count = ndimage.label(imgf > threshold)
    self.labels = self.calculate_labels()

# Return
# ----------
# labels : dictionary, key = label, value = list of bounding in order y1, y2, x1, x2

def get_smoothed_white(self, npix=2, save=True, show=False, **kwargs):
    """Gets a smoothed version (Gaussian of sig=npix) of the white image.
    If save is True, it writes a file to disk called `smoothed_white.fits`.
    **kwargs are passed down to scipy.ndimage.gaussian_filter()
    """
    hdulist = self.hdulist_white
    im = self.white_data
    if npix > 0:
        smooth_im = ndimage.gaussian_filter(im, sigma=npix, **kwargs)
    else:
        smooth_im = im

    if save:
        hdulist[1].data = smooth_im
        prihdr = hdulist[0].header
        comment = 'Spatially smoothed with a Gaussian kernel of sigma={} spaxels (by MuseCube)'.format(npix)
        # print(comment)
        prihdr['history'] = comment
        hdulist.writeto('smoothed_white.fits', clobber=True)
    if show:
        fig = aplpy.FITSFigure('smoothed_white.fits', figure=plt.figure())
        fig.show_grayscale(vmin=self.vmin, vmax=self.vmax)

    return smooth_im

def global_distortions(self, arr):
    # http://scipy-lectures.github.io/advanced/image_processing/#image-filtering
    ds = self.diststate.get_sample()

    blur = ds['blur']
    sharpen = ds['sharpen']
    sharpen_amount = ds['sharpen_amount']
    noise = ds['noise']

    newarr = n.minimum(n.maximum(0, arr + n.random.normal(0, noise, arr.shape)), 255)
    if blur > 0.1:
        newarr = ndimage.gaussian_filter(newarr, blur)
    if sharpen:
        newarr_ = ndimage.gaussian_filter(arr, blur / 2)
        newarr = newarr + sharpen_amount * (newarr - newarr_)

    if ds['resample']:
        sh = newarr.shape[0]
        newarr = resize_image(newarr, newh=ds['resample_height'])
        newarr = resize_image(newarr, newh=sh)

    return newarr

def psf_recenter(stack, r_mask=40, cy_ext=1.5):
    '''
    find the center of the psf and
    stack: the raw_psf
    r_mask: the radius of the mask size
    cy_ext: how far the background should extend to the outside
    '''
    nz, ny, nx = stack.shape
    cy, cx = np.unravel_index(np.argmax(gf(stack, 2)), (nz, ny, nx))[1:]
    ny_shift = int(ny/2 - cy)
    nx_shift = int(nx/2 - cx)
    PSF = np.roll(stack, ny_shift, axis=1)
    PSF = np.roll(PSF, nx_shift, axis=2)

    return PSF

# Background estimation

def psf_zplane(stack, dz, w0, de=1):
    '''
    determine the position of the real focal plane.
    Don't mistake with psf_slice!
    '''
    nz, ny, nx = stack.shape
    cy, cx = np.unravel_index(np.argmax(gf(stack, 2)), (nz, ny, nx))[1:]

    zrange = (nz-1)*dz*0.5
    zz = np.linspace(-zrange, zrange, nz)
    center_z = stack[:, cy-de:cy+de+1, cx-de:cx+de+1]
    im_z = center_z.mean(axis=2).mean(axis=1)
    b = np.mean((im_z[0], im_z[-1]))
    a = im_z.max() - b
    p0 = (a, 0, w0, b)
    popt = optimize.curve_fit(gaussian, zz, im_z, p0)[0]
    z_offset = popt[1]  # The original version is wrong

    return z_offset, zz

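psf_zplane fits the axial intensity profile with optimize.curve_fit and a gaussian model function that is not shown in the snippet. Judging from the initial guess p0 = (a, 0, w0, b), it presumably takes amplitude, centre, width, and offset in that order; the stand-in below is a hypothetical reconstruction under that assumption, and the real project may parameterize the width differently.

import numpy as np

def gaussian(z, a, z0, w, b):
    # hypothetical model matching p0 = (amplitude, centre, width, offset)
    return a * np.exp(-(z - z0) ** 2 / (2.0 * w ** 2)) + b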
def course_speed_to_joint_bin(labels):
    # each of the labels[i, :] is the course and speed
    # convert each pair to the corresponding bin location
    course, speed = course_speed_to_discrete(labels)
    n = FLAGS.discretize_n_bins
    l = len(course)
    # follow the convention of speed first and speed second
    out = np.zeros((l, n, n), dtype=np.float32)
    for i, item in enumerate(zip(course, speed)):
        ci, si = item
        out[i, ci, si] = 1.0
        # do the gaussian smoothing
        out[i, :, :] = gaussian_filter(out[i, :, :],
                                       sigma=FLAGS.discretize_label_gaussian_sigma,
                                       mode='constant', cval=0.0)

    # renormalization of the distribution
    out = out / np.sum(out, axis=(1, 2), keepdims=True)
    out = np.reshape(out, [l, n*n])
    return out

def elastic_transform_2d(img, alpha, sigma, random_mode=True, probability=0.5):
    # Taken from: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
    if random_mode:
        if random.random() < probability:
            return img

    dx = nd.gaussian_filter((random_state.rand(*img.shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    dy = nd.gaussian_filter((random_state.rand(*img.shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha

    x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]), indexing='ij')
    indices = np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))

    return apply_elastic(img, indices), indices

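elastic_transform_2d builds the displacement fields but delegates the warp itself to an apply_elastic helper that is not shown; the gist it cites (like most elastic-deformation recipes) performs that step with scipy.ndimage.map_coordinates. A minimal sketch of that final step, assuming bilinear interpolation is acceptable (the test image, alpha, and sigma below are illustrative):

import numpy as np
from scipy import ndimage as ndi

rng = np.random.RandomState(0)
img = rng.rand(64, 64)
alpha, sigma = 34.0, 4.0

# smoothed random displacement fields, as in elastic_transform_2d above
dx = ndi.gaussian_filter(rng.rand(*img.shape) * 2 - 1, sigma, mode="constant", cval=0) * alpha
dy = ndi.gaussian_filter(rng.rand(*img.shape) * 2 - 1, sigma, mode="constant", cval=0) * alpha

x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]), indexing='ij')
# warp by sampling the image at the displaced coordinates
warped = ndi.map_coordinates(img, [x + dx, y + dy], order=1, mode='reflect')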
def highpass_filter(img, sigma=3):
    lowpass = gaussian_filter(img, sigma)
    return img - lowpass

def speckle(img):
    severity = np.random.uniform(0, 0.6)
    blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
    img_speck = (img + blur)
    img_speck[img_speck > 1] = 1
    img_speck[img_speck <= 0] = 0
    return img_speck

# paints the string in a random location the bounding box
# also uses a random font, a slight random rotation,
# and a random amount of speckle noise

def canny_demo():
    # Generate noisy image of a square
    im = np.zeros((128, 128))
    im[32:-32, 32:-32] = 1

    im = ndi.rotate(im, 15, mode='constant')
    im = ndi.gaussian_filter(im, 4)
    im += 0.2 * np.random.random(im.shape)

    # Compute the Canny filter for two values of sigma
    edges1 = feature.canny(im)
    edges2 = feature.canny(im, sigma=3)

    # display results
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
                                        sharex=True, sharey=True)

    ax1.imshow(im, cmap=plt.cm.gray)
    ax1.axis('off')
    ax1.set_title('noisy image', fontsize=20)

    ax2.imshow(edges1, cmap=plt.cm.gray)
    ax2.axis('off')
    ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)

    ax3.imshow(edges2, cmap=plt.cm.gray)
    ax3.axis('off')
    ax3.set_title(r'Canny filter, $\sigma=3$', fontsize=20)

    fig.tight_layout()
    plt.show()

def smooth_image(data, sigma):
    from scipy import ndimage
    return ndimage.gaussian_filter(data, sigma=sigma)

def _smooth(image, sigma, mode, cval):
    """Return image with each channel smoothed by the Gaussian filter."""
    smoothed = np.empty(image.shape, dtype=np.double)

    # apply Gaussian filter to all dimensions independently
    if image.ndim == 3:
        for dim in range(image.shape[2]):
            ndi.gaussian_filter(image[..., dim], sigma,
                                output=smoothed[..., dim], mode=mode, cval=cval)
    else:
        ndi.gaussian_filter(image, sigma,
                            output=smoothed, mode=mode, cval=cval)

    return smoothed

def _smooth_3d(volume, sigma, mode, cval):
    """Return volume with each channel smoothed by the Gaussian filter."""
    smoothed = np.empty(volume.shape, dtype=np.double)

    # apply Gaussian filter to all dimensions independently
    # volume.ndim == 4 means the volume is multimodal
    if volume.ndim == 4:
        # compute 3d convolution for each modality, dim is a modality
        for dim in range(volume.shape[3]):
            ndi.gaussian_filter(volume[..., dim], sigma,
                                output=smoothed[..., dim], mode=mode, cval=cval)
    else:
        ndi.gaussian_filter(volume, sigma,
                            output=smoothed, mode=mode, cval=cval)

    return smoothed

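Both _smooth and _smooth_3d loop over the last axis rather than filtering the stacked array in one call, because a scalar sigma is applied along every axis of the input, including the channel/modality axis. Passing a per-axis sigma with 0 for that axis is an equivalent alternative; a small sketch with an illustrative RGB-shaped array (not from the source project):

import numpy as np
from scipy import ndimage as ndi

rgb = np.random.rand(32, 32, 3)

# a scalar sigma blurs along all three axes, mixing the channels
mixed = ndi.gaussian_filter(rgb, sigma=2)

# sigma of 0 on the channel axis keeps the channels independent,
# matching the per-channel loops in _smooth / _smooth_3d above
per_channel = ndi.gaussian_filter(rgb, sigma=(2, 2, 0))

looped = np.empty_like(rgb)
for c in range(rgb.shape[2]):
    ndi.gaussian_filter(rgb[..., c], 2, output=looped[..., c])

assert np.allclose(per_channel, looped)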
def speckle(img):
    severity = np.random.uniform(0, 0.6)
    blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
    img_speck = (img + blur)
    img_speck[img_speck > 1] = 1
    img_speck[img_speck <= 0] = 0
    return img_speck

def _load_data(self):
    with h5py.File(self.path, "r") as fp:
        self.image = gaussian_filter(fp["image"].value, self.sigma)
        self.gal_map = fp["segmaps/galaxy"].value
        self.star_map = fp["segmaps/star"].value

def finish_image(img):
    img = image.gaussian_filter(img, 3)
    img = img + np.random.normal(0, 0.01, img.shape)
    return img

def __blur(self, img):
    """
    Gaussian blur
    """
    print("Gaussian Filtering")
    return ndimage.gaussian_filter(img, sigma=50)

def random_noise(img):  # TODO dataug
    severity = np.random.uniform(0, 0.6)
    blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
    img_speck = (img + blur)
    img_speck[img_speck > 1] = 1
    img_speck[img_speck <= 0] = 0
    return img_speck

def random_noise(img):  # TODO dataug
    """
    Puts random noise on image
    :param img: image without noise
    :return: image with noise
    """
    severity = np.random.uniform(0, 0.6)
    blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
    img_speck = (img + blur)
    img_speck[img_speck > 1] = 1
    img_speck[img_speck <= 0] = 0
    return img_speck

def execGaussianFiltering(rawImgFloat):
    filteredImg = gaussian_filter(rawImgFloat, 1)
    seed = np.copy(filteredImg)
    seed[1:-1, 1:-1] = filteredImg.min()
    return filteredImg

def apply_laplacian_filter(array, alpha=30):
    """
    Laplacian is approximated with difference of Gaussian
    :param array:
    :param alpha:
    :return:
    """
    blurred_f = ndimage.gaussian_filter(array, 3)
    filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
    return blurred_f + alpha * (blurred_f - filter_blurred_f)

def sharpen(blurred_im, alpha):
    filter_blurred_im = ndi.gaussian_filter(blurred_im, 1)
    sharp = blurred_im + alpha * (blurred_im - filter_blurred_im)
    return sharp

def check_reg(fixed_img, moved_img, scale=15, norm=True, sigma=0.8, **kwargs):
    dim = list(moved_img.shape)
    resol = list(moved_img.header['pixdim'][1:4])
    # Convert 4D image to 3D or raise error
    data = convert_to_3d(moved_img)
    # Check normalization
    if norm:
        data = apply_p2_98(data)
    # Set slice axis for mosaic grid
    slice_axis, cmap = check_sliceaxis_cmap(data, kwargs)
    cmap = 'YlOrRd'
    # Set grid shape
    data, slice_grid, size = set_mosaic_fig(data, dim, resol, slice_axis, scale)
    fig, axes = BrainPlot.mosaic(fixed_img, scale=scale, norm=norm, cmap='bone', **kwargs)
    # Applying inversion
    invert = check_invert(kwargs)
    data = apply_invert(data, *invert)
    # Plot image
    for i in range(slice_grid[1] * slice_grid[2]):
        ax = axes.flat[i]
        edge = data[:, :, i]
        edge = feature.canny(edge, sigma=sigma)  # edge detection for second image
        # edge = ndimage.gaussian_filter(edge, 3)
        mask = np.ones(edge.shape)
        sx = ndimage.sobel(edge, axis=0, mode='constant')
        sy = ndimage.sobel(edge, axis=1, mode='constant')
        sob = np.hypot(sx, sy)
        mask[sob == False] = np.nan
        m_norm = colors.Normalize(vmin=0, vmax=1.5)
        if i < slice_grid[0] and False in np.isnan(mask.flatten()):
            ax.imshow(mask.T, origin='lower', interpolation='nearest',
                      cmap=cmap, norm=m_norm, alpha=0.8)
        else:
            ax.imshow(np.zeros((dim[0], dim[1])).T, origin='lower',
                      interpolation='nearest', cmap='bone')
        ax.set_axis_off()
    fig.set_facecolor('black')
    if notebook_env:
        display(fig)
    return fig, axes

def sample_transformation(self, imsz):
    choices = len(self.alpha_dist)
    c = int(n.random.randint(0, choices))
    sigma = max(self.min_sigma[c], n.abs(self.sigma[c][1]*n.random.randn() + self.sigma[c][0]))
    alpha = n.random.uniform(self.alpha_dist[c][0], self.alpha_dist[c][1])

    dispmapx = n.random.uniform(-1*self.displacement_range, self.displacement_range, size=imsz)
    dispmapy = n.random.uniform(-1*self.displacement_range, self.displacement_range, size=imsz)
    dispmapx = alpha * ndimage.gaussian_filter(dispmapx, sigma)
    dispmapy = alpha * ndimage.gaussian_filter(dispmapy, sigma)

    return dispmapx, dispmapy

def surface_distortions(self, arr):
    ds = self.surfdiststate.get_sample()
    blur = ds['blur']

    origarr = arr.copy()
    arr = n.minimum(n.maximum(0, arr + n.random.normal(0, ds['noise'], arr.shape)), 255)

    # make some changes to the alpha
    arr[..., 1] = ndimage.gaussian_filter(arr[..., 1], ds['blur'])
    ds = self.surfdiststate.get_sample()
    arr[..., 0] = ndimage.gaussian_filter(arr[..., 0], ds['blur'])
    if ds['sharpen']:
        newarr_ = ndimage.gaussian_filter(origarr[..., 0], blur/2)
        arr[..., 0] = arr[..., 0] + ds['sharpen_amount']*(arr[..., 0] - newarr_)

    return arr