The following 50 code examples, extracted from open-source Python projects, illustrate how to use scipy.ndimage.filters.gaussian_filter().
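Before the project examples, here is a minimal self-contained call for orientation; the array shape and sigma values are illustrative assumptions, not taken from any of the projects below:

import numpy as np
from scipy.ndimage.filters import gaussian_filter  # modern SciPy also exposes scipy.ndimage.gaussian_filter

# Illustrative only: smooth a random 2D array.
noisy = np.random.rand(128, 128)
smoothed = gaussian_filter(noisy, sigma=2.0)        # scalar sigma: isotropic smoothing
smoothed_xy = gaussian_filter(noisy, sigma=(4, 1))  # per-axis sigmas: anisotropic smoothing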
def build_random_variables(self, **kwargs):
    # All this is done just once per batch (i.e. until `clear_random_variables` is called)
    np.random.seed()
    imshape = kwargs.get('imshape')
    # Build and scale random fields
    random_field_x = np.random.uniform(-1, 1, imshape) * self.alpha
    random_field_y = np.random.uniform(-1, 1, imshape) * self.alpha
    # Smooth random field (this has to be done just once per reset)
    sdx = gaussian_filter(random_field_x, self.sigma, mode='reflect')
    sdy = gaussian_filter(random_field_y, self.sigma, mode='reflect')
    # Make meshgrid
    x, y = np.meshgrid(np.arange(imshape[1]), np.arange(imshape[0]))
    # Make inversion coefficient
    _inverter = 1. if not self.invert else -1.
    # Distort meshgrid indices (invert if required)
    flow_y, flow_x = (y + _inverter * sdy).reshape(-1, 1), (x + _inverter * sdx).reshape(-1, 1)
    # Set random states
    self.set_random_variable('flow_x', flow_x)
    self.set_random_variable('flow_y', flow_y)
def demo(stem):
    flist = getimgfiles(stem)
    ext = flist[0].suffix

    for i in range(len(flist)-1):
        fn1 = f'{stem}.{i}{ext}'
        im1 = imread(fn1, flatten=True).astype(float)  # flatten=True is rgb2gray
        # Iold = gaussian_filter(Iold, FILTER)

        fn2 = f'{stem}.{i+1}{ext}'
        im2 = imread(fn2, flatten=True).astype(float)
        # Inew = gaussian_filter(Inew, FILTER)

        U, V = HornSchunck(im1, im2, 1., 100)
        compareGraphs(U, V, im2)

    return U, V
def demo(stem, kernel=5, Nfilter=7):
    flist = getimgfiles(stem)
    ext = flist[0].suffix
    #%% priming read
    im1 = imread(f'{stem}.0{ext}', flatten=True)
    Y, X = im1.shape
    #%% evaluate the first frame's POI
    POI = getPOI(X, Y, kernel)
    #%% get the weights
    W = gaussianWeight(kernel)
    #%% loop over all images in directory
    for i in range(1, len(flist)):
        im2 = imread(f'{stem}.{i}{ext}', flatten=True)
        im2 = gaussian_filter(im2, Nfilter)

        V = LucasKanade(im1, im2, POI, W, kernel)

        compareGraphsLK(im1, im2, POI, V)

        im1 = im2
def compute_colseps_conv(binary, scale=1.0):
    """Find column separators by convolution and thresholding."""
    h, w = binary.shape
    # find vertical whitespace by thresholding
    smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5))
    smoothed = uniform_filter(smoothed, (5.0*scale, 1))
    thresh = (smoothed < amax(smoothed)*0.1)
    DSAVE("1thresh", thresh)
    # find column edges by filtering
    grad = gaussian_filter(1.0*binary, (scale, scale*0.5), order=(0, 1))
    grad = uniform_filter(grad, (10.0*scale, 1))
    # grad = abs(grad)  # use this for finding both edges
    grad = (grad > 0.5*amax(grad))
    DSAVE("2grad", grad)
    # combine edges and whitespace
    seps = minimum(thresh, maximum_filter(grad, (int(scale), int(5*scale))))
    seps = maximum_filter(seps, (int(2*scale), 1))
    DSAVE("3seps", seps)
    # select only the biggest column separators
    seps = morph.select_regions(seps, sl.dim0, min=args['csminheight']*scale, nbest=args['maxcolseps'])
    DSAVE("4seps", seps)
    return seps
def compute_gradmaps(binary, scale):
    # use gradient filtering to find baselines
    boxmap = psegutils.compute_boxmap(binary, scale)
    cleaned = boxmap*binary
    DSAVE("cleaned", cleaned)
    if args['usegause']:
        # this uses Gaussians
        grad = gaussian_filter(1.0*cleaned, (args['vscale']*0.3*scale,
                                             args['hscale']*6*scale), order=(1, 0))
    else:
        # this uses non-Gaussian oriented filters
        grad = gaussian_filter(1.0*cleaned, (max(4, args['vscale']*0.3*scale),
                                             args['hscale']*scale), order=(1, 0))
        grad = uniform_filter(grad, (args['vscale'], args['hscale']*6*scale))
    bottom = ocrolib.norm_max((grad < 0)*(-grad))
    top = ocrolib.norm_max((grad > 0)*grad)
    return bottom, top, boxmap
def measure(self, line):
    h, w = line.shape
    smoothed = filters.gaussian_filter(line, (h*0.5, h*self.smoothness), mode='constant')
    smoothed += 0.001*filters.uniform_filter(smoothed, (h*0.5, w), mode='constant')
    self.shape = (h, w)
    a = argmax(smoothed, axis=0)
    a = filters.gaussian_filter(a, h*self.extra)
    self.center = array(a, 'i')
    deltas = abs(arange(h)[:, newaxis]-self.center[newaxis, :])
    self.mad = mean(deltas[line != 0])
    self.r = int(1+self.range*self.mad)
    if self.debug:
        figure("center")
        imshow(line, cmap=cm.gray)
        plot(self.center)
        ginput(1, 1000)
def elastic_transform(image, alpha, sigma, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape[1:]
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha

    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    # return map_coordinates(image, indices, order=1).reshape(shape)
    res = np.zeros_like(image)
    for i in range(image.shape[0]):  # deform each channel with the same displacement field
        res[i] = map_coordinates(image[i], indices, order=1).reshape(shape)
    return res
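A quick usage sketch for this channel-first variant; the image shape and the alpha/sigma values are illustrative assumptions, not from the source project, and numpy plus the imports used inside the function are assumed to be in scope:

import numpy as np

# Illustrative call on a 3-channel, channel-first array; the same
# displacement field is applied to every channel.
img = np.random.rand(3, 28, 28).astype(np.float32)
warped = elastic_transform(img, alpha=34.0, sigma=4.0,
                           random_state=np.random.RandomState(0))
assert warped.shape == img.shape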
def level_curves(fname, npoints=200, smoothing=10, level=0.5):
    "Loads regularly sampled curves from a .PNG image."
    # Find the contour lines
    img = misc.imread(fname, flatten=True)  # Grayscale
    img = (img.T[:, ::-1]) / 255.
    img = gaussian_filter(img, smoothing, mode='nearest')
    lines = find_contours(img, level)

    # Compute the sampling ratio for every contour line
    lengths = np.array([arclength(line) for line in lines])
    points_per_line = np.ceil(npoints * lengths / np.sum(lengths))

    # Interpolate accordingly
    points = []
    connec = []
    index_offset = 0
    for ppl, line in zip(points_per_line, lines):
        (p, c) = resample(line, ppl)
        points.append(p)
        connec.append(c + index_offset)
        index_offset += len(p)

    size = np.maximum(img.shape[0], img.shape[1])
    points = np.vstack(points) / size
    connec = np.vstack(connec)
    return Curve(points, connec)

# Pyplot Output =================================================================================
def detect_peaks(hist, count=2):
    hist_copy = hist
    peaks = len(argrelextrema(hist_copy, np.greater, mode="wrap")[0])
    sigma = log1p(peaks)
    print(peaks, sigma)
    while peaks > count:
        new_hist = gaussian_filter(hist_copy, sigma=sigma)
        peaks = len(argrelextrema(new_hist, np.greater, mode="wrap")[0])
        if peaks < count:
            peaks = count + 1
            sigma = sigma * 0.5
            continue
        hist_copy = new_hist
        sigma = log1p(peaks)
        print(peaks, sigma)
    return argrelextrema(hist_copy, np.greater, mode="wrap")[0]
def standardDeviation2d(img, ksize=5, blurred=None):
    '''
    calculate the spatially resolved standard deviation for a given 2d array

    ksize -> kernel size

    blurred (optional) -> image Gaussian-filtered with the same ksize;
                          setting this parameter reduces processing time
    '''
    if not isinstance(ksize, (list, tuple)):
        ksize = (ksize, ksize)

    if blurred is None:
        blurred = gaussian_filter(img, ksize)
    else:
        assert blurred.shape == img.shape

    std = np.empty_like(img)

    _calc(img, ksize[0], ksize[1], blurred, std)

    return std
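The compiled `_calc` helper is not shown in this snippet. A common pure-ndimage variant of the same local-statistics idea, as a sketch under the assumption of Gaussian weighting (not the source implementation):

import numpy as np
from scipy.ndimage.filters import gaussian_filter

def local_std(img, sigma=5):
    # local standard deviation via var = E[x^2] - (E[x])^2 under Gaussian weighting
    img = np.asarray(img, dtype=float)
    mean = gaussian_filter(img, sigma)
    sqr_mean = gaussian_filter(img * img, sigma)
    return np.sqrt(np.maximum(sqr_mean - mean * mean, 0.0))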
def gaussian_filter(self, sigma_x=0.0, sigma_y=0.0):
    '''
    Applies a gaussian filter to the seismic velocity field to mimic the
    loss of spatial resolution introduced in tomographic imaging
    '''
    from scipy.ndimage.filters import gaussian_filter

    # filter absolute perturbations
    dvp_filtered = gaussian_filter(self.dvp_abs, sigma=[sigma_x, sigma_y])
    dvs_filtered = gaussian_filter(self.dvs_abs, sigma=[sigma_x, sigma_y])
    drho_filtered = gaussian_filter(self.drho_abs, sigma=[sigma_x, sigma_y])
    self.dvp_abs = dvp_filtered
    self.dvs_abs = dvs_filtered
    self.drho_abs = drho_filtered

    # filter relative perturbations
    dvp_filtered = gaussian_filter(self.dvp_rel, sigma=[sigma_x, sigma_y])
    dvs_filtered = gaussian_filter(self.dvs_rel, sigma=[sigma_x, sigma_y])
    drho_filtered = gaussian_filter(self.drho_rel, sigma=[sigma_x, sigma_y])
    self.dvp_rel = dvp_filtered
    self.dvs_rel = dvs_filtered
    self.drho_rel = drho_filtered
def _apply(self, a, epsilons=1000):
    image = a.original_image
    min_, max_ = a.bounds()
    axis = a.channel_axis(batch=False)
    hw = [image.shape[i] for i in range(image.ndim) if i != axis]
    h, w = hw
    size = max(h, w)

    if not isinstance(epsilons, Iterable):
        epsilons = np.linspace(0, 1, num=epsilons + 1)[1:]

    for epsilon in epsilons:
        # epsilon = 1 will correspond to
        # sigma = size = max(width, height)
        sigmas = [epsilon * size] * 3
        sigmas[axis] = 0
        blurred = gaussian_filter(image, sigmas)
        blurred = np.clip(blurred, min_, max_)

        _, is_adversarial = a.predictions(blurred)
        if is_adversarial:
            return
def run_edges(image):
    ''' This function finds and colors all edges in the given image. '''

    # Convert image to gray
    if len(image.shape) > 2:
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        grayimage = image

    # blur so the gradient operation is less noisy.
    # uses a gaussian filter with sigma = 2
    grayimage = gaussian_filter(grayimage, 2).astype(float)

    # Filter with x and y sobel filters
    dx = convolve2d(grayimage, sobel_filter_x())
    dy = convolve2d(grayimage, sobel_filter_y())

    # Convert to orientation and magnitude images
    theta = transform_xy_theta(dx, dy)
    mag = transform_xy_mag(dx, dy)

    outimg = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)

    # Fill with corresponding color.
    for r in range(outimg.shape[0]):
        for c in range(outimg.shape[1]):
            outimg[r, c, :] = get_color(theta[r, c], mag[r, c])

    return outimg
def gauss_degrade(image, margin=1.0, change=None, noise=0.02, minmargin=0.5, inner=1.0):
    if image.ndim == 3:
        image = mean(image, axis=2)
    m = mean([amin(image), amax(image)])
    image = 1*(image > m)
    if margin < minmargin:
        return 1.0*image
    pixels = sum(image)
    if change is not None:
        npixels = int((1.0+change)*pixels)
    else:
        edt = distance_transform_edt(image == 0)
        npixels = sum(edt <= (margin+1e-4))
    r = int(max(1, 2*margin+0.5))
    ri = int(margin+0.5-inner)
    if ri <= 0:
        mask = binary_dilation(image, iterations=r)-image
    else:
        mask = binary_dilation(image, iterations=r)-binary_erosion(image, iterations=ri)
    image += mask*randn(*image.shape)*noise*min(1.0, margin**2)
    smoothed = gaussian_filter(1.0*image, margin)
    frac = max(0.0, min(1.0, npixels*1.0/prod(image.shape)))
    threshold = mquantiles(smoothed, prob=[1.0-frac])[0]
    result = (smoothed > threshold)
    return 1.0*result
def get_smoothed_white(self, npix=2, save=True, show=False, **kwargs):
    """Gets a smoothed version (Gaussian of sig=npix) of the white image.
    If save is True, it writes a file to disk called `smoothed_white.fits`.
    **kwargs are passed down to scipy.ndimage.gaussian_filter()
    """
    hdulist = self.hdulist_white
    im = self.white_data
    if npix > 0:
        smooth_im = ndimage.gaussian_filter(im, sigma=npix, **kwargs)
    else:
        smooth_im = im
    if save:
        hdulist[1].data = smooth_im
        prihdr = hdulist[0].header
        comment = 'Spatially smoothed with a Gaussian kernel of sigma={} spaxels (by MuseCube)'.format(npix)
        # print(comment)
        prihdr['history'] = comment
        hdulist.writeto('smoothed_white.fits', clobber=True)
    if show:
        fig = aplpy.FITSFigure('smoothed_white.fits', figure=plt.figure())
        fig.show_grayscale(vmin=self.vmin, vmax=self.vmax)
    return smooth_im
def compute_gradmaps(binary, scale, usegauss, vscale, hscale, debug=False):
    # use gradient filtering to find baselines
    boxmap = psegutils.compute_boxmap(binary, scale)
    cleaned = boxmap*binary
    if debug:
        debug_show(cleaned, "cleaned")

    if usegauss:
        # this uses Gaussians
        grad = gaussian_filter(1.0*cleaned, (vscale*0.3*scale,
                                             hscale*6*scale), order=(1, 0))
    else:
        # this uses non-Gaussian oriented filters
        grad = gaussian_filter(1.0*cleaned, (max(4, vscale*0.3*scale),
                                             hscale*scale), order=(1, 0))
        grad = uniform_filter(grad, (vscale, hscale*6*scale))

    if debug:
        debug_show(grad, "compute_gradmaps grad")

    bottom = ocrolib.norm_max((grad < 0)*(-grad))
    top = ocrolib.norm_max((grad > 0)*grad)

    if debug:
        debug_show(bottom, "compute_gradmaps bottom")
        debug_show(top, "compute_gradmaps top")

    return bottom, top, boxmap
def elastic_transform(image, mask, alpha, sigma, alpha_affine=None, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha

    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))

    res_x = map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
    res_y = map_coordinates(mask, indices, order=1, mode='reflect').reshape(shape)
    return res_x, res_y
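A hypothetical paired call for the variant above (shapes and parameter values are illustrative): warping image and mask with the same displacement field keeps segmentation labels aligned with the pixels they describe.

import numpy as np

image = np.random.rand(64, 64)
mask = (image > 0.5).astype(np.float32)
img_t, mask_t = elastic_transform(image, mask, alpha=64*2, sigma=64*0.08)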
def generate_dog(img, nb_octaves, nb_per_octave=4):
    """Generate the difference of gaussians of an image.

    Args:
        img            The input image
        nb_octaves     Number of octaves (groups of images with similar smoothing/sigmas)
        nb_per_octave  Number of images in one octave (with increasing smoothing/sigmas)

    Returns:
        List of (difference image, sigma value)
    """
    spaces = []
    sigma_start = 1.6
    k_start = math.sqrt(2)
    for i in range(nb_octaves):
        sigma = sigma_start * (2 ** i)
        last_gauss = None
        for j in range(nb_per_octave+1):
            k = k_start ** (j+1)
            gauss = filters.gaussian_filter(img, k*sigma)
            if last_gauss is not None:
                diff = gauss - last_gauss
                spaces.append((diff, k*sigma))
            last_gauss = gauss
    return spaces
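Hypothetical usage of the function above (the input image and octave counts are made up for illustration); each returned difference is one band-pass layer of the scale space:

import numpy as np

img = np.random.rand(256, 256)
dog_spaces = generate_dog(img, nb_octaves=3, nb_per_octave=4)
for diff, sigma in dog_spaces:
    print(diff.shape, round(sigma, 2))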
def transform2(image, input_height, input_width,
               resize_height=64, resize_width=64, crop=True, blur=3):
    if crop:
        cropped_image = center_crop(
            image, input_height, input_width,
            resize_height, resize_width)
    else:
        cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
    image = np.array(cropped_image)

    # blurring: filter each RGB channel separately, then re-stack
    r = filters.gaussian_filter(image[:, :, 0], blur)
    g = filters.gaussian_filter(image[:, :, 1], blur)
    b = filters.gaussian_filter(image[:, :, 2], blur)
    image_blurred = np.dstack((r, g, b))

    return [image/127.5 - 1., image_blurred/127.5 - 1.0]
def TF_elastic_deform(img, alpha=1.0, sigma=1.0):
    """Elastic deformation of images as described in Simard 2003"""
    assert len(img.shape) == 3
    h, w, nc = img.shape
    if nc != 1:
        raise NotImplementedError("Multi-channel not implemented.")

    # Generate uniformly random displacement vectors, then convolve with gaussian kernel
    # and finally multiply by a magnitude coefficient alpha
    dx = alpha * gaussian_filter(
        (np.random.random((h, w)) * 2 - 1),
        sigma,
        mode="constant",
        cval=0
    )
    dy = alpha * gaussian_filter(
        (np.random.random((h, w)) * 2 - 1),
        sigma,
        mode="constant",
        cval=0
    )

    # Map image to the deformation mesh
    x, y = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
    indices = np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))

    return map_coordinates(img.reshape((h, w)), indices, order=1).reshape(h, w, nc)
def generateGaussianKernel(size):
    kernel = np.zeros((size, size))
    kernel[size // 2, size // 2] = 1.
    gauss = fi.gaussian_filter(kernel, size // 2 // 3)
    gauss[gauss < gauss[0, size // 2]] = 0.
    return gauss
def preprocess(inputfile, outputfile, order=0, df=None, input_key=None, output_key=None):
    img = nib.load(inputfile)
    data = img.get_data()
    affine = img.affine
    zoom = img.header.get_zooms()[:3]
    data, affine = reslice(data, affine, zoom, (1., 1., 1.), order)
    data = np.squeeze(data)
    data = np.pad(data, [(0, 256 - len_) for len_ in data.shape], "constant")
    if order == 0:
        if df is not None:
            tmp = np.zeros_like(data)
            for target, source in zip(df[output_key], df[input_key]):
                tmp[np.where(data == source)] = target
            data = tmp
        data = np.int32(data)
        assert data.ndim == 3, data.ndim
    else:
        data_sub = data - gaussian_filter(data, sigma=1)
        img = sitk.GetImageFromArray(np.copy(data_sub))
        img = sitk.AdaptiveHistogramEqualization(img)
        data_clahe = sitk.GetArrayFromImage(img)[:, :, :, None]
        data = np.concatenate((data_clahe, data[:, :, :, None]), 3)
        data = (data - np.mean(data, (0, 1, 2))) / np.std(data, (0, 1, 2))
        assert data.ndim == 4, data.ndim
        assert np.allclose(np.mean(data, (0, 1, 2)), 0.), np.mean(data, (0, 1, 2))
        assert np.allclose(np.std(data, (0, 1, 2)), 1.), np.std(data, (0, 1, 2))
        data = np.float32(data)
    img = nib.Nifti1Image(data, affine)
    nib.save(img, outputfile)
def compute_colseps_mconv(binary, scale=1.0):
    """Find column separators using a combination of morphological
    operations and convolution."""
    h, w = binary.shape
    smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5))
    smoothed = uniform_filter(smoothed, (5.0*scale, 1))
    thresh = (smoothed < amax(smoothed)*0.1)
    DSAVE("1thresh", thresh)
    blocks = morph.rb_closing(binary, (int(4*scale), int(4*scale)))
    DSAVE("2blocks", blocks)
    seps = minimum(blocks, thresh)
    seps = morph.select_regions(seps, sl.dim0, min=args['csminheight']*scale, nbest=args['maxcolseps'])
    DSAVE("3seps", seps)
    blocks = morph.r_dilation(blocks, (5, 5))
    DSAVE("4blocks", blocks)
    seps = maximum(seps, 1-blocks)
    DSAVE("5combo", seps)
    return seps
def errors(self, range=10000, smooth=0):
    result = self.error_log[-range:]
    if smooth > 0:
        result = filters.gaussian_filter(result, smooth, mode='mirror')
    return result
def cerrors(self, range=10000, smooth=0):
    result = [e*1.0/max(1, n) for e, n in self.cerror_log[-range:]]
    if smooth > 0:
        result = filters.gaussian_filter(result, smooth, mode='mirror')
    return result
def gauss_distort(images, maxdelta=2.0, sigma=10.0):
    n, m = images[0].shape
    deltas = randn(2, n, m)
    deltas = gaussian_filter(deltas, (0, sigma, sigma))
    deltas /= max(amax(deltas), -amin(deltas))
    deltas *= maxdelta
    xy = transpose(array(meshgrid(range(n), range(m))), axes=[0, 2, 1])
    # print(xy.shape, deltas.shape)
    deltas += xy
    return [map_coordinates(image, deltas, order=1) for image in images]
def get_instance(test=False):
    ge_fail, link_fail = random_fail(G_E)
    path_chg = path_changed(G_E, ge_fail, G_OBS)
    return link_fail, path_chg, ge_fail
    # img, _x = gen_crack()
    # img = gaussian_filter(img, 1.0)
    # return img, _x

# a trace is a named tuple
# (Img, S, Os)
# where Img is the black/white image
# where S is the hidden hypothesis (i.e. label of the img)
# Os is a set of Observations which is (qry_pt, label)
def _get_smoothed_histogram(self, chain, parameter):
    data = chain.get_data(parameter)
    smooth = chain.config["smooth"]
    if chain.grid:
        bins = get_grid_bins(data)
    else:
        bins = chain.config['bins']
        bins, smooth = get_smoothed_bins(smooth, bins, data, chain.weights)

    hist, edges = np.histogram(data, bins=bins, normed=True, weights=chain.weights)
    edge_centers = 0.5 * (edges[1:] + edges[:-1])
    xs = np.linspace(edge_centers[0], edge_centers[-1], 10000)

    if smooth:
        hist = gaussian_filter(hist, smooth, mode=self.parent._gauss_mode)
    kde = chain.config["kde"]
    if kde:
        kde_xs = np.linspace(edge_centers[0], edge_centers[-1], max(200, int(bins.max())))
        ys = MegKDE(data, chain.weights, factor=kde).evaluate(kde_xs)
        area = simps(ys, x=kde_xs)
        ys = ys / area
        ys = interp1d(kde_xs, ys, kind="linear")(xs)
    else:
        ys = interp1d(edge_centers, hist, kind="linear")(xs)
    cs = ys.cumsum()
    cs /= cs.max()
    return xs, ys, cs
def draw_npcs(all_npcs, npc_locations, input_path, output_path, mark_npcs=None,
              relative=False, filter_sigma=10, sigmoid_k=8, sigmoid_c=0.1):
    map_orig = PIL.Image.open(input_path)

    # Get the overlay for NPCs we wish to plot
    if not mark_npcs:
        mark_npcs = all_npcs
    overlay = get_overlay_for(mark_npcs, npc_locations, map_orig.size)

    # If we want a relative fraction, we also want an overlay for the entire population
    if relative:
        overlay_population = get_overlay_for(all_npcs, npc_locations, map_orig.size)
        overlay = np.nan_to_num(np.divide(overlay, gaussian_filter(overlay_population, filter_sigma)))

    # Blur and normalize it (TODO if relative, we've already blurred the overall
    # population overlay once, is that a problem?)
    overlay = gaussian_filter(overlay, filter_sigma)
    overlay /= np.max(overlay)
    overlay = sigmoid(overlay, k=sigmoid_k, c=sigmoid_c)

    # Apply a colormap and delete the alpha channel
    overlay_cm = np.delete(matplotlib.cm.Blues(overlay.T), 3, axis=2)
    overlay_im = PIL.Image.fromarray(np.uint8(overlay_cm * 255))

    map_final = PIL.Image.blend(map_orig, overlay_im, 0.7)
    map_final.save(output_path)
def blur_grid(grid):
    filtered = gaussian_filter(grid, sigma=1)
    filtered[(grid > 0.45) & (grid < 0.55)] = grid[(grid > 0.45) & (grid < 0.55)]
    return filtered
def debug_Data_Augmentation(blur=False, sigma=1.0, hflip=False, vflip=False,
                            hvsplit=False, randbright=False):
    image = cv2.imread('Dataset/young/female/180.jpg', 0)
    # image = cv2.imread('Dataset/young/female/285.jpg', 0)
    # image = cv2.resize(image, (100, 100))
    cv2.imshow('Image', image)

    # Data Augmentation:
    # Gaussian Blurred
    if blur:
        cv2.imshow('Blur', gaussian_filter(input=image, sigma=sigma))
        # cv2.imwrite("Blur_{:1.1f}.jpg".format(sigma),
        #             gaussian_filter(input=image, sigma=sigma))
        cv2.imwrite("../xBlur_{:1.1f}.jpg".format(sigma),
                    gaussian_filter(input=image, sigma=sigma))

    # Flip and Rotate
    if (hflip and not vflip) or (hflip and hvsplit):
        cv2.imshow('hflip', np.fliplr(image))
        cv2.imwrite("../hflip.jpg", np.fliplr(image))
    if (vflip and not hflip) or (vflip and hvsplit):
        cv2.imshow('vflip', np.flipud(image))
        cv2.imwrite("../vflip.jpg", np.flipud(image))
    if hflip and vflip and not hvsplit:
        cv2.imshow('rot 180', np.rot90(image, k=2))
        cv2.imwrite("../rot2k.jpg", np.rot90(image, k=2))

    cv2.waitKey(0)
    cv2.destroyAllWindows()
def debug_analyse_image_texture(file, sigma=1.0):
    image = cv2.imread(file, 0)
    blur = gaussian_filter(input=image, sigma=sigma)
    cv2.imshow('Image', image - blur)
    # analysis = ndimage.gaussian_gradient_magnitude(image, sigma=sigma)
    # cv2.imshow('Analysis', analysis * 10)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

##########################################
def distort_elastic(image, smooth=10.0, scale=100.0, seed=0):
    """
    Elastic distortion of images.

    Channel axis in RGB images will not be distorted but grayscale or
    RGB images are both valid inputs. RGB and grayscale images will be
    distorted identically for the same seed.

    Simard, et. al, "Best Practices for Convolutional Neural Networks
    applied to Visual Document Analysis", in Proc. of the International
    Conference on Document Analysis and Recognition, 2003.

    :param ndarray image: Image of shape [h,w] or [h,w,c]
    :param float smooth: Smoothes the distortion.
    :param float scale: Scales the distortion.
    :param int seed: Seed for random number generator. Ensures that for the
      same seed images are distorted identically.
    :return: Distorted image with same shape as input image.
    :rtype: ndarray
    """
    # create random, smoothed displacement field
    rnd = np.random.RandomState(int(seed))
    h, w = image.shape[:2]
    dxy = rnd.rand(2, h, w, 3) * 2 - 1
    dxy = gaussian_filter(dxy, smooth, mode="constant")
    dxy = dxy / np.linalg.norm(dxy) * scale
    dxyz = dxy[0], dxy[1], np.zeros_like(dxy[0])

    # create transformation coordinates and deform image
    is_color = len(image.shape) == 3
    ranges = [np.arange(d) for d in image.shape]
    grid = np.meshgrid(*ranges, indexing='ij')
    add = lambda v, dv: v + dv if is_color else v + dv[:, :, 0]
    idx = [np.reshape(add(v, dv), (-1, 1)) for v, dv in zip(grid, dxyz)]
    distorted = map_coordinates(image, idx, order=1, mode='reflect')

    return distorted.reshape(image.shape)
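Hypothetical usage (the arrays and parameter values below are illustrative): per the docstring, an RGB image and a grayscale image get the same warp for the same seed, which is handy for paired image/mask augmentation.

import numpy as np

rgb = np.random.rand(64, 64, 3)
gray = rgb.mean(axis=2)
warped_rgb = distort_elastic(rgb, smooth=10.0, scale=100.0, seed=7)
warped_gray = distort_elastic(gray, smooth=10.0, scale=100.0, seed=7)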
def level_curves(fname, npoints, smoothing=10, level=0.5):
    # Find the contour lines
    img = misc.imread(fname, flatten=True)  # Grayscale
    img = img.T[:, ::-1]
    img = img / 255.
    img = gaussian_filter(img, smoothing, mode='nearest')
    lines = find_contours(img, level)

    # Compute the sampling ratio
    lengths = []
    for line in lines:
        lengths.append(arclength(line))
    lengths = array(lengths)
    points_per_line = ceil(npoints * lengths / sum(lengths))

    # Interpolate accordingly
    points = []
    connec = []
    index_offset = 0
    for ppl, line in zip(points_per_line, lines):
        (p, c) = resample(line, ppl)
        points.append(p)
        connec.append(c + index_offset)
        index_offset += len(p)

    points = vstack(points)
    connec = vstack(connec)
    return Curve(points.ravel(), connec, 2)  # Dimension 2!
def limitedfluxdensity(simulation, instrumentname, wavelengths, cmask, filterobject, fwhm, fluxlimit):
    # get the path for the data cube corresponding to this instrument
    fitspaths = list(filter(lambda fn: ("_"+instrumentname+"_") in fn, simulation.totalfitspaths()))
    if len(fitspaths) != 1:
        return None

    # get the data cube and convert it to per-wavelength units
    cube = pyfits.getdata(arch.openbinary(fitspaths[0])).T
    cube = simulation.convert(cube, to_unit='W/m3/sr', quantity='surfacebrightness', wavelength=wavelengths)

    # convolve the data cube to a single frame, and convert back to per-frequency units
    frame = filterobject.convolve(wavelengths[cmask], cube[:, :, cmask])
    frame = simulation.convert(frame, from_unit='W/m3/sr', to_unit='MJy/sr', wavelength=filterobject.pivotwavelength())

    # get information on the simulated pixels (assume all frames are identical and square)
    sim_pixels = simulation.instrumentshape()[0]
    sim_pixelarea = simulation.angularpixelarea()  # in sr
    sim_pixelwidth = np.sqrt(sim_pixelarea) * 648000 / np.pi  # in arcsec

    # convolve the frame with a Gaussian of the appropriate size
    # (2.35482 = 2*sqrt(2*ln 2) converts FWHM to sigma)
    frame = gaussian_filter(frame, sigma=fwhm/sim_pixelwidth/2.35482, mode='constant')

    # get information on the observational instrument's pixels (assume pixel width ~ fwhm/3)
    obs_pixelwidth = fwhm/3  # in arcsec

    # calculate the bin size to obtain simulated pixels similar to observed pixels
    bin_pixels = find_nearest_divisor(sim_pixels, obs_pixelwidth/sim_pixelwidth)
    bin_pixelarea = sim_pixelarea * bin_pixels**2

    # rebin the frame
    frame = frame.reshape((sim_pixels//bin_pixels, bin_pixels,
                           sim_pixels//bin_pixels, bin_pixels)).mean(axis=3).mean(axis=1)

    # integrate over the frame to obtain the total flux density and convert from MJy to Jy
    fluxdensity = frame[frame > fluxlimit].sum() * bin_pixelarea * 1e6
    return fluxdensity

# This function returns the integer divisor of the first (integer) argument
# that is nearest to the second (float) argument
def fastFilter(arr, ksize=30, every=None, resize=True, fn='median',
               interpolation=cv2.INTER_LANCZOS4, smoothksize=0,
               borderMode=cv2.BORDER_REFLECT):
    '''
    fn -> one of ['nanmean', 'mean', 'nanmedian', 'median']

    a fast 2d filter for large kernel sizes that also works with nans
    the computation speed is increased because only every nth position
    within the median kernel is evaluated
    '''
    if every is None:
        every = max(ksize//3, 1)
    else:
        assert ksize >= 3*every
    s0, s1 = arr.shape[:2]

    ss0 = s0//every
    every = s0//ss0
    ss1 = s1//every

    out = np.full((ss0+1, ss1+1), np.nan)

    c = {'median': _calcMedian,
         'nanmedian': _calcNanMedian,
         'nanmean': _calcNanMean,
         'mean': _calcMean,
         }[fn]
    ss0, ss1 = c(arr, out, ksize, every)
    out = out[:ss0, :ss1]

    if smoothksize:
        out = gaussian_filter(out, smoothksize)

    if not resize:
        return out
    return cv2.resize(out, arr.shape[:2][::-1], interpolation=interpolation)
def flatField(closeDist_img=None, inPlane_img=None, closeDist_bg=None, inPlane_bg=None,
              vignetting_model='different_objects', interpolation_method='kangWeiss',
              inPlane_scale_factor=None):
    # 1. Pixel sensitivity:
    if closeDist_img is not None:
        # TODO: find better name
        ff1 = flatFieldFromCalibration(closeDist_img, closeDist_bg)
    else:
        ff1 = 0

    # 2. Vignetting from in-plane measurements:
    if inPlane_img is not None:
        bg = gaussian_filter(median_filter(ff1, 3), 9)
        ff1 -= bg
        ff2, mask = VIGNETTING_MODELS[vignetting_model](inPlane_img, inPlane_bg, inPlane_scale_factor)

        # import pylab as plt
        # plt.imshow(mask)
        # plt.show()

        ff2smooth = INTERPOLATION_METHODS[interpolation_method](ff2, mask)

        if isinstance(ff1, np.ndarray) and ff1.shape != ff2smooth.shape:
            ff2smooth = resize(ff2smooth, ff1.shape, mode='reflect')
    else:
        ff2 = 0
        ff2smooth = 0

    return ff1 + ff2smooth, ff2
def gauss_fltr(dem, sigma=1):
    print("Applying gaussian smoothing filter with sigma %s" % sigma)
    # Note: ndimage doesn't properly handle masked arrays - convert to nan
    from scipy.ndimage.filters import gaussian_filter
    dem_filt_gauss = gaussian_filter(dem.filled(np.nan), sigma)
    # Now mask all nans
    # dem = np.ma.array(dem_filt_gauss, mask=dem.mask)
    out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)
    out.set_fill_value(dem.fill_value)
    return out
def _gkern2(kernlen=21, nsig=3):
    """Returns a 2D Gaussian kernel array."""
    # create nxn zeros
    inp = np.zeros((kernlen, kernlen))
    # set element at the middle to one, a dirac delta
    inp[kernlen // 2, kernlen // 2] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter mask
    return fi.gaussian_filter(inp, nsig)
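A quick sanity check on the trick above (illustrative, not from the source): smoothing a unit impulse yields the kernel itself, and with gaussian_filter's default 'reflect' boundary the response sums to 1.

kern = _gkern2(kernlen=21, nsig=3)
print(kern.shape)  # (21, 21)
print(kern.sum())  # ~1.0: the impulse response of a normalized Gaussian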
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0]+square_size, center_square[1]-square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))

    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
def cylinder(self, deltaT=300.0, radius=200.0, start_depth=100.0):
    '''
    creates a cylindrical temperature anomaly with a gaussian blur

    usage: First read a temperature snapshot.

    Params:
    deltaT      : cylinder excess temperature
    radius      : cylinder radius (in km)
    start_depth : starting depth of the cylinder (default = 100.0)
    '''
    T_ref = self.T_adiabat[::-1]
    T_here = np.zeros((self.npts_rad, self.npts_theta))

    for i in range(0, self.npts_rad):
        print(T_ref[i])
        km_per_degree = self.rad_km[i]*2*np.pi/360.0
        for j in range(0, self.npts_theta):
            cyl_th_here = radius / km_per_degree
            th_here = self.theta[j]
            depth_here = 6371.0 - self.rad_km[i]

            if th_here <= cyl_th_here and depth_here > 100.0:
                T_here[(self.npts_rad-1)-i, j] = T_ref[i] + deltaT
            else:
                T_here[(self.npts_rad-1)-i, j] = T_ref[i]

    filtered = gaussian_filter(T_here, sigma=[0, 3])
    self.T = filtered
    # self.T = T_here
def blur(img, sigma=1):
    """Blur an image with a Gaussian kernel"""
    return filters.gaussian_filter(img, sigma)
def smooth_image(img, sigma=1.0):
    if sigma is not None:
        return filters.gaussian_filter(np.asarray(img, float), sigma)
    else:
        return img
def apply_laplacian_filter(array, alpha=30):
    """
    Laplacian is approximated with a difference of Gaussians

    :param array: input image
    :param alpha: weight of the detail layer added back (edge enhancement)
    :return: sharpened version of the blurred image
    """
    blurred_f = ndimage.gaussian_filter(array, 3)
    filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
    # (blurred_f - filter_blurred_f) is a band-pass, Laplacian-like detail layer;
    # adding it back with weight alpha sharpens the image (unsharp-mask style)
    return blurred_f + alpha * (blurred_f - filter_blurred_f)
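Hypothetical usage (the test array is made up; alpha trades off edge enhancement against noise amplification):

import numpy as np

arr = np.tile(np.linspace(0.0, 1.0, 64), (64, 1))  # horizontal ramp
sharpened = apply_laplacian_filter(arr, alpha=30)
print(sharpened.shape)  # same shape as the input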
def blur_image(self):
    H_blur = gaussian_filter(self.H, sigma=self.blur)
    H_blur = H_blur/np.max(H_blur)
    self.H_blur = H_blur