The following 15 code examples, extracted from open-source Python projects, illustrate how to use scipy.ndimage.filters.convolve1d().
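Before the project examples, here is a minimal, self-contained sketch of the basic call: an input array, a normalized weight kernel, the axis to filter along, and the boundary mode. The signal values and kernel length below are arbitrary and chosen only for illustration.

import numpy as np
from scipy.ndimage import filters  # on newer SciPy, scipy.ndimage.convolve1d can be called directly

# arbitrary 1D signal and a flat 5-point averaging kernel (illustrative values)
signal = np.array([0., 0., 1., 5., 1., 0., 0.])
weights = np.ones(5)
weights /= weights.sum()  # normalize so the output keeps the signal's scale

# smooth along the last (here the only) axis; 'mirror' reflects the signal at the edges
smoothed = filters.convolve1d(signal, weights, axis=-1, mode='mirror')
print(smoothed)

# for a 2D array the same call smooths each row (axis=1) or each column (axis=0)
image = np.random.rand(4, 10)
rows_smoothed = filters.convolve1d(image, weights, axis=1, mode='constant', cval=0.0)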
def gaussian_smoothing(self, data=None, filter_len=None, filter_sigma=None):
    """ This method convolves the data with a gaussian and returns the smoothed data.

    @param array data: raw data
    @param int filter_len: length of filter
    @param int filter_sigma: width of gaussian

    @return array: smoothed data
    """
    # Todo: Check for wrong data type
    if filter_len is None:
        if len(data) < 20.:
            filter_len = 5
        elif len(data) >= 100.:
            filter_len = 10
        else:
            filter_len = int(len(data) / 10.) + 1
    if filter_sigma is None:
        filter_sigma = filter_len

    gaus = gaussian(filter_len, filter_sigma)
    return filters.convolve1d(data, gaus / gaus.sum(), mode='mirror')
def iuwt(wave, convol2d=0):
    mode = 'nearest'

    lvl, n1, n2 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
    n = np.size(h)

    cJ = np.copy(wave[lvl-1, :, :])

    for i in np.linspace(1, lvl-1, lvl-1):
        # cast to int: the loop variable from linspace is a float
        newh = np.zeros((1, int(n + (n-1) * (2**(lvl-1-i) - 1))))
        newh[0, np.int_(np.linspace(0, np.size(newh)-1, len(h)))] = h
        H = np.dot(newh.T, newh)

        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(cJ, newh[0, :], axis=0, mode=mode)
            ###### Column convolution
            cnew = sc.convolve1d(cnew, newh[0, :], axis=1, mode=mode)
        cJ = cnew + wave[lvl-1-i, :, :]

    return np.reshape(cJ, (n1, n2))
def iuwt_1D(wave):
    """
    Inverse Starlet transform.

    INPUTS:
        wave: wavelet decomposition of an image.

    OUTPUTS:
        out: image reconstructed from wavelet coefficients

    OPTIONS:
        convol2d: if set, a 2D version of the filter is used (slower, default is 0)
    """
    mode = 'nearest'

    lvl, n1 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
    n = np.size(h)

    cJ = np.copy(wave[lvl-1, :])

    for i in np.linspace(1, lvl-1, lvl-1):
        # cast to int: the loop variable from linspace is a float
        newh = np.zeros((1, int(n + (n-1) * (2**(lvl-1-i) - 1))))
        newh[0, np.int_(np.linspace(0, np.size(newh)-1, len(h)))] = h
        H = np.dot(newh.T, newh)

        ###### Line convolution
        cnew = sc.convolve1d(cJ, newh[0, :], axis=0, mode=mode)

        cJ = cnew + wave[lvl-1-i, :]

    out = cJ
    return out
def estimate_poissonian(self, x_axis, data, params):
    """ Provide an estimator for initial values of a poissonian function.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param lmfit.Parameters params: object includes parameter dictionary which can be set

    @return tuple (error, params):

        Explanation of the return parameter:
            int error: error code (0:OK, -1:error)
            Parameters object params: set parameters of initial values
    """
    error = self._check_1D_input(x_axis=x_axis, data=data, params=params)

    # a gaussian filter is appropriate because a gaussian approximates the
    # poisson distribution well
    # gaus = gaussian(10, 10)
    # data_smooth = filters.convolve1d(data, gaus/gaus.sum(), mode='mirror')
    data_smooth = self.gaussian_smoothing(data=data, filter_len=10, filter_sigma=10)

    # set parameters
    mu = x_axis[np.argmax(data_smooth)]
    params['mu'].value = mu
    params['amplitude'].value = data_smooth.max() / self.poisson(mu, mu)

    return error, params
def gaussianlinearoffset_testing_data():
    x = np.linspace(0, 5, 30)
    x_nice = np.linspace(0, 5, 101)

    mod_final, params = qudi_fitting.make_gaussianwithslope_model()

    data = np.loadtxt("./../1D_shllow.csv")
    data_noisy = data[:, 1]
    data_fit = data[:, 3]
    x = data[:, 2]

    update = dict()
    update["slope"] = {"min": -np.inf, "max": np.inf}
    update["offset"] = {"min": -np.inf, "max": np.inf}
    update["sigma"] = {"min": -np.inf, "max": np.inf}
    update["center"] = {"min": -np.inf, "max": np.inf}
    update["amplitude"] = {"min": -np.inf, "max": np.inf}

    result = qudi_fitting.make_gaussianwithslope_fit(x_axis=x, data=data_noisy, add_params=update)

    # gaus = gaussian(3, 5)
    # qudi_fitting.data_smooth = filters.convolve1d(qudi_fitting.data_noisy, gaus/gaus.sum(), mode='mirror')

    plt.plot(x, data_noisy, label="data")
    plt.plot(x, data_fit, "k", label="old fit")
    plt.plot(x, result.init_fit, '-g', label='init')
    plt.plot(x, result.best_fit, '-r', label='fit')
    plt.legend()
    plt.show()

    print(result.fit_report())
def poissonian_testing():
    start = 0
    stop = 30
    mu = 8
    num_points = 1000
    x = np.array(np.linspace(start, stop, num_points))
    # x = np.array(x, dtype=np.int64)
    mod, params = qudi_fitting.make_poissonian_model()
    print('Parameters of the model', mod.param_names)

    p = Parameters()
    p.add('mu', value=mu)
    p.add('amplitude', value=200.)

    data_noisy = (mod.eval(x=x, params=p)
                  * np.array((1 + 0.001 * np.random.normal(size=x.shape)
                              * p['amplitude'].value)))

    print('all int', all(isinstance(item, (np.int32, int, np.int64)) for item in x))
    print('int', isinstance(x[1], int), float(x[1]).is_integer())
    print(type(x[1]))

    # make the filter an extra function shared and usable for other functions
    gaus = gaussian(10, 10)
    data_smooth = filters.convolve1d(data_noisy, gaus / gaus.sum(), mode='mirror')

    result = qudi_fitting.make_poissonian_fit(x, data_noisy)
    print(result.fit_report())

    plt.figure()
    plt.plot(x, data_noisy, '-b', label='noisy data')
    plt.plot(x, data_smooth, '-g', label='smoothed data')
    plt.plot(x, result.init_fit, '-y', label='initial values')
    plt.plot(x, result.best_fit, '-r', linewidth=2.0, label='fit')
    plt.xlabel('counts')
    plt.ylabel('occurrences')
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0.)
    plt.show()
def kernel(self, series, sigma=3):
    # fix the weight of data
    # http://www.nehalemlabs.net/prototype/blog/2014/04/12/
    # how-to-fix-scipys-interpolating-spline-default-behavior/
    series = np.asarray(series)
    b = gaussian(25, sigma)
    averages = filters.convolve1d(series, b / b.sum())
    variances = filters.convolve1d(np.power(series - averages, 2), b / b.sum())
    variances[variances == 0] = 1
    return averages, variances
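The URL in the comment describes using this kind of locally estimated variance to weight a smoothing spline. The sketch below shows one plausible way to use the returned averages and variances for that purpose; local_stats is simply a standalone copy of the method above so the snippet runs on its own, and the noisy sine signal is an arbitrary illustration rather than project data.

import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.ndimage import filters
from scipy.signal import gaussian  # scipy.signal.windows.gaussian on newer SciPy

def local_stats(series, sigma=3):
    # standalone copy of the kernel() method above
    series = np.asarray(series)
    b = gaussian(25, sigma)
    averages = filters.convolve1d(series, b / b.sum())
    variances = filters.convolve1d(np.power(series - averages, 2), b / b.sum())
    variances[variances == 0] = 1
    return averages, variances

# arbitrary noisy test signal
x = np.linspace(0, 10, 200)
y = np.sin(x) + 0.3 * np.random.normal(size=x.size)

averages, variances = local_stats(y, sigma=3)

# points with a larger locally estimated variance get a smaller weight in the fit
spline = UnivariateSpline(x, y, w=1.0 / np.sqrt(variances))
y_smooth = spline(x)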
def temporal_feature_smoothing(video_features, kernel):
    # simple 1d convolution assuming that input is time x words x descriptors
    return convolve1d(video_features, weights=kernel, axis=0)
def filterSimMat(simMat, filtLength, filtType, scaleFilterMethod='max1'):
    if filtType == 'hamming':
        filt = np.hamming(filtLength)
    elif filtType == 'flat':
        filt = np.ones(filtLength)
    else:
        raise RuntimeError("Unknown/unsupported filter type {}".format(filtType))
    if scaleFilterMethod == 'max1':
        filt /= np.max(filt)
    elif scaleFilterMethod == 'sum1':
        filt /= np.sum(filt)
    # print filt
    # filt = np.tile(filt, (simMat.shape[0], 1))
    # print filt.shape
    return filters.convolve1d(simMat, weights=filt, axis=1, mode='constant')
def _filterRows(X, filtLength):
    filt = np.hamming(filtLength)
    return filters.convolve1d(X, weights=filt, axis=1, mode='constant')
def filterRows(X, filtLength, filtType='hamming', scaleFilterMethod='max1'):
    if filtType == 'hamming':
        filt = np.hamming(filtLength)
    elif filtType == 'flat':
        filt = np.ones(filtLength)
    else:
        raise RuntimeError("Unknown/unsupported filter type {}".format(filtType))
    if scaleFilterMethod == 'max1':
        filt /= np.max(filt)
    elif scaleFilterMethod == 'sum1':
        filt /= np.sum(filt)
    return filters.convolve1d(X, weights=filt, axis=1, mode='constant')
def notSoRandomWalk(shape, std=1, trendFilterLength=32, lpfLength=16):
    """bandpass filter a random walk so that the low-frequency trend /
    drift is eliminated and the high-frequency noise is attenuated"""
    walk = randwalk(shape, std=std)
    filt = np.hamming(trendFilterLength)
    filt /= np.sum(filt)
    whichAxis = len(walk.shape) > 1  # 0 iff 1d, else 1
    # subtract baseline drift, roughly
    trend = filters.convolve1d(walk, weights=filt, axis=whichAxis, mode='reflect')
    walk -= trend
    # subtract noisy spikes
    walk = filters.convolve1d(walk, weights=np.hamming(lpfLength), axis=whichAxis, mode='reflect')
    return walk
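The randwalk() helper called by notSoRandomWalk() is not part of this listing. The stand-in below, a cumulative sum of gaussian steps, is an assumption about what that helper does, added only so the function above can be exercised end to end.

import numpy as np
from scipy.ndimage import filters

def randwalk(shape, std=1):
    # minimal stand-in (an assumption, not the original project code):
    # a random walk as the cumulative sum of gaussian steps along the last axis
    steps = np.random.normal(scale=std, size=shape)
    return np.cumsum(steps, axis=-1)

# a 1D walk of 512 samples, with the slow drift removed and fast noise attenuated
walk = notSoRandomWalk(512, std=1, trendFilterLength=32, lpfLength=16)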
def get_boundingbox(frame_set, use_region_of_interest=False):
    fstd = np.std(frame_set, axis=0)
    framesstd = np.mean(fstd)
    #th = framesstd / 3
    th = framesstd
    #ones = np.ones(8)
    ones = np.ones(10)
    big_var = (fstd > th)

    if not use_region_of_interest or framesstd == 0:
        # no bb, take full frame
        frameROIRes = np.zeros([20, 50, 50])
        for i in range(20):
            frameROIRes[i, :, :] = scipy.misc.imresize(frame_set[i, :, :], size=(50, 50), interp='bilinear')
        #frameROIRes = np.reshape(frameROIRes, (1, frameROIRes.shape[0]*frameROIRes.shape[1]*frameROIRes.shape[2]))
        frameROIRes = frameROIRes.astype(np.float32)
        return frameROIRes  # , framesstd

    big_var = big_var.astype(np.float32)
    big_var = filters.convolve1d(big_var, ones, axis=0)
    big_var = filters.convolve1d(big_var, ones, axis=1)
    th2 = 80
    i, j = np.nonzero(big_var > th2)

    if (i.size > 0):
        si = np.sort(i)
        sj = np.sort(j)
        ll = si.shape[0]
        th1 = int(round(ll * 0.03))
        th2 = int(np.floor(ll * 0.98))
        y1 = si[th1]
        y2 = si[th2]
        x1 = sj[th1]
        x2 = sj[th2]

        # cut image ROI
        if (((x2 - x1) > 0) and ((y2 - y1) > 0)):
            framesRoi = frame_set[:, y1:y2, x1:x2]
        else:
            framesRoi = frame_set[:, :, :]
    else:
        framesRoi = frame_set[:, :, :]

    # debug - show ROI
    #cv2.namedWindow('ROI', cv2.WINDOW_NORMAL)
    #bla = scipy.misc.imresize(framesRoi[19,:,:], size=(200,200), interp='bilinear')
    #cv2.imshow('ROI', bla)

    # resize to 50x50
    frameROIRes = np.zeros([20, 50, 50])
    for i in range(20):
        frameROIRes[i, :, :] = scipy.misc.imresize(framesRoi[i, :, :], size=(50, 50), interp='bilinear')

    #frameROIRes = frameROIRes / 255  # TODO - is this really necessary?
    return frameROIRes
def get_boundingbox(self):
    fstd = numpy.std(self.frame_set, axis=0)
    framesstd = numpy.mean(fstd)
    th = framesstd
    ones = numpy.ones(10)
    big_var = (fstd > th)

    if (framesstd == 0):
        # no bb, take full frame
        frameROIRes = numpy.zeros([20, 50, 50])
        for i in range(20):
            frameROIRes[i, :, :] = scipy.misc.imresize(self.frame_set[i, :, :], size=(50, 50), interp='bilinear')
        frameROIRes = numpy.reshape(frameROIRes, (1, frameROIRes.shape[0] * frameROIRes.shape[1] * frameROIRes.shape[2]))
        frameROIRes = frameROIRes.astype(numpy.float32)
        return (frameROIRes)

    big_var = big_var.astype(numpy.float32)
    big_var = filters.convolve1d(big_var, ones, axis=0)
    big_var = filters.convolve1d(big_var, ones, axis=1)
    th2 = 80
    i, j = numpy.nonzero(big_var > th2)

    if (i.size > 0):
        si = numpy.sort(i)
        sj = numpy.sort(j)
        ll = si.shape[0]
        # cast to int so the percentile positions can be used as indices
        th1 = int(round(ll * 0.02))
        th2 = int(numpy.floor(ll * 0.98))
        y1 = si[th1]
        y2 = si[th2]
        x1 = sj[th1]
        x2 = sj[th2]

        # cut image ROI
        if (((x2 - x1) > 0) and ((y2 - y1) > 0)):
            framesRoi = self.frame_set[:, y1:y2, x1:x2]
        else:
            framesRoi = self.frame_set[:, :, :]
    else:
        framesRoi = self.frame_set[:, :, :]

    # resize to 50x50
    frameROIRes = numpy.zeros([20, 50, 50])
    for i in range(20):
        frameROIRes[i, :, :] = scipy.misc.imresize(framesRoi[i, :, :], size=(50, 50), interp='bilinear')

    riofstd = numpy.std(frameROIRes, axis=0)
    self.cur_std = numpy.mean(riofstd)

    frameROIRes = numpy.reshape(frameROIRes, (1, frameROIRes.shape[0] * frameROIRes.shape[1] * frameROIRes.shape[2]))
    frameROIRes = frameROIRes.astype(numpy.float32)
    return (frameROIRes)
def find_offset_parameter(self, x_values=None, data=None):
    """ This method convolves the data with a Lorentzian and then finds the
    offset, which is supposed to be the most likely value, via a histogram.
    Additionally the smoothed data is returned.

    @param array x_values: x values
    @param array data: value of each data point corresponding to x values

    @return int error: error code (0:OK, -1:error)
    @return float array data_smooth: smoothed data
    @return float offset: estimated offset
    """
    # lorentzian filter
    mod, params = self.make_lorentzian_model()

    # Todo: exclude filter in separate method to be used in other methods
    if len(x_values) < 20.:
        len_x = 5
    elif len(x_values) >= 100.:
        len_x = 10
    else:
        len_x = int(len(x_values) / 10.) + 1

    lorentz = mod.eval(x=np.linspace(0, len_x, len_x), amplitude=1, offset=0.,
                       sigma=len_x / 4., center=len_x / 2.)
    data_smooth = filters.convolve1d(data, lorentz / lorentz.sum(),
                                     mode='constant', cval=data.max())

    # find the most frequent value, which is supposed to be the offset
    hist = np.histogram(data_smooth, bins=10)
    offset = (hist[1][hist[0].argmax()] + hist[1][hist[0].argmax() + 1]) / 2.

    return data_smooth, offset
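To close, a self-contained sketch of the same offset-finding idea on synthetic data, with a plain gaussian window deliberately substituted for the class's Lorentzian filter so the snippet runs without the lmfit model; the histogram step is unchanged.

import numpy as np
from scipy.ndimage import filters
from scipy.signal import gaussian  # scipy.signal.windows.gaussian on newer SciPy

# synthetic data: a flat offset of 3.0, one peak, and some noise
x = np.linspace(0, 100, 500)
data = 3.0 + 10.0 * np.exp(-((x - 50.0) / 2.0) ** 2) + 0.2 * np.random.normal(size=x.size)

# smooth with a normalized window; as above, the data maximum pads the boundaries
win = gaussian(10, 10)
data_smooth = filters.convolve1d(data, win / win.sum(), mode='constant', cval=data.max())

# the most populated histogram bin of the smoothed data approximates the offset
hist, edges = np.histogram(data_smooth, bins=10)
offset = (edges[hist.argmax()] + edges[hist.argmax() + 1]) / 2.0
print(offset)  # should land near the 3.0 baseline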