The following code examples, extracted from open-source Python projects, illustrate how to use scipy.signal.gaussian().
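Before the project excerpts, a minimal sketch of the basic call pattern: signal.gaussian(M, std) returns a symmetric M-point Gaussian window that peaks at 1, so it is usually normalized to unit sum before being used as a smoothing kernel. Note that newer SciPy releases expose the window as scipy.signal.windows.gaussian; the scipy.signal.gaussian alias used throughout these examples is deprecated and has been removed in recent versions.

import numpy as np
from scipy import signal  # newer SciPy: use scipy.signal.windows.gaussian instead

# 11-point Gaussian window with a standard deviation of 2 samples
win = signal.gaussian(11, std=2)

# normalize to unit sum so convolution preserves the overall signal level
kernel = win / win.sum()

# smooth a noisy signal while keeping its length
x = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.1 * np.random.randn(200)
smoothed = np.convolve(x, kernel, mode='same')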
def gaussian(y, window_size=3, sigma=2):
    """
    Apply a gaussian filter to smooth the input vector

    Parameters
    ==========
    y : array
        The input array
    window_size : int
        An odd integer describing the size of the filter.
    sigma : float
        The standard deviation of the filter.
    """
    filt = signal.gaussian(window_size, sigma)
    return Series(signal.convolve(y, filt, mode='same'), index=y.index)

def render(locs, info=None, oversampling=1, viewport=None,
           blur_method=None, min_blur_width=0):
    if viewport is None:
        try:
            viewport = [(0, 0), (info[0]['Height'], info[0]['Width'])]
        except TypeError:
            raise ValueError('Need info if no viewport is provided.')
    (y_min, x_min), (y_max, x_max) = viewport
    if blur_method is None:
        return render_hist(locs, oversampling, y_min, x_min, y_max, x_max)
    elif blur_method == 'gaussian':
        return render_gaussian(locs, oversampling, y_min, x_min, y_max, x_max,
                               min_blur_width)
    elif blur_method == 'gaussian_iso':
        return render_gaussian_iso(locs, oversampling, y_min, x_min, y_max, x_max,
                                   min_blur_width)
    elif blur_method == 'smooth':
        return render_smooth(locs, oversampling, y_min, x_min, y_max, x_max)
    elif blur_method == 'convolve':
        return render_convolve(locs, oversampling, y_min, x_min, y_max, x_max,
                               min_blur_width)
    else:
        raise Exception('blur_method not understood.')

def gaussian_smoothing(self, data=None, filter_len=None, filter_sigma=None):
    """ This method convolves the data with a gaussian and returns the
    smoothed data.

    @param array data: raw data
    @param int filter_len: length of filter
    @param int filter_sigma: width of gaussian

    @return array: smoothed data
    """
    # TODO: Check for wrong data type
    if filter_len is None:
        if len(data) < 20.:
            filter_len = 5
        elif len(data) >= 100.:
            filter_len = 10
        else:
            filter_len = int(len(data) / 10.) + 1
    if filter_sigma is None:
        filter_sigma = filter_len
    gaus = gaussian(filter_len, filter_sigma)
    return filters.convolve1d(data, gaus / gaus.sum(), mode='mirror')

def test_smooth_1d():
    for edge in ['m', 'c']:
        for N in [20, 21]:
            # values in [9.0, 11.0]
            x = rand(N) + 10
            mn = 9.0
            mx = 11.0
            for M in range(18, 27):
                print("1d", edge, "N=%i, M=%i" % (N, M))
                xsm = smooth(x, gaussian(M, 2.0), edge=edge)
                assert len(xsm) == N
                # (N,1) case
                xsm2 = smooth(x[:, None], gaussian(M, 2.0)[:, None], edge=edge)
                assert np.allclose(xsm, xsm2[:, 0], atol=1e-14, rtol=1e-12)
                # Smoothed signal should not go to zero if edge effects are handled
                # properly. Also assert proper normalization (i.e. smoothed signal
                # is "in the middle" of the noisy original data).
                assert xsm.min() >= mn
                assert xsm.max() <= mx
                assert mn <= xsm[0] <= mx
                assert mn <= xsm[-1] <= mx
            # convolution with delta peak produces same data exactly
            assert np.allclose(smooth(x, np.array([0.0, 1, 0]), edge=edge), x,
                               atol=1e-14, rtol=1e-12)

def test_smooth_nd():
    for edge in ['m', 'c']:
        a = rand(20, 2, 3) + 10
        for M in [5, 20, 123]:
            print("nd", edge, "M=%i" % M)
            kern = gaussian(M, 2.0)
            asm = smooth(a, kern[:, None, None], axis=0, edge=edge)
            assert asm.shape == a.shape
            for jj in range(asm.shape[1]):
                for kk in range(asm.shape[2]):
                    assert np.allclose(asm[:, jj, kk],
                                       smooth(a[:, jj, kk], kern, edge=edge))
                    mn = a[:, jj, kk].min()
                    mx = a[:, jj, kk].max()
                    smn = asm[:, jj, kk].min()
                    smx = asm[:, jj, kk].max()
                    assert smn >= mn, "min: data=%f, smooth=%f" % (mn, smn)
                    assert smx <= mx, "max: data=%f, smooth=%f" % (mx, smx)

def smooth(votes, **params):
    """Compute the convolution with a Gaussian signal."""
    window = params.get('window', 50)
    std = params.get('std', 20)
    profiles = dict()
    window = gaussian(window, std=std)
    for k, vote in votes.items():
        smoothed = convolve(vote, window, mode='same')
        profiles[k] = smoothed
    return profiles

def _get_spectrogram(self, **kwargs):
    NFFT = int(self.sound().framerate * self.windowLength())
    noverlap = kwargs.get("noverlap", int(NFFT / 2))
    data, ybins, xbins, im = self.ax_spectrogram.specgram(
        self.sound().astype(np.int32), NFFT=NFFT,
        Fs=self.sound().framerate,
        noverlap=noverlap,
        window=signal.gaussian(M=NFFT, std=noverlap))
    self._extent = [xbins.min(), xbins.max(), ybins.min(), ybins.max()]
    self._spectrogram = self.transform(data)

def _gkern2(kernlen=21, nsig=3):
    """Returns a 2D Gaussian kernel array."""
    # create nxn zeros
    inp = np.zeros((kernlen, kernlen))
    # set element at the middle to one, a dirac delta
    inp[kernlen // 2, kernlen // 2] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter mask
    return fi.gaussian_filter(inp, nsig)

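The impulse trick above is self-normalizing: gaussian_filter conserves the total mass of its input, and the impulse sums to one, so the returned kernel does too. A quick sanity check (a sketch, assuming the function above is in scope):

kern = _gkern2(21, 3)
assert abs(kern.sum() - 1.0) < 1e-6   # kernel is normalized automatically
assert kern[10, 10] == kern.max()     # peak sits at the central element
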
def make_gaussian(k, std):
    '''Create a gaussian kernel.

    Input:
    k - the radius of the kernel.
    std - the standard deviation of the kernel.

    Output:
    output - a numpy array of shape (2k+1, 2k+1) and dtype float.

    If gaussian_1d is a gaussian filter of length 2k+1 in one dimension,
    kernel[i,j] should be filled with the product of gaussian_1d[i] and
    gaussian_1d[j].

    Once all the points are filled, the kernel should be scaled so that the
    sum of all cells is equal to one.'''
    kernel = None
    # Insert your code here.----------------------------------------------------
    kernel = np.zeros((2 * k + 1, 2 * k + 1), dtype=float)
    gaussian_1d = signal.gaussian(2 * k + 1, std)
    for i in range(gaussian_1d.shape[0]):
        for j in range(gaussian_1d.shape[0]):
            kernel[i, j] = gaussian_1d[i] * gaussian_1d[j]
    kernelsum = kernel.sum()
    kernel = kernel / kernelsum
    # ---------------------------------------------------------------------------
    return kernel

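The double loop is simply an outer product of the 1D window with itself, so the same kernel can be written without explicit loops; a minimal equivalent sketch (with example values for k and std), which is also how the _fftconvolve example below builds its 2D kernel:

import numpy as np
from scipy import signal

k, std = 2, 1.0                        # example radius and standard deviation
g = signal.gaussian(2 * k + 1, std)    # 1D Gaussian window of length 2k+1
kernel = np.outer(g, g)                # separable 2D kernel, shape (2k+1, 2k+1)
kernel /= kernel.sum()                 # scale so all cells sum to one
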
def _fftconvolve(image, blur_width, blur_height):
    kernel_width = 10 * int(_np.round(blur_width)) + 1
    kernel_height = 10 * int(_np.round(blur_height)) + 1
    kernel_y = _signal.gaussian(kernel_height, blur_height)
    kernel_x = _signal.gaussian(kernel_width, blur_width)
    kernel = _np.outer(kernel_y, kernel_x)
    kernel /= kernel.sum()
    return _signal.fftconvolve(image, kernel, mode='same')

def poisson(self, x, mu):
    """ Poisson function taken from:
    https://github.com/scipy/scipy/blob/master/scipy/stats/_discrete_distns.py

    For license see documentation/BSDLicense_scipy.md
    Author: Travis Oliphant 2002-2011 with contributions from
            SciPy Developers 2004-2011
    """
    if len(np.atleast_1d(x)) == 1:
        check_val = x
    else:
        check_val = x[0]

    if check_val > 1e18:
        self.log.warning('The current value in the poissonian distribution '
                         'exceeds 1e18! Due to numerical imprecision a valid '
                         'functional output cannot be guaranteed any more!')

    # According to the central limit theorem, a poissonian distribution becomes
    # a gaussian distribution for large enough x. Since the numerical precision
    # is limited to calculate the logarithmized poissonian and obtain from that
    # the exponential value, a self defined cutoff is introduced and set to
    # 1e12. Beyond that number a gaussian distribution is assumed, which is a
    # completely valid assumption.
    if check_val < 1e12:
        return np.exp(xlogy(x, mu) - gammaln(x + 1) - mu)
    else:
        return np.exp(-((x - mu) ** 2) / (2 * mu)) / (np.sqrt(2 * np.pi * mu))

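The cutoff reasoning can be checked numerically: for large mu, the log-domain Poisson expression and its Gaussian limit agree to high relative precision. A standalone sketch of that comparison, using the same scipy.special functions:

import numpy as np
from scipy.special import xlogy, gammaln

mu = 1e6
x = np.array([mu - 1000.0, mu, mu + 1000.0])
exact = np.exp(xlogy(x, mu) - gammaln(x + 1) - mu)
gauss = np.exp(-((x - mu) ** 2) / (2 * mu)) / np.sqrt(2 * np.pi * mu)
print(np.max(np.abs(exact - gauss) / exact))  # small relative error at large mu
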
def estimate_poissonian(self, x_axis, data, params):
    """ Provide an estimator for initial values of a poissonian function.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param lmfit.Parameters params: object includes parameter dictionary which
                                    can be set

    @return tuple (error, params):

    Explanation of the return parameter:
        int error: error code (0:OK, -1:error)
        Parameters object params: set parameters of initial values
    """
    error = self._check_1D_input(x_axis=x_axis, data=data, params=params)

    # a gaussian filter is appropriate since a poisson distribution is well
    # approximated by a gaussian
    # gaus = gaussian(10, 10)
    # data_smooth = filters.convolve1d(data, gaus / gaus.sum(), mode='mirror')
    data_smooth = self.gaussian_smoothing(data=data, filter_len=10,
                                          filter_sigma=10)

    # set parameters
    mu = x_axis[np.argmax(data_smooth)]
    params['mu'].value = mu
    params['amplitude'].value = data_smooth.max() / self.poisson(mu, mu)

    return error, params

def gaussianlinearoffset_testing_data():
    x = np.linspace(0, 5, 30)
    x_nice = np.linspace(0, 5, 101)

    mod_final, params = qudi_fitting.make_gaussianwithslope_model()

    data = np.loadtxt("./../1D_shllow.csv")
    data_noisy = data[:, 1]
    data_fit = data[:, 3]
    x = data[:, 2]

    update = dict()
    update["slope"] = {"min": -np.inf, "max": np.inf}
    update["offset"] = {"min": -np.inf, "max": np.inf}
    update["sigma"] = {"min": -np.inf, "max": np.inf}
    update["center"] = {"min": -np.inf, "max": np.inf}
    update["amplitude"] = {"min": -np.inf, "max": np.inf}
    result = qudi_fitting.make_gaussianwithslope_fit(x_axis=x, data=data_noisy,
                                                     add_params=update)
    # gaus = gaussian(3, 5)
    # qudi_fitting.data_smooth = filters.convolve1d(qudi_fitting.data_noisy,
    #                                               gaus / gaus.sum(), mode='mirror')

    plt.plot(x, data_noisy, label="data")
    plt.plot(x, data_fit, "k", label="old fit")
    plt.plot(x, result.init_fit, '-g', label='init')
    plt.plot(x, result.best_fit, '-r', label='fit')
    plt.legend()
    plt.show()
    print(result.fit_report())

def poissonian_testing():
    start = 0
    stop = 30
    mu = 8
    num_points = 1000
    x = np.array(np.linspace(start, stop, num_points))
    # x = np.array(x, dtype=np.int64)
    mod, params = qudi_fitting.make_poissonian_model()
    print('Parameters of the model', mod.param_names)

    p = Parameters()
    p.add('mu', value=mu)
    p.add('amplitude', value=200.)

    data_noisy = (mod.eval(x=x, params=p) *
                  np.array((1 + 0.001 * np.random.normal(size=x.shape) *
                            p['amplitude'].value)))

    print('all int', all(isinstance(item, (np.int32, int, np.int64)) for item in x))
    print('int', isinstance(x[1], int), float(x[1]).is_integer())
    print(type(x[1]))
    # make the filter an extra function shared and usable for other functions
    gaus = gaussian(10, 10)
    data_smooth = filters.convolve1d(data_noisy, gaus / gaus.sum(), mode='mirror')

    result = qudi_fitting.make_poissonian_fit(x, data_noisy)
    print(result.fit_report())

    plt.figure()
    plt.plot(x, data_noisy, '-b', label='noisy data')
    plt.plot(x, data_smooth, '-g', label='smoothed data')
    plt.plot(x, result.init_fit, '-y', label='initial values')
    plt.plot(x, result.best_fit, '-r', linewidth=2.0, label='fit')
    plt.xlabel('counts')
    plt.ylabel('occurrences')
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0.)
    plt.show()

def double_poissonian_testing():
    """ Testing of double poissonian with self created data.
    First version of double poissonian fit."""

    start = 100
    stop = 300
    num_points = int((stop - start) + 1) * 100
    x = np.linspace(start, stop, num_points)

    # double poissonian
    mod, params = qudi_fitting.make_multiplepoissonian_model(no_of_functions=2)
    print('Parameters of the model', mod.param_names)
    parameter = Parameters()
    parameter.add('p0_mu', value=200)
    parameter.add('p1_mu', value=240)
    parameter.add('p0_amplitude', value=1)
    parameter.add('p1_amplitude', value=1)
    data_noisy = (np.array(mod.eval(x=x, params=parameter)) *
                  np.array((1 + 0.2 * np.random.normal(size=x.shape)) *
                           parameter['p1_amplitude'].value))

    # make the filter an extra function shared and usable for other functions
    gaus = gaussian(10, 10)
    data_smooth = filters.convolve1d(data_noisy, gaus / gaus.sum(), mode='mirror')

    result = qudi_fitting.make_doublepoissonian_fit(x, data_noisy)
    print(result.fit_report())

    plt.figure()
    plt.plot(x, data_noisy, '-b', label='noisy data')
    plt.plot(x, data_smooth, '-g', label='smoothed data')
    plt.plot(x, result.init_fit, '-y', label='initial values')
    plt.plot(x, result.best_fit, '-r', linewidth=2.0, label='fit')
    plt.xlabel('counts')
    plt.ylabel('occurrences')
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0.)
    plt.show()

def orientation(image, stride=8, window=17):
    with K.tf.name_scope('orientation'):
        assert image.get_shape().as_list()[3] == 1, 'Images must be grayscale'
        strides = [1, stride, stride, 1]
        E = np.ones([window, window, 1, 1])
        sobelx = np.reshape(np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]],
                                     dtype=float), [3, 3, 1, 1])
        sobely = np.reshape(np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
                                     dtype=float), [3, 3, 1, 1])
        gaussian = np.reshape(gaussian2d((5, 5), 1), [5, 5, 1, 1])
        with K.tf.name_scope('sobel_gradient'):
            Ix = K.tf.nn.conv2d(image, sobelx, strides=[1, 1, 1, 1],
                                padding='SAME', name='sobel_x')
            Iy = K.tf.nn.conv2d(image, sobely, strides=[1, 1, 1, 1],
                                padding='SAME', name='sobel_y')
        with K.tf.name_scope('eltwise_1'):
            Ix2 = K.tf.multiply(Ix, Ix, name='IxIx')
            Iy2 = K.tf.multiply(Iy, Iy, name='IyIy')
            Ixy = K.tf.multiply(Ix, Iy, name='IxIy')
        with K.tf.name_scope('range_sum'):
            Gxx = K.tf.nn.conv2d(Ix2, E, strides=strides, padding='SAME',
                                 name='Gxx_sum')
            Gyy = K.tf.nn.conv2d(Iy2, E, strides=strides, padding='SAME',
                                 name='Gyy_sum')
            Gxy = K.tf.nn.conv2d(Ixy, E, strides=strides, padding='SAME',
                                 name='Gxy_sum')
        with K.tf.name_scope('eltwise_2'):
            Gxx_Gyy = K.tf.subtract(Gxx, Gyy, name='Gxx_Gyy')
            theta = atan2([2 * Gxy, Gxx_Gyy]) + np.pi
        # two-dimensional low-pass filter: Gaussian filter here
        with K.tf.name_scope('gaussian_filter'):
            phi_x = K.tf.nn.conv2d(K.tf.cos(theta), gaussian, strides=[1, 1, 1, 1],
                                   padding='SAME', name='gaussian_x')
            phi_y = K.tf.nn.conv2d(K.tf.sin(theta), gaussian, strides=[1, 1, 1, 1],
                                   padding='SAME', name='gaussian_y')
            theta = atan2([phi_y, phi_x]) / 2
    return theta

def gaussian2d(shape=(5, 5), sigma=0.5):
    """
    2D gaussian mask - should give the same result as MATLAB's
    fspecial('gaussian', [shape], [sigma])
    """
    m, n = [(ss - 1.) / 2. for ss in shape]
    y, x = np.ogrid[-m:m + 1, -n:n + 1]
    h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))
    h[h < np.finfo(h.dtype).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h /= sumh
    return h

def gausslabel(length=180, stride=2):
    gaussian_pdf = signal.gaussian(length + 1, 3)
    label = np.reshape(np.arange(stride / 2, length, stride), [1, 1, -1, 1])
    y = np.reshape(np.arange(stride / 2, length, stride), [1, 1, 1, -1])
    delta = np.array(np.abs(label - y), dtype=int)
    # circular distance: orientations wrap around at `length` degrees
    # (integer division keeps the result usable as an index under Python 3)
    delta = np.minimum(delta, length - delta) + length // 2
    return gaussian_pdf[delta]

def kernel(self, series, sigma=3):
    # fix the weight of data
    # http://www.nehalemlabs.net/prototype/blog/2014/04/12/
    # how-to-fix-scipys-interpolating-spline-default-behavior/
    series = np.asarray(series)
    b = gaussian(25, sigma)
    averages = filters.convolve1d(series, b / b.sum())
    variances = filters.convolve1d(np.power(series - averages, 2), b / b.sum())
    variances[variances == 0] = 1
    return averages, variances

def filter(self, X, Y):
    X, Y = self.sortxy(X, Y)
    # use a gaussian kernel to get better variance estimates
    avg, var = self.kernel(Y)
    spl = UnivariateSpline(X, Y, k=self.k, w=1 / np.sqrt(var))
    if self.interpolate:
        xmax = X[-1]
        Xfull = np.arange(xmax)
        Yfull = spl(Xfull)
        return Xfull, Yfull
    else:
        Y1 = spl(X)
        return X, Y1

def lorentz(M, std=1.0, sym=True):
    """Lorentz window (same as Cauchy function). Function skeleton stolen from
    scipy.signal.gaussian().

    The Lorentz function is

    .. math::

        L(x) = \\frac{\\Gamma}{(x - x_0)^2 + \\Gamma^2}

    Here :math:`x_0 = 0` and `std` = :math:`\\Gamma`. Some definitions use
    :math:`1/2\\,\\Gamma` instead of :math:`\\Gamma`, but without 1/2 we get
    comparable peak width to Gaussians when using this window in convolutions,
    thus ``scipy.signal.gaussian(M, std=5)`` is similar to
    ``lorentz(M, std=5)``.

    Parameters
    ----------
    M : int
        number of points
    std : float
        spread parameter :math:`\\Gamma`
    sym : bool

    Returns
    -------
    w : (M,)
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, dtype=float)
    odd = M % 2
    if not sym and not odd:
        M = M + 1
    n = np.arange(0, M) - (M - 1.0) / 2.0
    w = std / (n**2.0 + std**2.0)
    w /= w.max()
    if not sym and not odd:
        w = w[:-1]
    return w

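To see the "comparable peak width" claim from the docstring, one can count the samples above half maximum for both windows; a short sketch (assuming lorentz as defined above is in scope):

import numpy as np
from scipy import signal

M = 101
g = signal.gaussian(M, std=5)
l = lorentz(M, std=5)
# both windows peak at 1, so counting samples >= 0.5 approximates the FWHM
print(np.sum(g >= 0.5), np.sum(l >= 0.5))  # similar widths
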
def build(self):
    self.trainable_weights = [self.alpha]
    if self.use_gaussian_window:
        window = signal.gaussian(self.window_size, std=self.std)
    else:
        window = np.ones(self.window_size, dtype=floatX)
    self.w_gaussian = K.variable(window)
    self.trainable_weights.append(self.w_gaussian)

def build(self, input_shape):
    self.trainable_weights = [self.alpha]
    if self.use_gaussian_window:
        self.std = 2
        window = signal.gaussian(self.window_size, std=self.std)
        self.w_gaussian = K.variable(window)

def build(self, input_shape):
    self.trainable_weights = [self.alpha]
    if self.use_gaussian_window:
        window = signal.gaussian(self.window_size, std=self.std)
    else:
        window = np.ones(self.window_size, dtype=floatX)
    self.w_gaussian = K.variable(window)
    self.trainable_weights.append(self.w_gaussian)

def calibrate_eye(self, eye_channel, realign_mark, realign_timebin,
                  eye_medfilt_win=21, eye_gausfilt_sigma=3):
    '''
    Args
        eye_channel (list):
            the first element is the channel name for the horizontal eye position
            the second element is the channel name for the vertical eye position
        realign_mark (string):
            event marker used to align eye positions
        realign_timebin (list):
            a period of time relative to the realign_mark. Example: [0, 100]
        eye_medfilt_win (int):
            parameter for the median filter to smooth the eye movement trajectory
        eye_gausfilt_sigma (int):
            sigma of the gaussian kernel to smooth the eye movement trajectory
    Return:
        -
    '''
    samp_time = 1000.0 / self.sampling_rate[eye_channel[0]]
    # medfilt eye x, y position
    lamb_medfilt = lambda ite: signal.medfilt(ite, eye_medfilt_win)
    self.data_df[eye_channel[0]] = self.data_df[eye_channel[0]].apply(lamb_medfilt)
    self.data_df[eye_channel[1]] = self.data_df[eye_channel[1]].apply(lamb_medfilt)
    # gaussian filt eye x, y position
    lamb_gausfilt = lambda ite: ndimage.filters.gaussian_filter1d(ite, eye_gausfilt_sigma)
    self.data_df[eye_channel[0]] = self.data_df[eye_channel[0]].apply(lamb_gausfilt)
    self.data_df[eye_channel[1]] = self.data_df[eye_channel[1]].apply(lamb_gausfilt)
    # align eye to realign_mark, realign_timebin uses realign_mark as reference
    realign_poinnum = (self.data_df[realign_mark] / samp_time).values
    start_points = realign_poinnum + realign_timebin[0] / samp_time
    points_num = int((realign_timebin[1] - realign_timebin[0]) / samp_time)
    for channel in eye_channel:
        align_points = list()
        for idx in self.data_df.index:
            start_point = start_points[idx]
            if ~np.isnan(start_point):
                start_point = int(start_point)
                end_point = start_point + points_num
                align_point = self.data_df[channel].loc[idx][start_point:end_point]
                align_point = align_point.mean()
            else:
                align_point = np.nan
            align_points.append(align_point)
        self.data_df[channel] = self.data_df[channel] - align_points
    # find all saccades for all trials

def _flux(t, n, duration, std, offs=1):
    '''
    returns Gaussian shaped signal fluctuations [events]

    t --> times to calculate event for
    n --> numbers of events per sec
    duration --> event duration per sec
    std --> std of event if averaged over time
    offs --> event offset
    '''
    duration *= len(t) / t[-1]
    duration = int(max(duration, 1))
    pos = []
    n *= t[-1]
    pp = np.arange(len(t))
    valid = np.ones_like(t, dtype=bool)
    for _ in range(int(round(n))):
        try:
            ppp = np.random.choice(pp[valid], 1)[0]
            pos.append(ppp)
            valid[max(0, ppp - duration - 1):ppp + duration + 1] = False
        except ValueError:
            break
    sign = np.random.randint(0, 2, len(pos))
    sign[sign == 0] = -1
    out = np.zeros_like(t)
    amps = np.random.normal(loc=0, scale=1, size=len(pos))
    if duration > 2:
        evt = gaussian(duration, duration)
        evt -= evt[0]
    else:
        evt = np.ones(shape=duration)
    for s, p, a in zip(sign, pos, amps):
        pp = duration
        if p + duration > len(out):
            pp = len(out) - p
        out[p:p + pp] = s * a * evt[:pp]
    out /= out.std() / std
    out += offs
    return out

def make_poissonian_model(self, prefix=None):
    """ Create a model of a single poissonian with an offset.

    @param str prefix: optional string, which serves as a prefix for all
                       parameters used in this model. That will prevent name
                       collisions if this model is used in a composite way.

    @return tuple: (object model, object params)

    Explanation of the objects:
        object lmfit.model.CompositeModel model:
            A model the lmfit module will use for that fit. Here a poissonian
            model. Returns an object of the class lmfit.model.CompositeModel.

        object lmfit.parameter.Parameters params:
            It is basically an OrderedDict, so a dictionary, with keys
            denoting the parameters as string names and values which are
            lmfit.parameter.Parameter (without s) objects, keeping the
            information about the current value.
    """
    def poisson_function(x, mu):
        """ Function of a poisson distribution.

        @param numpy.array x: 1D array as the independent variable - e.g. occurences
        @param float mu: expectation value

        @return: poisson function: in order to use it as a model
        """
        return self.poisson(x, mu)

    amplitude_model, params = self.make_amplitude_model(prefix=prefix)

    if not isinstance(prefix, str) and prefix is not None:
        self.log.error('The passed prefix <{0}> of type {1} is not a string '
                       'and cannot be used as a prefix and will be ignored '
                       'for now. Correct that!'.format(prefix, type(prefix)))
        poissonian_model = Model(poisson_function, independent_vars='x')
    else:
        poissonian_model = Model(poisson_function, independent_vars='x',
                                 prefix=prefix)

    poissonian_ampl_model = amplitude_model * poissonian_model
    params = poissonian_ampl_model.make_params()

    return poissonian_ampl_model, params

def make_poissonian_fit(self, x_axis, data, estimator, units=None, add_params=None):
    """ Perform a poissonian fit on the provided data.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param method estimator: Pointer to the estimator method
    @param list units: List containing the ['horizontal', 'vertical'] units as strings
    @param Parameters or dict add_params: optional, additional parameters of
        type lmfit.parameter.Parameters, OrderedDict or dict for the fit
        which will be used instead of the values from the estimator.

    @return object result: lmfit.model.ModelFit object, all parameters
        provided about the fitting, like: success, initial fitting values,
        best fitting values, data with best fit with given axis,...
    """
    poissonian_model, params = self.make_poissonian_model()

    error, params = estimator(x_axis, data, params)

    params = self._substitute_params(initial_params=params,
                                     update_params=add_params)

    try:
        result = poissonian_model.fit(data, x=x_axis, params=params)
    except:
        self.log.warning('The poissonian fit did not work. Check if a poisson '
                         'distribution is needed or a normal approximation '
                         'can be used. For values above 10 a normal/gaussian '
                         'distribution is a good approximation.')
        result = poissonian_model.fit(data, x=x_axis, params=params)
        print(result.message)

    if units is None:
        units = ['arb. unit', 'arb. unit']

    result_str_dict = dict()  # create result string for gui or OrderedDict()

    result_str_dict['Amplitude'] = {'value': result.params['amplitude'].value,
                                    'error': result.params['amplitude'].stderr,
                                    'unit': units[1]}                   # amplitude

    result_str_dict['Event rate'] = {'value': result.params['mu'].value,
                                     'error': result.params['mu'].stderr,
                                     'unit': units[0]}                  # event rate

    result.result_str_dict = result_str_dict
    return result

def make_poissoniandouble_fit(self, x_axis, data, estimator, units=None, add_params=None):
    """ Perform a double poissonian fit on the provided data.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param method estimator: Pointer to the estimator method
    @param list units: List containing the ['horizontal', 'vertical'] units as strings
    @param Parameters or dict add_params: optional, additional parameters of
        type lmfit.parameter.Parameters, OrderedDict or dict for the fit
        which will be used instead of the values from the estimator.

    @return object result: lmfit.model.ModelFit object, all parameters
        provided about the fitting, like: success, initial fitting values,
        best fitting values, data with best fit with given axis,...
    """
    double_poissonian_model, params = self.make_poissoniandouble_model()

    error, params = estimator(x_axis, data, params)

    params = self._substitute_params(initial_params=params,
                                     update_params=add_params)

    try:
        result = double_poissonian_model.fit(data, x=x_axis, params=params)
    except:
        self.log.warning('The double poissonian fit did not work. Check if a '
                         'poisson distribution is needed or a normal '
                         'approximation can be used. For values above 10 a '
                         'normal/gaussian distribution is a good approximation.')
        result = double_poissonian_model.fit(data, x=x_axis, params=params)

    # Write the parameters to allow human-readable output to be generated
    result_str_dict = OrderedDict()

    if units is None:
        units = ['arb. unit', 'arb. unit']

    result_str_dict['Amplitude 1'] = {'value': result.params['p0_amplitude'].value,
                                      'error': result.params['p0_amplitude'].stderr,
                                      'unit': units[0]}
    result_str_dict['Event rate 1'] = {'value': result.params['p0_mu'].value,
                                       'error': result.params['p0_mu'].stderr,
                                       'unit': units[1]}
    result_str_dict['Amplitude 2'] = {'value': result.params['p1_amplitude'].value,
                                      'error': result.params['p1_amplitude'].stderr,
                                      'unit': units[0]}
    result_str_dict['Event rate 2'] = {'value': result.params['p1_mu'].value,
                                       'error': result.params['p1_mu'].stderr,
                                       'unit': units[1]}

    result.result_str_dict = result_str_dict
    return result

def gaussian_twoD_testing():
    """ Implement and check the estimator for a 2D gaussian fit. """

    data = np.empty((121, 1))
    amplitude = np.random.normal(3e5, 1e5)
    center_x = 91 + np.random.normal(0, 0.8)
    center_y = 14 + np.random.normal(0, 0.8)
    sigma_x = np.random.normal(0.7, 0.2)
    sigma_y = np.random.normal(0.7, 0.2)
    offset = 0
    x = np.linspace(90, 92, 11)
    y = np.linspace(13, 15, 12)
    xx, yy = np.meshgrid(x, y)

    axes = (xx.flatten(), yy.flatten())

    theta_here = 10. / 360. * (2 * np.pi)

    # data = qudi_fitting.twoD_gaussian_function((xx, yy), *(amplitude, center_x,
    #     center_y, sigma_x, sigma_y, theta_here, offset))
    gmod, params = qudi_fitting.make_twoDgaussian_model()

    data = gmod.eval(x=axes, amplitude=amplitude, center_x=center_x,
                     center_y=center_y, sigma_x=sigma_x, sigma_y=sigma_y,
                     theta=theta_here, offset=offset)
    data += 50000 * np.random.random_sample(np.shape(data))

    gmod, params = qudi_fitting.make_twoDgaussian_model()

    para = Parameters()
    # para.add('theta', vary=False)
    # para.add('center_x', expr='0.5*center_y')
    # para.add('sigma_x', min=0.2*((92.-90.)/11.), max=10*(x[-1]-y[0]))
    # para.add('sigma_y', min=0.2*((15.-13.)/12.), max=10*(y[-1]-y[0]))
    # para.add('center_x', value=40, min=50, max=100)

    result = qudi_fitting.make_twoDgaussian_fit(xy_axes=axes,
                                                data=data)  # , add_parameters=para)

    # FIXME: What does "Tolerance seems to be too small." mean in message?
    # print(result.message)
    plt.close('all')

    fig, ax = plt.subplots(1, 1)
    # ax.hold(True)  # hold() was removed from matplotlib; overplotting is the default

    ax.imshow(result.data.reshape(len(y), len(x)),
              cmap=plt.cm.jet, origin='lower',
              extent=(x.min(), x.max(), y.min(), y.max()),
              interpolation="nearest")
    ax.contour(x, y, result.best_fit.reshape(len(y), len(x)), 8, colors='w')
    plt.show()

    # plt.close('all')
    print(result.fit_report())
    # print('Message:', result.message)

def tuning_curve_2d(position, spikes, xedges, yedges, occupied_thresh=0,
                    gaussian_sigma=None):
    """Creates 2D tuning curves based on spikes and 2D position.

    Parameters
    ----------
    position : nept.Position
        Must be a 2D position.
    spikes : list
        Containing nept.SpikeTrain for each neuron.
    xedges : np.array
    yedges : np.array
    occupied_thresh : float
    gaussian_sigma : float
        Sigma used in gaussian filter if filtering.

    Returns
    -------
    tuning_curves : np.array
        Where each inner array is the tuning curve for an individual neuron.
    """
    sampling_rate = np.median(np.diff(position.time))

    position_2d, pos_xedges, pos_yedges = np.histogram2d(position.y, position.x,
                                                         bins=[yedges, xedges])
    position_2d *= sampling_rate
    shape = position_2d.shape
    occupied_idx = position_2d > occupied_thresh

    tc = []
    for spiketrain in spikes:
        spikes_x = []
        spikes_y = []
        for spike_time in spiketrain.time:
            spike_idx = find_nearest_idx(position.time, spike_time)
            if np.abs(position.time[spike_idx] - spike_time) < sampling_rate:
                spikes_x.append(position.x[spike_idx])
                spikes_y.append(position.y[spike_idx])
        spikes_2d, spikes_xedges, spikes_yedges = np.histogram2d(spikes_y, spikes_x,
                                                                 bins=[yedges, xedges])
        firing_rate = np.zeros(shape)
        firing_rate[occupied_idx] = spikes_2d[occupied_idx] / position_2d[occupied_idx]
        tc.append(firing_rate)

    if gaussian_sigma is not None:
        tuning_curves = []
        for firing_rate in tc:
            tuning_curves.append(gaussian_filter(firing_rate, gaussian_sigma))
    else:
        print('Tuning curves with no filter.')
        tuning_curves = tc

    return np.array(tuning_curves)

def bin_spikes(spikes, position, window_size, window_advance,
               gaussian_std=None, n_gaussian_std=5, normalized=True):
    """Bins spikes using a sliding window.

    Parameters
    ----------
    spikes : list
        Of nept.SpikeTrain
    position : nept.AnalogSignal
    window_size : float
    window_advance : float
    gaussian_std : float or None
    n_gaussian_std : int
    normalized : boolean

    Returns
    -------
    binned_spikes : nept.AnalogSignal
    """
    bin_edges = get_edges(position, window_advance, lastbin=True)

    given_n_bins = window_size / window_advance
    n_bins = int(round(given_n_bins))
    if abs(n_bins - given_n_bins) > 0.01:
        warnings.warn("window advance does not divide the window size evenly. "
                      "Using window size %g instead." % (n_bins * window_advance))

    if normalized:
        square_filter = np.ones(n_bins) * (1 / n_bins)
    else:
        square_filter = np.ones(n_bins)

    counts = np.zeros((len(spikes), len(bin_edges)))
    for idx, spiketrain in enumerate(spikes):
        counts[idx] = np.convolve(np.hstack([np.histogram(spiketrain.time, bins=bin_edges)[0],
                                             np.array([0])]),
                                  square_filter, mode='same')

    if gaussian_std is not None and gaussian_std > window_advance:
        n_points = n_gaussian_std * gaussian_std * 2 / window_advance
        n_points = max(n_points, 1.0)
        if n_points % 2 == 0:
            n_points += 1
        n_points = int(round(n_points))
        gaussian_filter = signal.gaussian(n_points, gaussian_std / window_advance)
        gaussian_filter /= np.sum(gaussian_filter)

        if len(gaussian_filter) < counts.shape[1]:
            for idx, spiketrain in enumerate(spikes):
                counts[idx] = np.convolve(counts[idx], gaussian_filter, mode='same')
        else:
            raise ValueError("gaussian filter too long for this length of time")

    return nept.AnalogSignal(counts.T, bin_edges)

def open_files(data_path, event_file, db_path):
    # Read all data
    st = read(data_path)

    # Fill stream with station coordinates (in SAC header)
    st.get_station_coordinates()

    # Read event file
    try:
        cat = obspy.read_events(event_file)
        if len(cat) > 1:
            msg = ('File %s contains more than one event. Don\'t know which '
                   'one to choose. Please provide a QuakeML file with just '
                   'one event.')
            raise TypeError(msg % event_file)
        event = cat[0]
    except:
        event = get_event_from_obspydmt(event_file)

    origin = event.origins[0]
    db = instaseis.open_db(db_path)

    # Initialize with MT from event file
    try:
        tensor = event.focal_mechanisms[0].moment_tensor.tensor
    except IndexError:
        print('No moment tensor present, using explosion. Hilarity may ensue')
        tensor = obspy.core.event.Tensor(m_rr=1e20, m_tt=1e20, m_pp=1e20,
                                         m_rp=0.0, m_rt=0.0, m_tp=0.0)

    # Init with Gaussian STF with a length T:
    # log10 T propto 0.5*Magnitude
    # Scaling is such that the 5.7 Virginia event takes 5 seconds
    if len(event.magnitudes) > 0:
        duration = 10 ** (0.5 * (event.magnitudes[0].mag / 5.7)) * 5.0 / 2
    else:
        duration = 2.5
    print('Assuming duration of %8.1f sec' % duration)
    stf = signal.gaussian(duration * 2, duration / 4 / db.info.dt)

    return db, st, origin, tensor, stf