The following are 50 code examples, extracted from open-source Python projects, that show how to use scipy.optimize.leastsq().
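Every example below follows the same basic pattern: write a function that returns a vector of residuals and pass it to scipy.optimize.leastsq() together with an initial parameter guess. As a minimal, self-contained sketch of that pattern (the straight-line model and the synthetic data are illustrative only, not taken from any of the projects below):

import numpy as np
from scipy.optimize import leastsq

# Synthetic data: y = 2.0 * x + 1.0 plus a little noise.
rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + 0.1 * rng.standard_normal(x.size)

def residuals(params, x, y):
    # leastsq minimizes the sum of squares of this vector.
    slope, intercept = params
    return y - (slope * x + intercept)

p0 = [1.0, 0.0]  # initial guess for (slope, intercept)
p_opt, ier = leastsq(residuals, p0, args=(x, y))
# ier values of 1, 2, 3 or 4 signal that a solution was found.
print(p_opt)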
def fit(self):
    # Quadratic fit to get an initial guess for the parameters.
    # Thanks: https://github.com/materialsproject/pymatgen
    # -> pymatgen/io/abinitio/eos.py
    a, b, c = np.polyfit(self.volume, self.energy, 2)
    v0 = -b/(2*a)
    e0 = a*v0**2 + b*v0 + c
    b0 = 2*a*v0
    b1 = 4  # b1 is usually a small number like 4
    if not (self.volume.min() < v0 < self.volume.max()):
        raise ValueError('The minimum volume of a fitted parabola is '
                         'not in the input volumes')
    # need to use lst2dct and dct2lst here to keep the order of parameters
    pp0_dct = dict(e0=e0, b0=b0, b1=b1, v0=v0)
    target = lambda pp, v: self.energy - self.func(v, self.func.lst2dct(pp))
    pp_opt, ierr = leastsq(target,
                           self.func.dct2lst(pp0_dct),
                           args=(self.volume,))
    self.params = self.func.lst2dct(pp_opt)
def _optLeastSqCircle(self, x, y):
    def calcR(x, y, xc, yc):
        '''
        Calculate distance of each point from the center (xc, yc).
        '''
        return np.sqrt((x-xc)**2 + (y-yc)**2)

    def f(c, x, y):
        '''
        Calculate the algebraic distance between the data points and the
        mean circle centered at c = (xc, yc).
        '''
        Ri = calcR(x, y, *c)
        return Ri - Ri.mean()

    x_m = np.mean(x)
    y_m = np.mean(y)
    centre_estimate = x_m, y_m
    centre, ier = optimize.leastsq(f, centre_estimate, args=(x, y))
    xc, yc = centre
    Ri = calcR(x, y, *centre)
    R = Ri.mean()
    residuals = np.sqrt((Ri - R)**2)
    return xc, yc, R, residuals
def nbinomres(p, hist, x, hist_err=None, N=1):
    '''residuals to leastsq() to fit normal chi-square'''
    mu, M = p
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    w = np.where(hist > 0.0)
    if hist_err is None:
        hist_err = np.ones_like(Np)
    scale = np.sqrt(Np[w]/hist_err[w])
    #scale = 1
    err = (hist[w] - Np[w])/scale
    return err

###########
##Dev at Oct 12, 2017
def nbinomlog1(p, hist, x, N, mu):
    """ Residuals for maximum likelihood fit to nbinom distribution.
        Vary M (shape param) but keep mu (count rate) fixed (using leastsq)
        p: fitting parameter, in this case is M, coherent mode number
        hist: histogram of photon count for each bin (a number, not a probability)
        x: photon count
        N: total photon count in the statistics (probability = hist / N)
        mu: average photon count for each bin
    """
    M = abs(p[0])
    w = np.where(hist > 0.0)
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = 2*(Np[w] - hist[w])
    err = err - 2*hist[w]*np.log(Np[w]/hist[w])  # note: sum(Np-hist)==0
    return np.sqrt(np.abs(err))
def set_up_peak_fit(self, xs, ys):
    params = self.shape_fitter.guess(xs, ys)
    params_dict = FittedPeakShape(self.shape_fitter.params_to_dict(params),
                                  self.shape_fitter)
    if len(params) > len(xs):
        self.params_list.append(params)
        self.params_dict_list.append(params_dict)
        return ys, params_dict

    fit = leastsq(self.shape_fitter.fit, list(params_dict.values()), (xs, ys))
    params = fit[0]
    params_dict = FittedPeakShape(self.shape_fitter.params_to_dict(params),
                                  self.shape_fitter)
    self.params_list.append(params)
    self.params_dict_list.append(params_dict)

    residuals = self.shape_fitter.fit(params, xs, ys)
    return residuals, params_dict
def fit_spot(spot):
    size = spot.shape[0]
    size_half = int(size / 2)
    grid = _np.arange(-size_half, size_half + 1, dtype=_np.float32)
    model_x = _np.empty(size, dtype=_np.float32)
    model_y = _np.empty(size, dtype=_np.float32)
    model = _np.empty((size, size), dtype=_np.float32)
    residuals = _np.empty((size, size), dtype=_np.float32)
    # theta is [x, y, photons, bg, sx, sy]
    theta0 = _initial_parameters(spot, size, size_half)
    args = (spot, grid, size, model_x, model_y, model, residuals)
    result = _optimize.leastsq(_compute_residuals, theta0, args=args,
                               ftol=1e-2, xtol=1e-2)  # leastsq is much faster than least_squares
    '''
    model = compute_model(result[0], grid, size, model_x, model_y, model)
    plt.figure()
    plt.subplot(121)
    plt.imshow(spot, interpolation='none')
    plt.subplot(122)
    plt.imshow(model, interpolation='none')
    plt.colorbar()
    plt.show()
    '''
    return result[0]
def FitPCA(self, hPCA_Proj):
    '''
    Determine the timing of the inflation event.

    Uses the first component of the pca projection and fits
    A * arctan( (t - t0) / c ) + B to the first pca projection.

    @param hPCA_Proj: The sklearn PCA projection

    @return [t0, c]
    '''
    fitfunc = lambda p, t: p[0]*np.arctan((t - p[1]) / p[2]) + p[3]
    errfunc = lambda p, x, y: fitfunc(p, x) - y

    dLen = len(hPCA_Proj[:, 0])
    pA, success = optimize.leastsq(errfunc, [1., dLen/2., 1., 0.],
                                   args=(np.arange(dLen), hPCA_Proj[:, 0]))
    ct = pA[1:3]
    return ct, pA[0]
def FitPCA(self, hPCA_Proj):
    '''
    Determine the timing of the inflation event from the first component
    of the pca projection.

    Fits A * arctan( (t - t0) / c ) + B to the first pca projection, in
    order to estimate source amplitude parameters.

    @param hPCA_Proj: The sklearn PCA

    @return ct: the t0 and c parameters from the fit
    @return pA[0]: the fitted amplitude parameter
    '''
    fitfunc = lambda p, t: p[0]*np.arctan((t - p[1]) / p[2]) + p[3]
    errfunc = lambda p, x, y: fitfunc(p, x) - y

    dLen = len(hPCA_Proj[:, 0])
    pA, success = optimize.leastsq(errfunc, [1., dLen/2., 1., 0.],
                                   args=(np.arange(dLen), hPCA_Proj[:, 0]))
    ct = pA[1:3]
    return ct, pA[0]
def GetFittingPlane3D(points):
    # returns a, b, c, d in ax+by+cz+d=0; (a, b, c) is also the normal.
    pointsT = points.transpose()
    # Initial guess of the plane
    diff = points[0] - points[-1]
    p0 = np.array(([diff[0], diff[1], diff[2], 1.]))
    sol = leastsq(residuals, p0, args=(None, pointsT))[0]
    #print "Solution: ", sol
    #print "Old Error: ", (f_min(pointsT, p0)**2).sum()
    #print "New Error: ", (f_min(pointsT, sol)**2).sum()
    return sol
def adasurf(points, config, initial_points=None):
    global ELAPSE_LSQ

    def residuals(params, x, y, z, regularization=0.0):
        rt = z - config.surf_fun(x, y, params)
        # rt = np.append(rt, np.sqrt(regularization)*params)
        return rt

    def MSE(params, points):
        e = points[:, 2] - config.surf_fun(points[:, 0], points[:, 1], params)
        return np.sqrt(np.dot(e.T, e)/len(e))

    x1 = points[:, 0]
    y1 = points[:, 1]
    z1 = points[:, 2]

    starttime = time.perf_counter()  # time.clock() was removed in Python 3.8
    r = leastsq(residuals, [1, 0.5, 1], args=(x1, y1, z1))
    ELAPSE_LSQ += time.perf_counter() - starttime

    # normalizaer = np.linalg.norm(r[0])
    return Surface(args=r[0], residuals=MSE(r[0], points),
                   points=points, initial_points=initial_points)
def adasurf(points, config):
    # Fit the parameters p of config.surf_fun(x, y, p) to the (x, y, z)
    # point cloud with least squares.
    def residuals(params, x, y, z, regularization=0.0):
        rt = z - config.surf_fun(x, y, params)
        rt = np.append(rt, np.sqrt(regularization)*params)
        return rt

    def MSE(params, points):
        e = points[:, 2] - config.surf_fun(points[:, 0], points[:, 1], params)
        return np.sqrt(np.dot(e.T, e)/len(e))

    x1 = points[:, 0]
    y1 = points[:, 1]
    z1 = points[:, 2]
    # Run the least-squares fit from a fixed initial guess.
    r = leastsq(residuals, [1, 0.5, 1], args=(x1, y1, z1))
    # r[0] holds the fitted parameters; also return the fit MSE and the points.
    return r[0], MSE(r[0], points), points
def adasurf(points, config):
    global ELAPSE_LSQ

    def residuals(params, x, y, z, regularization=0.0):
        rt = z - config.surf_fun(x, y, params)
        rt = np.append(rt, np.sqrt(regularization)*params)
        return rt

    def MSE(params, points):
        e = points[:, 2] - config.surf_fun(points[:, 0], points[:, 1], params)
        return np.sqrt(np.dot(e.T, e)/len(e))

    x1 = points[:, 0]
    y1 = points[:, 1]
    z1 = points[:, 2]

    starttime = time.perf_counter()  # time.clock() was removed in Python 3.8
    r = leastsq(residuals, [1, 0.5, 1], args=(x1, y1, z1))
    ELAPSE_LSQ += time.perf_counter() - starttime

    return r[0], MSE(r[0], points), points
def residual_multigauss(param, dataimage, nonfinite=0.0, ravelresidual=True,
                        showimages=False, verbose=False):
    """ Calculating the residual between the multigaussian model with the
    parameters 'param' and the data.

    --- INPUT ---
    param           Parameters of multi-gaussian model to generate.
                    See modelimage_multigauss() header for details
    dataimage       Data image to take residual of
    nonfinite       Value to replace non-finite entries in residual with
    ravelresidual   To np.ravel() the residual image set this to True.
                    Needed by scipy.optimize.leastsq() optimizer function
    showimages      To show model and residual images set to True
    verbose         Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_model_FoV as tmf
    param   = [18,31,1*0.3,2.1*0.3,1.2*0.3,30*0.3, 110,90,200*0.5,20.1*0.5,15.2*0.5,0*0.5]
    dataimg = pyfits.open('/Users/kschmidt/work/TDOSE/mock_cube_sourcecat161213_tdose_mock_cube.fits')[0].data[0,:,:]
    residual = tmf.residual_multigauss(param, dataimg, showimages=True)
    """
    if verbose: print(' - Estimating residual (= model - data) between model and data image')

    imgsize = dataimage.shape
    xgrid, ygrid = tu.gen_gridcomponents(imgsize)
    modelimg = tmf.modelimage_multigauss((xgrid, ygrid), param, imgsize,
                                         showmodelimg=showimages, verbose=verbose)

    residualimg = modelimg - dataimage

    if showimages:
        plt.imshow(residualimg, interpolation='none', vmin=1e-5,
                   vmax=np.max(residualimg), norm=mpl.colors.LogNorm())
        plt.title('Residual (= model - data) image')
        plt.show()

    if nonfinite is not None:
        residualimg[~np.isfinite(residualimg)] = nonfinite

    if ravelresidual:
        residualimg = np.ravel(residualimg)

    return residualimg
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def _epd_residual(coeffs, fluxes, xcc, ycc, bgv, bge):
    '''
    This is the residual function to minimize using scipy.optimize.leastsq.
    '''
    f = _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge)
    residual = fluxes - f
    return residual
def fit(self, x, y, dcoef='none'):
    '''
    performs the fit

    x, y : list
        Matching data arrays that define a numerical function y(x);
        this is the data to be fitted.
    dcoef : list or string
        You can provide a different guess for the coefficients, or
        provide the string 'none' to use the initial guess. The
        default is 'none'.

    Returns
    -------
    ierr
        Values between 1 and 4 signal success.

    Notes
    -----
    self.fcoef contains the fitted coefficients.

    '''
    self.x = x
    self.y = y
    if dcoef != 'none':
        coef = dcoef
    else:
        coef = self.coef

    fcoef = optimize.leastsq(self.residual, coef, args=(y, self.func, x))
    self.fcoef = fcoef[0].tolist()
    return fcoef[1]
def connect_edges(self):
    """Connect detected edges based on their slopes."""

    # Fitting a straight line to each edge.
    p0 = [0., 0.]
    radian2angle = 180. / np.pi
    for edge in self.edges:
        p1, s = leastsq(self.residuals, p0,
                        args=(edge['x'][:-1], edge['y'][:-1]))
        edge['slope'] = p1[0]
        edge['intercept'] = p1[1]
        edge['slope_angle'] = np.arctan(edge['slope']) * radian2angle

    # Connect by the slopes of two edges.
    len_edges = len(self.edges)
    for i in range(len_edges - 1):
        for j in range(i + 1, len_edges):
            if np.abs(self.edges[i]['slope_angle'] -
                      self.edges[j]['slope_angle']) <= \
                    self.connectivity_angle:
                # Then, the slope between the centers of the two edges
                # should be similar with the slopes of
                # the two lines of the edges as well.
                c_slope = (self.edges[i]['y_center'] -
                           self.edges[j]['y_center']) / \
                          (self.edges[i]['x_center'] -
                           self.edges[j]['x_center'])
                c_slope_angle = np.arctan(c_slope) * radian2angle

                if np.abs(c_slope_angle - self.edges[i]['slope_angle']) <= \
                        self.connectivity_angle and \
                        np.abs(c_slope_angle - self.edges[j]['slope_angle']) <= \
                        self.connectivity_angle:
                    self.edges[i]['connectivity'] = self.edges[j]['index']
                    break
def nbinomlog(p, hist, x, N):
    """ Residuals for maximum likelihood fit to nbinom distribution.
        Vary both M (shape param) and mu (count rate) (using leastsq)"""
    mu, M = p
    mu = abs(mu)
    M = abs(M)
    w = np.where(hist > 0.0)
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = 2*(Np - hist)
    err[w] = err[w] - 2*hist[w]*np.log(Np[w]/hist[w])  # note: sum(Np-hist)==0
    return np.sqrt(np.abs(err))
    #return err
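For orientation, a hedged usage sketch for nbinomlog() above: it mirrors the call pattern used by get_xsvs_fit() further down, but the synthetic histogram, the seed, and the initial guess are invented for illustration.

import numpy as np
import scipy.stats as st
from scipy.optimize import leastsq

# Synthetic photon-count histogram drawn from a negative binomial
# with count rate mu=5 and shape M=4 (N samples in total).
rng = np.random.default_rng(2)
mu_true, M_true, N = 5.0, 4.0, 10000
samples = rng.negative_binomial(M_true, M_true/(M_true + mu_true), N)
hist = np.bincount(samples)
x = np.arange(hist.size)

# Squaring these residuals reproduces the (approximate) deviance,
# so leastsq acts as a maximum-likelihood fitter here.
p_opt, ier = leastsq(nbinomlog, [mu_true * 1.5, 2.0], args=(hist, x, N))
print(np.abs(p_opt))  # roughly (5.0, 4.0)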
def nbinomlog1(p, hist, x, N, mu):
    """ Residuals for maximum likelihood fit to nbinom distribution.
        Vary M (shape param) but keep mu (count rate) fixed (using leastsq)"""
    M = abs(p[0])
    w = np.where(hist > 0.0)
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = 2*(Np - hist)
    err[w] = err[w] - 2*hist[w]*np.log(Np[w]/hist[w])  # note: sum(Np-hist)==0
    return np.sqrt(np.abs(err))
def nbinomlog1_notworknow(p, hist, x, N, mu):
    """ Residuals for maximum likelihood fit to nbinom distribution.
        Vary M (shape param) but keep mu (count rate) fixed (using leastsq)"""
    M = abs(p[0])
    w = np.where(hist > 0.0)
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = 2*(Np - hist)
    err[w] = err[w] - 2*hist[w]*np.log(Np[w]/hist[w])  # note: sum(Np-hist)==0
    #return np.sqrt(err)
    return err
def fit_sphere_form_factor_by_leastsq(p0, q, pq, fit_range=None):
    '''Developed by YG at July 28, 2017 @CHX
       Fit the form factor of polydisperse spherical particles using
       scipy.optimize.leastsq.
       Input: radius, sigma, delta_rho, background = p0
       Return: fit res, res[0] is the fitting parameters
    '''
    if fit_range is not None:
        x1, x2 = fit_range
        q1, q2 = find_index(q, x1), find_index(q, x2)
    else:
        q1, q2 = 0, len(q)  # fall back to the full q range
    res = leastsq(fit_sphere_form_factor_func, [p0],
                  args=(pq[q1:q2], q[q1:q2]),
                  ftol=1.49012e-38, xtol=1.49012e-38, factor=100,
                  full_output=1)
    return res
def nbinomres_old(p, hist, x, hist_err=None, N=1):
    '''residuals to leastsq() to fit normal chi-square'''
    mu, M = p
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = (hist - Np)/np.sqrt(Np)
    return err
def nbinomlog_old(p, hist, x, hist_err=None, N=1):
    """ Residuals for maximum likelihood fit to nbinom distribution.
        Vary both M (shape param) and mu (count rate) (using leastsq)"""
    mu, M = p
    mu = abs(mu)
    M = abs(M)
    w = np.where(hist > 0.0)
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = 2*(Np - hist)
    err = err[w] - 2*hist[w]*np.log(Np[w]/hist[w])  # note: sum(Np-hist)==0
    return np.sqrt(np.abs(err))
    #return err
def nbinomlog1_old(p, hist, x, hist_err=None, N=1, mu=1):
    """ Residuals for maximum likelihood fit to nbinom distribution.
        Vary M (shape param) but keep mu (count rate) fixed (using leastsq)"""
    M = abs(p[0])
    w = np.where(hist > 0.0)
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = 2*(Np - hist)
    err[w] = err[w] - 2*hist[w]*np.log(Np[w]/hist[w])  # note: sum(Np-hist)==0
    return np.sqrt(np.abs(err))
def nbinomlog(p, hist, x, N):
    """ Residuals for maximum likelihood fit to nbinom distribution.
        Vary both M (shape param) and mu (count rate) (using leastsq)"""
    mu, M = p
    mu = abs(mu)
    M = abs(M)
    w = np.where(hist > 0.0)
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = 2*(Np[w] - hist[w])
    err = err - 2*hist[w]*np.log(Np[w]/hist[w])  # note: sum(Np-hist)==0
    return np.sqrt(np.abs(err))
def fitgaussian(data):
    params = moments(data)
    errorfunction = lambda p: ravel(gaussian(*p)(*indices(data.shape)) - data)
    p, success = optimize.leastsq(errorfunction, params)
    return p

# read the stars from the ds9 region and compute their central peak position
# through Gaussian fitting
def fitgaussian(data):
    params = moments(data)
    errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) - data)
    p, success = optimize.leastsq(errorfunction, params)

    # Return the parameter list
    return p

# -----------------------------------------------------------------

# Returns a 2D Gaussian distribution from the given parameters.
def plot_fUV_sSFR(fig, fUV, sSFR, flag):
    fig.set_ylabel('UV heating [$\%$]', fontsize=18)
    fig.set_xlabel('$\log (\mathrm{sSFR}/\mathrm{yr}^{-1})$', fontsize=18)
    fig.scatter(x, y, c=z, s=10, cmap=plt.get_cmap('autumn'), edgecolor='')
    fig.set_xlim(-13.5, -8.5)
    fig.set_ylim(0, 100)

    # Power-law fitting is best done by first converting
    # to a linear equation and then fitting to a straight line.
    #
    #  y = a * x^b
    #  log(y) = log(a) + b*log(x)
    logx = np.log10(sSFR[flag == 0])
    logy = np.log10(100.*fUV[flag == 0])

    p1, success, infodict, mesg, ier = optimize.leastsq(
        PowerLawErrorFunction, [1, 1], args=(logx, logy), full_output=1)
    rms = np.sqrt(((infodict['fvec']**2).sum())/(len(logx)-1))
    print("power law best fit a*x^b, rms")
    print(p1, rms)

    xrange = fig.get_xlim()
    plotx = np.linspace(xrange[0], xrange[1], 100)
    ploty = 10.**PowerLaw(p1, plotx)
    fig.plot(plotx, ploty, 'k-', linewidth=2)
    #fig.plot(plotx, 72537.3507523*np.power(10**plotx, 0.30174404), 'r-', linewidth=2)
def peak_shape_fit(self):
    xs, ys = self.xs, self.ys
    params = self.shape_fitter.guess(xs, ys)
    fit = leastsq(self.shape_fitter.fit, params, (xs, ys))
    params = fit[0]
    self.params = params
    self.params_dict = FittedPeakShape(self.shape_fitter.params_to_dict(params),
                                       self.shape_fitter)
def set_up_peak_fit(self, ys=None, min_height=0, peak_count=0):
    xs = self.xs
    if ys is None:
        ys = self.ys

    params = self.shape_fitter.guess(xs, ys)
    params_dict = self.shape_fitter.params_to_dict(params)

    indices = peak_indices(ys, min_height)
    if len(indices) > 0:
        center = xs[max(indices, key=lambda x: ys[x])]
    else:
        center = xs[len(xs) // 2]
    params_dict['center'] = center

    fit = leastsq(self.shape_fitter.fit, list(params_dict.values()), (xs, ys))
    params = fit[0]
    params_dict = FittedPeakShape(self.shape_fitter.params_to_dict(params),
                                  self.shape_fitter)
    self.params_list.append(params)
    self.params_dict_list.append(params_dict)

    residuals = self.shape_fitter.fit(params, xs, ys)

    fitted_apex_index = search.get_nearest(xs, params_dict['center'], 0)
    fitted_apex = ys[fitted_apex_index]

    new_min_height = fitted_apex * 0.5
    if new_min_height < min_height:
        min_height *= 0.85
    else:
        min_height = new_min_height

    indices = peak_indices(residuals, min_height)
    peak_count += 1
    if indices.size and peak_count < self.max_peaks:
        residuals, params_dict = self.set_up_peak_fit(residuals, min_height,
                                                      peak_count=peak_count)
    return residuals, params_dict
def effectiveFisher(residual_func, *flat_grids):
    """
    Fit a quadratic to the ambiguity function tabulated on a grid.

    Inputs:
        - a pointer to a function to compute residuals, e.g.
          z(x1, ..., xN) - fit for N dimensions;
          this is called 'residualsNd'
        - N+1 flat arrays of length K: N arrays, one for each of the N
          parameters, plus 1 array of values of the overlap

    Returns:
        - flat array of upper-triangular elements of the effective
          Fisher matrix

    Example:
    x1s = [x1_1, ..., x1_K]
    x2s = [x2_1, ..., x2_K]
    ...
    xNs = [xN_1, ..., xN_K]
    gamma = effectiveFisher(residualsNd, x1s, x2s, ..., xNs, rhos)
    gamma
        [g_11, g_12, ..., g_1N, g_22, ..., g_2N, g_33, ..., g_3N, ..., g_NN]
    """
    x0 = np.ones(len(flat_grids) + 2)
    fitgamma = leastsq(residual_func, x0=x0, args=tuple(flat_grids))
    return fitgamma[0]

#### CHECK ####
def fit_circle(points, prior=None):
    '''
    Fit a circle (or n-sphere) to a set of points using least squares.

    Arguments
    ---------
    points: (n,d) set of points
    prior:  tuple of best guess for (center, radius)

    Returns
    ---------
    center: (d), location of center
    radius: float, mean radius across circle
    error:  float, peak to peak value of deviation from mean radius
    '''
    def residuals(center):
        radii_sq = ((points - center)**2).sum(axis=1)
        residuals = radii_sq - radii_sq.mean()
        return residuals

    if prior is None:
        center_guess = np.mean(points, axis=0)
    else:
        center_guess = prior[0]

    center_result, return_code = leastsq(residuals, center_guess)
    if not (return_code in [1, 2, 3, 4]):
        raise ValueError('Least square fit failed!')

    radii = np.linalg.norm(points - center_result, axis=1)
    radius = radii.mean()
    error = np.ptp(radii)  # the ndarray.ptp method was removed in NumPy 2.0
    return center_result, radius, error
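A quick usage sketch for fit_circle() above, assuming numpy (as np) and leastsq are already imported; the synthetic points, seed, and noise level are invented for illustration:

import numpy as np

# Points on a circle of radius 3 centered at (1, 2), with a little noise.
rng = np.random.default_rng(1)
t = np.linspace(0.0, 2.0 * np.pi, 100, endpoint=False)
points = np.column_stack((1.0 + 3.0 * np.cos(t),
                          2.0 + 3.0 * np.sin(t)))
points += 0.01 * rng.standard_normal(points.shape)

center, radius, error = fit_circle(points)
print(center, radius, error)  # center ~ (1, 2), radius ~ 3, small error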
def FitTimeSeries(self, pd_series, ct):
    '''
    Fits the amplitude and offset of an inflation event given the time
    and length of the event.

    Fits A and B in A * arctan( (t - t0) / c ) + B

    @param pd_series: Time series to be fit
    @param ct: [t0, c]

    @return Amplitude of fit
    '''
    fitfunc2 = lambda p, c, t: p[0]*np.arctan((t - c[0]) / c[1]) + p[1]
    errfunc2 = lambda p, c, x, y: fitfunc2(p, c, x) - y

    dLen = len(pd_series)
    # full_output=True is needed so that pcov is the covariance matrix
    # rather than the integer status flag leastsq returns by default.
    pA, pcov, infodict, errmsg, ier = optimize.leastsq(
        errfunc2, [1., 0.], args=(ct, np.arange(dLen), pd_series),
        full_output=True)

    # res = fitfunc2(pA,ct,np.arange(dLen))[-1]-fitfunc2(pA,ct,np.arange(dLen))[0]
    res = pA[0]*np.pi

    s_sq = (errfunc2(pA, ct, np.arange(dLen), pd_series)**2).sum()/(len(pd_series)-2)
    pcov = pcov * s_sq

    error = []
    for i in range(len(pA)):
        try:
            error.append(np.absolute(pcov[i][i])**0.5)
        except:
            error.append(0.00)
    perr_leastsq = np.array(error)

    return res, perr_leastsq
def FitTimeSeries(self, pd_series, ct):
    '''
    Fits the amplitude and offset of an inflation event given the time
    and length of the event.

    Fits A and B in A * arctan( (t - t0) / c ) + B

    @param pd_series: Time series to be fit
    @param ct: the time constants for the arctan

    @return res: Amplitude of the fit
    @return perr_leastsq: Error of the fit
    '''
    fitfunc2 = lambda p, c, t: p[0]*np.arctan((t - c[0]) / c[1]) + p[1]
    errfunc2 = lambda p, c, x, y: fitfunc2(p, c, x) - y

    dLen = len(pd_series)
    # full_output=True is needed so that pcov is the covariance matrix
    # rather than the integer status flag leastsq returns by default.
    pA, pcov, infodict, errmsg, ier = optimize.leastsq(
        errfunc2, [1., 0.], args=(ct, np.arange(dLen), pd_series),
        full_output=True)

    # res = fitfunc2(pA,ct,np.arange(dLen))[-1]-fitfunc2(pA,ct,np.arange(dLen))[0]
    res = pA[0]*np.pi

    s_sq = (errfunc2(pA, ct, np.arange(dLen), pd_series)**2).sum()/(len(pd_series)-2)
    pcov = pcov * s_sq

    error = []
    for i in range(len(pA)):
        try:
            error.append(np.absolute(pcov[i][i])**0.5)
        except:
            error.append(0.00)
    perr_leastsq = np.array(error)

    return res, perr_leastsq
def getTrend(xdata):
    '''
    The getTrend function applies the signal.detrend function.
    Returns the trend, given a time index input.

    @param xdata: 1D time-series data in a pandas series format

    @return the detrended data in pandas series format
    @return the linear trend assuming a 1 day per sample time fit
    @return the parameters for the linear trend
    '''
    # returns detrended data and the trend

    # checks the time frequency to create a time vector
    if xdata.index.freq != pd.tseries.offsets.Day():
        print('Frequency of Sampling is not Day, please check that resampling occurred!')

    timeDat = np.arange(0, len(xdata.index))
    dataDat = xdata

    # remove time and data with NaN's for now
    ninds = np.array(pd.isnull(dataDat) == True)
    argtd = (timeDat[ninds == False], dataDat[ninds == False])

    fitfunc = lambda p, x: p[0]*x + p[1]         # Target function (linear)
    errfunc = lambda p, x, y: fitfunc(p, x) - y  # Distance to the target function
    pAnnual = [1., 5.]                           # Initial guess for the parameters
    pA, success = optimize.leastsq(errfunc, pAnnual[:], args=argtd)
    trend = fitfunc(pA, timeDat)

    # puts the data back into the pandas series format to recover the time index
    xdetr = pd.core.series.Series(data=dataDat-trend, index=xdata.index)

    return xdetr, trend, pA
def guass(self, max_x=None, min_x=0.0):
    """Run a least squares fit to guess the line.

    Only x values in (min_x, max_x] will be used as input points.

    min_x: minimum x value; points below min_x will be abandoned.
           Default 0.0. Assign a value when you want to omit the left
           part of the points.
    max_x: for omitting the right part of the points.
    """
    def _error(w, x, y):
        return (w[0] + w[1] * x) - y

    arr_x = self.points[0]
    arr_y = self.points[1]
    _arr_x = []
    _arr_y = []
    if max_x is not None:
        for i, x in enumerate(arr_x):
            if min_x < x <= max_x:
                _arr_x.append(x)
                _arr_y.append(arr_y[i])
    else:
        for i, x in enumerate(arr_x):
            if min_x < x:
                _arr_x.append(x)
                _arr_y.append(arr_y[i])

    x = np.log(_arr_x)
    y = np.log(_arr_y)
    log.debug("least x: [%s, ...]" % str(x[:4])[1:-1])
    log.debug("least y: [%s, ...]" % str(y[:4])[1:-1])

    result = leastsq(_error, [1, 1], args=(x, y))
    self.a = np.power(math.e, result[0][0])
    self.b = result[0][1]
    log.debug("least a: %f" % self.a)
    log.debug("least b: %f" % self.b)
    self.line_ready = True
def plot_scaling(counter_object, log_bins):
    '''Normalize contact probabilities and calculate the slope between
    bins 6 and 9.'''
    fitfunc = lambda p, x: p[0] + p[1] * x
    errfunc = lambda p, x, y: (y - fitfunc(p, x))
    pinit = [1.0, -1.0]

    x = []
    y = []
    for i in range(len(log_bins) - 1):
        x.append(log_bins[i])
        value = float(counter_object[log_bins[i]]) / \
            float(abs(log_bins[i] - log_bins[i + 1]))
        y.append(value)

    area = 0
    for i in range(len(x)):
        area += y[i]
    scaling_factor = float(1) / float(area)

    newx = np.array(x[:-1])
    newy = np.array(y[:-1])
    logx = newx
    logy = newy
    for i in range(len(newx)):
        logx[i] = log10(newx[i])
        logy[i] = log10(scaling_factor * newy[i])

    out = optimize.leastsq(errfunc, pinit, args=(logx[6:10], logy[6:10]),
                           full_output=1)
    pfinal = out[0]
    index = pfinal[1]
    amp = np.exp(pfinal[0])

    logy_fit = []
    for i in logx:
        new = pfinal[0] + pfinal[1] * i
        logy_fit.append(new)

    return (index, logx, logy)
def adasurf(points, config, initial_points=None):
    current_fun = 0

    def residuals(params, x, y, z, regularization=0.0):
        return config.residuals(params, x, y, z, current_fun, regularization)

    def MSE(params, points):
        e = config.residuals(params, points[:, 0], points[:, 1], points[:, 2],
                             fitting_fun=current_fun)
        return np.sqrt(np.dot(e.T, e)/len(e))

    x1 = points[:, 0]
    y1 = points[:, 1]
    z1 = points[:, 2]

    r = leastsq(residuals, [1.0, 1.0, 1.0, 1.0], args=(x1, y1, z1))
    if r[0][0] > 100 or r[0][1] > 100:  # overfit
        # print "OVER FIT SWITCH "
        # print "before", r[0]
        current_fun = 1
        r = leastsq(residuals, [1.0, 1.0, 1.0, 1.0], args=(x1, y1, z1))
        if r[0][0] > 100 or r[0][1] > 100:
            current_fun = 2
            r = leastsq(residuals, [1.0, 1.0, 1.0, 1.0], args=(x1, y1, z1))
        # print "after", r[0]

    return Surface(args=r[0], residuals=MSE(r[0], points), points=points,
                   initial_points=initial_points, current_fun=current_fun)
def error_curvefit(self, t, p):
    params = self.unpack_params(p)
    result = self.forward(t, params)
    return result

# Estimate the uncertainty in the inverted parameters, based
# on the Jacobian matrix provided by the leastsq function
def inv_leastsq(self):
    # Prepare a vector with our initial guess
    x0 = self.pack_params()

    # OLS function
    popt, pcov, infodict, errmsg, success = leastsq(self.error, x0,
                                                    full_output=True)

    # Return best-fit parameters and Jacobian matrix
    return popt, pcov

# Perform non-linear least-squares
def fit_sinusoid(value, angle, f, p0=[0.5, 0.25, 0.25]):
    """Fit a periodic function of known frequency, f, to the value and
    angle data. value = Func(angle, f). NOTE: Because the fitting
    function is sinusoidal instead of square, contrast values larger
    than unity are clipped back to unity.

    parameters
    ----------
    value : NxM ndarray
        The value of the function at N angles and M radii
    angle : Nx1 ndarray
        The N angles at which the function was sampled
    f : scalar
        The expected angular frequency; the number of black/white pairs
        in the siemens star, i.e. half the number of spokes
    p0 : list, optional
        The initial guesses for the parameters.

    returns:
    --------
    MTFR: 1xM ndarray
        The modulation part of the MTF at each of the M radii
    """
    M = value.shape[1]

    # Distance to the target function
    def errorfunc(p, x, y):
        return periodic_function(p, x) - y

    time = np.linspace(0, 2*np.pi, 100)

    MTFR = np.ndarray((1, M))
    x = (f*angle).squeeze()
    for radius in range(0, M):
        p1, success = optimize.leastsq(errorfunc, p0[:],
                                       args=(x, value[:, radius]))

        # print(success)
        # plt.figure()
        # plt.plot(angle, value[:, radius], "ro",
        #          time, periodic_function(p1, f*time), "r-")

        MTFR[:, radius] = np.sqrt(p1[1]**2 + p1[2]**2)/p1[0]

    # cap the MTF at unity
    MTFR[MTFR > 1.] = 1.
    assert(not np.any(MTFR < 0)), MTFR
    assert(MTFR.shape == (1, M)), MTFR.shape
    return MTFR
def nbinomres(p, hist, x, N):
    '''residuals to leastsq() to fit normal chi-square'''
    mu, M = p
    Np = N * st.nbinom.pmf(x, M, 1.0/(1.0 + mu/M))
    err = (hist - Np)/np.sqrt(Np)
    return err
def get_xsvs_fit(spe_cts_all, K_mean, varyK=True, max_bins=None,
                 qth=None, g2=None, times=None, taus=None):
    '''
    Fit the xsvs by Negative Binomial Function using max-likelihood
    chi-squares
    '''
    max_cts = spe_cts_all[0][0].shape[0] - 1
    num_times, num_rings = spe_cts_all.shape
    if max_bins is not None:
        num_times = min(num_times, max_bins)

    bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges(
        num_times, num_rings, K_mean, int(max_cts+2))

    if g2 is not None:
        g2c = g2.copy()
        g2c[0] = g2[1]
    ML_val = {}
    KL_val = {}
    K_ = []
    if qth is not None:
        range_ = range(qth, qth+1)
    else:
        range_ = range(num_rings)
    for i in range_:
        N = 1
        ML_val[i] = []
        KL_val[i] = []
        if g2 is not None:
            mi_g2 = 1/(g2c[:, i] - 1)
            m_ = np.interp(times, taus, mi_g2)
        for j in range(num_times):
            x_, x, y = (bin_edges[j, i][:-1], Knorm_bin_edges[j, i][:-1],
                        spe_cts_all[j, i])
            if g2 is not None:
                m0 = m_[j]
            else:
                m0 = 10
            # resultL = minimize(nbinom_lnlike, [K_mean[i] * 2**j, m0], args=(x_, y))
            # the normal leastsq
            # result_n = leastsq(nbinomres, [K_mean[i] * 2**j, m0], args=(y, x_, N), full_output=1)
            if not varyK:  # don't vary K
                resultL = leastsq(nbinomlog1, [m0],
                                  args=(y, x_, N, K_mean[i] * 2**j),
                                  ftol=1.49012e-38, xtol=1.49012e-38,
                                  factor=100, full_output=1)
                ML_val[i].append(abs(resultL[0][0]))
                KL_val[i].append(K_mean[i] * 2**j)  # resultL[0][0])
            else:  # vary M and K
                resultL = leastsq(nbinomlog, [K_mean[i] * 2**j, m0],
                                  args=(y, x_, N),
                                  ftol=1.49012e-38, xtol=1.49012e-38,
                                  factor=100, full_output=1)
                ML_val[i].append(abs(resultL[0][1]))
                KL_val[i].append(abs(resultL[0][0]))  # resultL[0][0])
                # print(j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j)
            if j == 0:
                K_.append(KL_val[i][0])
    return ML_val, KL_val, np.array(K_)
def optimize_eigvals(diff_matrix, x_points, dc_init, exp_norm_profiles,
                     display_result=True, labels=None):
    """
    Fit the diffusion matrix

    Parameters
    ----------
    diff_matrix : tuple
        tuple of (eigenvalues, eigenvectors) in reduced basis (dim n-1)
    x_points : 1-D array_like
        spatial coordinates
    dc_init : array
        concentration difference between endmembers
    exp_norm_profiles : list of arrays
        profiles to be fitted, of length the nb of experiments, with n
        profiles for each experiment. Profiles are normalized, that is,
        an estimate of the mean concentration should be subtracted.
    """
    n_comp = len(dc_init[0]) - 1
    n_exp = len(x_points)

    def cost_function(coeffs, p_matrix, x_points, dc_init, exp_norm_profiles):
        n_comp = len(dc_init[0]) - 1
        diag = coeffs[:n_comp]
        n_exp = len(x_points)
        adjust_cmeans = coeffs[n_comp: n_comp +
                               (n_comp) * n_exp].reshape((n_exp, n_comp))
        adjust_dc = coeffs[n_comp + (n_comp) * n_exp:
                           n_comp + 2 * (n_comp) * n_exp].reshape((n_exp,
                                                                   n_comp))
        errors = np.array([])
        for i in range(n_exp):
            dc_corr = np.copy(dc_init[i])
            dc_corr[:-1] -= adjust_dc[i]
            profile_corr = np.copy(exp_norm_profiles[i])
            profile_corr[:-1, :] -= adjust_cmeans[i][:, None]
            error = evolve_profile((diag, p_matrix), x_points[i], dc_corr,
                                   profile_corr, plot=False)
            errors = np.concatenate((errors, error))
        return errors

    diag, P = diff_matrix
    coeffs = np.concatenate((diag, np.zeros(2 * n_exp * n_comp)))
    res = optimize.leastsq(cost_function, coeffs,
                           args=(P, x_points, dc_init, exp_norm_profiles),
                           ftol=1.e-15, full_output=True, factor=10)[0]
    diags, shifts = res[:n_comp], \
        res[n_comp:].reshape((2, n_exp, n_comp))
    if display_result:
        for i in range(n_exp):
            dc_corr = np.copy(dc_init[i])
            dc_corr[:-1] -= shifts[1, i]
            prof_corr = np.copy(exp_norm_profiles[i])
            prof_corr[:-1] -= shifts[0, i][:, None]
            _ = evolve_profile((diags, P), x_points[i], dc_corr,
                               exp_norm_profiles=prof_corr, labels=labels)
    return diags, shifts
def optimize_profile_multi_temp(diff_matrix, x_points, dc_init,
                                exp_norm_profiles, display_result=True,
                                labels=None):
    """
    Fit the diffusion matrix

    Parameters
    ----------
    diff_matrix : tuple
        tuple of (eigenvalues, eigenvectors) in reduced basis (dim n-1)
    x_points : 1-D array_like
        spatial coordinates
    dc_init : array
        concentration difference between endmembers
    exp_norm_profiles : list of arrays
        profiles to be fitted, of length the nb of experiments, with n
        profiles for each experiment. Profiles are normalized, that is,
        an estimate of the mean concentration should be subtracted.
    """
    n_comp = len(dc_init[0][0]) - 1
    n_temp = len(x_points)
    n_eigvals = n_temp * n_comp
    n_exp = np.array([len(x) for x in x_points])

    def cost_function(coeffs, x_points, dc_init, exp_norm_profiles):
        diag = coeffs[:n_eigvals].reshape((n_temp, n_comp))
        P = np.matrix(coeffs[n_eigvals: n_eigvals +
                             n_comp**2].reshape((n_comp, n_comp)))
        errors = np.array([])
        for i in range(n_temp):
            for j in range(n_exp[i]):
                profile_corr = np.copy(exp_norm_profiles[i][j])
                error = evolve_profile((diag[i], P), x_points[i][j],
                                       dc_init[i][j], profile_corr,
                                       plot=False)
                errors = np.concatenate((errors, error))
        return errors

    diag, P = diff_matrix
    coeffs = np.concatenate((diag, np.array(P).ravel(),
                             np.zeros(2 * n_exp.sum() * n_comp)))
    res = optimize.leastsq(cost_function, coeffs,
                           args=(x_points, dc_init, exp_norm_profiles),
                           ftol=1.e-15, full_output=True, factor=10)[0]
    diags, eigvecs = res[:n_eigvals].reshape((n_temp, n_comp)), \
        res[n_eigvals: n_eigvals + n_comp**2].reshape((n_comp, n_comp))
    if display_result:
        for i in range(n_temp):
            for j in range(n_exp[i]):
                _ = evolve_profile((diags[i], eigvecs), x_points[i][j],
                                   dc_init[i][j],
                                   exp_norm_profiles=exp_norm_profiles[i][j],
                                   labels=labels)
    return diags, eigvecs
def run(self):
    def residuals(p, x, y):
        A, k, theta = p
        x = x
        y = y
        return numexpr.evaluate('y - A * sin(2 * pi * k * x + theta)')

    print(self.name, "* Started measurements")
    a = INITIAL_SIGNAL_AMPLITUDE
    b = 50
    c = 0
    lastMeasurmentTime = 0
    x = np.linspace(0, 1, num=RATE*MEASUREMENT_TIMEFRAME, endpoint=False)
    y = self.buffer.get(RATE*MEASUREMENT_TIMEFRAME)
    plsq = leastsq(residuals, np.array([a, b, c]), args=(x, y))
    a = plsq[0][0]
    b = plsq[0][1]
    c = plsq[0][2]

    nrMeasurments = 0
    TIME_OFFSET = time.time()
    while (not self.stopSignal.is_set()):
        analyze_start = time.time()
        if (nrMeasurments > 200):
            nrMeasurments = 0
            TIME_OFFSET = analyze_start
        x = np.linspace(analyze_start-TIME_OFFSET-1,
                        analyze_start-TIME_OFFSET,
                        num=RATE*MEASUREMENT_TIMEFRAME, endpoint=False)
        y = self.buffer.get(RATE*MEASUREMENT_TIMEFRAME)
        plsq = leastsq(residuals, np.array([a, b, c]), args=(x, y))

        if plsq[0][1] < SANITY_LOWER_BOUND or plsq[0][1] > SANITY_UPPER_BOUND:
            printToConsole(str(plsq[0][1]) + " looks fishy, trying again.", 5)
            plsq = leastsq(residuals,
                           np.array([INITIAL_SIGNAL_AMPLITUDE, 50, 0]),
                           args=(x, y))
            if plsq[0][1] < SANITY_LOWER_BOUND or plsq[0][1] > SANITY_UPPER_BOUND:
                printToConsole("Now got " + str(plsq[0][1]) +
                               ". Buffer data is corrupt, need new data", 5)
                time.sleep(MEASUREMENT_TIMEFRAME)
                printToConsole("Back up, continue measurements", 5)
        else:
            frqChange = np.abs(plsq[0][1] - b)
            frqChangeTime = time.time() - lastMeasurmentTime
            # plt.plot(x, y, x, plsq[0][0] * sin(2 * pi * plsq[0][1] * x + plsq[0][2]))
            # plt.show()
            if frqChange/frqChangeTime < SANITY_MAX_FREQUENCYCHANGE:
                a = plsq[0][0]
                b = plsq[0][1]
                c = plsq[0][2]
                lastMeasurmentTime = time.time()
                log.store(b, lastMeasurmentTime-analyze_start)
            else:
                printToConsole("Frequency change too big: " + str(frqChange) +
                               ", " + str(frqChangeTime) + ", " +
                               str(frqChange / frqChangeTime) +
                               ". Buffer is probably corrupt", 5)
                time.sleep(MEASUREMENT_TIMEFRAME)
        nrMeasurments += 1
def runtime_regression(daily_runtime, daily_demand, method):
    """ Least squares regression of runtime against a measure of demand.

    Parameters
    ----------
    daily_runtime : pd.Series with pd.DatetimeIndex
        Runtimes for a particular heating or cooling season.
    daily_demand : pd.Series with pd.DatetimeIndex
        A daily demand measure for each day in the heating or cooling
        season.

    Returns
    -------
    slope : float
        The slope parameter found by the regression to minimize sq error
    intercept : float
        The intercept parameter found by the regression to minimize sq
        error
    mean_sq_err : float
        The mean squared error of the regression.
    root_mean_sq_err : float
        The root mean squared error of the regression.
    """
    # drop NA
    df = pd.DataFrame({"x": daily_demand, "y": daily_runtime}).dropna()
    x, y = df.x, df.y

    if method == "cooling":
        def model(params):
            alpha, tau = params
            return y - (alpha * (x + tau))
    else:
        def model(params):
            alpha, tau = params
            return y - (alpha * (x - tau))

    x0 = (1, 0)
    try:
        results = leastsq(model, x0)
    except TypeError:
        # too few data points causes the following TypeError
        # TypeError: Improper input: N=2 must not exceed M=1
        return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan

    params = results[0]
    alpha, tau = params

    mse = np.nanmean(model(params)**2)
    rmse = mse ** 0.5
    cvrmse = rmse / np.nanmean(y)
    mape = np.nanmean(np.absolute(model(params) / y))
    mae = np.nanmean(np.absolute(model(params)))

    return alpha, tau, mse, rmse, cvrmse, mape, mae
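A hedged usage sketch for runtime_regression() above; the ten-day series is fabricated so that the "cooling" model y = alpha * (x + tau) is recovered almost exactly (alpha = 2.5, tau = 1.0):

import numpy as np
import pandas as pd

# Ten days of synthetic demand and runtime with slope 2.5 and tau 1.0.
index = pd.date_range("2017-01-01", periods=10, freq="D")
demand = pd.Series(np.linspace(1.0, 10.0, 10), index=index)
runtime = 2.5 * (demand + 1.0)

alpha, tau, mse, rmse, cvrmse, mape, mae = runtime_regression(
    runtime, demand, "cooling")
print(alpha, tau)  # roughly 2.5 and 1.0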
def solveCSE(en, rot, mu, R, VT):
    n, m, oo = VT.shape

    V = np.transpose(VT)

    # find channels that are open, as defined by E' > 0
    edash = en - np.diag(VT[:, :, -1])
    openchann = edash > 0
    nopen = edash[openchann].size

    mx = matching_point(en, rot, V, R, mu)

    if mx < oo-5:
        out = leastsq(eigen, (en, ), args=(rot, mx, V, R, mu), xtol=0.01)
        en = float(out[0])

    # solve CSE according to Johnson renormalized Numerov method
    WI = WImat(en, rot, V, R, mu)
    RI = RImat(WI, mx)
    wf = []
    if nopen > 0:
        oc = 0
        for j, ed in enumerate(edash):
            if ed > 0:
                f = fmat(j, RI, WI, mx)
                wf.append(wavefunction(WI, oc, f))
                oc += 1
    else:
        f = fmat(0, RI, WI, mx)
        wf.append(wavefunction(WI, nopen, f))

    wf = np.array(wf)
    wf = np.transpose(wf)

    if nopen == 0:
        wf = normalize(wf, R)
    else:
        K, AI, B = amplitude(wf, R, edash, mu)  # shape = nopen x nopen

        # K = B A^-1 = U tan(xi) UT
        eig, U = linalg.eig(K)

        # form A^-1 U cos(xi) exp(i xi) UT
        I = np.identity(nopen, dtype=complex)
        xi = np.arctan(eig)*I
        cosxi = np.cos(xi)*I
        expxi = np.exp((0+1j)*xi)*I

        expxiUT = expxi @ np.transpose(U)
        cosxiexpxiUT = cosxi @ expxiUT
        UcosxiexpxiUT = U @ cosxiexpxiUT
        Norm = AI @ UcosxiexpxiUT  # == (cu, su) complex

        # complex wavefunction array  oo x n x nopen
        wf = wf @ Norm

    return wf, en