The following 48 code examples, extracted from open-source Python projects, illustrate how to use numpy.polyval().
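Before the project examples, a minimal sketch of the call convention may help: np.polyval takes coefficients ordered from the highest degree down to the constant term, the same order that np.polyfit returns (several examples below reverse their coefficient arrays for exactly this reason).

import numpy as np

# p represents 3*x**2 + 2*x + 1; coefficients run from the highest
# degree down to the constant term.
p = [3, 2, 1]
print(np.polyval(p, 2))  # 17

# np.polyval also accepts an array of evaluation points, and pairs
# naturally with np.polyfit, which returns coefficients in the same order.
x = np.linspace(0.0, 1.0, 5)
coeffs = np.polyfit(x, x**2, deg=2)
print(np.polyval(coeffs, x))  # recovers x**2 at the sample points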
def calibrate(self):
    # generate sequence
    self.set()
    # run and load normalized data
    data, _ = self.run(norm_pts={self.qubit_names[0]: (0, 1),
                                 self.qubit_names[1]: (0, 2)})
    # select target qubit
    data_t = data[self.qubit_names[1]]
    # fit
    self.opt_par, all_params_0, all_params_1 = fit_CR(
        [self.lengths, self.phases, self.amps], data_t, self.cal_type)
    # plot the result
    xaxis = self.lengths if self.cal_type == CR_cal_type.LENGTH else \
        self.phases if self.cal_type == CR_cal_type.PHASE else self.amps
    finer_xaxis = np.linspace(np.min(xaxis), np.max(xaxis), 4*len(xaxis))
    self.plot["Data 0"] = (xaxis, data_t[:len(data_t)//2])
    self.plot["Fit 0"] = (finer_xaxis,
                          np.polyval(all_params_0, finer_xaxis)
                          if self.cal_type == CR_cal_type.AMP
                          else sinf(finer_xaxis, *all_params_0))
    self.plot["Data 1"] = (xaxis, data_t[len(data_t)//2:])
    self.plot["Fit 1"] = (finer_xaxis,
                          np.polyval(all_params_1, finer_xaxis)
                          if self.cal_type == CR_cal_type.AMP
                          else sinf(finer_xaxis, *all_params_1))
    return (str.lower(self.cal_type.name), self.opt_par)
def band_center(spectrum, low_endmember=None, high_endmember=None, degree=3):
    x = spectrum.index
    y = spectrum

    if not low_endmember:
        low_endmember = x[0]
    if not high_endmember:
        high_endmember = x[-1]

    ny = y[low_endmember:high_endmember]

    fit = np.polyfit(ny.index, ny, degree)
    center_fit = Series(np.polyval(fit, ny.index), ny.index)
    center = band_minima(center_fit)

    return center, center_fit
def plot(self, dataset, path, show=False):
    with PdfPages(path) as pdf:
        x_vals = dataset.data['T'].tolist()
        y_vals = dataset.data[self.symbol].tolist()
        plt.plot(x_vals, y_vals, 'ro', alpha=0.4, markersize=4)

        x_vals2 = np.linspace(min(x_vals), max(x_vals), 80)
        fx = np.polyval(self._coeffs, x_vals2)
        plt.plot(x_vals2, fx, linewidth=0.3, label='')

        plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4))
        plt.legend(loc=3, bbox_to_anchor=(0, 0.8))
        plt.title('$%s$ vs $T$' % self.display_symbol)
        plt.xlabel('$T$ (K)')
        plt.ylabel('$%s$ (%s)' % (self.display_symbol, self.units))

        fig = plt.gcf()
        pdf.savefig(fig)
        plt.close()

    if show:
        webbrowser.open_new(path)
def fit_cubic(y0, y1, g0, g1):
    """Fit cubic polynomial to function values and derivatives at x = 0, 1.

    Returns position and function value of the minimum if the fit succeeds.
    The fit does not succeed if

    1. the polynomial doesn't have real extrema, or
    2. the maximum lies in (0, 1) and is closer to 0.5 than the minimum.
    """
    a = 2*(y0-y1) + g0 + g1
    b = -3*(y0-y1) - 2*g0 - g1
    p = np.array([a, b, g0, y0])
    r = np.roots(np.polyder(p))
    if not np.isreal(r).all():
        return None, None
    r = sorted(x.real for x in r)
    if p[0] > 0:
        maxim, minim = r
    else:
        minim, maxim = r
    if 0 < maxim < 1 and abs(minim-0.5) > abs(maxim-0.5):
        return None, None
    return minim, np.polyval(p, minim)
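A quick sanity check of fit_cubic (a hypothetical call, assuming numpy is imported as np as the function requires): feeding it the values and derivatives of the known cubic f(x) = x**3 - 0.75*x, whose minimum lies at x = 0.5 with f(0.5) = -0.25, should recover that minimum.

# f(x) = x**3 - 0.75*x gives f(0) = 0, f(1) = 0.25,
# f'(0) = -0.75, f'(1) = 2.25; its maximum at x = -0.5 lies
# outside (0, 1), so the fit succeeds.
minim, minval = fit_cubic(y0=0.0, y1=0.25, g0=-0.75, g1=2.25)
print(minim, minval)  # 0.5 -0.25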
def __call__(self, dispersion, *parameters):
    """
    Generate data at the dispersion points, given the parameters.

    :param dispersion:
        An array of dispersion points to calculate the data for.

    :param parameters:
        Keyword arguments of the model parameters and their values.
    """
    function, profile_parameters = self._profiles[self.metadata["profile"]]

    N = len(profile_parameters)
    y = 1.0 - function(dispersion, *parameters[:N])

    # Assume rest of the parameters are continuum coefficients.
    if parameters[N:]:
        y *= np.polyval(parameters[N:], dispersion)

    return y
def meyeraux(x):
    """Meyer wavelet auxiliary function.

    v(x) = 35*x^4 - 84*x^5 + 70*x^6 - 20*x^7

    Parameters
    ----------
    x : array
        grid points

    Returns
    -------
    y : array
        values at x
    """
    # Auxiliary function values.
    y = np.polyval([-20, 70, -84, 35, 0, 0, 0, 0], x) * (x >= 0) * (x <= 1)
    y += (x > 1)
    return y
def get_bias_coeff_from_T(self, master_bias_temp, master_bias_level,
                          frame_temp, calibrated_params):
    """
    :param master_bias_temp: Temperature of the master bias frame.
    :param master_bias_level: Median of the master bias frame.
    :param frame_temp: Temperature of the frame to correct.
    :param calibrated_params: Parameters [a, b] of the function
        bias_level(T) = a*T + b, where T is in degrees C and bias_level(T)
        is the median of the bias frame at the given temperature.
    """
    calib_master_bias_level = np.polyval(calibrated_params, master_bias_temp)
    calib_frame_bias_level = np.polyval(calibrated_params, frame_temp)
    return calib_frame_bias_level / calib_master_bias_level
def csalbr(tau):
    # Previously 3 functions: csalbr, fintexp1, fintexp3.
    a = [-.57721566, 0.99999193, -0.24991055, 0.05519968, -0.00976004,
         0.00107857]
    # Series evaluation, equivalent to
    #   xx = a[0] + a[1]*tau + a[2]*tau**2 + ... + a[5]*tau**5;
    # a[::-1] because np.polyval expects coefficients from the highest
    # degree down.
    fintexp1 = np.polyval(a[::-1], tau) - np.log(tau)
    fintexp3 = (np.exp(-tau) * (1.0 - tau) + tau**2 * fintexp1) / 2.0
    return (3.0 * tau - fintexp3 * (4.0 + 2.0 * tau)
            + 2.0 * np.exp(-tau)) / (4.0 + 3.0 * tau)

# From crefl.1.7.1
def fit_dimensions(self, data, fit_TTmax=True):
    """If fit_TTmax is True, use the blade length profile to adjust dHS_max."""
    if fit_TTmax and 'L_blade' in data.columns:
        dat = data.dropna(subset=['L_blade'])
        xn = self.xn(dat['rank'])
        fit = numpy.polyfit(xn, dat['L_blade'], 7)
        x = numpy.linspace(min(xn), max(xn), 500)
        y = numpy.polyval(fit, x)
        self.xnmax = x[numpy.argmax(y)]
        self.TTmax = self.TTxn(self.xnmax)
    self.scale = {k: numpy.mean(data[k] / self.predict(k, data['rank'], data['nff']))
                  for k in data.columns if k in self.ref.columns}
    return self.scale
def __init__(self, points, values, *args, **kwds):
    """
    Parameters
    ----------
    points : nd array (npoints, ndim)
    values : 1d array (npoints,)
    **kwds : keywords to [avg]polyfit()
    """
    self.points = self._fix_shape_init(points)
    assert self.points.ndim == 2, "points is not 2d array"
    self.values = values
    if self._has_keys(kwds, ['degrange', 'degmin', 'degmax', 'levels']):
        self.fitfunc = avgpolyfit
        self.evalfunc = avgpolyval
    else:
        self.fitfunc = polyfit
        self.evalfunc = polyval
    self.fit = self.fitfunc(self.points, self.values, *args, **kwds)
def vp2dp(vp, p=[], temp=[], enhance=False):
    """
    Convert a volume mixing ratio to a dew point (and vapour pressure),
    using the ITS-90 correction of Wexler's formula, with optional
    enhancement factors for non-ideal behaviour.
    """
    vp = np.atleast_1d(vp)
    c = np.array([-9.2288067e-06, 0.46778925, -20.156028, 207.98233], dtype='f8')
    d = np.array([-7.5172865e-05, 0.0056577518, -0.13319669, 1], dtype='f8')
    lnes = np.log(vp*1e2)
    dp = np.polyval(c, lnes)/np.polyval(d, lnes)
    if enhance and len(p) > 0:
        if len(temp) == 0:
            temp = dp
        A = np.array([8.5813609e-09, -6.7703064e-06, 0.001807157, -0.16302041], dtype='f8')
        B = np.array([6.3405286e-07, -0.00077326396, 0.34378043, -59.890467], dtype='f8')
        alpha = np.polyval(A, temp)
        beta = np.exp(np.polyval(B, temp))
        ef = np.exp(alpha*(1 - vp/p) + beta*(p/vp - 1))
        vp = vp/ef
        lnes = np.log(vp*1e2)
        dp = np.polyval(c, lnes)/np.polyval(d, lnes)
    return dp
def eval_trace_poly(self, use_poly=False, smoothing_length=25):
    sel = self.trace_x != self.flag
    if use_poly:
        self.trace = np.polyval(self.trace_polyvals,
                                1. * np.arange(self.D) / self.D)
    else:
        self.trace = np.zeros((self.D,))
        init_x = np.where(sel)[0][0]
        fin_x = np.where(sel)[0][-1]
        self.trace[init_x:fin_x] = np.interp(np.arange(init_x, fin_x),
                                             self.trace_x[sel],
                                             self.trace_y[sel])
        self.trace[init_x:fin_x] = biweight_filter(self.trace[init_x:fin_x],
                                                   smoothing_length)
        # extrapolate linearly beyond the smoothed region on each end
        ix = int(init_x + smoothing_length/2 + 1)
        fx = int(init_x + smoothing_length/2 + 1 + smoothing_length*2)
        p1 = np.polyfit(np.arange(ix, fx), self.trace[ix:fx], 1)
        self.trace[:ix] = np.polyval(p1, np.arange(ix))
        ix = int(fin_x - smoothing_length/2 - 1 - smoothing_length*2)
        fx = int(fin_x - smoothing_length/2)
        pf = np.polyfit(np.arange(ix, fx), self.trace[ix:fx], 1)
        self.trace[fx:self.D] = np.polyval(pf, np.arange(fx, self.D))
def calculate_wavelength_new(x, solar_spec, fibers, fibn, group,
                             smooth_length=21, init_lims=None, order=3,
                             init_sol=None, debug=False, interactive=False,
                             nbins=21, wavebuff=100, plotbuff=85,
                             fixscale=True, use_leastsq=False, res=1.9):
    L = len(x)
    if init_lims is None:
        init_lims = [np.min(solar_spec[:, 0]), np.max(solar_spec[:, 0])]
    if init_sol is not None:
        init_wave_sol = np.polyval(init_sol, 1. * x / L)
    y_sun = solar_spec[:, 1]
    lowfib = np.max([0, fibn-group])
    highfib = np.min([len(fibers)-1, fibn+group])
    y = np.array([biweight_filter(fibers[i].spectrum, smooth_length)
                  / fibers[i].spectrum for i in xrange(lowfib, highfib)])
    y = biweight_location(y, axis=(0,))
    bins = np.linspace(init_lims[0], init_lims[1], nbins)
    bins = bins[1:-1]
    scale = 1. * (init_lims[1] - init_lims[0]) / L
    wv0 = init_lims[0]
    wave0_save = []
    scale_save = []
    x_save = []
    wave_save = []
def calc_omega(cp):
    a = []
    for i in range(len(cp)):
        ptmp = []
        tmp = 0
        for j in range(len(cp)):
            if j != i:
                row = []
                row.insert(0, 1/(cp[i]-cp[j]))
                row.insert(1, -cp[j]/(cp[i]-cp[j]))
                ptmp.insert(tmp, row)
                tmp += 1
        p = [1]
        for j in range(len(cp)-1):
            p = conv(p, ptmp[j])
        pint = numpy.polyint(p)
        arow = []
        for j in range(len(cp)):
            arow.append(numpy.polyval(pint, cp[j]))
        a.append(arow)
    return a
def calc_adot(cp, order=1):
    a = []
    for i in range(len(cp)):
        ptmp = []
        tmp = 0
        for j in range(len(cp)):
            if j != i:
                row = []
                row.insert(0, 1/(cp[i]-cp[j]))
                row.insert(1, -cp[j]/(cp[i]-cp[j]))
                ptmp.insert(tmp, row)
                tmp += 1
        p = [1]
        for j in range(len(cp)-1):
            p = conv(p, ptmp[j])
        pder = numpy.polyder(p, order)
        arow = []
        for j in range(len(cp)):
            arow.append(numpy.polyval(pder, cp[j]))
        a.append(arow)
    return a
def calc_afinal(cp):
    afinal = []
    for i in range(len(cp)):
        ptmp = []
        tmp = 0
        for j in range(len(cp)):
            if j != i:
                row = []
                row.insert(0, 1/(cp[i]-cp[j]))
                row.insert(1, -cp[j]/(cp[i]-cp[j]))
                ptmp.insert(tmp, row)
                tmp += 1
        p = [1]
        for j in range(len(cp)-1):
            p = conv(p, ptmp[j])
        afinal.append(numpy.polyval(p, 1.0))
    return afinal
def test_polyfit(self):
    c = np.array([3., 2., 1.])
    x = np.linspace(0, 2, 7)
    y = np.polyval(c, x)
    err = [1, -1, 1, -1, 1, -1, 1]
    weights = np.arange(8, 1, -1)**2/7.0

    # check 1D case
    m, cov = np.polyfit(x, y+err, 2, cov=True)
    est = [3.8571, 0.2857, 1.619]
    assert_almost_equal(est, m, decimal=4)
    val0 = [[2.9388, -5.8776, 1.6327],
            [-5.8776, 12.7347, -4.2449],
            [1.6327, -4.2449, 2.3220]]
    assert_almost_equal(val0, cov, decimal=4)

    m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
    assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
    val = [[8.7929, -10.0103, 0.9756],
           [-10.0103, 13.6134, -1.8178],
           [0.9756, -1.8178, 0.6674]]
    assert_almost_equal(val, cov2, decimal=4)

    # check 2D (n,1) case
    y = y[:, np.newaxis]
    c = c[:, np.newaxis]
    assert_almost_equal(c, np.polyfit(x, y, 2))

    # check 2D (n,2) case
    yy = np.concatenate((y, y), axis=1)
    cc = np.concatenate((c, c), axis=1)
    assert_almost_equal(cc, np.polyfit(x, yy, 2))

    m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
    assert_almost_equal(est, m[:, 0], decimal=4)
    assert_almost_equal(est, m[:, 1], decimal=4)
    assert_almost_equal(val0, cov[:, :, 0], decimal=4)
    assert_almost_equal(val0, cov[:, :, 1], decimal=4)
def var(x):
    """Creates a polynomial that describes the kernel width."""
    p = [0.2, 0.02]
    return np.polyval(p, x)

# x sampling:
def extrap1d_constrained_linear_regression(x, y, xEval, side='right', numPts=10):
    """Perform extrapolation using constrained linear regression on part of
    the data (x, y).

    Use numPts from either the left or right side of the data (specified by
    input variable side) as the input data. The linear regression is
    constrained to pass through the final point (x0, y0) (rightmost point if
    side == 'right', leftmost if side == 'left'). Data MUST be sorted.

    Inputs:
        x      independent variable on the smaller domain (array)
        y      dependent variable on the smaller domain (array)
        xEval  values of x at which to evaluate the linear regression model
        side   side of the data from which to perform linear regression
               ('left' or 'right')
        numPts number of points to use in the linear regression (scalar)

    Outputs:
        yEval  values of the dependent variable in the linear regression
               model evaluated at xEval (array)
    """
    assert side == 'left' or side == 'right'

    if side == 'left':
        xSide = x[:numPts]
        ySide = y[:numPts]
        x0 = x[0]
        y0 = y[0]
    elif side == 'right':
        xSide = x[-numPts:]
        ySide = y[-numPts:]
        x0 = x[-1]
        y0 = y[-1]

    a = least_squares_slope(xSide, ySide, x0, y0)  # determine model (a, x0, y0)
    b = y0 - a*x0
    # yEval = scipy.polyval([a, b], xEval)
    yEval = a*(xEval - x0) + y0  # evaluate model on desired points
    return yEval
def remove_continuum_blockvisibility(vis: BlockVisibility, degree=1, mask=None) -> BlockVisibility:
    """Fit and remove continuum visibility.

    Fit a polynomial in frequency of the specified degree where mask is True.

    :param vis:
    :param degree: Degree of polynomial
    :param mask:
    :return:
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), vis

    if mask is not None:
        assert numpy.sum(mask) > 2 * degree, "Insufficient channels for fit"

    nchan = len(vis.frequency)
    x = (vis.frequency - vis.frequency[nchan // 2]) / \
        (vis.frequency[0] - vis.frequency[nchan // 2])
    for row in range(vis.nvis):
        for ant2 in range(vis.nants):
            for ant1 in range(vis.nants):
                for pol in range(vis.polarisation_frame.npol):
                    wt = numpy.sqrt(vis.data['weight'][row, ant2, ant1, :, pol])
                    if mask is not None:
                        wt[mask] = 0.0
                    fit = numpy.polyfit(x, vis.data['vis'][row, ant2, ant1, :, pol],
                                        w=wt, deg=degree)
                    prediction = numpy.polyval(fit, x)
                    vis.data['vis'][row, ant2, ant1, :, pol] -= prediction
    return vis
def remove_continuum_image(im: Image, degree=1, mask=None):
    """Fit and remove continuum visibility in place.

    Fit a polynomial in frequency of the specified degree where mask is True.

    :param im:
    :param degree: Degree of polynomial
    :param mask:
    :return:
    """
    assert isinstance(im, Image)

    if mask is not None:
        assert numpy.sum(mask) > 2 * degree, "Insufficient channels for fit"

    nchan, npol, ny, nx = im.shape
    channels = numpy.arange(nchan)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        frequency = im.wcs.sub(['spectral']).wcs_pix2world(channels, 0)[0]
    frequency -= frequency[nchan // 2]
    frequency /= numpy.max(frequency)

    wt = numpy.ones_like(frequency)
    if mask is not None:
        wt[mask] = 0.0

    for pol in range(npol):
        for y in range(ny):
            for x in range(nx):
                fit = numpy.polyfit(frequency, im.data[:, pol, y, x], w=wt, deg=degree)
                prediction = numpy.polyval(fit, frequency)
                im.data[:, pol, y, x] -= prediction
    return im
def phaserate(plotar, ms2mappings):
    if plotar.plotType != 'phatime':
        raise RuntimeError, "phaserate() cannot run on plot type {0}".format(plotar.plotType)
    spm = ms2mappings.spectralMap
    # iterate over all plots and all data sets within the plots
    for k in plotar.keys():
        for d in plotar[k].keys():
            # get a reference to the data set
            dsref = plotar[k][d]
            # get the full data set label - we have access to all the
            # data set's properties (FQ, SB, POL etc)
            n = plots.join_label(k, d)
            # fit a line through the unwrapped phase
            unw = numpy.unwrap(numpy.deg2rad(dsref.yval))
            coeffs = numpy.polyfit(dsref.xval, unw, 1)
            # evaluate the fitted polynomial at the x-loci
            extray = numpy.polyval(coeffs, dsref.xval)
            # here we could compute the reliability of the fit
            diff = unw - extray
            ss_tot = numpy.sum(numpy.square(unw - unw.mean()))
            ss_res = numpy.sum(numpy.square(diff))
            r_sq = 1.0 - ss_res/ss_tot
            # compare std deviation and variance in the residuals after fit
            std_r = numpy.std(diff)
            var_r = numpy.var(diff)
            f = spm.frequencyOfFREQ_SB(n.FQ, n.SB)
            rate = coeffs[0]
            if var_r < std_r:
                print "{0}: {1:.8f} ps/s @ {2:5.4f}MHz [R2={3:.3f}]".format(
                    n, rate/(2.0*numpy.pi*f*1.0e-12), f/1.0e6, r_sq)
            # before plotting wrap back to -pi,pi and transform to degrees
            dsref.extra = [drawline_fn(n, dsref.xval, numpy.rad2deg(do_wrap(extray)))]
def phasedbg(plotar, ms2mappings):
    global cache
    if plotar.plotType != 'phatime':
        raise RuntimeError, "phasedbg() cannot run on plot type {0}".format(plotar.plotType)
    store = len(cache) == 0
    # iterate over all plots and all data sets within the plots
    for k in plotar.keys():
        for d in plotar[k].keys():
            # get a reference to the data set
            dsref = plotar[k][d]
            # get the full data set label - we have access to all the
            # data set's properties (FQ, SB, POL etc)
            n = plots.join_label(k, d)
            # fit a line through the unwrapped phase
            unw = numpy.unwrap(numpy.deg2rad(dsref.yval))
            #coeffs = numpy.polyfit(dsref.xval, unw, 1)
            coeffs = numpy.polyfit(xrange(len(dsref.yval)), unw, 1)
            # evaluate the fitted polynomial at the x-loci
            extray = numpy.polyval(coeffs, dsref.xval)
            # here we could compute the reliability of the fit
            diff = unw - extray
            # compare std deviation and variance in the residuals after fit
            std_r = numpy.std(diff)
            var_r = numpy.var(diff)
            coeffs = numpy.rad2deg(coeffs)
            if var_r < std_r:
                # decide what to do
                if store:
                    cache[n] = coeffs
                else:
                    # check if current key exists in cache; if so do differencing
                    otherCoeffs = cache.get(n, None)
                    if otherCoeffs is None:
                        print "{0}: not found in cache".format(n)
                    else:
                        delta = otherCoeffs - coeffs
                        print "{0.BL} {0.SB} {0.P}: dRate={1:5.4f} dOff={2:4.1f}".format(
                            n, delta[0], delta[1]+360.0 if delta[1] < 0.0 else delta[1])
            # before plotting wrap back to -pi,pi and transform to degrees
            #dsref.extra = [ drawline_fn(n, dsref.xval, numpy.rad2deg(do_wrap(extray))) ]
    if not store:
        cache = {}
def estimate_tau0(T0, atm):
    # fit log10(tau0) against 1000/T0: low-temperature branch
    T0_fit = np.array([600, 1000])
    tau0_fit = np.array([5, 0.1])
    x = 1000.0/T0_fit
    y = 1.0*np.log10(tau0_fit)
    coeff_low = np.polyfit(x, y, 1)

    # high-temperature branch
    T0_fit = np.array([1000, 1600])
    tau0_fit = np.array([0.1, 1e-4])
    x = 1000.0/T0_fit
    y = 1.0*np.log10(tau0_fit)
    coeff_high = np.polyfit(x, y, 1)

    # pressure dependence of the multiplier, fitted against sqrt(atm)
    atm_fit = 1.0*np.array([1, 10, 20])
    mtp_fit = 1.0*np.array([1, 0.1, 0.02])
    x = np.sqrt(atm_fit)
    y = np.log10(mtp_fit)
    coeff_p = np.polyfit(x, y, 1)

    if T0 < 1000:
        tau_1atm = 10 ** np.polyval(coeff_low, 1000.0/T0)
    else:
        tau_1atm = 10 ** np.polyval(coeff_high, 1000.0/T0)

    mtp = 10 ** np.polyval(coeff_p, np.sqrt(atm))
    tau = tau_1atm * mtp
    # round tau to one significant figure, with a floor of 1e-2
    tau_level = 10 ** np.floor(np.log10(tau))
    tau_rounded = tau_level * np.round(tau/tau_level)
    tau_rounded = max(1e-2, tau_rounded)
    return float(str(tau_rounded))
def compose_functions(cls, x, number_of_compositions=1,
                      functions=(np.sin, np.exp, np.square, np.polyval, np.tan),
                      clip_big_values=True, clip_value=1e6):
    """Compose functions from an iterable of functions.

    This is a helper function to cover a more realistic plotting scenario.

    Arguments:
        x (numpy.array): array for which composed values will be computed.
        number_of_compositions (int): number of compositions of functions.
        functions (tuple): an iterable of functions.
        clip_big_values (bool): whether or not to limit function extremes.
        clip_value (float): limit values for function composition.

    Returns:
        y (numpy.array): array of composed functions
    """
    i = 0
    y = x
    while i < number_of_compositions:
        func = np.random.choice(functions)
        if func == np.polyval:
            n_coefs = np.random.randint(0, 10)
            coefs = np.random.randint(-50, 50, size=n_coefs)
            y = func(coefs, x)
        else:
            y = func(y)
        if clip_big_values:
            y = np.clip(y, -clip_value, clip_value)
        i += 1
    return y

# Goes with 'fast' parameters by default.
def plot_fit(self):
    """Plot the training data in the X array along with the decision boundary."""
    from matplotlib import pyplot as plt
    x1 = np.linspace(self.table.min(), self.table.max(), 100)
    # reverse self.theta, as np.poly1d requires coefficients from the
    # highest degree down to the constant term
    x2 = np.polyval(np.poly1d(self.theta[::-1]), x1)
    plt.plot(x1, x2, color='r', label='decision boundary')
    plt.scatter(self.X[:, 1], self.X[:, 2], s=40, c=self.y, cmap=plt.cm.Spectral)
    plt.legend()
    plt.show()
def ployfit(y, x=None, order=20):
    '''
    Fit data (a one-d array) with a polynomial function and
    return the fitted one-d array.
    '''
    if x is None:
        x = range(len(y))
    pol = np.polyfit(x, y, order)
    return np.polyval(pol, x)
def construct_lanes(data, pts_per_lane):
    poly_x, poly_y = data[:, :4], data[:, 4:]
    lane_x = np.vstack(
        [np.polyval(poly_x[t], np.linspace(0, 50, pts_per_lane))
         for t in xrange(poly_x.shape[0])])
    lane_y = np.vstack(
        [np.polyval(poly_y[t], np.linspace(0, 50, pts_per_lane))
         for t in xrange(poly_y.shape[0])])
    lane = np.hstack([lane_x, lane_y])
    nnz = lane_x.sum(axis=1) > 0.
    return lane, nnz
def calculate(self, **state):
    """
    Calculate the material physical property at the specified temperature
    in the units specified by the object's 'property_units' property.

    :param T: [K] temperature

    :returns: physical property value
    """
    super().calculate(**state)
    return np.polyval(self._coeffs, state['T'])
def fit_quartic(y0, y1, g0, g1):
    """Fit constrained quartic polynomial to function values and derivatives
    at x = 0, 1.

    Returns position and function value of the minimum, or None if the fit
    fails or has a maximum. The quartic polynomial is constrained such that
    its 2nd derivative is zero at just one point, which ensures that it has
    just one local extremum. Either no such quartic polynomial exists or
    there are exactly two; of the two, the one with the lower minimum is
    chosen.
    """
    def g(y0, y1, g0, g1, c):
        a = c + 3*(y0-y1) + 2*g0 + g1
        b = -2*c - 4*(y0-y1) - 3*g0 - g1
        return np.array([a, b, c, g0, y0])

    def quart_min(p):
        r = np.roots(np.polyder(p))
        is_real = np.isreal(r)
        if is_real.sum() == 1:
            minim = r[is_real][0].real
        else:
            minim = r[(r == max(-abs(r))) | (r == -max(-abs(r)))][0].real
        return minim, np.polyval(p, minim)

    # discriminant of d^2y/dx^2 = 0
    D = -(g0+g1)**2 - 2*g0*g1 + 6*(y1-y0)*(g0+g1) - 6*(y1-y0)**2
    if D < 1e-11:
        return None, None
    else:
        m = -5*g0 - g1 - 6*y0 + 6*y1
        p1 = g(y0, y1, g0, g1, .5*(m + np.sqrt(2*D)))
        p2 = g(y0, y1, g0, g1, .5*(m - np.sqrt(2*D)))
        if p1[0] < 0 and p2[0] < 0:
            return None, None
        [minim1, minval1] = quart_min(p1)
        [minim2, minval2] = quart_min(p2)
        if minval1 < minval2:
            return minim1, minval1
        else:
            return minim2, minval2
def __call__(self, x, model_dispersion, model_normalized_flux, *parameters):
    # Marginalize over all models.
    assert len(parameters) == len(self.parameter_names)

    # Continuum.
    O = self.metadata["continuum_order"]
    continuum = 1.0 if 0 > O \
        else np.polyval(parameters[-(O + 1):][::-1], model_dispersion)

    y = model_normalized_flux * continuum

    # Smoothing?
    try:
        index = self.parameter_names.index("smoothing")
    except ValueError:
        pass
    else:
        kernel = abs(parameters[index])
        y = ndimage.gaussian_filter1d(y, kernel, axis=-1)

    # Redshift?
    try:
        index = self.parameter_names.index("redshift")
    except ValueError:
        z = 0
    else:
        v = parameters[index]
        z = v/299792.458  # km/s

    return np.interp(x, model_dispersion * (1 + z), y)
def FIT(p, seq):
    k = len(p)
    predict = polyval(p, k+1)
    return predict
def project_xy(xy_coords, pvec):
    # get cubic polynomial coefficients given
    #
    #   f(0) = 0, f'(0) = alpha
    #   f(1) = 0, f'(1) = beta
    alpha, beta = tuple(pvec[CUBIC_IDX])

    poly = np.array([
        alpha + beta,
        -2*alpha - beta,
        alpha,
        0])

    xy_coords = xy_coords.reshape((-1, 2))
    z_coords = np.polyval(poly, xy_coords[:, 0])

    objpoints = np.hstack((xy_coords, z_coords.reshape((-1, 1))))

    image_points, _ = cv2.projectPoints(objpoints,
                                        pvec[RVEC_IDX],
                                        pvec[TVEC_IDX],
                                        K, np.zeros(5))

    return image_points
def calcmlmags(self, lightcurve):
    """
    Returns a "lc.mags"-like array made using the ml-parameters.
    It has the same size as lc.mags, and contains the microlensing to be
    added to them. The lightcurve object is not changed!

    For normal use, call getmags() from the lightcurve.

    Idea: think about only returning the season's mags to speed it up?
    Not sure if reasonable, as no seasons are defined outside?
    """
    jds = lightcurve.jds[self.season.indices]
    # Is this already a copy? It seems so, so no need for an explicit copy().
    # We do not need to apply shifts (i.e. getjds()), as we "center" the jds anyway.

    # Old method:
    if self.mltype == "poly":
        refjd = np.mean(jds)
        jds -= refjd  # This is apparently safe, it does not shift the lightcurve's jds.
        allmags = np.zeros(len(lightcurve.jds))
        allmags[self.season.indices] = np.polyval(self.params, jds)  # probably faster than +=
        return allmags

    # Legendre polynomials:
    if self.mltype == "leg":
        rjd = (np.max(jds) - np.min(jds))/2.0
        cjd = (np.max(jds) + np.min(jds))/2.0
        jds = (jds - cjd)/rjd
        allmags = np.zeros(len(lightcurve.jds))
        for (n, p) in enumerate(self.params):
            allmags[self.season.indices] += p * ss.legendre(n)(jds)
        return allmags
def _ir_calibrate(self, data):
    """IR calibration."""
    cwl = self._header['block5']["central_wave_length"][0] * 1e-6
    c__ = self._header['calibration']["speed_of_light"][0]
    h__ = self._header['calibration']["planck_constant"][0]
    k__ = self._header['calibration']["boltzmann_constant"][0]
    a__ = (h__ * c__) / (k__ * cwl)

    # b__ = ((2 * h__ * c__ ** 2) / (1.0e6 * cwl ** 5 * data.data)) + 1
    data.data[:] *= 1.0e6 * cwl ** 5
    data.data[:] **= -1
    data.data[:] *= (2 * h__ * c__ ** 2)
    data.data[:] += 1

    # Te_ = a__ / np.log(b__)
    data.data[:] = a__ / np.log(data.data)

    c0_ = self._header['calibration']["c0_rad2tb_conversion"][0]
    c1_ = self._header['calibration']["c1_rad2tb_conversion"][0]
    c2_ = self._header['calibration']["c2_rad2tb_conversion"][0]

    # data.data[:] = c0_ + c1_ * Te_ + c2_ * Te_ ** 2
    data.data[:] = np.polyval([c2_, c1_, c0_], data.data)

    data.mask[data.data < 0] = True
    data.mask[np.isnan(data.data)] = True
def _discretize(self, observation):
    if not self._is_discrete:
        buckets = np.array(observation, dtype=np.int)
        for i in range(len(observation)):
            buckets[i] = np.digitize(observation[i], self._separators[i])
        observation = np.polyval(buckets, self._bins)
    return observation
def op(i, j, x, y):
    p0, a, b, c = orthopy.line.recurrence_coefficients.jacobi(
        i, 0, 0,
        # standardization='monic'
        standardization='p(1)=(n+alpha over n)'
        )
    val1 = orthopy.line.tools.evaluate_orthogonal_polynomial(
        (x-y)/(x+y), p0, a, b, c
        )

    val1 = numpy.polyval(scipy.special.jacobi(i, 0, 0), (x-y)/(x+y))

    # treat x == 0, y == 0 separately
    if isinstance(val1, numpy.ndarray):
        idx = numpy.where(numpy.logical_and(x == 0, y == 0))[0]
        val1[idx] = numpy.polyval(scipy.special.jacobi(i, 0, 0), 0.0)
    else:
        if numpy.isnan(val1):
            val1 = numpy.polyval(scipy.special.jacobi(i, 0, 0), 0.0)

    p0, a, b, c = orthopy.line.recurrence_coefficients.jacobi(
        j, 2*i+1, 0,
        # standardization='monic'
        standardization='p(1)=(n+alpha over n)'
        )
    val2 = orthopy.line.tools.evaluate_orthogonal_polynomial(
        1-2*(x+y), p0, a, b, c
        )
    # val2 = numpy.polyval(scipy.special.jacobi(j, 2*i+1, 0), 1-2*(x+y))

    flt = numpy.vectorize(float)
    return flt(
        numpy.sqrt(2*i + 1) * val1 * (x+y)**i
        * numpy.sqrt(2*j + 2*i + 2) * val2
        )
def test_eval(t, ref, tol=1.0e-14):
    n = 5
    p0, a, b, c = orthopy.line.recurrence_coefficients.legendre(
        n, 'monic', symbolic=True
        )
    value = orthopy.line.evaluate_orthogonal_polynomial(t, p0, a, b, c)
    assert value == ref

    # Evaluating the Legendre polynomial in this way is rather unstable, so
    # don't go too far with n.
    approx_ref = numpy.polyval(legendre(n, monic=True), t)
    assert abs(value - approx_ref) < tol
    return
def test_eval_vec(t, ref, tol=1.0e-14):
    n = 5
    p0, a, b, c = orthopy.line.recurrence_coefficients.legendre(
        n, 'monic', symbolic=True
        )
    value = orthopy.line.evaluate_orthogonal_polynomial(t, p0, a, b, c)
    assert (value == ref).all()

    # Evaluating the Legendre polynomial in this way is rather unstable, so
    # don't go too far with n.
    approx_ref = numpy.polyval(legendre(n, monic=True), t)
    assert (abs(value - approx_ref) < tol).all()
    return
def test_clenshaw(tol=1.0e-14):
    n = 5
    _, _, alpha, beta = \
        orthopy.line.recurrence_coefficients.legendre(n, 'monic')
    t = 1.0

    a = numpy.ones(n+1)
    value = orthopy.line.clenshaw(a, alpha, beta, t)

    ref = math.fsum([
        numpy.polyval(legendre(i, monic=True), t)
        for i in range(n+1)])

    assert abs(value - ref) < tol
    return
def polyval(fit, points, der=0, avg=False):
    """Evaluate polynomial generated by ``polyfit()`` on `points`.

    Parameters
    ----------
    fit, points : see polyfit()
    der : int, optional
        Derivative order. Only for 1D, uses np.polyder().
    avg : bool, optional
        Internal hack, only used by ``avgpolyval()``.

    Notes
    -----
    For 1D we provide "analytic" derivatives using np.polyder(). For ND, we
    didn't implement an equivalent machinery. For 2D, you might get away
    with fitting a bispline (see Interpol2D) and using its derivs. For ND,
    try rbf.py's RBF interpolator, which has at least 1st derivatives for
    arbitrary dimensions.

    See Also
    --------
    :class:`PolyFit`, :class:`PolyFit1D`, :func:`polyfit`
    """
    pscale, pmin = fit['pscale'], fit['pmin']
    vscale, vmin = fit['vscale'], fit['vmin']
    if der > 0:
        assert points.shape[1] == 1, "deriv only for 1d poly (ndim=1)"
        # ::-1 b/c numpy stores poly coeffs in reversed order
        dcoeffs = np.polyder(fit['coeffs'][::-1], m=der)
        return np.polyval(dcoeffs, (points[:, 0] - pmin[0, 0]) / pscale[0, 0]) / \
            pscale[0, 0]**der * vscale
    else:
        vand = vander((points - pmin) / pscale, fit['deg'])
        if avg:
            return np.dot(vand, fit['coeffs']) * vscale
        else:
            return np.dot(vand, fit['coeffs']) * vscale + vmin
def test_compare_numpy():
    x = np.sort(np.random.rand(10))
    y = np.random.rand(10)
    yy1 = np.polyval(np.polyfit(x, y, 3), x)
    for scale in [True, False]:
        yy2 = num.PolyFit1D(x, y, 3, scale=scale)(x)
        assert np.allclose(yy1, yy2)
def Legendre_matrix(N, ctheta):
    r"""Matrix of weighted Legendre polynomials.

    Computes a matrix of weighted Legendre polynomials up to order N for
    the given angles

    .. math::

        L_n(\theta) = \frac{2n+1}{4 \pi} P_n(\theta)

    Parameters
    ----------
    N : int
        Maximum order.
    ctheta : (Q,) array_like
        Angles.

    Returns
    -------
    Lmn : ((N+1), Q) numpy.ndarray
        Matrix containing Legendre polynomials.
    """
    if ctheta.ndim == 0:
        M = 1
    else:
        M = len(ctheta)
    Lmn = np.zeros([N+1, M], dtype=complex)
    for n in range(N+1):
        Lmn[n, :] = (2*n+1)/(4*np.pi) * np.polyval(special.legendre(n), ctheta)
    return Lmn
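As a small check of the formula above (assuming `import numpy as np` and `from scipy import special`, as the function requires), scipy.special.legendre(n) returns a poly1d whose coefficients np.polyval can evaluate directly; P_2(x) = (3x^2 - 1)/2, so P_2(0) = -0.5 and P_2(1) = 1.

import numpy as np
from scipy import special

print(np.polyval(special.legendre(2), 0.0))  # -0.5
print(np.polyval(special.legendre(2), 1.0))  # 1.0

# The weighted entry L_2(theta) used above, at ctheta = 1:
print((2*2 + 1) / (4*np.pi) * np.polyval(special.legendre(2), 1.0))  # 5/(4*pi)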
def eval_fibmodel_poly(self, use_poly=False):
    self.fibmodel = np.zeros((self.D, len(self.binx)))
    for i in xrange(len(self.binx)):
        if use_poly:
            self.fibmodel[:, i] = np.polyval(self.fibmodel_polyvals[:, i],
                                             1. * np.arange(self.D) / self.D)
        else:
            self.fibmodel[:, i] = np.interp(np.arange(self.D),
                                            self.fibmodel_x,
                                            self.fibmodel_y[:, i])
def eval_wave_poly(self):
    self.wavelength = np.polyval(self.wave_polyvals,
                                 1. * np.arange(self.D) / self.D)