The following 20 code examples, extracted from open source Python projects, illustrate how to use scipy.optimize.minimize_scalar().
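Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two calling conventions that recur throughout: the default Brent method, and the bounded method, which requires a (lower, upper) pair. The quadratic f is purely illustrative.

from scipy.optimize import minimize_scalar

def f(x):
    return (x - 0.3)**2 + 1.0  # illustrative objective with minimum at x = 0.3

# Default Brent method: unconstrained, optionally seeded with a bracket.
res = minimize_scalar(f)
print(res.x, res.fun)         # ~0.3, ~1.0

# Bounded method: restricts the search to [lower, upper].
res = minimize_scalar(f, bounds=(0.0, 1.0), method='bounded')
print(res.success, res.x)     # True, ~0.3

The return value is an OptimizeResult; its fields are read throughout the examples below either as attributes (res.x, res.fun, res.success) or dictionary-style (opt["x"]), both of which scipy supports.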
def optSigma2(self, U, s, y, covars, logdetXX, reml, ldeltamin=-5, ldeltamax=5):
    # Prepare required matrices
    Uy = U.T.dot(y).flatten()
    UX = U.T.dot(covars)
    if (U.shape[1] < U.shape[0]):
        UUX = covars - U.dot(UX)
        UUy = y - U.dot(Uy)
        UUXUUX = UUX.T.dot(UUX)
        UUXUUy = UUX.T.dot(UUy)
        UUyUUy = UUy.T.dot(UUy)
    else:
        UUXUUX, UUXUUy, UUyUUy = None, None, None
    numIndividuals = U.shape[0]
    ldeltaopt_glob = optimize.minimize_scalar(self.negLLevalLong,
                                              bounds=(-5, 5),
                                              method='Bounded',
                                              args=(s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, numIndividuals, reml)).x
    ll, sig2g, beta, r2 = self.negLLevalLong(ldeltaopt_glob, s, Uy, UX, logdetXX,
                                             UUXUUX, UUXUUy, UUyUUy, numIndividuals, reml,
                                             returnAllParams=True)
    sig2e = np.exp(ldeltaopt_glob) * sig2g
    return sig2g, sig2e, beta, ll
def fit_z(locs, info, calibration, magnification_factor, filter=2):
    cx = _np.array(calibration['X Coefficients'])
    cy = _np.array(calibration['Y Coefficients'])
    z = _np.zeros_like(locs.x)
    square_d_zcalib = _np.zeros_like(z)
    sx = locs.sx
    sy = locs.sy
    for i in range(len(z)):
        result = _minimize_scalar(_fit_z_target, args=(sx[i], sy[i], cx, cy))
        z[i] = result.x
        square_d_zcalib[i] = result.fun
    z *= magnification_factor
    locs = _lib.append_to_rec(locs, z, 'z')
    locs = _lib.append_to_rec(locs, _np.sqrt(square_d_zcalib), 'd_zcalib')
    locs = _lib.ensure_sanity(locs, info)
    return filter_z_fits(locs, filter)
def estimate_mode(acc):
    """
    Estimate the mode of a set of float values between 0 and 1.

    :param acc: Data.
    :returns: The mode of the sample
    :rtype: float
    """
    # Taken from sloika.
    if len(acc) > 1:
        da = gaussian_kde(acc)
        optimization_result = minimize_scalar(lambda x: -da(x), bounds=(0, 1), method='Bounded')
        if optimization_result.success:
            try:
                mode = optimization_result.x[0]
            except IndexError:
                mode = optimization_result.x
        else:
            sys.stderr.write("Mode computation failed")
            mode = 0
    else:
        mode = acc[0]
    return mode
def update_sigma_one_element_wrapper(u, node_to_pair_u, xi, num_type, num_node, sigma_old):
    # compute quantities involving pairs having u
    # (list comprehensions instead of map(), which returns an iterator in Python 3)
    sigma__v_arr = np.asarray([sigma_old[v_s[0]] for v_s in node_to_pair_u])
    xi_arr = np.asarray([xi[v_s[1]] for v_s in node_to_pair_u])

    # objective function w.r.t. sigma__u
    def cur_obj(x):
        return PReP.update_sigma_one_element_obj(x, sigma__v_arr, xi_arr, num_type)

    # bounds
    bnds = (0., 1e10)

    # solve optimization and return
    opt = minimize_scalar(cur_obj, bounds=bnds, method="bounded")
    return opt.x

# Defined outside of PReP since the parallelization package joblib does not support instance methods
def fit(self, X, C, y, regions, kernelType, reml=True, maxiter=100):
    # construct a list of kernel names (one for each region)
    if (kernelType == 'adapt'):
        kernelNames = self.buildKernelAdapt(X, C, y, regions, reml, maxiter)
    else:
        kernelNames = [kernelType] * len(regions)

    # perform optimization
    kernelObj, hyp_kernels, sig2e, fixedEffects = self.optimize(X, C, y, kernelNames, regions, reml, maxiter)

    # compute posterior distribution
    Ktraintrain = kernelObj.getTrainKernel(hyp_kernels)
    post = self.infExact_scipy_post(Ktraintrain, C, y, sig2e, fixedEffects)

    # fix intercept if phenotype is binary
    if (len(np.unique(y)) == 2):
        controls = (y < y.mean())
        cases = ~controls
        meanVec = C.dot(fixedEffects)
        mu, var = self.getPosteriorMeanAndVar(np.diag(Ktraintrain), Ktraintrain, post, meanVec)
        fixedEffects[0] -= optimize.minimize_scalar(self.getNegLL,
                                                    args=(mu, np.sqrt(sig2e + var), controls, cases),
                                                    method='brent').x

    # construct trainObj
    trainObj = dict([])
    trainObj['sig2e'] = sig2e
    trainObj['hyp_kernels'] = hyp_kernels
    trainObj['fixedEffects'] = fixedEffects
    trainObj['kernelNames'] = kernelNames
    return trainObj
def __call__(self, sat_pos, args=(), **kwds):
    """
    Return the definite integral of *self.fun*(pos, *args*[0], ...,
    *args*[-1]) for the line of sight from *stn_pos* to *sat_pos* (a
    :class:`PyPosition`) where pos is a :class:`PyPosition` on the line
    of sight (and note the integration bounds on h defined in __init__).
    The remaining *kwds* are passed to the quadrature routine
    (:py:func:`quad`).
    """
    diff = NP.array(sat_pos.xyz) - NP.array(self.stn_pos.xyz)
    S_los = NP.linalg.norm(diff) / 1e3

    def pos(s):
        """
        Return the ECEF vector a distance *s* along the line of sight (in [km]).
        """
        return PyPosition(*(NP.array(self.stn_pos.xyz) + (s / S_los) * diff))

    # determine integration bounds:
    # distance along the line of sight at which the geodetic height is self.height1
    s1 = minimize_scalar(lambda l: (pos(l).height / 1e3 - self.height1)**2,
                         bounds=[0, S_los], method='Bounded').x
    # distance along the line of sight at which the geodetic height is self.height2
    s2 = minimize_scalar(lambda l: (pos(l).height / 1e3 - self.height2)**2,
                         bounds=[0, S_los], method='Bounded').x

    def wrapper(s, *args):
        return self.fun(pos(s), *args)

    return quad(wrapper, s1, s2, args=args, **kwds)[0]
def GaussNewton(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
    x = np.array(x0, dtype=np.float64)
    oldCost = -1
    for i in range(maxIter):
        r = fun(x, *args)
        cost = np.sum(r**2)
        if verbose > 0:
            print("Cost at iteration " + str(i) + ": " + str(cost))
        if (cost < eps or abs(cost - oldCost) < eps):
            break
        oldCost = cost
        J = funJack(x, *args)
        grad = np.dot(J.T, r)
        H = np.dot(J.T, J)
        direction = np.linalg.solve(H, grad)
        # step-length optimization
        lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
        # step length
        alpha = lineSearchRes["x"]
        x = x + alpha * direction
    if verbose > 0:
        print("Gauss-Newton finished after " + str(i + 1) + " iterations")
        r = fun(x, *args)
        cost = np.sum(r**2)
        print("cost = " + str(cost))
        print("x = " + str(x))
    return x
def SteepestDescent(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
    x = np.array(x0, dtype=np.float64)
    oldCost = -1
    for i in range(maxIter):
        r = fun(x, *args)
        cost = np.sum(r**2)
        if verbose > 0:
            print("Cost at iteration " + str(i) + ": " + str(cost))
        # stopping conditions
        if (cost < eps or abs(cost - oldCost) < eps):
            break
        oldCost = cost
        J = funJack(x, *args)
        grad = 2 * np.dot(J.T, r)
        direction = grad
        # step-length optimization
        lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
        # step length
        alpha = lineSearchRes["x"]
        x = x + alpha * direction
    if verbose > 0:
        print("Steepest Descent finished after " + str(i + 1) + " iterations")
        r = fun(x, *args)
        cost = np.sum(r**2)
        print("cost = " + str(cost))
        print("x = " + str(x))
    return x
def u1_peak(self, k):
    T = k * self.Tr
    self.z.cost(T)
    self.z.T = T
    self.z.p = self.z.p.reshape((-1, self.z.order + 1))
    bnds = self.get_bounds(self.u1, k)
    u1_res = minimize_scalar(lambda t: -self.u1(t), bounds=bnds, method='bounded')
    self.peaks[0] = np.abs(self.u1(u1_res.x))
    return self.peaks[0]
def theta_peak(self, k):
    T = k * self.Tr
    self.x.cost(T)
    self.x.T = T
    self.x.p = self.x.p.reshape((-1, self.x.order + 1))
    bnds = self.get_bounds(lambda t: np.abs(self.theta(t)), k)
    theta_res = minimize_scalar(lambda t: -np.abs(self.theta(t)), bounds=bnds, method='bounded')
    self.peaks[3] = np.abs(self.theta(theta_res.x))
    return self.peaks[3]
def phi_peak(self, k):
    T = k * self.Tr
    self.y.cost(T)
    self.y.T = T
    self.y.p = self.y.p.reshape((-1, self.y.order + 1))
    bnds = self.get_bounds(lambda t: np.abs(self.phi(t)), k)
    phi_res = minimize_scalar(lambda t: -np.abs(self.phi(t)), bounds=bnds, method='bounded')
    self.peaks[4] = np.abs(self.phi(phi_res.x))
    return self.peaks[4]

# Returns a set of bounds for the minimizer to use. Necessary for accurate
# minimization of non-convex functions such as u1, u2, u3, and u4. This is
# a BRUTE FORCE method. We take rezo samples per piecewise polynomial
# segment, find the time which results in the maximum, and return the two
# time samples adjacent to that time.
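The get_bounds helper called by the three *_peak methods is not part of this excerpt. Based on the comment above, here is a plausible sketch of the brute-force bracketing it describes; the signature, the sample count rezo, and the segment bookkeeping are assumptions, not code from the project (in the excerpt it is called as self.get_bounds(f, k)).

import numpy as np

def get_bounds(f, T, num_segments, rezo=10):
    # Hypothetical reconstruction: sample `rezo` points per polynomial
    # segment over [0, T], find the sample where |f| is largest, and
    # return the two adjacent sample times as a bracket suitable for
    # minimize_scalar(..., method='bounded').
    t = np.linspace(0.0, T, num_segments * rezo)
    vals = np.array([abs(f(ti)) for ti in t])   # f need not be vectorized
    i = int(np.argmax(vals))                    # index of the sampled peak
    return (t[max(i - 1, 0)], t[min(i + 1, len(t) - 1)])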
def get_uncertainty_scalefactor(df):
    """
    Find the factor by which to multiply the 1-sigma measurement uncertainties
    so that they agree with the literature values 68% of the time.

    :param df: A pandas DataFrame with corrected temperatures, such as output by
               correct_measured_temperature
    :return: The scaling factor. Multiply df['T_uperr'] and df['T_lowerr'] by
             this to get more realistic uncertainties.
    """
    def get_zscore(x, y, xerr, yerr, f=1.0):
        delta = x - y
        sigma = np.sqrt(f * xerr ** 2 + yerr ** 2)
        return delta / sigma

    def min_func(f, x, y, xerr, yerr):
        zscores = get_zscore(x, y, xerr, yerr, f)
        return (len(zscores[zscores ** 2 > 1]) / float(len(zscores)) - 0.32) ** 2

    df['T_err'] = np.minimum(df['T_uperr'], df['T_lowerr'])  # Be conservative and use the smaller error.

    fitresult = minimize_scalar(min_func, bounds=[0, 10], method='bounded',
                                args=(df['Corrected Temperature'], df['Tactual'], df['T_err'], df['Tact_err']))

    logging.info('Uncertainty scale factor = {:.2g}'.format(fitresult.x))
    return fitresult.x
def split_phase(self):
    pvt = self.pvt
    comps = self.split_init()
    if len(comps) == 1:
        new_pvt = {}
        for i in comps[0]:
            new_pvt[i] = pvt[i].copy()
            new_pvt[i]['comp'] = comps[0][i]
        return [Phase(new_pvt, self.P, self.T, self.tangents, self.moles)]
    else:
        index = []
        for i in pvt:
            index.append(i)
        comp_1 = [comps[0][j] for j in index]
        comp_2 = [comps[1][j] for j in index]
        z = [pvt[j]['comp'] for j in index]

        def func(frac):
            return (((1 - frac)**2) * np.dot(comp_2, comp_2)
                    + 2 * frac * (1 - frac) * np.dot(comp_2, comp_1)
                    + (frac**2) * np.dot(comp_1, comp_1) - np.dot(z, z)
                    - 2 * (1 - frac) * np.dot(z, comp_2) - 2 * frac * np.dot(z, comp_1))

        bounds = [0., 1.]
        fraction = minimize_scalar(func, bounds=bounds, method='Bounded').x
        new_pvt1 = {}
        new_pvt2 = {}
        c = 0
        for i in index:
            new_pvt1[i], new_pvt2[i] = pvt[i].copy(), pvt[i].copy()
            new_pvt1[i]['comp'], new_pvt2[i]['comp'] = comp_1[c], comp_2[c]
            c += 1
        return [Phase(new_pvt1, self.P, self.T, self.tangents, self.moles * fraction),
                Phase(new_pvt2, self.P, self.T, self.tangents, self.moles * (1 - fraction))]
def flash(self):
    index = [i for i in self.pvt]
    z = [self.pvt[i]['comp'] for i in index]
    n = len(index)
    x0 = [ln(self.phases[0]['comp'][i]) for i in index] + [ln(self.phases[1]['comp'][i]) for i in index]

    def fun(x):
        comp_l = {index[i]: exp(x[n + i]) for i in range(n)}
        comp_v = {index[i]: exp(x[i]) for i in range(n)}
        fug_v = peng_robinson.fug_minimum_gibbs(self.pvt, self.T, self.P, 1., comp=comp_v, phase='light')
        fug_l = peng_robinson.fug_minimum_gibbs(self.pvt, self.T, self.P, 1., comp=comp_l, phase='heavy')
        F = [fug_v[i] + ln(comp_v[i]) - fug_l[i] - ln(comp_l[i]) for i in index]
        F += [(z[i] - exp(x[i])) / (exp(x[n + i]) - exp(x[i]))
              - (z[n - 1] - exp(x[n - 1])) / (exp(x[-1]) - exp(x[n - 1])) for i in range(n - 1)]
        return np.dot(F, F)

    cons = ({'type': 'ineq',
             'fun': lambda x: np.array([-c for c in x]),
             'jac': lambda x: np.array([[-1. if c1 == c2 else 0 for c1 in range(len(x))]
                                        for c2 in range(len(x))])},
            {'type': 'eq',
             'fun': lambda x: np.array([1 - sum([exp(c) for c in x[:n]]),
                                        1 - sum([exp(c) for c in x[n:]])]),
             'jac': lambda x: np.array([[-exp(c) if c in x[:n] else 0 for c in x],
                                        [-exp(c) if c in x[n:] else 0 for c in x]])})
    res = minimize(fun, x0, method='SLSQP', constraints=cons)
    comp_v = [exp(c) for c in res.x[:n]]
    comp_l = [exp(c) for c in res.x[n:]]

    def func(frac):
        return (((1 - frac) ** 2) * np.dot(comp_l, comp_l)
                + 2 * frac * (1 - frac) * np.dot(comp_l, comp_v)
                + (frac ** 2) * np.dot(comp_v, comp_v) - np.dot(z, z)
                - 2 * (1 - frac) * np.dot(z, comp_l) - 2 * frac * np.dot(z, comp_v))

    bounds = [0., 1.]
    fraction = minimize_scalar(func, bounds=bounds, method='Bounded').x
    comp_v = {index[i]: comp_v[i] for i in range(n)}
    comp_l = {index[i]: comp_l[i] for i in range(n)}
    self.phases = [{'frac': fraction, 'comp': comp_v},
                   {'frac': 1 - fraction, 'comp': comp_l}]
def find_vapor_frcn(pvt, K):
    V = minimize_scalar(lambda x: rachford_rice_g(x, pvt, K)**2,
                        bracket=[0, 1], bounds=[0, 1], method='bounded').x
    comp_l = {}
    comp_v = {}
    for i in pvt:
        comp_v[i] = pvt[i]['comp'] * math.exp(K[i]) / (1 - V * (1 - math.exp(K[i])))
        comp_l[i] = pvt[i]['comp'] / (1 - V * (1 - math.exp(K[i])))
    return (V, comp_v, comp_l)
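The rachford_rice_g helper is not shown in this excerpt. Given how comp_v and comp_l are computed above, K evidently stores log K-values, so a plausible reconstruction is the standard Rachford-Rice residual, which is zero at the physical vapor fraction V; this sketch is an assumption, not code from the project.

import math

def rachford_rice_g(V, pvt, K):
    # Hypothetical sketch of the standard Rachford-Rice residual
    # g(V) = sum_i z_i * (K_i - 1) / (1 + V * (K_i - 1)),
    # with the effective K-value taken as exp(K[i]) to match the
    # composition formulas in find_vapor_frcn above.
    return sum(pvt[i]['comp'] * (math.exp(K[i]) - 1.0)
               / (1.0 + V * (math.exp(K[i]) - 1.0))
               for i in pvt)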
def linearsearch(xa, ca, t0, ya):
    alpha = minimize_scalar(estimateZ, args=(xa, ca, t0, ya), bounds=(0, 1), method='Bounded')
    return alpha.x

# main functions

#### Step 1: Network Representation and Data Structure
## Define the link-node matrix
def optimize_Tc(self):
    '''
    determines the coalescent time scale that optimizes the coalescent likelihood of the tree
    '''
    from scipy.optimize import minimize_scalar
    initial_Tc = self.Tc

    def cost(Tc):
        self.set_Tc(Tc)
        return -self.total_LH()

    # method='bounded' made explicit: some scipy versions silently ignore
    # the bounds under the default Brent method
    sol = minimize_scalar(cost, bounds=[ttconf.TINY_NUMBER, 10.0], method='bounded')
    if sol["success"]:
        self.set_Tc(sol['x'])
    else:
        self.set_Tc(initial_Tc.y, T=initial_Tc.x)
def get_rv(vel, corr, Npix=None, **fit_kws):
    """
    Get the best radial velocity, with errors.
    This will only work if the ccf was made with the maximum likelihood method!
    Uses the formula given in Zucker (2003) MNRAS, 342, 4 for the rv error.

    Parameters:
    ===========
    - vel:   numpy.ndarray
             The velocities
    - corr:  numpy.ndarray
             The ccf values. Should be the same size as vel
    - Npix:  integer
             The number of pixels used in the CCF.

    Returns:
    ========
    - rv:     float
              The best radial velocity, in km/s
    - rv_err: float
              Uncertainty in the velocity, in km/s
    - ccf:    float
              The CCF power at the maximum velocity.
    """
    vel = np.atleast_1d(vel)
    corr = np.atleast_1d(corr)
    sorter = np.argsort(vel)
    fcn = spline(vel[sorter], corr[sorter])
    fcn_prime = fcn.derivative(1)
    fcn_2prime = fcn.derivative(2)

    guess = vel[np.argmax(corr)]

    def errfcn(v):
        ll = 1e6 * fcn_prime(v)**2
        return ll

    out = minimize_scalar(errfcn, bounds=(guess - 2, guess + 2), method='bounded')
    rv = out.x
    if Npix is None:
        Npix = vel.size

    rv_var = -(Npix * fcn_2prime(rv) * (fcn(rv) / (1 - fcn(rv) ** 2))) ** (-1)
    return rv, np.sqrt(rv_var), fcn(rv)
def optimal_t_compressed(self, seq_pair, multiplicity):
    """
    Find the optimal distance between the two sequences
    """

    def _neg_prob(t, seq_pair, multiplicity):
        """
        Probability to observe the child given the parent state, transition
        matrix, and the time of evolution (branch length).

        Parameters
        ----------
        t : double
            Branch length (time between sequences)
        parent : numpy.array
            Parent sequence
        child : numpy.array
            Child sequence
        tm : GTR
            Model of evolution

        Returns
        -------
        prob : double
            Negative probability of the two given sequences to be separated by the time t.
        """
        return -1.0 * self.prob_t_compressed(seq_pair, multiplicity, t, return_log=True)

    try:
        from scipy.optimize import minimize_scalar
        opt = minimize_scalar(_neg_prob,
                              bounds=[0, ttconf.MAX_BRANCH_LENGTH],
                              method='bounded',
                              args=(seq_pair, multiplicity),
                              options={'xatol': 1e-8})
        new_len = opt["x"]
    except:
        import scipy
        print('legacy scipy', scipy.__version__)
        from scipy.optimize import fminbound
        new_len = fminbound(_neg_prob, 0, ttconf.MAX_BRANCH_LENGTH,
                            args=(seq_pair, multiplicity))
        opt = {'success': True}

    if new_len > .9 * ttconf.MAX_BRANCH_LENGTH:
        self.logger("WARNING: GTR.optimal_t_compressed -- The branch length seems to be very long!",
                    4, warn=True)

    if opt["success"] != True:
        # return hamming distance: number of state pairs where the state differs / all pairs
        new_len = np.sum(multiplicity[seq_pair[:, 1] != seq_pair[:, 0]]) / np.sum(multiplicity)

    return new_len
def get_max_posterior_region(self, node, fraction=0.9):
    '''
    If temporal reconstruction was done using the marginal ML mode, the entire
    distribution of times is available. This function determines the 95%
    confidence interval, defined as the range with 5% of the probability below
    and above. Note that this does not necessarily contain the highest
    probability position.

    Parameters
    ----------
    node : PhyloTree.Clade
        The node for which the confidence region is to be calculated
    fraction : float
        Float specifying how much of the posterior probability is to be
        contained in the region

    Returns
    -------
    max_posterior_region : numpy array
        Array with two numerical dates delineating the high posterior region
    '''
    if not hasattr(node, "marginal_inverse_cdf"):
        return np.array([np.nan, np.nan])

    if node.marginal_inverse_cdf == "delta":
        return np.array([node.numdate, node.numdate])

    min_max = (node.marginal_pos_LH.xmin, node.marginal_pos_LH.xmax)
    if node.marginal_pos_LH.peak_pos == min_max[0]:  # peak on the left
        return self.get_confidence_interval(node, (0, fraction))
    elif node.marginal_pos_LH.peak_pos == min_max[1]:  # peak on the right
        return self.get_confidence_interval(node, (1.0 - fraction, 1.0))
    else:  # peak in the center of the distribution
        # construct height-to-position interpolators left and right of the peak;
        # this assumes there is only one peak --- might fail in odd cases
        from scipy.interpolate import interp1d
        from scipy.optimize import minimize_scalar as minimize

        pidx = np.argmin(node.marginal_pos_LH.y)
        pval = np.min(node.marginal_pos_LH.y)

        left = interp1d(node.marginal_pos_LH.y[:(pidx + 1)] - pval, node.marginal_pos_LH.x[:(pidx + 1)],
                        kind='linear', fill_value=min_max[0], bounds_error=False)
        right = interp1d(node.marginal_pos_LH.y[pidx:] - pval, node.marginal_pos_LH.x[pidx:],
                         kind='linear', fill_value=min_max[1], bounds_error=False)

        # function to minimize -- squared difference between probability mass and desired fraction
        def func(x, thres):
            interval = np.array([left(x), right(x)]).squeeze()
            return (thres - np.diff(node.marginal_cdf(np.array(interval))))**2

        # minimize and determine success
        sol = minimize(func, bracket=[0, 10], args=(fraction,))
        if sol['success']:
            return self.date2dist.to_numdate(np.array([right(sol['x']), left(sol['x'])]).squeeze())
        else:
            # on failure, return the standard confidence interval
            return self.get_confidence_interval(node, (0.5 * (1 - fraction), 1 - 0.5 * (1 - fraction)))