We extracted the following 50 code examples from open-source Python projects to illustrate how to use scipy.optimize.minimize().
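Before the extracted examples, here is a minimal, self-contained sketch of the basic calling convention (objective function, initial guess, choice of method). The Rosenbrock function used here is a standard test problem, not taken from any of the projects below:

import numpy as np
from scipy.optimize import minimize

def rosen(x):
    # classic Rosenbrock test function; minimum at x = [1, 1, ..., 1]
    return np.sum(100.0 * (x[1:] - x[:-1] ** 2) ** 2 + (1.0 - x[:-1]) ** 2)

res = minimize(rosen, x0=np.zeros(5), method='BFGS')
print(res.success, res.x)  # converges to approximately [1, 1, 1, 1, 1]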
def even_points(npts, use_fib_init=True, method='CG', potential_func=elec_potential, maxiter=None):
    """
    Distribute npts over a sphere and minimize their potential, making them
    "evenly" distributed.

    Starting with the Fibonacci spiral speeds things up by ~factor of 2.
    """
    if use_fib_init:
        # Start with fibonacci spiral guess
        theta, phi = fib_sphere_grid(npts)
    else:
        # Random on a sphere
        theta = np.random.rand(npts)*np.pi*2.
        phi = np.arccos(2.*np.random.rand(npts)-1.)
    x = np.concatenate((theta, phi))
    # XXX--need to check if this is the best minimizer
    min_fit = minimize(potential_func, x, method=method,  # use the method argument, not a hardcoded 'CG'
                       options={'maxiter': maxiter})
    x = min_fit.x
    theta = x[0:x.size//2]  # integer division; x.size/2 is a float in Python 3
    phi = x[x.size//2:]
    # Looks like I get the same energy values as https://en.wikipedia.org/wiki/Thomson_problem
    return theta, phi
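elec_potential and fib_sphere_grid above are project-local helpers. As a rough, self-contained illustration of the same idea (minimizing the pairwise Coulomb energy of points on a sphere parameterized by (theta, phi)), one might write something like the following sketch:

import numpy as np
from scipy.optimize import minimize

def coulomb_energy(x):
    # x packs theta then phi; convert to unit vectors and sum 1/r over pairs
    n = x.size // 2
    theta, phi = x[:n], x[n:]
    pts = np.column_stack((np.sin(phi) * np.cos(theta),
                           np.sin(phi) * np.sin(theta),
                           np.cos(phi)))
    d = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=2)
    iu = np.triu_indices(n, k=1)  # upper triangle: each pair counted once
    return np.sum(1.0 / d[iu])

n = 12
x0 = np.concatenate((np.random.rand(n) * 2 * np.pi,
                     np.arccos(2 * np.random.rand(n) - 1)))
fit = minimize(coulomb_energy, x0, method='CG')
print(fit.fun)  # for n=12 this approaches the known Thomson minimum (~49.17)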
def estimate_theta(self, samples):
    '''
    Estimates the theta parameters from the given samples.

    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.
    '''
    if self.theta is not None:
        bnds = self.theta_bounds()

        def cost(theta):
            '''
            Calculates the cost of a given `theta` parameter.
            '''
            self.theta = np.asarray(theta)
            vals = self.logpdf(samples)
            # For optimization, filter out infinity values
            return -np.sum(vals[np.isfinite(vals)])

        result = minimize(cost, self.theta, method='TNC', bounds=bnds)
        self.theta = result.x
def run(self, verbose=False):
    """
    Executes calibration of the model and prints the final result.
    """
    self.verbose = verbose
    x0, bnds = self.iter_params()
    self.res = minimize(
        self.min_function, x0,
        method="Nelder-Mead",
        bounds=bnds,
        options={
            'maxiter': 100,
            'maxfev': 500  # Nelder-Mead takes 'maxfev', not 'maxfun'
        })
    self.print_final_results()
def fit(self, data, labels, l1_weight_cost=0., l2_weight_cost=0.):
    if self.optimizer == "lbfgs":
        from scipy.optimize import minimize
        res = minimize(self.f_and_g, self.params.copy(),
                       (data, labels, l1_weight_cost, l2_weight_cost),
                       method="L-BFGS-B", jac=True,
                       options={"ftol": 1E-4})
        p = res.x
    elif self.optimizer == "minimize_cg":
        # this branch uses a different, project-local `minimize` (a CG routine)
        max_n_line_search = np.inf
        p, g, n_line_searches = minimize(self.params.copy(),
                                         (data, labels, l1_weight_cost, l2_weight_cost),
                                         self.f_and_g, True,
                                         maxnumlinesearch=max_n_line_search,
                                         verbose=self.verbose)
    else:
        raise ValueError("Unknown optimizer setting {}".format(self.optimizer))
    if self.verbose:
        print("Training complete!")
    self.update_params(p)
def __init__(self, wp, der=4):
    '''
    Waypoints must be provided in the form:
        [[ x0, dx0, d2x0, ... ]
         [ x1, dx1, d2x1, ... ]
         [ x2, dx2, d2x2, ... ]
         [ ...      ...       ]]
    Omitted derivatives will be left free, and any derivatives up to and
    including the objective (der) derivative will be made continuous on
    the waypoints.
    '''
    self.wp = np.array(wp)  # waypoints
    self.match = der  # derivative to minimize and match
    if len(wp) < 3:
        print('More waypoints required.')
    elif der < 2:
        print('Higher derivative required.')
    else:
        self.init_QP()
def main():
    'Runs calculation chain'
    constants = earth_constants_from_text('finals2000A.data')
    ecef_conv_func = make_teme_ecef_conv_func(*constants)
    tle = '''0 INMARSAT 3-F1
1 23839U 96020A 14066.96754476 -.00000012 00000-0 10000-3 0 9995
2 23839 001.6371 073.1994 0005326 270.3614 234.8362 01.00274124 65669'''
    orig_elements = elem_from_tle(tle)
    sat_state_ecef = ecef_state_from_csv('ecef.txt')
    resid_func = make_residual_func(tle, sat_state_ecef, orig_elements, ecef_conv_func)
    opt_factors = minimize(resid_func, zeros(6), method='BFGS',
                           options={'maxiter': 11, 'eps': 1e-4})
    opt_tle = tle_from_factors(tle, opt_factors.x, orig_elements)
    with open('data/opt.tle', 'wt') as fout:
        for line in opt_tle:
            fout.write(line)
def bfgs(obj_and_grad, x, callback=None, num_iters=100):
    def epoch_counter():
        epoch = 0
        while True:
            yield epoch
            epoch += 1
    ec = epoch_counter()
    wrapped_callback = None
    if callback:
        def wrapped_callback(x):
            callback(x, next(ec))
    res = minimize(fun=obj_and_grad, x0=x, jac=True, callback=wrapped_callback,
                   options={'maxiter': num_iters, 'disp': True})
    return res.x
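Because jac=True is passed, obj_and_grad must return the objective value and its gradient together. A minimal usage sketch of the wrapper above, with a hypothetical quadratic objective:

import numpy as np
from scipy.optimize import minimize

def obj_and_grad(x):
    # hypothetical objective f(x) = ||x||^2, returning (value, gradient)
    return np.dot(x, x), 2 * x

def log_progress(x, epoch):
    print(epoch, np.dot(x, x))

x_opt = bfgs(obj_and_grad, np.ones(5), callback=log_progress, num_iters=50)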
def guess_fit(self):
    """
    This doesn't work too great, but the full MCMC fit looks good.
    """
    def errfcn(pars):
        ll = self.lnprob(pars)
        return -ll

    # Set up initial guesses
    initial_guess = np.ones(self.bin_centers.size + 4)
    initial_guess[-4] = 0.0
    initial_guess[-3] = -0.25
    initial_guess[-2] = -1.0
    initial_guess[-1] = -1.0

    # Set up bounds
    bounds = [[1e-3, None] for p in self.bin_centers]
    bounds.append([-10, 20])
    bounds.append([-10, 10])
    bounds.append((-1, 5))
    bounds.append((-10, 10))

    # Minimize
    out = minimize(errfcn, initial_guess, bounds=bounds)
    return out.x
def bm_single_brlen_optim(tree, ntraits, sigsq=1., alg="Powell"):
    count = 0
    for node in tree.iternodes(order=1):
        if node.istip:
            continue
        start = np.array([child.length for child in node.children], dtype=np.double)
        if alg == "Powell":
            opt = optimize.minimize(calc_like_single_brlen, start,
                                    args=(node, tree, ntraits, sigsq),
                                    method="Powell")
        elif alg == "SLSQP":
            opt = optimize.minimize(calc_like_single_brlen, start,
                                    args=(node, tree, ntraits, sigsq),
                                    method="SLSQP",
                                    bounds=((0.00001, 1000.), (0.00001, 1000.)))
        elif alg == "L-BFGS-B":
            opt = optimize.minimize(calc_like_single_brlen, start,
                                    args=(node, tree, ntraits, sigsq),
                                    method="L-BFGS-B",
                                    bounds=((0.00001, 1000.), (0.00001, 1000.)))
        ccount = 0
        for child in node.children:
            child.length = opt.x[ccount]
            ccount += 1
        count += 1
    return [tree.get_newick_repr(True), opt]
def fit(self, X, T):
    print(self.max_iter)
    opt_solution = minimize(self.compute_cost, self.flatten(), args=(X, T),
                            method='L-BFGS-B', tol=self.tol, jac=True,
                            options={'maxiter': self.max_iter, 'disp': True})
    print(opt_solution.success)
    print(opt_solution.fun)
    self.stack(opt_solution.x)
    return self
def _acquisition(self, gp, y):
    y_max = y.max()
    bounds = numpy.array([[0., 0.999999] for _ in self.space.names()])
    max_acq = None
    x_max = None
    x_seeds = numpy.random.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(100, bounds.shape[0]))
    for x_try in x_seeds:
        # Find the minimum of minus the acquisition function
        x_try[[not i for i in self.space.isactive(x_try)]] = 0
        res = minimize(lambda x: -self.utility(x.reshape(1, -1), gp=gp,
                                               y_max=y_max, **self.utility_kws),
                       x_try.reshape(1, -1),
                       bounds=bounds,
                       method="L-BFGS-B")
        # Store it if better than previous minimum (maximum).
        if max_acq is None or -res.fun[0] >= max_acq:
            x_max = res.x
            max_acq = -res.fun[0]
    assert x_max is not None, "This is an invalid case"
    return x_max
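The multi-start pattern above (seed many random points, run a bounded local optimizer from each, keep the best) is a common way to maximize a non-convex acquisition function. A stripped-down, self-contained sketch of the same idea, with a hypothetical scalar acq(x):

import numpy as np
from scipy.optimize import minimize

def maximize_acquisition(acq, bounds, n_seeds=100, rng=np.random):
    # multi-start L-BFGS-B over minus the acquisition function
    seeds = rng.uniform(bounds[:, 0], bounds[:, 1],
                        size=(n_seeds, bounds.shape[0]))
    best_x, best_val = None, -np.inf
    for x0 in seeds:
        res = minimize(lambda x: -acq(x), x0, bounds=bounds, method="L-BFGS-B")
        if -res.fun > best_val:
            best_x, best_val = res.x, -res.fun
    return best_x

# usage with a hypothetical acquisition function peaked at (0.3, 0.3)
bounds = np.array([[0., 1.], [0., 1.]])
x_best = maximize_acquisition(lambda x: -np.sum((x - 0.3) ** 2), bounds)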
def _optimize_single(self, x0):
    x0 = list(x0)
    if x0[0] is None:  # identity comparison, not == None
        x0[0] = 0
        dt_ok = np.asscalar(self.dispersion.dt_ok(x0))
        if dt_ok < 0:
            # Initial conditions violate constraints, reject
            return x0, None, float('inf')
        x0[0] = dt_ok
        x0[0] = min(x0[0], self.dtmax)
        x0[0] = max(x0[0], self.dtmin)
    x0 = np.asfarray(x0)
    stencil_ok = self.dispersion.stencil_ok(x0)
    if stencil_ok < 0:
        # Initial conditions violate constraints, reject
        return x0, None, float('inf')
    res = scop.minimize(self.dispersion.norm, x0, method='SLSQP',
                        constraints=self.constraints,
                        options=dict(disp=False, iprint=2))
    norm = self.dispersion_high.norm(res.x)
    return x0, res, norm
def fit(self, X):
    self._X_fit = to_time_series_dataset(X)
    self.weights = _set_weights(self.weights, self._X_fit)
    if self.barycenter_ is None:
        if check_equal_size(self._X_fit):
            self.barycenter_ = EuclideanBarycenter.fit(self, self._X_fit)
        else:
            resampled_X = TimeSeriesResampler(sz=self._X_fit.shape[1]).fit_transform(self._X_fit)
            self.barycenter_ = EuclideanBarycenter.fit(self, resampled_X)
    if self.max_iter > 0:
        # The function works with vectors, so we need to vectorize barycenter_.
        res = minimize(self._func, self.barycenter_.ravel(),
                       method=self.method, jac=True, tol=self.tol,
                       options=dict(maxiter=self.max_iter, disp=False))
        return res.x.reshape(self.barycenter_.shape)
    else:
        return self.barycenter_
def fit_non_linear_parameter(self, Y):
    """ Estimation of the SNLM theta parameters """
    if isinstance(self.non_linear_parameter_init, tuple):
        non_linear_parameter_value = brute(self.cost_function,
                                           self.non_linear_parameter_init,
                                           args=(Y,))
        non_linear_parameter_init = non_linear_parameter_value
        if self.verbose:  # bare `verbose` was undefined here
            print("Result of the brute optimisation: ")
            print(non_linear_parameter_value)
    else:
        non_linear_parameter_init = self.non_linear_parameter_init
    # use the local init (possibly refined by brute) rather than the attribute,
    # otherwise the brute-force result is silently discarded
    output = minimize(self.opposite_cost_function, non_linear_parameter_init,
                      args=(Y,), method=self.method_name,
                      jac=self.jac, hess=self.hess, hessp=self.hessp,
                      bounds=self.bounds, constraints=self.constraints,
                      tol=self.tol, options=self.options)
    theta = output.x
    return theta
def assess_assets(self, required_labor, mean_wage, mean_equip_price):
    """identify desired mixture of productive assets, i.e. workers, equipment, and wage"""
    down_wage_pressure = self.wage_increment

    def objective(x):
        n_workers, wage, n_equipment = x
        return n_workers * wage + n_equipment * mean_equip_price

    def constraint(x):
        n_workers, wage, n_equipment = x
        equip_labor = min(n_workers * self.labor_per_equipment,
                          n_equipment * self.labor_per_equipment)
        return n_workers * self.labor_per_worker + equip_labor - required_labor

    results = optimize.minimize(objective, (1, 0, 0), constraints=[
        {'type': 'ineq', 'fun': constraint},
        {'type': 'ineq', 'fun': lambda x: x[0]},
        {'type': 'ineq', 'fun': lambda x: x[1] - (mean_wage - down_wage_pressure)},
        {'type': 'ineq', 'fun': lambda x: x[2]}
    ], options={'maxiter': 20})
    n_workers, wage, n_equipment = np.ceil(results.x).astype(int)  # np.int is removed in modern numpy
    return n_workers, wage, n_equipment
def even_points_xyz(npts, use_fib_init=True, method='CG', potential_func=elec_potential_xyz,
                    maxiter=None, callback=None):
    """
    Distribute npts over a sphere and minimize their potential, making them
    "evenly" distributed.

    Starting with the Fibonacci spiral speeds things up by ~factor of 2.
    """
    if use_fib_init:
        # Start with fibonacci spiral guess
        theta, phi = fib_sphere_grid(npts)
    else:
        # Random on a sphere
        theta = np.random.rand(npts)*np.pi*2.
        phi = np.arccos(2.*np.random.rand(npts)-1.)
    x = np.concatenate(thetaphi2xyz(theta, phi))
    # XXX--need to check if this is the best minimizer
    min_fit = minimize(potential_func, x, method=method,  # use the method argument, not a hardcoded 'CG'
                       options={'maxiter': maxiter}, callback=callback)
    x = x02sphere(min_fit.x)
    # Looks like I get the same energy values as https://en.wikipedia.org/wiki/Thomson_problem
    return x
def transfer(self):
    """
    Do the transfer job
    :return:
    """
    self.tool = ST(self.model_dirs, self.content_img, self.style_img, self.device_id)
    self.optimizer = Numeric()
    # construct the optimizer parameters
    method = 'L-BFGS-B'
    args = (self.tool.getNet(), self.tool.getLayers(), self.tool.getFcontent(),
            self.tool.getGstyle(), self.tool.getStyleLayer(),
            self.tool.getContentLayer(), self.tool.getRatio())
    jac = True
    bounds = self.tool.getBounds()
    img0 = self.tool.getImg0()
    options = {'maxiter': self.max_iter, 'disp': self.verbose}
    res = minimize(self.optimizer.computeLossAndGradAll, img0.flatten(),
                   args=args, method=method, jac=jac, bounds=bounds, options=options)
    data = self.tool.net.blobs['data'].data[0, ...]
    self.tool.saveImg(data)
def optimize_weights(ensemble_indices, ens_member_predictions):
    """
    Optimizes the weights of models in the ensemble using the Constrained
    Optimization by Linear Approximation (COBYLA) method to maximize the
    validation accuracy.

    :param ensemble_indices: list of indices of individual models that should
        be included in the optimized ensemble
    :param ens_member_predictions: list of prediction lists of the individual models
    :return: optimal weights, predictions of the optimal ensemble
    """
    def weight_accuracy(weights):
        # Objective function (negative accuracy) to be minimized
        averaged_pred = numpy.average(ens_member_predictions, axis=0, weights=weights)
        return -accuracy(averaged_pred)

    opt_result = opt.minimize(weight_accuracy,
                              numpy.ones(len(ensemble_indices)) / len(ensemble_indices),
                              method='COBYLA',
                              constraints=({'type': 'ineq', 'fun': lambda x: 1 - sum(x)}))
    averaged_pred = numpy.average(ens_member_predictions, axis=0, weights=opt_result['x'])
    print('Optimized ensemble accuracy: ')
    print(accuracy(averaged_pred))
    print('Optimal weights: ' + str(opt_result['x']))
    return opt_result['x'], averaged_pred
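COBYLA only supports inequality constraints, which is why the weight-sum cap above is expressed as 1 - sum(x) >= 0 rather than an equality. A self-contained sketch of the same pattern, with a hypothetical quadratic stand-in for the (negative) accuracy objective:

import numpy as np
from scipy.optimize import minimize

target = np.array([0.5, 0.3, 0.2])  # hypothetical "best" weights

def neg_score(w):
    # stand-in for negative ensemble accuracy
    return np.sum((w - target) ** 2)

n = 3
res = minimize(neg_score, np.ones(n) / n, method='COBYLA',
               constraints=[{'type': 'ineq', 'fun': lambda w: 1 - np.sum(w)},
                            {'type': 'ineq', 'fun': lambda w: np.min(w)}])
print(res.x)  # close to target, subject to sum(w) <= 1 and w >= 0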
def optimize(fun, individual):
    """
    Prepare individual and fun to optimize fun(c | individual)

    :param fun: callable of lambda expression and its constant values.
    :param individual:
    :return: scipy.optimize.OptimizeResult
    """
    f = compile(individual)

    def h(consts=()):
        return fun(f, consts)

    expr, args = to_polish(individual, return_args=True)
    constants = [a for a in args if isinstance(a, Constant)]
    if constants:
        res = minimize(h, np.ones_like(constants))
        individual.consts = res.x
        return res
    else:
        return OptimizeResult(x=(), fun=h(), nfev=1, nit=0, success=True)
def gaussian_function(X, Y):
    '''
    Fit a Gaussian (plus DC offset) to amplitude data by least squares.

    Parameters
    ----------
    X: List of distances
    Y: List of amplitudes

    Returns
    -------
    result : object
        Optimization result returned by scipy.optimize.minimize. See
        scipy.optimize documentation for details.
    '''
    X = np.float64(X)
    Y = np.float64(Y)

    def objective(theta):
        (mu, sigma, scale, dc) = theta
        z = npdf(mu, sigma, X)*scale + dc
        error = np.sum((z - Y)**2)
        return error

    # four parameters (mu, sigma, scale, dc); the original three-element
    # guess [0, 1, 1] would fail to unpack inside objective
    result = minimize(objective, [0, 1, 1, 0])
    return result
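The helper npdf is project-local. A runnable sketch of the same fit, assuming npdf is the normal density, with synthetic data:

import numpy as np
from scipy.optimize import minimize

def npdf(mu, sigma, x):
    # normal density; assumed shape of the project-local helper
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

X = np.linspace(-3, 3, 61)
Y = npdf(0.5, 0.8, X) * 2.0 + 0.1  # synthetic data: mu=0.5, sigma=0.8, scale=2, dc=0.1
res = minimize(lambda th: np.sum((npdf(th[0], th[1], X) * th[2] + th[3] - Y) ** 2),
               [0, 1, 1, 0])
print(res.x)  # should recover approximately [0.5, 0.8, 2.0, 0.1]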
def half_gaussian_function(X, Y):
    '''
    Fit a Gaussian centered at zero (plus DC offset) to amplitude data.

    Parameters
    ----------
    X: List of distances
    Y: List of amplitudes

    Returns
    -------
    sigma, scale, dc : fitted parameters
    '''
    X = np.float64(X)
    Y = np.float64(Y)

    def objective(theta):
        (sigma, scale, dc) = theta
        z = npdf(0, sigma, X)*scale + dc
        error = np.sum((z - Y)**2)
        return error

    result = minimize(objective, [1, 1, 0])
    if not result.success:
        print(result.message)
        raise RuntimeError('Optimization failed: %s' % result.message)
    sigma, scale, dc = result.x
    return sigma, scale, dc
def fitGLM(X, Y, L2Penalty=0.0):
    '''
    Fit the model using gradient descent with hessian

    Parameters
    ----------
    X : matrix
        design matrix
    Y : vector
        binary spike observations
    L2Penalty : scalar, optional
        L2 penalty on features, defaults to 0
    '''
    objective, gradient, hessian = GLMPenaltyL2(X, Y, L2Penalty)
    initial = np.zeros(X.shape[1] + 1)
    M = minimize(objective, initial, jac=gradient, hess=hessian,
                 method='Newton-CG')['x']
    mu, B = M[0], M[1:]
    return mu, B
def gradientglmfit(X, Y, L2Penalty=0.0):
    '''
    mu_hat, B_hat = gradientglmfit(X, Y, L2Penalty=0.0)

    Fit Poisson GLM using gradient descent with hessian

    Parameters
    ----------
    X : np.array
        Covariate matrix Nsamples x Nfeatures
    Y : np.array
        Binary point-process observations, 1D array length Nsamples
    '''
    objective, gradient, hessian = GLMPenaltyL2(X, Y, L2Penalty)
    initial = np.zeros(X.shape[1] + 1)
    M = minimize(objective, initial, jac=gradient, hess=hessian,
                 method='Newton-CG')['x']
    mu_hat, B_hat = M[0], M[1:]
    return mu_hat, B_hat
def train(self, X, y):
    # Make an internal variable for the callback function:
    self.X = X
    self.y = y

    # Make empty list to store costs:
    self.J = []

    params0 = self.N.getParams()
    options = {'maxiter': 200, 'disp': True}
    _res = optimize.minimize(self.costFunctionWrapper, params0, jac=True,
                             method='BFGS', args=(X, y), options=options,
                             callback=self.callbackF)
    self.N.setParams(_res.x)
    self.optimizationResults = _res
def __init__(self, BS, job='min', options={}, **karg):
    '''
    Constructor.

    Parameters
    ----------
    BS : BaseSpace or dict
        * BaseSpace: the basespace on which to compute the grand potential;
        * dict: the initial guess in the basespace.
    job : 'min','der', optional
        * 'min': minimize the grand potential
        * 'der': derivate the grand potential
    options : dict, optional
        The extra parameters to help handle the job.
        * job=='min': the optional parameters passed to scipy.optimize.minimize.
        * job=='der': entry 'nder', the number of derivatives to calculate.
    '''
    self.BS = BS
    self.job = job
    assert job in ('min', 'der')
    assert isinstance(BS, HP.BaseSpace) or isinstance(BS, dict)
    if job == 'min':
        self.options = ({'method': options.get('method', None),
                         'options': options.get('options', None)}
                        if isinstance(BS, dict) else {'mode': '+'})
    if job == 'der':
        self.options = {'mode': '+', 'nder': options.get('nder', 2)}
def optim_rank(cls, pred_tr, pred_te, labels_tr, labels_te, data_tr, data_te, verbose=0):
    "Nelder-Mead partition search, for ranking"
    def lossfunc(ary):
        _loss = -1.0 * model_util.evaluate_class(labels_tr, cls.to_class(pred_tr, ary))
        if verbose == 1:
            print(_loss)
        return _loss

    pred_tr = pred_tr.argsort().argsort() / float(len(pred_tr))
    pred_te = pred_te.argsort().argsort() / float(len(pred_te))
    init = np.zeros(7)
    for i in range(7):
        init[i] = np.where(labels_tr <= (i + 1), 1.0, 0.0).sum() / float(len(labels_tr))
    print(init)
    res = minimize(lossfunc, init, method="Nelder-Mead")
    pred_clss = cls.to_class(pred_te, res.x)
    return pred_clss
def calibrate_rho(self):
    method = 'L-BFGS-B'
    options = dict()
    rho0 = (self.rho_gr0, self.rho_fl0)
    rho_bounds = ((self.rho_gr_min, self.rho_gr_max),
                  (self.rho_fl_min, self.rho_fl_max))
    rho_par = minimize(self.rho_residual, rho0, method=method,
                       bounds=rho_bounds, options=options)
    self.rho_gr = rho_par.x[0]
    self.rho_fl = rho_par.x[1]
    if self.rho_std is None:
        self.rho_std = np.std(self.rho - RP.rho(self.phi, self.rho_gr, self.rho_fl),
                              ddof=0)
    # print "rho_par.success =", rho_par.success
    # print "rho_par.message =", rho_par.message
    # print "rho_par.nit =", rho_par.nit
def calibrate_vp(self):
    method = 'L-BFGS-B'
    options = dict()
    vp0 = (self.km0, self.gm0, self.alpha0, self.kfl0)
    vp_bounds = ((self.km_min, self.km_max),
                 (self.gm_min, self.gm_max),
                 (self.alpha_min, self.alpha_max),
                 (self.kfl_min, self.kfl_max))
    vp_par = minimize(self.vp_residual, vp0, method=method,
                      bounds=vp_bounds, options=options)
    self.Km = vp_par.x[0]
    self.Gm = vp_par.x[1]
    self.alpha = vp_par.x[2]
    self.Kfl = vp_par.x[3]
    if self.vp_std is None:
        self.vp_std = np.std(self.vp - RP.Vp(self.Km, self.Gm, self.phi,
                                             self.alpha, self.Kfl, self.rho),
                             ddof=0)
    # print "vp_par.success =", vp_par.success
    # print "vp_par.message =", vp_par.message
    # print "vp_par.nit =", vp_par.nit
def optimize(p1, p2):
    """Find mean-field state for J/U=p1 and mu/U=p2."""
    init = init_fs(cutoff=CUTOFF, kappa=1.)
    # the bound is crucial for convergence
    res = minimize(
        partial(energy_per_site, hopping=p1, mu=p2),
        init,
        bounds=[[0., 1.]] * CUTOFF,
        constraints=[
            {'type': 'eq', 'fun': constraint_normalization},
        ])
    return res.x
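When constraints are given and no method is specified, minimize selects SLSQP automatically. A self-contained sketch of the same pattern, minimizing a hypothetical energy over a bounded, normalized vector:

import numpy as np
from scipy.optimize import minimize

def energy(f):
    # hypothetical stand-in for energy_per_site: prefers weight on low indices
    return np.sum(f * np.arange(len(f)))

n = 5
res = minimize(energy, np.ones(n) / n,
               bounds=[[0., 1.]] * n,
               constraints=[{'type': 'eq', 'fun': lambda f: np.sum(f) - 1.0}])
print(res.x)  # all weight goes to f[0] under the normalization constraint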
def train(self):
    init_etas = self.eta.flatten()
    cons = self.createConstraintList()
    try:
        res = minimize(self.computeObjectiveFunction, init_etas,
                       jac=self.computeMatrixGradient, constraints=cons,
                       method='SLSQP',
                       options={'disp': self.verbose, 'maxiter': self.max_iter})
    except:
        return False
    self.eta = res.x.reshape(self.n_tasks, -1)
    if self.verbose:
        print("Results of this run!")
        print("\t ETA", self.eta)
        print("\t Training ACC", self.predictAndGetAccuracy(self.train_tasks))
    return True
def _gamma_update_core(self):
    gamma = self.gamma
    err = check_grad(gamma_fullsum_func, gamma_fullsum_grad, gamma,
                     self.node_vec, self.eventmemes, self.etimes, self.T,
                     self.mu, self.alpha, self.omega, self.W, self.beta,
                     self.kernel_evaluate, self.K_evaluate)
    print('gradient error ', err)
    # with jac=True, the function passed here must return (value, gradient)
    optout = minimize(gamma_fullsum_grad, gamma,
                      args=(self.node_vec, self.eventmemes, self.etimes,
                            self.T, self.mu, self.alpha, self.omega, self.W,
                            self.beta, self.kernel_evaluate, self.K_evaluate),
                      method='BFGS', jac=True, options={'disp': True})
    return float(optout.x)
def get_next_hyperparameters(self, optimiser):
    best_params = {}
    for i in range(self.n_restarts_optimizer):
        start_vals = np.random.uniform(np.array(self.bounds_arr)[:, 0],
                                       np.array(self.bounds_arr)[:, 1])
        # minimized = minimize(lambda x: -1 * optimiser.predict(x), start_vals, bounds=, method='L-BFGS-B')
        if self.method == 'expected_improvement':
            minimized = minimize(lambda x: self.expected_improvement(optimiser, x),
                                 start_vals, bounds=self.bounds_arr, method='L-BFGS-B')
        elif self.method == 'upper_confidence_bound':
            minimized = minimize(lambda x: self.upper_confidence_bound(optimiser, x),
                                 start_vals, bounds=self.bounds_arr, method='L-BFGS-B')
        elif self.method == 'probability_of_improvement':
            minimized = minimize(lambda x: self.probability_of_improvement(optimiser, x),
                                 start_vals, bounds=self.bounds_arr, method='L-BFGS-B')
        self.success = minimized['success']
        if minimized['success']:
            new_params = {}
            for hp, v in zip(self.hyperparams, minimized['x']):
                if hp.param_type == 'integer':
                    new_params[hp.name] = int(round(v))
                else:
                    new_params[hp.name] = v
            return new_params
        else:
            self.success = False
            # assert False, 'Optimiser did not converge!'
            warnings.warn('Optimiser did not converge! Continuing with randomly sampled data...')
            self.non_convergence_count += 1
            return {hp.name: v for hp, v in zip(self.hyperparams, start_vals)}
def autofocus(self, field_index=0):
    '''
    Adjusts the W020 aberration coefficient to maximize the MTF at a given
    field index.

    Args:
        field_index (`int`): index of the field to maximize MTF at.

    Returns:
        `Lens`: self.
    '''
    coefs = self.aberrations.copy()
    try:
        # try to access the W020 aberration
        float(coefs['W020'])
    except KeyError:
        # if it is not set, make it 0
        coefs['W020'] = 0.0

    def opt_fcn(self, coefs, w020):
        # shift the defocus term appropriately
        abers = coefs.copy()
        abers['W020'] += w020
        pupil = Seidel(**abers, epd=self.epd, samples=self.samples,
                       h=self.fields[field_index])
        # cost value (to be minimized) is RMS wavefront
        return pupil.rms

    opt_fcn = partial(opt_fcn, self, coefs)
    new_defocus = minimize(opt_fcn, x0=0, method='Powell')
    coefs['W020'] += float(new_defocus['x'])
    self.aberrations = coefs.copy()
    return self

####### analytically setting aberrations -----------------------------------

####### data generation ----------------------------------------------------
def _spherical_defocus_from_monochromatic_mtf(lens, frequencies, mtf_s, mtf_t):
    '''
    Uses nonlinear optimization to set the W020, W040, W060, and W080
    coefficients in a lens model based on MTF measurements taken on the
    optical axis.

    Args:
        lens (`Lens`): a lens object.
        frequencies (`iterable`): a set of frequencies the provided MTF values
            correspond to.
        mtf_s (`iterable`): a set of sagittal MTF measurements of equal length
            to the frequencies argument.
        mtf_t (`iterable`): a set of tangential MTF measurements of equal
            length to the frequencies argument.

    Returns:
        `Lens`: a new lens object with its aberrations field modified with new
        spherical coefficients.
    '''
    work_lens = lens.clone()
    fcn = partial(_spherical_cost_fcn_raw, frequencies, mtf_s, mtf_t, work_lens)
    results = minimize(fcn, [0, 0, 0, 0], method='Powell')
    W020, W040, W060, W080 = results['x']
    work_lens.aberrations['W020'] = W020
    work_lens.aberrations['W040'] = W040
    work_lens.aberrations['W060'] = W060
    work_lens.aberrations['W080'] = W080
    return work_lens
def _spherical_cost_fcn_raw(frequencies, truth_s, truth_t, lens, abervalues):
    '''
    Cost function for spherical aberration fitting; partial() should be
    applied to bind the first four arguments before handing it to
    scipy.optimize.minimize.

    abervalues - array of [W020, W040, W060, W080]
    '''
    pupil = Seidel(epd=lens.epd, samples=lens.samples,
                   W020=abervalues[0], W040=abervalues[1],
                   W060=abervalues[2], W080=abervalues[3])
    psf = PSF.from_pupil(pupil, efl=lens.efl)
    mtf = MTF.from_psf(psf)
    synth_t = mtf.exact_polar(frequencies, 0)
    synth_s = mtf.exact_polar(frequencies, 90)

    truth = np.stack((truth_s, truth_t))
    synth = np.stack((synth_s, synth_t))
    return ((truth - synth) ** 2).sum()
def updateLocation(self, r0):
    lb = np.r_[-6., -6., -5.]
    ub = np.r_[6., 6., -0.1]
    # Sol = op.minimize(self.computeMisfit, r0, method='Powell', options={'xtol': 1e-5})
    Sol = op.root(self.computeVecFcn, r0, method='lm', options={'xtol': 1e-5})
    r0 = Sol.x
    return r0, Sol

##############################################################################
#                       DEFINE TEMTADSproblem class
##############################################################################
def updateLocation(self, r0):
    # Sol = op.minimize(self.computeMisfit, r0, method='dogleg')
    Sol = op.root(self.computeVecFcn, r0, method='lm', options={'xtol': 1e-5})
    r0 = Sol.x
    return r0, Sol

##############################################################################
#                       DEFINE MPVproblem class
##############################################################################
def updateLocation(self, r0):
    # Sol = op.minimize(self.computeMisfit, r0, method='dogleg')
    Sol = op.root(self.computeVecFcn, r0, method='lm', options={'xtol': 1e-5})
    r0 = Sol.x
    return r0, Sol
def get(self, x_des, seed=(), bounds=()):
    """
    Get the IK by minimization

    :param x_des: desired task space pose [[x, y, z], [x, y, z, w]] or
        flattened [x, y, z, x, y, z, w]
    :param seed: ['s0', 's1', 'e0', 'e1', 'w0', 'w1', 'w2']
    :param bounds: [(min, max), (min, max), (min, max), ... for each joint]
    :return: (bool, joints)
    """
    if len(bounds) != len(self._joints):
        bounds = [(-pi, pi) for j in self._joints]
    if len(seed) == 0:
        seed = [0. for j in self._joints]
    args = [element for component in x_des for element in component] if len(x_des) == 2 else x_des
    result = minimize(self.cost_ik, seed, args=[args], bounds=bounds, method='L-BFGS-B')
    return result.success, result.x
def irradiance_rescale(irrad, modeled_irrad):
    '''
    Attempts to rescale modeled irradiance to match measured irradiance on
    clear days.

    Parameters
    ----------
    irrad: Pandas Series (numeric)
        measured irradiance time series
    modeled_irrad: Pandas Series (numeric)
        modeled irradiance time series

    Returns
    -------
    Pandas Series (numeric): rescaled modeled irradiance time series
    '''
    def _rmse(fact):
        rescaled_modeled_irrad = fact * modeled_irrad
        csi = irrad / rescaled_modeled_irrad
        filt = (csi >= 0.8) & (csi <= 1.2)
        rmse = np.sqrt(((rescaled_modeled_irrad[filt] - irrad[filt]) ** 2.0).mean())
        return rmse

    guess = np.percentile(irrad.dropna(), 90) / np.percentile(modeled_irrad.dropna(), 90)
    min_result = minimize(_rmse, guess, method='Nelder-Mead')
    factor = min_result['x'][0]
    out_irrad = factor * modeled_irrad
    return out_irrad
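Nelder-Mead is a handy derivative-free choice when the objective, like the filtered RMSE above, is not smooth in its parameter. A toy sketch of the same one-parameter rescaling with synthetic arrays (no pandas needed):

import numpy as np
from scipy.optimize import minimize

measured = np.array([100., 200., 300., 400.])
modeled = measured / 1.25  # modeled series off by a constant factor

def rmse(fact):
    return np.sqrt(np.mean((fact * modeled - measured) ** 2))

res = minimize(rmse, x0=1.0, method='Nelder-Mead')
print(res.x[0])  # ~1.25, the factor needed to match the measured series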
def invert_bfgs(gen_model, invert_model, ftr_model, im, z_predict=None, npx=64):
    _f, z = invert_model
    nz = gen_model.nz
    if z_predict is None:
        z_predict = np_rng.uniform(-1., 1., size=(1, nz))
    else:
        z_predict = floatX(z_predict)
    z_predict = np.arctanh(z_predict)
    im_t = gen_model.transform(im)
    ftr = ftr_model(im_t)
    prob = optimize.minimize(f_bfgs, z_predict, args=(_f, im_t, ftr),
                             tol=1e-6, jac=True, method='L-BFGS-B',
                             options={'maxiter': 200})
    print('n_iters = %3d, f = %.3f' % (prob.nit, prob.fun))
    z_opt = prob.x
    z_opt_n = floatX(z_opt[np.newaxis, :])
    [f_opt, g, gx] = _f(z_opt_n, im_t, ftr)
    gx = gen_model.inverse_transform(gx, npx=npx)
    z_opt = np.tanh(z_opt)
    return gx, z_opt, f_opt
def _solve_lr(vlines, w, l, opt_options=OPTIMIZATION_OPTIONS,
              opt_method=OPTIMIZATION_METHOD, limit=0.3):
    """
    Solve for the left and right edge displacement.

    This routine estimates the amount to move the upper left and right corners
    of the image in a horizontal direction in order to make the given lines
    parallel and vertical.

    :param vlines: Lines that we want to map to vertical lines.
    :param w: The width of the image
    :param l: The height of the image
    :param opt_options: Optimization options passed into `minimize`
    :param opt_method: The optimization method.
    :param limit: A limit on the amount of displacement -- beyond this and we
        will assume failure.
    :return: (dl, dr), the horizontal displacement of the left and right corners.
    """
    if len(vlines) == 0:
        return 0, 0

    a = np.append(vlines[:, 0, :], np.ones((len(vlines), 1)), axis=1)
    b = np.append(vlines[:, 1, :], np.ones((len(vlines), 1)), axis=1)

    def objective(x):
        dl, dr = x
        Hv = np.linalg.inv(H_v(dl, dr, w, l))
        return np.sum(np.abs(Hv[0, :].dot(a.T) / Hv[2, :].dot(a.T) -
                             Hv[0, :].dot(b.T) / Hv[2, :].dot(b.T)))

    res = minimize(objective, (0., 0.), options=opt_options, method=opt_method)
    dl, dr = res.x

    # Give up if the solution is not plausible (this indicates that the
    # 'vlines' are too noisy)
    if abs(dl) > limit * w:
        dl = 0
    if abs(dr) > limit * w:
        dr = 0
    return dl, dr
def _solve_ud(hlines, dl, dr, w, l, opt_options=OPTIMIZATION_OPTIONS,
              opt_method=OPTIMIZATION_METHOD, limit=0.3):
    """
    Solve for the top and bottom edge displacement.

    This routine estimates the amount to move the upper left and lower left
    corners of the image in a vertical direction in order to make the given
    lines parallel and horizontal.

    :param hlines: Lines that we want to map to horizontal lines.
    :param w: The width of the image
    :param l: The height of the image
    :param opt_options: Optimization options passed into `minimize`
    :param opt_method: The optimization method.
    :param limit: A limit on the amount of displacement -- beyond this and we
        will assume failure. It is expressed as a fraction of the image height.
    :return: (du, dd), the vertical displacement of the top and bottom corners.
    """
    if len(hlines) == 0:
        return 0, 0

    a = np.append(hlines[:, 0, :], np.ones((len(hlines), 1)), axis=1)
    b = np.append(hlines[:, 1, :], np.ones((len(hlines), 1)), axis=1)

    Hv = np.linalg.inv(H_v(dl, dr, w, l))
    a = Hv.dot(a.T).T
    b = Hv.dot(b.T).T

    def objective(x):
        du, dd = x
        Hh = np.linalg.inv(H_h(du, dd, w, l))
        return np.sum(np.abs(Hh[1, :].dot(a.T) / Hh[2, :].dot(a.T) -
                             Hh[1, :].dot(b.T) / Hh[2, :].dot(b.T)))

    res = minimize(objective, (0., 0.), options=opt_options, method=opt_method)
    du, dd = res.x

    # Give up if the result is not plausible. We are better off not warping.
    if abs(du) > limit * l:
        du = 0
    if abs(dd) > limit * l:
        dd = 0
    return du, dd
def calc_all_sigams(data, sigmas):
    batchs = 128
    num_of_bins = 8
    # bins = np.linspace(-1, 1, num_of_bins).astype(np.float32)
    # bins = stats.mstats.mquantiles(np.squeeze(data.reshape(1, -1)), np.linspace(0, 1, num=num_of_bins))
    # data = bins[np.digitize(np.squeeze(data.reshape(1, -1)), bins) - 1].reshape(len(data), -1)
    batch_points = np.rint(np.arange(0, data.shape[0] + 1, batchs)).astype(dtype=np.int32)
    I_XT = []
    num_of_rand = min(800, data.shape[1])
    for sigma in sigmas:
        I_XT_temp = 0
        for i in range(0, len(batch_points) - 1):
            new_data = data[batch_points[i]:batch_points[i + 1], :]
            rand_indexs = np.random.randint(0, new_data.shape[1], num_of_rand)
            new_data = new_data[:, :]
            N = new_data.shape[0]
            d = new_data.shape[1]
            diff_mat = np.linalg.norm((new_data[:, np.newaxis, :] - new_data), axis=2)
            s0 = 0.2
            # TODO - add leave-one-out validation
            res = minimize(optimiaze_func, s0, args=(diff_mat, d, N),
                           method='nelder-mead',
                           options={'xtol': 1e-8, 'disp': False, 'maxiter': 6})
            eta = res.x
            diff_mat0 = -0.5 * (diff_mat / (sigma ** 2 + eta ** 2))
            diff_mat1 = np.sum(np.exp(diff_mat0), axis=0)
            diff_mat2 = -(1.0 / N) * np.sum(np.log2((1.0 / N) * diff_mat1))
            I_XT_temp += diff_mat2 - d * np.log2((sigma ** 2) / (eta ** 2 + sigma ** 2))
        I_XT_temp /= len(batch_points)
        I_XT.append(I_XT_temp)
    sys.stdout.flush()
    return I_XT
def ml_estimation(self):
    bounds = ((0, 1), (0, 2)) * self.n_cues
    r = minimize(self.neg_log_likelihood, [0.1, 0.1] * self.n_cues,
                 method='L-BFGS-B', bounds=bounds)
    return r
def _Phi(u, wv, maxiter,
         rho, X, y1, y2, filt0, alg1, alg2, hparams0, hparams1, hparams2):
    # Phi(u) = -rho*max_w -f1(u,w) + max_v -f2(u,v)
    #        = -rho*Phi1(u) + Phi2(u), where
    # Phi1(u) = max_w -f1,  Phi2(u) = max_v -f2
    w, v = wv

    # Phi1(u) = max_w -f_util(u,w) = -min_w f_util(u,w)
    G = filt0.g(u, X, hparams0)
    res = minimize(alg1.f, w, args=(G, y1, hparams1),
                   method='BFGS', jac=alg1.dfdv,
                   options={'disp': False, 'maxiter': maxiter})
    w = res.x
    Phiu1 = -res.fun

    # Phi2(u) = max_v -f_priv(u,v) = -min_v f_priv(u,v)
    res = minimize(alg2.f, v, args=(G, y2, hparams2),
                   method='BFGS', jac=alg2.dfdv,
                   options={'disp': False, 'maxiter': maxiter})
    v = res.x
    Phiu2 = -res.fun

    Phiu = -rho*Phiu1 + Phiu2 + .5*hparams0['l']*(u**2).sum()

    assert not np.isnan(w).any()
    assert not np.isnan(v).any()
    assert not np.isnan(Phiu)
    return (Phiu, (w, v))
def _Phi_lin(u, wv, q, maxiter,
             rho, X, y1, y2, filt0, alg1, alg2, hparams0, hparams1, hparams2):
    w, v = wv
    d = hparams1['d']
    N = X.shape[1]

    # dgdu: u.size x d x Nsub
    # If dgdu is too large, subsample X so that dgdu stays under the memory cap
    MAX_MEMORY = 8.*1024*1024*1024  # 8GB
    Nsub = round(MAX_MEMORY/(u.size*d*8.))
    Nsub = min(Nsub, N)
    ind = np.random.choice(range(N), size=(Nsub,), replace=False)
    tG = filt0.g(u, X[:, ind], hparams0)
    dgdu = filt0.dgdu(u, X[:, ind], hparams0).reshape((u.size, d*Nsub))

    # Phi_lin(u) = -rho*max_w -f1(u,w) + max_v -f2(u,v)
    #            = -rho*Phi1lin(u) + Phi2lin(u), where
    # Phi1lin(u) = max_w -f1lin,  Phi2lin(u) = max_v -f2lin
    res = minimize(alg1.flin, w, args=(q, tG, y1[ind], dgdu, hparams1),
                   method='BFGS', jac=alg1.dflindv,
                   options={'disp': False, 'maxiter': maxiter})
    w = res.x
    Phiu1 = -res.fun

    res = minimize(alg2.flin, v, args=(q, tG, y2[ind], dgdu, hparams2),
                   method='BFGS', jac=alg2.dflindv,
                   options={'disp': False, 'maxiter': maxiter})
    v = res.x
    Phiu2 = -res.fun

    Phiu = -rho*Phiu1 + Phiu2 + .5*hparams0['l']*(u**2).sum()

    assert not np.isnan(w).any()
    assert not np.isnan(v).any()
    assert not np.isnan(Phiu)
    return (Phiu, (w, v))