The following code examples, extracted from open-source Python projects, demonstrate how to use scipy.optimize.brute().
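Before the project examples, a minimal self-contained sketch of the call may help (the quadratic objective and the bounds below are illustrative only): brute() evaluates the function over a grid built from the ranges argument and returns the best grid point, optionally polished by a finish optimizer.

import numpy as np
from scipy.optimize import brute

def objective(p):
    # toy quadratic with its minimum at (1.0, -2.0)
    x, y = p
    return (x - 1.0) ** 2 + (y + 2.0) ** 2

# Each entry of `ranges` is either a (low, high) pair, sampled at Ns points,
# or a slice(low, high, step) object.
x0 = brute(objective, ((-3, 3), (-3, 3)), Ns=21, finish=None)
print(x0)  # the grid point closest to (1.0, -2.0)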
def optimize_brute(params_to_optimize, distance_function):
    lower_bound = np.zeros(len(params_to_optimize), dtype=float)
    upper_bound = np.ones(len(params_to_optimize), dtype=float)

    # introduce random shift (0, grid step), at most 10% of a step
    number_of_steps = 6
    step = (upper_bound - lower_bound) / float(number_of_steps)
    random_shift = np.array([random.random() * 1 / 10 for _ in range(len(lower_bound))], dtype=float)
    lower_bound += random_shift * step
    upper_bound += random_shift * step

    start = time.clock()
    result = opt.brute(distance_function, zip(lower_bound, upper_bound),
                       finish=None, Ns=number_of_steps, disp=True, full_output=True)
    elapsed = time.clock() - start

    logger.debug("Opt finished: " + str(result[:2]) + " Elapsed[s]: " + str(elapsed))
    return result[0], result[1]
def optimize(method_name, encoded_params, distance_function):
    initial_distance = distance_function(encoded_params)
    logger.debug("Initial parameters distance is (%f)." % initial_distance)

    if method_name == 'brute':
        best_params_encoded, distance = optimize_brute(encoded_params, distance_function)
    elif method_name == 'brutemaxbasin' or method_name == 'superfit':
        best_params_encoded, distance = optimize_brute(encoded_params, distance_function)
        logger.debug("Best grid parameters distance is (%f)." % distance)
        best_params_encoded, distance = optimize_basinhopping(best_params_encoded, distance_function)
    else:
        raise Exception("No such optimization method.")

    if initial_distance <= distance:
        logger.debug("Initial parameters (%f) are not worse than the best found (%f)." % (initial_distance, distance))
        return encoded_params, initial_distance
    else:
        return best_params_encoded, distance
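The 'brutemaxbasin' branch above chains a coarse brute() grid search with a basin-hopping refinement. Here is a self-contained sketch of that two-stage pattern with an illustrative objective and settings (optimize_basinhopping itself is project code and not shown here):

import numpy as np
from scipy import optimize

def f(x):
    return np.sin(3 * x[0]) + x[0] ** 2 + np.cos(2 * x[1]) + x[1] ** 2

# stage 1: coarse global grid search
x_grid = optimize.brute(f, ((-2, 2), (-2, 2)), Ns=10, finish=None)
# stage 2: refine starting from the best grid point
res = optimize.basinhopping(f, x_grid, niter=50)
best_x, best_f = res.x, res.fun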
def optimize_brute(params_to_optimize, distance_function):
    lower_bound = params_to_optimize - np.maximum(np.abs(params_to_optimize), 0.1)
    upper_bound = params_to_optimize + np.maximum(np.abs(params_to_optimize), 0.1)

    # introduce random shift (0, grid step), at most 20% of a step
    number_of_steps = 5
    step = (upper_bound - lower_bound) / float(number_of_steps)
    random_shift = np.array([random.random() * 2 / 10 for _ in range(len(lower_bound))])
    lower_bound += random_shift * step
    upper_bound += random_shift * step

    logger.debug("Search range: " + str(zip(lower_bound, upper_bound)))

    result = opt.brute(distance_function, zip(lower_bound, upper_bound),
                       Ns=number_of_steps, disp=True, finish=None, full_output=True)
    logger.debug("Opt finished:" + str(result[:2]))
    return result[0], result[1]
def bruteranges(step, radius, center):
    """
    Auxiliary function for brute-force exploration.
    Prepares the "ranges" parameter to be passed to the brute-force optimizer.
    In other words, we draw a cube ...
    radius is an int saying how many steps to go left and right of center.
    center is an array of the centers; it can be of any length.
    You make 2*radius + 1 steps in each dimension, so radius=2 means 5 steps,
    thus 125 calls for 4 curves.
    """
    low = -step * radius
    up = step * (radius + 1)

    if center.shape == ():
        c = float(center)
        return [((c + low), (c + up), step)]
    else:
        return [((c + low), (c + up), step) for c in center]
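Ranges built this way are (low, high, step) triples, which brute() turns into slice objects with a real step, giving exactly 2*radius + 1 grid points per parameter. A hedged sketch of feeding such ranges to brute() (the objective and numbers are made up):

import numpy as np
from scipy.optimize import brute

def f(p):
    return (p[0] - 1.2) ** 2 + (p[1] + 0.4) ** 2

step, radius = 0.25, 2
center = np.array([1.0, 0.0])
# same construction as bruteranges above
ranges = [((c - step * radius), (c + step * (radius + 1)), step) for c in center]
best = brute(f, ranges, finish=None)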
def fit_non_linear_parameter(self, Y):
    """ Estimation of the SNLM theta parameters """

    if isinstance(self.non_linear_parameter_init, tuple):
        non_linear_parameter_value = brute(self.cost_function, self.non_linear_parameter_init, args=(Y,))
        non_linear_parameter_init = non_linear_parameter_value
        if self.verbose is True:
            print("Result of the brute optimisation: ")
            print(non_linear_parameter_value)
    else:
        non_linear_parameter_init = self.non_linear_parameter_init

    # refine the brute-force estimate with a local optimizer
    output = minimize(self.opposite_cost_function, non_linear_parameter_init, args=(Y,),
                      method=self.method_name, jac=self.jac, hess=self.hess, hessp=self.hessp,
                      bounds=self.bounds, constraints=self.constraints, tol=self.tol,
                      options=self.options)
    theta = output.x
    return theta
def plot_cost_function(self, Y, ranges, coef_display=1):
    x0, fval, grid, Jout = brute(self.cost_function, ranges, args=(Y,), full_output=True)
    Ndim, N1, N2 = grid.shape

    if Ndim == 1:
        print("plot")
    if Ndim == 2:
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        surf = ax.plot_surface(coef_display * grid[0], coef_display * grid[1], Jout,
                               cmap=plt.get_cmap("hot"))
        plt.xlabel("Parameter1")
        plt.ylabel("Parameter2")
    if Ndim > 2:
        raise ValueError("Too many dimensions (this method can only plot <= 2 dimensions)")
    return x0
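The four-value unpacking above relies on full_output=True. A minimal sketch of the returned shapes for a two-parameter problem (illustrative objective; finish disabled so the result stays on the grid):

import numpy as np
from scipy.optimize import brute

def f(p):
    return p[0] ** 2 + p[1] ** 2

x0, fval, grid, Jout = brute(f, ((-1, 1), (-1, 1)), Ns=11,
                             full_output=True, finish=None)
# grid has shape (2, 11, 11) -- one 11x11 coordinate array per parameter --
# and Jout has shape (11, 11), so grid[0], grid[1] and Jout plot directly
# as a surface, as in plot_cost_function above.
print(grid.shape, Jout.shape, x0, fval)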
def run_multiprocess(image, gt_snakes, precision=None, avg_cell_diameter=None, method='brute',
                     initial_params=None, background_image=None, ignore_mask=None):
    """
    :param gt_snakes: gt snakes label image
    :param precision: if initial_params is None then it is used to calculate parameters
    :param avg_cell_diameter: if initial_params is None then it is used to calculate parameters
    :param method: optimization engine
    :param initial_params: overrides precision and avg_cell_diameter
    :return:
    """
    logger.info("Ranking parameter fitting (mp) started...")

    if initial_params is None:
        params = default_parameters(segmentation_precision=precision, avg_cell_diameter=avg_cell_diameter)
    else:
        params = copy.deepcopy(initial_params)

    avg_cell_diameter = params["segmentation"]["avgCellDiameter"]

    start = time.clock()
    best_params, distance = multiproc_optimize((image, background_image, ignore_mask), gt_snakes, method, params)
    best_params_full = PFRankSnake.merge_rank_parameters(params, best_params)
    stop = time.clock()

    logger.debug("Best: \n" + "\n".join([k + ": " + str(v) for k, v in sorted(best_params.iteritems())]))
    logger.debug("Time: %d" % (stop - start))
    logger.info("Ranking parameter fitting (mp) finished with best score %f" % distance)
    return best_params_full, best_params, distance
def multiproc_optimize(images, gt_snakes, method='brute', initial_params=None):
    return general_multiproc_fitting(run_wrapper, images, gt_snakes, method, initial_params)
def run(image, gt_snakes, precision, avg_cell_diameter, method='brute', initial_params=None,
        background_image=None, ignore_mask=None):
    """
    :param image: input image
    :param gt_snakes: gt snakes label image
    :param precision: if initial_params is None then it is used to calculate parameters
    :param avg_cell_diameter: if initial_params is None then it is used to calculate parameters
    :param method: optimization engine
    :param initial_params: overrides precision and avg_cell_diameter
    :return:
    """
    global best_3, calculations

    logger.info("Parameter fitting started...")
    if initial_params is None:
        params = default_parameters(segmentation_precision=precision, avg_cell_diameter=avg_cell_diameter)
    else:
        params = copy.deepcopy(initial_params)

    images = ImageRepo(image, params)
    images.background = background_image
    if ignore_mask is not None:
        images.apply_mask(ignore_mask)

    start = time.clock()
    best_3 = []
    calculations = 0
    best_arg, best_score = optimize(method, gt_snakes, images, params, precision, avg_cell_diameter)
    best_params = pf_parameters_decode(best_arg, get_size_weight_list(params))
    stop = time.clock()

    logger.debug("Best: \n" + "\n".join([k + ": " + str(v) for k, v in sorted(best_params.iteritems())]))
    logger.debug("Time: %d" % (stop - start))
    logger.info("Parameter fitting finished with best score %f" % best_score)
    return PFSnake.merge_parameters(params, best_params), best_arg, best_score
def fit(self, T_prim, delta_mag, delta_mag_error, T_range=(3500, 9000)):
    """
    Fit for the companion temperature given a primary temperature and delta-magnitude measurement

    Parameters:
    ===========
    - T_prim: float
        The primary star temperature (in Kelvin)

    - delta_mag: float
        The magnitude difference between the primary and companion

    - delta_mag_error: float
        Uncertainty in the magnitude difference

    - T_range: tuple of size 2
        The lower and upper bounds on the companion temperature.
    """
    def lnlike(T2, T1, dm, dm_err):
        dm_synth = self.__call__(T2) - self.__call__(T1)
        logging.debug('T2 = {}: dm = {}'.format(T2, dm_synth))
        return 0.5 * (dm - dm_synth)**2 / dm_err**2

    T_sec = brute(lnlike, [T_range], args=(T_prim, delta_mag, delta_mag_error))
    return T_sec
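A hedged, stand-alone sketch of the same pattern, a one-dimensional range plus args (the magnitude model below is a made-up stand-in for the spectral-library call above):

import numpy as np
from scipy.optimize import brute

def lnlike(T2, T1, dm, dm_err):
    dm_synth = 5.0 * np.log10(T1 / T2)  # toy delta-magnitude model
    return 0.5 * (dm - dm_synth) ** 2 / dm_err ** 2

# a single (low, high) pair must still be wrapped in a sequence, and the
# extra arguments after T2 are forwarded through args=(...)
T_sec = brute(lnlike, [(3500, 9000)], args=(6000.0, 0.5, 0.1), finish=None)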
def initialize_opt(self, fn=None, grid=False, Ns=None):
    """
    Returns initial values for the optimization

    Parameters
    ----------
    fn : function
        Function over which grid search takes place
    grid : bool
        Whether to return initialization values from grid search
    Ns : int
        Number of points per axis over which to evaluate during grid search

    Returns
    -------
    x0 : ndarray
        1 X N vector of initial values for each parameter
    """
    if grid is False:
        # Generate initial values
        x0 = np.random.normal(loc=0, scale=1, size=self.nparams)
    else:
        param_ranges = ()
        for k in range(self.nparams):
            param_ranges = param_ranges + ((-3, 3),)
        res_brute = brute(fn, param_ranges, Ns=Ns, full_output=True)
        if res_brute[0] is not None:
            x0 = res_brute[0]
        else:
            print(' Grid Initialization returned None.')
            x0 = np.random.normal(loc=0, scale=1, size=self.nparams)
    return x0
def brute_search(self, weights, metaParamNames=list(),
                 objective=lambda x: x.loss,
                 negative_objective=False,
                 stochastic_objective=True,
                 stochastic_samples=25,
                 stochastic_precision=0.01):
    """
    Uses brute-force grid search to find optimal simulation hyperparameters

    Note: you want runs passed to the solver to have __no randomness__;
    solving a stochastic simulation will cause problems for the solver

    Arguments
    ---------
    weights: list<float>
        weights to be optimized on
    metaParamNames: list<string>
        list of names of arguments to be optimized on;
        the index respects the weights argument above, and the strings
        should be the same as in a simulation run input.
        weights and metaParamNames together would form the algoParams
        dict in a normal simulation
    objective: function(Market) -> float
        objective function; by default the number of matches.
        can be changed to loss, for example, by "objective=lambda x: x.loss"

    Returns
    -------
    np.array of weights where the function is minimized
    (maximized if negative_objective=True)
    """
    def this_run(w):
        # note - sign here
        # TODO fix negative sign
        sign = 1 if negative_objective else -1
        if stochastic_objective:
            result = 0
            # If the objective is stochastic, make Monte Carlo draws & average
            for i in range(stochastic_samples):
                result += sign * self.single_run(w, metaParamNames=metaParamNames,
                                                 objective=objective)
            # Average Monte Carlo draws
            result = result / stochastic_samples
            # Tune precision for convergence
            result = int(result / stochastic_precision) * stochastic_precision
            return result
        else:
            return sign * self.single_run(w, metaParamNames=metaParamNames,
                                          objective=objective)

    res = optimize.brute(this_run, weights, full_output=True, disp=True,
                         finish=optimize.fmin)
    return res[0]
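Unlike the finish=None examples elsewhere on this page, the call above lets optimize.fmin polish the best grid point, so the returned weights need not lie on the grid. A small illustrative sketch:

from scipy import optimize

def f(w):
    return (w[0] - 0.3) ** 2 + (w[1] + 0.7) ** 2

res = optimize.brute(f, ((-1, 1), (-1, 1)), Ns=5, full_output=True,
                     finish=optimize.fmin)
w_best, f_best = res[0], res[1]  # refined off-grid minimum and its value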
def gps_to_pixel(gpsmethod, gps_coord, bounds):
    """
    Function for finding the pixel coordinate associated with a GPS coordinate
    @param gpsmethod: GPS coordinate mapping function from above
    @param gps_coord: GPS coordinate to match, as (lat,lon)
    @param bounds: Pixel bounds to search within ((y_low,y_high),(x_low,x_high))
    @return Nearest integer pixel value
    """
    # map_ops great circle distance
    func = lambda imgxy: wgs84_distance(gpsmethod(imgxy[0], imgxy[1]),
                                        (gps_coord[0], gps_coord[1]))
    res = brute(func, bounds)
    # return np.array([int(round(cc)) for cc in res])
    return np.array(res)
def brute_search(data):
    obj_func = partial(objective_function, data)
    # Back in graduate school, Professor LeCun said in class that ARIMA models
    # typically only need a max parameter of 5, so I doubled it just in case.
    upper_bound_AR = 10
    upper_bound_I = 10
    upper_bound_MA = 10
    grid_not_found = True
    while grid_not_found:
        try:
            if upper_bound_AR < 0 or upper_bound_I < 0 or upper_bound_MA < 0:
                # the shrinking grid is exhausted; fall through to the fallback below
                grid_not_found = False
                break
            grid = (
                slice(1, upper_bound_AR, 1),
                slice(1, upper_bound_I, 1),
                slice(1, upper_bound_MA, 1)
            )
            order = brute(obj_func, grid, finish=None)
            return order
        except Exception as e:
            # shrink the offending dimension and retry
            error_string = str(e)
            if "MA" in error_string:
                upper_bound_MA -= 1
            elif "AR" in error_string:
                upper_bound_AR -= 1
            else:
                upper_bound_I -= 1

    # assuming we don't ever hit a reasonable set of upper_bounds,
    # it's pretty safe to assume this will work
    try:
        grid = (
            slice(1, 2, 1),
            slice(1, 2, 1),
            slice(1, 2, 1)
        )
        order = brute(obj_func, grid, finish=None)
        return order
    except:
        # however, we don't always meet invertibility conditions,
        # so here we explicitly test whether a single MA or AR process
        # is a better fit; whichever has the lower (better) AIC score
        # determines the model order we return
        try:
            model_ar_one = sm.tsa.ARIMA(data, (1, 0, 0)).fit(disp=0)
            model_ma_one = sm.tsa.ARIMA(data, (0, 0, 1)).fit(disp=0)
        except:
            return None
        if model_ar_one.aic < model_ma_one.aic:
            return (1, 0, 0)
        else:
            return (0, 0, 1)
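The slice-based grid above steps brute() over integer (p, d, q) ARIMA orders. A minimal sketch of that pattern with a hypothetical stand-in objective (the real code scores orders via an ARIMA fit):

from scipy.optimize import brute

def objective(order):
    p, d, q = order
    # hypothetical score with its minimum at (2, 1, 1)
    return (p - 2) ** 2 + (d - 1) ** 2 + (q - 1) ** 2

grid = (slice(1, 4, 1), slice(1, 3, 1), slice(1, 3, 1))
order = brute(objective, grid, finish=None)  # -> array([2., 1., 1.])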
def __printfitstart(self, n_iterations, c_limit, algorithm, init_grid, n_reinit, grid_reinit,
                    dofull, early_stopping, verbose):
    """
    Prints an information banner in the console when fitting starts

    Parameters
    ----------
    n_iterations : int
        Maximum number of iterations to allow.
    c_limit : float
        Threshold at which convergence is determined
    algorithm : {'BFGS', 'L-BFGS-B'}
        Algorithm to use for optimization
    init_grid : bool
        Whether to initialize the optimizer using brute force grid search.
        If False, will sample from normal distribution with mean 0 and
        standard deviation 1.
    n_reinit : int
        Number of times to reinitialize the optimizer if not converged
    grid_reinit : bool
        If optimization does not converge, whether to reinitialize with
        values from grid search
    dofull : bool
        Whether update of the full covariance matrix of the prior should
        be done. If False, the covariance matrix is limited to one in
        which the off-diagonal elements are set to zero.
    early_stopping : bool
        Whether to stop the EM procedure if the log-model-evidence begins
        decreasing (thereby reverting to the last iteration's results).
    verbose : bool
        Whether to print progress of model fitting
    """
    if init_grid is True:
        init_method = 'Grid Search'
    else:
        init_method = 'Random Initialization'

    print('=============================================\n' +
          ' MODEL: ' + self.name + '\n' +
          ' METHOD: Expectation-Maximization\n' +
          ' INITIALIZATION: ' + init_method + '\n' +
          ' N-RESTARTS: ' + str(n_reinit) + '\n' +
          ' GRID REINITIALIZATION: ' + str(grid_reinit) + '\n' +
          ' MAX EM ITERATIONS: ' + str(n_iterations) + '\n' +
          ' EARLY STOPPING: ' + str(early_stopping) + '\n' +
          ' CONVERGENCE LIMIT: ' + str(c_limit) + '\n' +
          ' OPTIMIZATION ALGORITHM: ' + algorithm + '\n' +
          ' FULL COVARIANCE UPDATE: ' + str(dofull) + '\n' +
          ' VERBOSE: ' + str(verbose) + '\n' +
          '=============================================\n')