The following 5 code examples, extracted from open-source Python projects, illustrate how to use scipy.optimize.fminbound().
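Before the project examples, a minimal sketch of the fminbound calling convention may help; the quadratic objective here is an assumed toy function, not taken from any of the projects below:

from scipy.optimize import fminbound

def f(x):
    # Assumed toy objective with its minimum at x = 2.
    return (x - 2.0) ** 2 + 1.0

# Basic call: returns the minimizer on the interval [0, 5].
xopt = fminbound(f, 0.0, 5.0)

# With full_output=True, fminbound also returns the objective value at
# the minimum, an error flag (0 means converged) and the number of
# function evaluations, a pattern used in the first example below.
xopt, fval, ierr, numfunc = fminbound(f, 0.0, 5.0, full_output=True)
print(xopt, fval, ierr, numfunc)  # xopt close to 2.0, fval close to 1.0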
# Imports assumed by this excerpt (a method of a boosting-style class):
import warnings
from scipy.optimize import fminbound

def _find_estimator_weight(self, y, dv_pre, y_pred):
    """Perform a line search to determine the estimator weight."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        def optimization_function(alpha):
            p_ij = self._estimate_instance_probabilities(dv_pre + alpha * y_pred)
            p_i = self._estimate_bag_probabilites(p_ij)
            return self._negative_log_likelihood(p_i)

        # TODO: Add option to choose optimization method.
        alpha, fval, err, n_func = fminbound(optimization_function, 0.0, 5.0,
                                             full_output=True, disp=1)
    if self.learning_rate < 1.0:
        alpha *= self.learning_rate
    return alpha, fval
# Imports assumed by this excerpt: numpy and scipy.optimize. The helpers
# QwlFitFunc and DepthAverage are defined elsewhere in the same module.
import numpy as _np
import scipy.optimize as _spo

def QwlApproxSolver(hl, vs, dn, fr):
    """
    This function solves the quarter-wavelength problem (Boore 2003)
    and returns the frequency-dependent depth, velocity and density
    (from which the amplification factor can be computed).

    Input parameters:
        hl = vector of n thicknesses (m)
        vs = vector of n S-wave velocities (m/s)
        dn = vector of n densities (gr/m3)
        fr = vector of discrete frequencies (Hz)

    Output:
        qwhl = vector of quarter-wavelength depths
        qwvs = vector of quarter-wavelength velocities
        qwdn = vector of quarter-wavelength densities
    """

    # Initialisation
    fnum = len(fr)
    lnum = len(hl)

    hl = _np.array(hl)
    vs = _np.array(vs)
    dn = _np.array(dn)

    qwhl = _np.zeros(fnum)
    qwvs = _np.zeros(fnum)
    qwdn = _np.zeros(fnum)

    for nf in range(fnum):

        # Upper depth bound for the search
        ubnd = _np.max(vs)/(4.*fr[nf])

        # Search for the quarter-wavelength depth
        qwhl[nf] = _spo.fminbound(QwlFitFunc, 0., ubnd,
                                  args=(lnum, hl, 1./vs, fr[nf]))

        # Computing average velocity (note: slowness is used)
        qwvs[nf] = 1./DepthAverage(lnum, hl, 1./vs, qwhl[nf])

        # Computing average density (for the amplification function)
        qwdn[nf] = DepthAverage(lnum, hl, dn, qwhl[nf])

    return qwhl, qwvs, qwdn

#-----------------------------------------------------------------------------------------
# Imports assumed by this excerpt:
import numpy as np
from scipy.optimize import fminbound

def bellman_operator(V, cp, return_policy=False):
    """
    The approximate Bellman operator, which computes and returns the
    updated value function TV (or the V-greedy policy c if
    return_policy is True).

    Parameters
    ----------
    V : array_like(float)
        A NumPy array of dim len(cp.asset_grid) times len(cp.z_vals)
    cp : ConsumerProblem
        An instance of ConsumerProblem that stores primitives
    return_policy : bool, optional(default=False)
        Indicates whether to return the greedy policy given V or the
        updated value function TV. Default is TV.

    Returns
    -------
    array_like(float)
        Returns either the greedy policy given V or the updated value
        function TV.

    """
    # === Simplify names, set up arrays === #
    R, Π, β, u, b = cp.R, cp.Π, cp.β, cp.u, cp.b
    asset_grid, z_vals = cp.asset_grid, cp.z_vals
    new_V = np.empty(V.shape)
    new_c = np.empty(V.shape)
    z_idx = list(range(len(z_vals)))

    # === Linear interpolation of V along the asset grid === #
    vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])

    # === Solve r.h.s. of Bellman equation === #
    for i_a, a in enumerate(asset_grid):
        for i_z, z in enumerate(z_vals):
            def obj(c):  # objective function to be *minimized*
                y = sum(vf(R * a + z - c, j) * Π[i_z, j] for j in z_idx)
                return - u(c) - β * y
            c_star = fminbound(obj, 1e-8, R * a + z + b)
            new_c[i_a, i_z], new_V[i_a, i_z] = c_star, -obj(c_star)

    if return_policy:
        return new_c
    else:
        return new_V
# Imports assumed by this excerpt:
import numpy as np
from scipy.optimize import fminbound

def bellman_operator(w, grid, β, u, f, shocks, Tw=None, compute_policy=0):
    """
    The approximate Bellman operator, which computes and returns the
    updated value function Tw on the grid points. An array to store
    the new set of values Tw is optionally supplied (to avoid having
    to allocate new arrays at each iteration). If supplied, any
    existing data in Tw will be overwritten.

    Parameters
    ----------
    w : array_like(float, ndim=1)
        The value of the input function on different grid points
    grid : array_like(float, ndim=1)
        The set of grid points
    β : scalar
        The discount factor
    u : function
        The utility function
    f : function
        The production function
    shocks : numpy array
        An array of draws from the shock, for Monte Carlo integration
        (to compute expectations).
    Tw : array_like(float, ndim=1), optional (default=None)
        Array to write output values to
    compute_policy : Boolean, optional (default=False)
        Whether or not to compute the policy function

    """
    # === Apply linear interpolation to w === #
    w_func = lambda x: np.interp(x, grid, w)

    # == Initialize Tw if necessary == #
    if Tw is None:
        Tw = np.empty_like(w)

    if compute_policy:
        σ = np.empty_like(w)

    # == set Tw[i] = max_c { u(c) + β E w(f(y - c) z)} == #
    for i, y in enumerate(grid):
        def objective(c):
            return - u(c) - β * np.mean(w_func(f(y - c) * shocks))
        c_star = fminbound(objective, 1e-10, y)
        if compute_policy:
            σ[i] = c_star
        Tw[i] = - objective(c_star)

    if compute_policy:
        return Tw, σ
    else:
        return Tw
# This excerpt is a method of treetime's GTR class; np is numpy and
# ttconf is the package's configuration module.
import numpy as np

def optimal_t_compressed(self, seq_pair, multiplicity):
    """
    Find the optimal distance between the two sequences.
    """

    def _neg_prob(t, seq_pair, multiplicity):
        """
        Probability to observe the child given the parent state, the
        transition matrix and the time of evolution (branch length).

        Parameters
        ----------
        t : double
            Branch length (time between sequences)
        seq_pair : numpy.array
            Compressed representation of the parent and child sequences
        multiplicity : numpy.array
            Multiplicity of each state pair

        Returns
        -------
        prob : double
            Negative log-probability of the two given sequences to be
            separated by the time t.
        """
        return -1.0*self.prob_t_compressed(seq_pair, multiplicity, t, return_log=True)

    try:
        from scipy.optimize import minimize_scalar
        opt = minimize_scalar(_neg_prob,
                              bounds=[0, ttconf.MAX_BRANCH_LENGTH],
                              method='bounded',
                              args=(seq_pair, multiplicity),
                              options={'xatol': 1e-8})
        new_len = opt["x"]
    except ImportError:  # fall back to fminbound on legacy scipy versions
        import scipy
        print('legacy scipy', scipy.__version__)
        from scipy.optimize import fminbound
        new_len = fminbound(_neg_prob, 0, ttconf.MAX_BRANCH_LENGTH,
                            args=(seq_pair, multiplicity))
        opt = {'success': True}

    if new_len > .9 * ttconf.MAX_BRANCH_LENGTH:
        self.logger("WARNING: GTR.optimal_t_compressed -- The branch length seems to be very long!",
                    4, warn=True)

    if opt["success"] != True:
        # Return the hamming distance: the number of state pairs where
        # the states differ, divided by the total number of pairs.
        new_len = np.sum(multiplicity[seq_pair[:, 1] != seq_pair[:, 0]])/np.sum(multiplicity)

    return new_len
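The final example uses fminbound only as a fallback for minimize_scalar(method='bounded'), which wraps the same bounded Brent-style minimizer behind SciPy's newer interface. A minimal sketch of the two equivalent calls, on an assumed toy objective:

from scipy.optimize import fminbound, minimize_scalar

g = lambda t: (t - 0.5) ** 2  # assumed toy objective, minimum at t = 0.5

# Modern interface: returns an OptimizeResult with .x, .fun and .success.
res = minimize_scalar(g, bounds=(0.0, 1.0), method='bounded',
                      options={'xatol': 1e-8})

# Legacy interface: returns the minimizer directly.
t_opt = fminbound(g, 0.0, 1.0, xtol=1e-8)

assert abs(res.x - t_opt) < 1e-6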