The following 48 code examples, extracted from open-source Python projects, illustrate how to use scipy.optimize.brentq().
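Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (the function and bracket below are illustrative only and do not come from any of the projects listed): brentq finds a root of a scalar function inside a bracketing interval [a, b] on which the function changes sign, and raises a ValueError if f(a) and f(b) have the same sign.

from scipy.optimize import brentq

# Illustrative function: f changes sign on [0, 2], so the bracket is valid.
f = lambda x: x**2 - 2.0

root = brentq(f, 0.0, 2.0, xtol=1e-12)  # converges to sqrt(2) ~= 1.414213562
print(root)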
def sample():
    '''
    Draw a sample from the distribution of polar angle of the angular
    momentum vector, :math:`\\theta`, computed using the Monte Carlo
    technique discussed in the paper.

    .. plot::
        :align: center

        from planetplanet.photo import theta
        import matplotlib.pyplot as pl
        x = [theta.sample() for i in range(10000)]
        pl.hist(x, bins = 50)
        pl.xlabel(r'$\\theta$ [deg]', fontweight = 'bold')
        pl.ylabel('Probability', fontweight = 'bold')
        pl.show()

    '''
    y = np.random.random()
    f = lambda x: CDF(x) - y
    while np.sign(f(0)) == np.sign(f(1)):
        y = np.random.random()
        f = lambda x: CDF(x) - y
    return brentq(f, 0, 1)
def brent_method(self, lower_bound, upper_bound):
    """
    Brent Method - Inverse Quadratic Interpolation

    Returns a zero x* of the function f in the given interval [a, b], to within a
    tolerance 6 * machine_epsilon * |x*| + 2 * tolerance.
    This function assumes f(a) * f(b) < 0

    :param lower_bound:
    :param upper_bound:
    :return: x* s.t. tgt_value = f(x*)
    """
    from scipy.optimize import brentq
    try:
        x_star = brentq(self.__objective_function, lower_bound, upper_bound,
                        xtol=self.tolerance, maxiter=self.iteration_max)
    except ValueError as err:
        if str(err) != "f(a) and f(b) must have different signs":
            raise
        new_lower_bound, new_upper_bound, _, _ = self.monotonic_root_bracketing(lower_bound, upper_bound)
        x_star = brentq(self.__objective_function, new_lower_bound, new_upper_bound,
                        xtol=self.tolerance, maxiter=self.iteration_max)
    return x_star
def IBAWVol(IsCall, Fwd, Strike, Price, Texp, rd, rf):
    ''' Implied vol for american options according to BAW '''
    Df = exp(-rd * Texp)
    if Texp <= 0.:
        raise ValueError('maturity must be > 0')

    def f(vol):
        return Price - Df * BSFwd(IsCall, Fwd, Strike, vol, Texp) \
               - BAWPremium(IsCall, Fwd, Strike, vol, Texp, rd, rf)

    Vol = brentq(f, 0.001, 10.0)
    if Vol < 0 or Vol > 100.:
        raise ValueError('the implied vol solver fails')
    return Vol
def get_omega_implicit(n, w, k, tau, h, m):
    phi_m = (2 * np.pi * m) / n
    func = lambda s: -s + 0.5 * k * tau * (h(-s + phi_m) + h(-s - phi_m)) + w * tau

    h_min = 2 * h.min()
    h_max = 2 * h.max()
    s_min = h_min + w * tau
    s_max = h_max + w * tau
    s = np.linspace(s_min, s_max, 10000)

    i_root = get_sign_changes(func(s))
    if len(i_root) > 0:
        omega = []
        for ir in range(len(i_root)):
            s_tmp = optimize.brentq(func, s[i_root[ir]], s[i_root[ir] + 1])
            omega.append(w + 0.5 * k * (h(-s_tmp + phi_m) + h(-s_tmp - phi_m)))
        return omega
    else:
        return None
def _get_implicit_omega(n, w, k, h, m, tau):
    phi_m = (2 * np.pi * m) / n
    func = lambda s: -s + 0.5 * k * tau * (h(-s + phi_m) + h(-s - phi_m)) + w * tau

    h_min = 2 * h.min()
    h_max = 2 * h.max()
    s_min = h_min + w * tau
    s_max = h_max + w * tau
    s = np.linspace(s_min, s_max, 10000)

    i_root = get_sign_changes(func(s))
    if len(i_root) > 0:
        omega = []
        for ir in range(len(i_root)):
            s_tmp = optimize.brentq(func, s[i_root[ir]], s[i_root[ir] + 1])
            omega.append(w + 0.5 * k * (h(-s_tmp + phi_m) + h(-s_tmp - phi_m)))
        return omega
    else:
        return []
def get_omega(w, k, tau, f_sum, ns=1000):
    # Setup implicit equation for s
    f = lambda s: k * tau * f_sum(s) + w * tau - s

    # Determine search interval for s
    # Assumption c can only vary between -1 and +1
    s_min = (w - k) * tau
    s_max = (w + k) * tau
    s = np.linspace(s_min - 2, s_max + 2, ns)  # safety margin added

    # Find sign changes as you go along curve
    # Assumes that there are no double sign changes between two values of s
    # A finer sampling interval can be achieved by increasing ns
    i_root = get_sign_changes(f(s))
    if len(i_root) > 0:
        omega = []
        for ir in range(len(i_root)):
            # Numerically solve the implicit equation for omega
            s_tmp = optimize.brentq(f, s[i_root[ir]], s[i_root[ir] + 1])
            omega.append(w + k * f_sum(s_tmp))
        return omega
    else:
        return None
def betaDistObjective(nabla): # Make the "intermediate objective function" for the beta-dist estimation #print('Trying nabla=' + str(nabla)) intermediateObjective = lambda DiscFac : simulateKYratioDifference(DiscFac, nabla=nabla, N=Params.pref_type_count, type_list=est_type_list, weights=Params.age_weight_all, total_output=Params.total_output, target=KY_target) if Params.do_tractable: top = 0.98 else: top = 0.998 DiscFac_new = brentq(intermediateObjective,0.90,top,xtol=10**(-8)) N=Params.pref_type_count sim_wealth = (np.vstack((this_type.W_history for this_type in est_type_list))).flatten() sim_weights = np.tile(np.repeat(Params.age_weight_all,Params.sim_pop_size),N) my_diff = calculateLorenzDifference(sim_wealth,sim_weights,Params.percentiles_to_match,lorenz_target) print('DiscFac=' + str(DiscFac_new) + ', nabla=' + str(nabla) + ', diff=' + str(my_diff)) if my_diff < Params.diff_save: Params.DiscFac_save = DiscFac_new return my_diff # ================================================================= # ========= Estimating the model ================================== #==================================================================
def itransform(self, y_transformed):
    # FIXME: Interpolate rather than solve for speed?
    ycdf = norm.cdf(y_transformed)
    y = [brentq(self._obj, a=self.lb, b=self.ub, args=(yi,))
         for yi in ycdf]
    return np.array(y)
def find_ir(m, utility, payment, a=0.0, b=1.0): """Find the price of a bond that creates equal utility at time 0 as adding `payment` to the value of consumption in the final period. The purpose of this function is to find the interest rate embedded in the `EZUtility` model. Parameters ---------- m : ndarray or list array of mitigation utility : `Utility` object object of utility class payment : float value added to consumption in the final period a : float, optional initial guess b : float, optional initial guess - f(b) needs to give different sign than f(a) Returns ------- tuple result of optimization Note ---- requires the 'scipy' package """ def min_func(price): utility_with_final_payment = utility.adjusted_utility(m, final_cons_eps=payment) first_period_eps = payment * price utility_with_initial_payment = utility.adjusted_utility(m, first_period_consadj=first_period_eps) return utility_with_final_payment - utility_with_initial_payment return brentq(min_func, a, b)
def find_term_structure(m, utility, payment, a=0.0, b=1.5): """Find the price of a bond that creates equal utility at time 0 as adding `payment` to the value of consumption in the final period. The purpose of this function is to find the interest rate embedded in the `EZUtility` model. Parameters ---------- m : ndarray or list array of mitigation utility : `Utility` object object of utility class payment : float value added to consumption in the final period a : float, optional initial guess b : float, optional initial guess - f(b) needs to give different sign than f(a) Returns ------- tuple result of optimization Note ---- requires the 'scipy' package """ def min_func(price): period_cons_eps = np.zeros(int(utility.decision_times[-1]/utility.period_len) + 1) period_cons_eps[-2] = payment utility_with_payment = utility.adjusted_utility(m, period_cons_eps=period_cons_eps) first_period_eps = payment * price utility_with_initial_payment = utility.adjusted_utility(m, first_period_consadj=first_period_eps) return utility_with_payment - utility_with_initial_payment return brentq(min_func, a, b)
def find_bec(m, utility, constraint_cost, a=-150, b=150): """Used to find a value for consumption that equalizes utility at time 0 in two different solutions. Parameters ---------- m : ndarray or list array of mitigation utility : `Utility` object object of utility class constraint_cost : float utility cost of constraining period 0 to zero a : float, optional initial guess b : float, optional initial guess - f(b) needs to give different sign than f(a) Returns ------- tuple result of optimization Note ---- requires the 'scipy' package """ def min_func(delta_con): base_utility = utility.utility(m) new_utility = utility.adjusted_utility(m, first_period_consadj=delta_con) print(base_utility, new_utility, constraint_cost) return new_utility - base_utility - constraint_cost return brentq(min_func, a, b)
def perpetuity_yield(price, start_date, a=0.1, b=100000):
    """Find the yield of a perpetuity starting at year `start_date`.

    Parameters
    ----------
    price : float
        price of bond ending at `start_date`
    start_date : int
        start year of perpetuity
    a : float, optional
        initial guess
    b : float, optional
        initial guess - f(b) needs to give different sign than f(a)

    Returns
    -------
    tuple
        result of optimization

    Note
    ----
    requires the 'scipy' package

    """
    def min_func(perp_yield):
        return price - (100. / (perp_yield + 100.))**start_date \
               * (perp_yield + 100) / perp_yield

    return brentq(min_func, a, b)
def find_distance_by_area(r, R, a, numeric_correction=0.0001): ''' Solves circle_intersection_area(r, R, d) == a for d numerically (analytical solution seems to be too ugly to pursue). Assumes that a < pi * min(r, R)**2, will fail otherwise. The numeric correction parameter is used whenever the computed distance is exactly (R - r) (i.e. one circle must be inside another). In this case the result returned is (R-r+correction). This helps later when we position the circles and need to ensure they intersect. >>> find_distance_by_area(1, 1, 0, 0.0) 2.0 >>> round(find_distance_by_area(1, 1, 3.1415, 0.0), 4) 0.0 >>> d = find_distance_by_area(2, 3, 4, 0.0) >>> d 3.37... >>> round(circle_intersection_area(2, 3, d), 10) 4.0 >>> find_distance_by_area(1, 2, np.pi) 1.0001 ''' if r > R: r, R = R, r if np.abs(a) < tol: return float(r + R) if np.abs(min([r, R])**2 * np.pi - a) < tol: return np.abs(R - r + numeric_correction) return brentq(lambda x: circle_intersection_area(r, R, x) - a, R - r, R + r)
def _proj_l1_scalar_root(v, gamma): r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`. There is no `axis` parameter since the algorithm for computing the solution treats the input `v` as a single vector. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` Returns ------- x : ndarray Output array """ if norm_l1(v) <= gamma: return v else: av = np.abs(v) fn = lambda t: np.sum(np.maximum(0, av - t)) - gamma t = optim.brentq(fn, 0, av.max()) return prox_l1(v, t)
def callvalue_to_vol(self, value, strike=None):
    if strike is None:
        strike = self.forward

    def find_vol(vol):
        self.stdev = vol * self.sqr_term
        return self.value(strike=strike) - value

    return solver(find_vol, 1e-10, 1e2)
def get_spread(self, proj, disc, pv):
    def find_spread(spread):
        return self.value(proj, disc, spread=spread) - pv
    return solver(find_spread, -1e2, 1e2)
def MomentsTipAssympGeneral(dist, Kprime, Eprime, muPrime, Cbar, Vel, stagnant, KIPrime): """Moments of the General tip asymptote to calculate the volume integral (see Donstov and Pierce, 2017)""" TipAsmptargs = (dist, Kprime, Eprime, muPrime, Cbar, Vel) if stagnant: w = KIPrime * dist ** 0.5 / Eprime else: (a, b) = FindBracket_w(dist, Kprime, Eprime, muPrime, Cbar, Vel) w = brentq(TipAsym_UniversalW_zero_Res, a, b, TipAsmptargs) # root finding between the upper and lower bounds on width if w < -1e-15: w = abs(w) if Vel < 1e-6: delt = 1 / 6 else: Kh = Kprime * dist ** 0.5 / (Eprime * w) Ch = 2 * Cbar * dist ** 0.5 / (Vel ** 0.5 * w) g0 = f(Kh, 0.9911799823 * Ch, 10.392304845) delt = 10.392304845 * (1 + 0.9911799823 * Ch) * g0 M0 = 2 * w * dist / (3 + delt) M1 = 2 * w * dist ** 2 / (5 + delt) if np.isnan(M0) or np.isnan(M1): raise SystemExit('Moment(s) of assymptote are nan') return (M0, M1)
def FF_Yang_Dou(Re, rough): """ This function approximate the friction factor for the given Reynold's number and the relative roughness arrays with the Yang Dou approximation (see Yang, S. Dou, G. (2010). Turbulent drag reduction with polymer additive in rough pipes. Journal of Fluid Mechanics, 642 279-294). The function is implicit and utilize a numerical root finder Arguments: ReNum (ndarray-float): Reynold's number rough (ndarray-float): 1/relative roughness (w/roughness length scale) Returns: ndarray-float : frction factor """ ff_args = (Re, rough) sol_vbyu = fsolve(FF_Yang_Dou_residual, 15., ff_args) # sol_vbyu = brentq(FF_Yang_Dou_residual, 18., 100., ff_args) ff_Yang_Dou = 2 / sol_vbyu ** 2 Rplus = Re / (2 * sol_vbyu) ff_Man_Strkl = 0.143 / 4 / rough ** (1 / 3) if Rplus >= 100 * rough: ff = ff_Man_Strkl else: ff = ff_Yang_Dou if rough < 32 and ff > ff_Man_Strkl: ff = ff_Man_Strkl return ff #-----------------------------------------------------------------------------------------------------------------------
def root(x, T):
    eta_0 = 0.0
    eta_1 = 1e0
    while polynom(eta_1, x, T) >= 0:
        eta_1 = 10 * eta_1
    return brentq(polynom, eta_0, eta_1, args=(x, T), maxiter=1000)

# GQCA probability of finding the configuration j in the alloy
def __init__(self, omega, beta_min=-10., beta_max=10.): """ Constructor. Args: omega (Parameter): the omega parameter of the policy from which beta of the Boltzmann policy is computed; beta_min (float, -10.): one end of the bracketing interval for minimization with Brent's method; beta_max (float, 10.): the other end of the bracketing interval for minimization with Brent's method. """ self.__name__ = 'Mellowmax' class MellowmaxParameter: def __init__(self, outer, beta_min, beta_max): self._omega = omega self._outer = outer self._beta_min = beta_min self._beta_max = beta_max def __call__(self, state): mm = Mellowmax.mellow_max(self._outer._approximator, state, self._omega(state)) def f(beta): v = self._outer._approximator.predict(state) - mm return np.sum(np.exp(beta * v) * v) try: return brentq(f, a=self._beta_min, b=self._beta_max) except ValueError: return 0. beta_mellow = MellowmaxParameter(self, beta_min, beta_max) super(Mellowmax, self).__init__(beta_mellow)
def find_balanced_budget_tax(c):
    """
    Find tax level that will induce a balanced budget.
    """
    def steady_state_budget(t):
        e, u, w = compute_steady_state_quantities(c, t)
        return t - u * c

    τ = brentq(steady_state_budget, 0.0, 0.9 * c)
    return τ

# Levels of unemployment insurance we wish to study
def get_event_volume(self): """ Calculate an estimate for the total count of activities solely due to the SMP model. Returns ------- totals: 2-tuple Sum of model counts evaluation over the duration of the event with the background (baseline), and same but without the baseline included. """ # unpack optimized model parameters x0, alpha, beta, a0, y0 = self.model.parameters x_root = sp_optimize.brentq(self.model.evaluate_point, 1.1*x0, 10.0, args=(x0, alpha, beta, a0, y0, True) ) # create the x values, step size is same as original x_array_step = self.data.scaled_x[1] - self.data.scaled_x[0] x_array = np.arange(x0, x_root, x_array_step) # create array of point-by-point (scaled) model evaluations (w/ + w/o baselines) point_evals_with_baseline = self.model.evaluate(x_array, x0, alpha, beta, a0, y0) point_evals_without_baseline = self.model.evaluate(x_array, x0, alpha, beta, a0, y0, remove_offset=True) # de-scale the y values to actual counts baseline_counts = sum( self.data.scale_data(point_evals_with_baseline, axis='y', make_larger=True) ) no_baseline_counts = sum( self.data.scale_data(point_evals_without_baseline, axis='y', make_larger=True) ) # combine the two for return totals = ( baseline_counts, no_baseline_counts, ) return totals
def swiss_roll_2d(n_samples=100, noise=0.0, regular=True, random_state=None): if regular: def reg_sampler(): def locations(k): return np.sqrt(np.abs(0.5 * lambertw(0.5 * np.exp(2 * k - 1)))) # Next lines find the root of an approximate inverse of the # arc-length function. # k_min = brentq(lambda x: locations(x) - np.pi, 12, 13) # k_max = brentq(lambda x: locations(x) - 4 * np.pi, 161, 163) # Since their values are fixed, I will hardcode them to: k_min = 12.207481467498257 k_max = 161.63784184495893 t = locations(np.linspace(k_min, k_max, n_samples)) return t # t = 1.5 * np.pi * (1 + 2 * np.linspace(0, 1, n_samples)) t = reg_sampler() t = t[np.newaxis, :] x = t * np.cos(t) y = t * np.sin(t) X = np.concatenate((x, y)) generator = check_random_state(random_state) X += noise * generator.randn(2, n_samples) X = X.T t = np.squeeze(t) return X, t else: X, t = sk_datasets.make_swiss_roll(n_samples=n_samples, noise=noise, random_state=random_state) X = X[:, [0, 2]] idx = np.argsort(t) return X[idx, :], t[idx]
def get_Kstar_max(self): """ value of K*(dimensionless vortex constant) for which weight flow in maximum See also: NASA TN D-4421 eq. 23-25 """ max_val = 0 while(1): try: f = lambda Mstar: (1 - (max_val/self.Mlstar)**2 * Mstar**2) ** (1/(self.gamma - 1)) integrate.quad(f,self.Mlstar,self.Mustar) except: max_val -= 0.1 break max_val += 0.1 def func(Kstar_max): a1 = (1 - Kstar_max**2)**(1/(self.gamma -1)) a2 = (1 - (Kstar_max**2) * (self.Mustar/self.Mlstar)**2) a3 = (1/(self.gamma - 1)) # This if is to avoid the bug in python3 it self if(a2<0): a2 = ((Kstar_max**2) * (self.Mustar/self.Mlstar)**2 - 1) a = a1 + a2**a3 else : a = a1 - a2 ** a3 f = lambda Mstar: (1 - (Kstar_max/self.Mlstar)**2 * Mstar**2) ** (1/(self.gamma - 1)) b = integrate.quad(f,self.Mlstar,self.Mustar) return a - b[0] Kstar_max = optimize.brentq(func,0.1,max_val) return Kstar_max
def minimize_q(D, L, massFlow, Cp_l, Cp_g, vapQ, T0, T1, T_out, U, h1, h0):
    Cp = vapQ * Cp_g + (1 - vapQ) * Cp_l
    A = math.pi * D * L

    def fun(x):
        return -x + T_out + (T0 - T_out) * exp(-(x - T0) * U * A / ((h1 - h0) * massFlow))

    tolT = 0.05
    error = tolT + 1
    print('TTTTTTTTTTTTTTTTTTTTTTTTTTTT\n', T1)
    # T = brentq(fun, max(T0, T1, T_out) + 50, min(T0, T1, T_out) - 50)
    # T = 2 * ((h1 - h0) * massFlow / (T1 - T0) + T_out) / (1 + 2 * (h1 - h0) / (T1 - T0))
    T = T1 + ((h1 - h0) * massFlow + U * A * (T1 - T0) / ln((T1 - T_out) / (T0 - T_out))) / (massFlow * Cp)
    print(T1, T)
    # while error > tolT:
    #     T = fun(T1)
    #     print(T)
    #     error = abs(T - T1)
    #     T1 = T
    print('LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL')
    return T
def solve_optPower(lam_ij, lam_id, lamjd_rSet, rSet, pi_max): """ For the sake of demonstration, here the closed-form expression of the outage probability of the multi-relay DF cooperative communication under the assumption of i.i.d channel fading is exploited. Input pi_max and pi_min are the guessed bounds of the power level used at the source i, respectively. lamjd_rSet is an array containing the lambda_jd(i) over the set rSet """ xmin = 0.01 xmax = 10*np.log10(pi_max) #x is in dB func = lambda x: const.BETA - outage_prob_sys_iid(lam_ij, lam_id, 10.0**(x/10.0), rSet) # lowerB = func(xmin) # print lowerB # upperB = func(xmax) # print upperB # if lowerB*upperB > 0.0: # print lowerB, upperB # raise x0 = brentq(func, xmin, xmax) w_i = 10.0**(x0/10.0) wj_rSet = lamjd_rSet*(w_i/lam_id) return w_i, wj_rSet#in Watt
def get_omega(w, k, tau, f_sum, ns=1000): # Setup implicit equation for s f = lambda s: k * tau * f_sum(s) + w * tau - s # Determine search interval for s # Assumption c can only vary between -1 and +1 s_min = (w - k) * tau s_max = (w + k) * tau s = np.linspace(s_min - 2, s_max + 2, ns) # safty margin added # Find sign changes as you go along curve # Assumes that there are no double sign changes between two values of s # A finer sampling interval can be achieved by increasing ns i_root = get_sign_changes(f(s)) if len(i_root) > 0: omega = [] for ir in range(len(i_root)): # Numerically solve the implicit equation for omega s_tmp = optimize.brentq(f, s[i_root[ir]], s[i_root[ir] + 1]) omega.append(w + k * f_sum(s_tmp)) return omega else: return None # #############################################################################
def get_omega(topo, twist_number, h, k, w, tau, ns=10000): # Setup coupling sum c = lambda s: topo.get_coupling_sum(h, twist_number, s) # Setup implicit equation for s f = lambda s: k * tau * c(s) + w * tau - s # Determine search interval for s # Assumption c can only vary between -1 and +1 s_min = (w - k) * tau s_max = (w + k) * tau s = np.linspace(s_min - 2, s_max + 2, ns) # safty margin added # Find sign changes as you go along curve # Assumes that there are no double sign changes between two values of s # A finer sampling interval can be achieved by increasing ns i_root = get_sign_changes(f(s)) if len(i_root) > 0: omega = [] for ir in range(len(i_root)): # Numerically solve the implicit equation for omega s_tmp = optimize.brentq(f, s[i_root[ir]], s[i_root[ir] + 1]) omega.append(w + k * c(s_tmp)) return omega else: return None
def get_omega(system, ns=1000): # Get coupling sum function h_sum = SyncState.get_coupling_sum(system) # Setup implicit equation for s f = lambda s: system.g.k * system.g.tau * h_sum(s) + system.pll.w * system.g.tau - s # Determine search interval for s # Assumption c can only vary between -1 and +1 s_min = (system.pll.w - system.g.k) * system.g.tau s_max = (system.pll.w + system.g.k) * system.g.tau s = np.linspace(s_min - 2, s_max + 2, ns) # safty margin added # Find sign changes as you go along curve # Assumes that there are no double sign changes between two values of s # A finer sampling interval can be achieved by increasing ns i_root = get_sign_changes(f(s)) if len(i_root) > 0: omega = [] for ir in range(len(i_root)): # Numerically solve the implicit equation for omega s_tmp = optimize.brentq(f, s[i_root[ir]], s[i_root[ir] + 1]) omega.append(system.pll.w + system.g.k * h_sum(s_tmp)) return omega else: return None
def _findroot(self, func, x0=None, xab=None, **kwds): """Find root of `func` by Newton's method if `x0` is given or Brent's method if `xab` is given. If neither is given, then ``xab=[self.x[0],self.x[-1]]`` and Brent's method is used. Parameters ---------- func : callable, must accept a scalar and retun a scalar x0 : float start guess for Newton's secant method xab : sequence of length 2 start bracket for Brent's method, root must lie in between **kwds : passed to scipy root finder (newton() or brentq()) Returns ------- xx : scalar the root of func(x) """ if x0 is not None: xx = optimize.newton(func, x0, **kwds) else: if xab is None: xab = [self.x[0], self.x[-1]] xx = optimize.brentq(func, xab[0], xab[1], **kwds) return xx
def get_lagna_data(jd_sunrise, lat, lon, tz_off, ayanamsha_id=swe.SIDM_LAHIRI, debug=False): """Returns the lagna data Args: float jd: The Julian Day at which the lagnam is to be computed lat: Latitude of the place where the lagnam is to be computed lon: Longitude of the place where the lagnam is to be computed offset: Used by internal functions for bracketing Returns: tuples detailing the end time of each lagna, beginning with the one prevailing at sunrise Examples: >>> get_lagna_data(2458222.5208333335, lat=13.08784, lon=80.27847, tz_off=5.5) [(12, 2458222.5214310056), (1, 2458222.596420153), (2, 2458222.6812926503), (3, 2458222.772619788), (4, 2458222.8624254186), (5, 2458222.9478168003), (6, 2458223.0322211445), (7, 2458223.1202004547), (8, 2458223.211770839), (9, 2458223.3000455885), (10, 2458223.3787625884), (11, 2458223.4494649624)] """ lagna_sunrise = 1 + floor(get_lagna_float(jd_sunrise, lat, lon, ayanamsha_id=ayanamsha_id)) lagna_list = [(x + lagna_sunrise - 1) % 12 + 1 for x in range(12)] lbrack = jd_sunrise - 3 / 24 rbrack = jd_sunrise + 3 / 24 lagna_data = [] for lagna in lagna_list: # print('---\n', lagna) if (debug): print('lagna sunrise', get_lagna_float(jd_sunrise, lat, lon, ayanamsha_id=ayanamsha_id)) print('lbrack', get_lagna_float(lbrack, lat, lon, int(-lagna), ayanamsha_id=ayanamsha_id)) print('rbrack', get_lagna_float(rbrack, lat, lon, int(-lagna), ayanamsha_id=ayanamsha_id)) lagna_end_time = brentq(get_lagna_float, lbrack, rbrack, args=(lat, lon, -lagna, debug)) lbrack = lagna_end_time + 1 / 24 rbrack = lagna_end_time + 3 / 24 lagna_data.append((lagna, lagna_end_time)) return lagna_data
def calculate_turbulentCf_spaldingChi(self, r=0.89): # Calculate the turbulent skin fricton coefficient using the Spalding Chi method # This is more accurate than Van driest for T_wall/self.T_adiabaticWall < 0.2 # van Driest says r = 0.85 to 0.89 for lam to turbs self.r = 0.89 # Set up the variables/coefficients for the estimate # Various wall temperature ratios TawOnT = self.T_adiabaticWall/self.T TwOnT = T_wall/self.T denominator = ( (TawOnT + TwOnT)**2. - 4.*TwOnT )**0.5 alpha = (TawOnT + TwOnT - 2.) / denominator beta = (TawOnT - TwOnT) / denominator F_c = (TawOnT - 1.) / (np.arcsin(alpha) + np.arcsin(beta))**2. # Solve the implicit equation for the incompressible skin friction LHS = self.localReynolds / (F_c*(TawOnT**0.772 * TwOnT**-1.474)) K = 0.4 E = 12. kappa = lambda cf: K * (2./cf)**0.5 # bracket = (2. + (2. - kappa)**2.)*exp(kappa) - 6. - 2.*kappa - (1./12)*kappa**4. - (1./20)*kappa**5. - (1./60)*kappa**6. - (1./256)*kappa**7. bracket = lambda cf: (2. + (2. - kappa(cf))**2.)*exp(kappa(cf)) - 6. - 2.*kappa(cf) - (1./12)*kappa(cf)**4. - (1./20)*kappa(cf)**5. - (1./60)*kappa(cf)**6. - (1./256)*kappa(cf)**7. cf_inc_func = lambda cf: (1./12)*(2./cf)**2. + (1./(E*K**3.)) * bracket(cf) - LHS try: cf_inc = brentq(cf_inc_func, 5e-6, 0.1) cf = (1./F_c) * cf_inc except: # print "Calculation of turbulent Cf failed, Flow properties at culprit cell below." # print "Am I in the Wake? Running length is", self.localRunningLength, "Set cf to zero." # names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach'] # properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M] # for c1, c2 in zip(names, properties): # print "%-10s %s" % (c1, c2) # print '\n' cf = 0. self.badCfCount += 1 if self.coneCorrectionFlag: # Flow is 3D, apply cone rule correction cf *= 1.15 return cf #%%
def calculate_turbulentCf(self, r=0.89): #print "Turbulent flow" # Calculate the turbulent skin fricton coefficient # van Driest says r = 0.85 to 0.89 for lam to turbs self.r = r # Set up the variables/coefficients for the Van Driest estimate aSquared = (g_inf - 1)/2. * self.r * self.M**2. * self.T/T_wall b = self.T_adiabaticWall/T_wall - 1 denominator = (b**2. + 4.*aSquared)**0.5 A = self.clean_A(aSquared, b, denominator) B = self.clean_B(aSquared, b, denominator) # Solve the implicit equation for skin friction #cf_guess = 1. cf_func = lambda cf_turbulent: 4.15*log(self.localReynolds*cf_turbulent*self.mu/self.mu_wall) + 1.7 - (asin(A) + asin(B)) / (cf_turbulent*(self.T_adiabaticWall/self.T - 1.))**0.5 try: cf = brentq(cf_func, 1e-15, 0.1) # names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach'] # properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M] # for c1, c2 in zip(names, properties): # print "%-10s %s" % (c1, c2) # print '\n' except: # print "Calculation of turbulent Cf failed, Flow properties at culprit cell:" # print "Wake? Running length is", self.localRunningLength # names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach'] # properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M] # for c1, c2 in zip(names, properties): # print "%-10s %s" % (c1, c2) # print '\n' cf = 0. self.badCfCount += 1 if self.coneCorrectionFlag: # Flow is 3D, apply cone rule correction cf *= 1.15 self.cf = cf # This is to plot lam (BLregime = 0) vs transitional (0 < BLregime < 1) # vs turb flow (BLregime = 1) self.BLregime = 1 return self.cf
def findLorenzDistanceAtTargetKY(Economy,param_name,param_count,center_range,spread,dist_type): ''' Finds the sum of squared distances between simulated and target Lorenz points in an economy when a given parameter has heterogeneity according to some distribution. The class of distribution and a measure of spread are given as inputs, but the measure of centrality such that the capital to income ratio matches the target ratio must be found. Parameters ---------- Economy : cstwMPCmarket An object representing the entire economy, containing the various AgentTypes as an attribute. param_name : string The name of the parameter of interest that varies across the population. param_count : int The number of different values the parameter of interest will take on. center_range : [float,float] Bounding values for a measure of centrality for the distribution of the parameter of interest. spread : float A measure of spread or diffusion for the distribution of the parameter of interest. dist_type : string The type of distribution to be used. Can be "lognormal" or "uniform" (can expand). Returns ------- dist : float Sum of squared distances between simulated and target Lorenz points for this economy (sqrt). ''' # Define the function to search for the correct value of center, then find its zero intermediateObjective = lambda center : getKYratioDifference(Economy = Economy, param_name = param_name, param_count = param_count, center = center, spread = spread, dist_type = dist_type) optimal_center = brentq(intermediateObjective,center_range[0],center_range[1],xtol=10**(-6)) Economy.center_save = optimal_center # Get the sum of squared Lorenz distances given the correct distribution of the parameter Economy(LorenzBool = True) # Make sure we actually calculate simulated Lorenz points Economy.distributeParams(param_name,param_count,optimal_center,spread,dist_type) # Distribute parameters Economy.solveAgents() Economy.makeHistory() dist = Economy.calcLorenzDistance() Economy(LorenzBool = False) print ('findLorenzDistanceAtTargetKY tried spread = ' + str(spread) + ' and got ' + str(dist)) return dist
def main(args): with tf.Graph().as_default(): with tf.Session() as sess: # Read the file containing the pairs used for testing pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs)) # Get the paths for the corresponding images paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext) # Load the model facenet.load_model(args.model) # Get input and output tensors images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0") embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0") phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0") #image_size = images_placeholder.get_shape()[1] # For some reason this doesn't work for frozen graphs image_size = args.image_size embedding_size = embeddings.get_shape()[1] # Run forward pass to calculate embeddings print('Runnning forward pass on LFW images') batch_size = args.lfw_batch_size nrof_images = len(paths) nrof_batches = int(math.ceil(1.0*nrof_images / batch_size)) emb_array = np.zeros((nrof_images, embedding_size)) for i in range(nrof_batches): start_index = i*batch_size end_index = min((i+1)*batch_size, nrof_images) paths_batch = paths[start_index:end_index] images = facenet.load_data(paths_batch, False, False, image_size) feed_dict = { images_placeholder:images, phase_train_placeholder:False } emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict) tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds) print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy))) print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far)) auc = metrics.auc(fpr, tpr) print('Area Under Curve (AUC): %1.3f' % auc) eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.) print('Equal Error Rate (EER): %1.3f' % eer)
def BAWPremium(IsCall, Fwd, Strike, Vol, Texp, rd, rf):
    ''' Early exercise premium for american spot options based on Barone-Adesi, Whaley
    approximation formula. To compute the options prices, this needs to be added to the
    european options prices. '''
    if Texp <= 0. or Vol <= 0:
        return 0.

    T = Texp
    K = Strike
    D = exp(-rd * Texp)
    Dq = exp(-rf * Texp)
    Phi = (IsCall and 1 or -1)
    k = (D == 1.) and 2. / Vol / Vol or 2. * rf / Vol / Vol / (1 - D)
    # the expression in the middle is really the expression on the right in the limit D -> 1
    # note that lim_{D -> 1.} log(D)/(1-D) = -1.
    beta = 2. * (rd - rf) / Vol / Vol

    if Phi == 1:
        q2 = (-(beta - 1.) + sqrt((beta - 1.)**2 + 4. * k)) / 2.

        def EarlyExerBdry(eeb):
            x = D * BSFwd(True, eeb, K, Vol, T) + (1. - Dq * cnorm(fd1(eeb, K, Vol, T))) * eeb / q2 - eeb + K
            return x

        eeBdry = D * BSFwd(True, Fwd, K, Vol, T) + (1. - Dq * cnorm(fd1(Fwd, K, Vol, T))) * Fwd / q2 + K
        eeBdry = newton(EarlyExerBdry, eeBdry)
        if Fwd >= eeBdry:
            eePrem = -D * BSFwd(True, Fwd, K, Vol, T) + Fwd - K
        else:
            A2 = (eeBdry / q2) * (1. - Dq * cnorm(fd1(eeBdry, K, Vol, T)))
            eePrem = A2 * pow(Fwd / eeBdry, q2)
    elif Phi == -1:
        q1 = (-(beta - 1.) - sqrt((beta - 1.)**2 + 4. * k)) / 2.

        def EarlyExerBdry(eeb):
            x = D * BSFwd(False, eeb, K, Vol, T) - (1. - Dq * cnorm(-fd1(eeb, K, Vol, T))) * eeb / q1 + eeb - K
            return x

        eeBdry = -D * BSFwd(False, Fwd, K, Vol, T) + (1. - Dq * cnorm(-fd1(Fwd, K, Vol, T))) * Fwd / q1 + K
        eeBdry = brentq(EarlyExerBdry, 1e-12, K)
        if Fwd <= eeBdry:
            eePrem = -D * BSFwd(False, Fwd, K, Vol, T) + K - Fwd
        else:
            A1 = -(eeBdry / q1) * (1. - Dq * cnorm(-fd1(eeBdry, K, Vol, T)))
            eePrem = A1 * pow(Fwd / eeBdry, q1)
    else:
        raise ValueError('option type can only be call or put')

    return eePrem
def __strikes_and_weights(self, opt, swaption, strike, period): if isinstance(swaption, BlackLognormalModel): # lognormal model strike_at_maxvega = swaption.forward * np.exp(swaption.stdev ** 2 / 2) # at which max vega occurs lower_bound = 1e-10 # rate must be positive else: # normal model strike_at_maxvega = swaption.forward # at which max vega occurs lower_bound = -1e2 # rate can be negative cutoff_vega = swaption.vega(strike_at_maxvega) / 100.0 # cutoff at vega that is 100 times smaller find_strike = lambda k: swaption.vega(k) - cutoff_vega if opt == self.Option.Cap: cutoff_rate = solver(find_strike, strike_at_maxvega, 1e2) # to high end else: # == self.Option.Floor cutoff_rate = solver(find_strike, lower_bound, strike_at_maxvega) # to low end def curves(t, h): # bumped by h, new curve with (t, T, h) def bump(curve): def newcurve(T): # T can be float or Numpy.Array return curve(T) * np.exp(-h * self.__b(t, T)) # curve(t) = 1.0 return DiscountCurve.from_fn(t, newcurve) return bump(self.proj), bump(self.disc) def rate_to_h(swaprate): def find_h(h): newproj, newdisc = curves(period.fixdate.t, h) return swaption.underlier.pleg_parrate(newproj, newdisc) - swaprate try: # clamped to avoid double precision overflow return solver(find_h, -self.__shift_clamp, self.__shift_clamp) except ValueError: return self.__shift_clamp # only positive h causes trouble h_stt = rate_to_h(strike) h_end = rate_to_h(cutoff_rate) # clamped if too large n = self.nswpns H = np.linspace(h_stt, h_end, n + 1) K = np.zeros_like(H) G = np.zeros_like(H) for i, h in enumerate(H): newproj, newdisc = curves(period.fixdate.t, h) K[i] = swaption.underlier.pleg_parrate(newproj, newdisc) G[i] = period.accr_cov * newdisc(period.paydate.t) / swaption.underlier.pleg.get_annuity(newdisc) GK = G * (K - K[0]) # [g * (k - K[0]) for g, k in zip(G, K)] W = np.zeros(n) for i in range(1, n): W[i - 1] = (GK[i] - W[:i].dot(K[i] - K[:i])) / (K[i] - K[i - 1]) return K[:-1], W # swaption at K[n] is not used
def scaleIW(self, x, method='brentq'): if self.verbose: print 'Scaling input weights...' self.actCache.disable() scale = self.iwScale maxIter = 100 accuracy = 1e-4 method = method.lower() def stdErr(m): self.setIWMult(m) act = self.eval(x) err = np.abs(np.std(act)-scale) if self.verbose: print 'scale, mult: ', (np.std(act), m) return err def stdRoot(m): self.setIWMult(m) act = self.eval(x) err = np.std(act) - scale if self.verbose: print 'scale, mult: ', (np.std(act), m) return err if method == 'rprop': miniRProp(0.75, errFunc=stdErr, maxIter=maxIter, accuracy=accuracy) #verbose=self.verbose) elif method == 'brentq': m, r = spopt.brentq(stdRoot, 1.0e-5, 10.0, xtol=accuracy, full_output=True) if self.verbose: print 'brentq iterations: %d' % r.iterations elif method == 'simplex': r = spopt.minimize(stdErr, scale, method='Nelder-Mead', tol=accuracy, options={'maxiter': 100}) m = r.x else: raise Exception('Invalid scaleIW method %s.' % method) self.actCache.enable()
def TipAsymInversion(w, frac, matProp, simParmtrs, dt=None, Kprime_k=None): """ Evaluate distance from the front using tip assymptotics of the given regime, given the fracture width in the ribbon cells. Arguments: w (ndarray-float): fracture width frac (Fracture object): current fracture object matProp (MaterialProperties object): Material properties simParmtrs (SimulationParameters object): Simulation parameters dt (float): time step Kprime_k (ndarray-float): Kprime for current iteration of toughness loop. if not given, the Kprime from the given material properties object will be used. Returns: ndarray-float: distance (unsigned) from the front to the ribbon cells. """ if not Kprime_k == None: Kprime = Kprime_k else: Kprime = matProp.Kprime[frac.EltRibbon] if simParmtrs.tipAsymptote == 'U': ResFunc = TipAsym_Universal_zero_Res # ResFunc = TipAsym_Universal_delt_Res elif simParmtrs.tipAsymptote == 'Kt': return 0 # todo: to be implementd elif simParmtrs.tipAsymptote == 'M': ResFunc = TipAsym_viscStor_Res elif simParmtrs.tipAsymptote == 'Mt': ResFunc = TipAsym_viscLeakOff_Res elif simParmtrs.tipAsymptote == 'MK': ResFunc = TipAsym_MKTransition_Res elif simParmtrs.tipAsymptote == 'K': return w[frac.EltRibbon] ** 2 * (matProp.Eprime / Kprime) ** 2 (moving, a, b) = FindBracket_dist(w, frac.EltRibbon, Kprime, matProp.Eprime, frac.muPrime, matProp.Cprime, frac.sgndDist, dt, ResFunc) dist = -frac.sgndDist[frac.EltRibbon] for i in range(0, len(moving)): # todo: need to use the properties class TipAsmptargs = (w[frac.EltRibbon[moving[i]]], Kprime[moving[i]], matProp.Eprime, frac.muPrime[frac.EltRibbon[moving[i]]], matProp.Cprime[frac.EltRibbon[moving[i]]], -frac.sgndDist[frac.EltRibbon[moving[i]]], dt) try: dist[moving[i]] = brentq(ResFunc, a[i], b[i], TipAsmptargs) except RuntimeError: dist[moving[i]] = np.nan return dist # -----------------------------------------------------------------------------------------------------------------------
def coleman_operator(c, cp):
    """
    The approximate Coleman operator.

    Iteration with this operator corresponds to time iteration on the Euler
    equation.  Computes and returns the updated consumption policy
    c.  The array c is replaced with a function cf that implements
    univariate linear interpolation over the asset grid for each
    possible value of z.

    Parameters
    ----------
    c : array_like(float)
        A NumPy array of dim len(cp.asset_grid) times len(cp.z_vals)
    cp : ConsumerProblem
        An instance of ConsumerProblem that stores primitives

    Returns
    -------
    array_like(float)
        The updated policy, where updating is by the Coleman operator.

    """
    # === simplify names, set up arrays === #
    R, Π, β, du, b = cp.R, cp.Π, cp.β, cp.du, cp.b
    asset_grid, z_vals = cp.asset_grid, cp.z_vals
    z_size = len(z_vals)
    γ = R * β
    vals = np.empty(z_size)

    # === linear interpolation to get consumption function === #
    def cf(a):
        """
        The call cf(a) returns an array containing the values c(a, z)
        for each z in z_vals.  For each such z, the value c(a, z) is
        constructed by univariate linear approximation over asset
        space, based on the values in the array c
        """
        for i in range(z_size):
            vals[i] = np.interp(a, asset_grid, c[:, i])
        return vals

    # === solve for root to get Kc === #
    Kc = np.empty(c.shape)
    for i_a, a in enumerate(asset_grid):
        for i_z, z in enumerate(z_vals):
            def h(t):
                expectation = np.dot(du(cf(R * a + z - t)), Π[i_z, :])
                return du(t) - max(γ * expectation, du(R * a + z + b))
            Kc[i_a, i_z] = brentq(h, 1e-8, R * a + z + b)

    return Kc
def coleman_operator(g, grid, β, u_prime, f, f_prime, shocks, Kg=None):
    """
    The approximate Coleman operator, which takes an existing guess g of the
    optimal consumption policy and computes and returns the updated function
    Kg on the grid points.  An array to store the new set of values Kg is
    optionally supplied (to avoid having to allocate new arrays at each
    iteration).  If supplied, any existing data in Kg will be overwritten.

    Parameters
    ----------
    g : array_like(float, ndim=1)
        The value of the input policy function on grid points
    grid : array_like(float, ndim=1)
        The set of grid points
    β : scalar
        The discount factor
    u_prime : function
        The derivative u'(c) of the utility function
    f : function
        The production function f(k)
    f_prime : function
        The derivative f'(k)
    shocks : numpy array
        An array of draws from the shock, for Monte Carlo integration (to
        compute expectations).
    Kg : array_like(float, ndim=1) optional (default=None)
        Array to write output values to

    """
    # === Apply linear interpolation to g === #
    g_func = lambda x: np.interp(x, grid, g)

    # == Initialize Kg if necessary == #
    if Kg is None:
        Kg = np.empty_like(g)

    # == solve for updated consumption value
    for i, y in enumerate(grid):
        def h(c):
            vals = u_prime(g_func(f(y - c) * shocks)) * f_prime(y - c) * shocks
            return u_prime(c) - β * np.mean(vals)
        c_star = brentq(h, 1e-10, y - 1e-10)
        Kg[i] = c_star

    return Kg
def find_steady_state(self, a, b, method='brentq', **kwargs): """ Compute the equilibrium value of capital stock (per unit effective labor). Parameters ---------- a : float One end of the bracketing interval [a,b]. b : float The other end of the bracketing interval [a,b] method : str (default=`brentq`) Method to use when computing the steady state. Supported methods are `bisect`, `brenth`, `brentq`, `ridder`. See `scipy.optimize` for more details (including references). kwargs : optional Additional keyword arguments. Keyword arguments are method specific see `scipy.optimize` for details. Returns ------- x0 : float Zero of `f` between `a` and `b`. r : RootResults (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. """ if method == 'bisect': result = optimize.bisect(self.evaluate_k_dot, a, b, **kwargs) elif method == 'brenth': result = optimize.brenth(self.evaluate_k_dot, a, b, **kwargs) elif method == 'brentq': result = optimize.brentq(self.evaluate_k_dot, a, b, **kwargs) elif method == 'ridder': result = optimize.ridder(self.evaluate_k_dot, a, b, **kwargs) else: mesg = ("Method must be one of : 'bisect', 'brenth', 'brentq', " + "or 'ridder'.") raise ValueError(mesg) return result
def get_omega(self, k=1, ns=1000): # Get parameters tau = self.sys.g.tau kc = self.sys.g.k w = self.sys.pll.w # Determine min and max values for coupling sum function h_min = self.sys.g.func.min() h_max = self.sys.g.func.max() c_bar = self.sys.g.get_single_site_coupling(k) c_bar_sum = np.sum(c_bar) # should be 1 for normalized coupling h_sum_min = c_bar_sum * h_min h_sum_max = c_bar_sum * h_max # Determine search interval for s s_min = kc * tau * h_sum_min + w * tau s_max = kc * tau * h_sum_max + w * tau s_min = s_min - 2 # add safety margin s_max = s_max + 2 if s_min < 0: s_min = 0.0 # exclude negative frequencies s = np.linspace(s_min, s_max, ns) # Setup coupling sum function h_sum = lambda x: self.get_coupling_sum(x / tau, k=k) # Setup implicit equation for s f = lambda x: kc * tau * h_sum(x) + w * tau - x # Find sign changes as you go along curve # Assumes that there are no double sign changes between two values of s # A finer sampling interval can be achieved by increasing ns i_root = get_sign_changes(f(s)) if len(i_root) > 0: omega = [] for i in range(len(i_root)): # Numerically solve the implicit equation for omega s_tmp = optimize.brentq(f, s[i_root[i]], s[i_root[i] + 1]) omega.append(w + kc * h_sum(s_tmp)) return omega else: raise Exception('No global synchronization frequency found.')
def get_omega(self, k=1, ns=1000): # Get parameters tau = self.sys.g.tau kc = self.sys.g.k w = self.sys.pll.w # Determine min and max values for coupling sum function h_min = self.sys.g.func.min() h_max = self.sys.g.func.max() c_bar = self.sys.g.get_single_site_coupling(k) c_bar_sum = np.sum(c_bar) # should be 1 for normalized coupling h_sum_min = c_bar_sum * h_min h_sum_max = c_bar_sum * h_max # Determine search interval for s s_min = kc * tau * h_sum_min + w * tau s_max = kc * tau * h_sum_max + w * tau s_min = s_min - 2 # add safety margin s_max = s_max + 2 if s_min < 0: s_min = 0.0 # exclude negative frequencies s = np.linspace(s_min, s_max, ns) # Setup coupling sum function if tau != 0: h_sum = lambda x: self.get_coupling_sum(x / tau, k=k) else: h_sum = lambda x: self.get_coupling_sum(0, k=k) # Setup implicit equation for s f = lambda x: kc * tau * h_sum(x) + w * tau - x # Find sign changes as you go along curve # Assumes that there are no double sign changes between two values of s # A finer sampling interval can be achieved by increasing ns i_root = get_sign_changes(f(s)) if len(i_root) > 0: omega = [] for i in range(len(i_root)): # Numerically solve the implicit equation for omega s_tmp = optimize.brentq(f, s[i_root[i]], s[i_root[i] + 1]) omega.append(w + kc * h_sum(s_tmp)) return omega else: raise Exception('No global synchronization frequency found.')
def implied_volatility(price, S, K, t, r, flag): """Calculate the Black-Scholes implied volatility. :param price: the Black-Scholes option price :type price: float :param S: underlying asset price :type S: float :param K: strike price :type K: float :param t: time to expiration in years :type t: float :param r: risk-free interest rate :type r: float :param flag: 'c' or 'p' for call or put. :type flag: str >>> S = 100 >>> K = 100 >>> sigma = .2 >>> r = .01 >>> flag = 'c' >>> t = .5 >>> price = black_scholes(flag, S, K, t, r, sigma) >>> iv = implied_volatility(price, S, K, t, r, flag) >>> expected_price = 5.87602423383 >>> expected_iv = 0.2 >>> abs(expected_price - price) < 0.00001 True >>> abs(expected_iv - iv) < 0.01 True >>> sigma = 0.3 >>> S, K, t, r, flag = 100.0, 1000.0, 0.5, 0.05, 'p' >>> price = black_scholes(flag, S, K, t, r, sigma) >>> print (price) 875.309912028 >>> iv = implied_volatility(price, S, K, t, r, flag) >>> print (round(iv, 1)) 0.0 """ f = lambda sigma: price - black_scholes(flag, S, K, t, r, sigma) return brentq( f, a=1e-12, b=100, xtol=1e-15, rtol=1e-15, maxiter=1000, full_output=False )