The following 44 code examples, extracted from Python open-source projects, illustrate how to use numpy.arctanh().
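Before the extracted examples, here is a minimal orientation sketch that is not drawn from any of the projects below; the array values are purely illustrative. It shows the basic behavior of numpy.arctanh(): the element-wise inverse hyperbolic tangent, which for real input is finite only on the open interval (-1, 1), while complex input extends it beyond that interval (the branch cuts on the real axis are what several of the tests below probe).

import numpy as np

# Element-wise inverse hyperbolic tangent; sample values are illustrative only.
x = np.array([-0.5, 0.0, 0.5])
print(np.arctanh(x))                           # approximately [-0.5493, 0.0, 0.5493]

# arctanh inverts tanh on the open interval (-1, 1).
print(np.allclose(np.tanh(np.arctanh(x)), x))  # True

# Outside (-1, 1), real input yields inf/nan (with a RuntimeWarning),
# whereas complex input stays finite; the branch cuts lie on the real
# axis outside [-1, 1].
print(np.arctanh(2.0 + 0j))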
def test_branch_cuts(self):
    # check branch cuts and continuity on them
    yield _check_branch_cut, np.log,   -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log2,  -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
    yield _check_branch_cut, np.sqrt,  -0.5, 1j, 1, -1, True

    yield _check_branch_cut, np.arcsin, [ -2, 2],   [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arccos, [ -2, 2],   [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arctan, [0-2j, 2j], [1,  1],  -1, 1, True

    yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1,   1], -1, 1, True
    yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j],  1, -1, True
    yield _check_branch_cut, np.arctanh, [ -2,   2], [1j, 1j],  1, -1, True

    # check against bogus branch cuts: assert continuity between quadrants
    yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1,  1], 1, 1
    yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1,  1], 1, 1
    yield _check_branch_cut, np.arctan, [ -2,   2], [1j, 1j], 1, 1

    yield _check_branch_cut, np.arcsinh, [ -2,  2, 0],  [1j, 1j, 1], 1, 1
    yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1,  1, 1j], 1, 1
    yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1,  1, 1j], 1, 1
def test_branch_cuts_complex64(self):
    # check branch cuts and continuity on them
    yield _check_branch_cut, np.log,   -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log2,  -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.sqrt,  -0.5, 1j, 1, -1, True, np.complex64

    yield _check_branch_cut, np.arcsin, [ -2, 2],   [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arccos, [ -2, 2],   [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arctan, [0-2j, 2j], [1,  1],  -1, 1, True, np.complex64

    yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1,   1], -1, 1, True, np.complex64
    yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j],  1, -1, True, np.complex64
    yield _check_branch_cut, np.arctanh, [ -2,   2], [1j, 1j],  1, -1, True, np.complex64

    # check against bogus branch cuts: assert continuity between quadrants
    yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1,  1], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1,  1], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arctan, [ -2,   2], [1j, 1j], 1, 1, False, np.complex64

    yield _check_branch_cut, np.arcsinh, [ -2,  2, 0],  [1j, 1j, 1], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1,  1, 1j], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1,  1, 1j], 1, 1, False, np.complex64
def test_against_cmath(self):
    import cmath

    points = [-1-1j, -1+1j, +1-1j, +1+1j]
    name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
                'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
    atol = 4*np.finfo(np.complex).eps
    for func in self.funcs:
        fname = func.__name__.split('.')[-1]
        cname = name_map.get(fname, fname)
        try:
            cfunc = getattr(cmath, cname)
        except AttributeError:
            continue
        for p in points:
            a = complex(func(np.complex_(p)))
            b = cfunc(p)
            assert_(abs(a - b) < atol,
                    "%s %s: %s; cmath: %s" % (fname, p, a, b))
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,omega=omega)
    E= wendy.energy(x,v,m,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m,omega=omega)-E) < 10.**-10., "Energy not conserved during simple N-body integration with external harmonic potential"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_notracermasses():
    # approx should work with tracer sheets
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    m[N//2:]= 0.
    m*= 2.
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration with some tracer particles"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,omega=omega,approx=True,nleap=1000)
    E= wendy.energy(x,v,m,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m,omega=omega)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration with external harmonic potential"
        cnt+= 1
    return None
def test_againstexact_sech2disk_manyparticles():
    # Test that the exact N-body and the approximate N-body agree
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=2000,omega=omega)
    ge= wendy.nbody(x,v,m,0.05,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        txe,tve= next(ge)
        assert numpy.all(numpy.fabs(tx-txe) < 10.**-5.), "Exact and approximate N-body give different positions"
        assert numpy.all(numpy.fabs(tv-tve) < 10.**-5.), "Exact and approximate N-body give different velocities"
        cnt+= 1
    return None
def get_net_vectors(subject_list, kind, atlas_name="aal"):
    """
        subject_list : the subject short IDs list
        kind         : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
        atlas_name   : name of the atlas used

    returns:
        matrix       : matrix of connectivity vectors (num_subjects x num_connections)
    """

    # This is an alternative implementation
    networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
    # Get Fisher transformed matrices
    norm_networks = [np.arctanh(mat) for mat in networks]
    # Get upper diagonal indices
    idx = np.triu_indices_from(norm_networks[0], 1)
    # Get vectorised matrices
    vec_networks = [mat[idx] for mat in norm_networks]
    # Each subject should be a row of the matrix
    matrix = np.vstack(vec_networks)

    return matrix
def transformparameterndarray(parameterndarray, includejumps):
    parameterndarray = npu.tondim1(parameterndarray)
    res = [
        parameterndarray[0],                                 # meanlogvar
        2. * np.arctanh(parameterndarray[1]),                # persistence
        np.log(parameterndarray[2] * parameterndarray[2]),   # voloflogvar
        2. * np.arctanh(parameterndarray[3])                 # cor
        ]
    if includejumps:
        res.append(np.arctanh(2*parameterndarray[4] - 1))           # jumpintensity
        res.append(np.log(parameterndarray[5] * parameterndarray[5]))  # jumpvol
    return np.array(res)
def g_inv(x, t=4):
    """Inverse of g transform."""
    xp = np.clip(x, -t, t)
    diff = np.arctanh(np.clip(x - xp, -1 + 1e-10, 1 - 1e-10))
    return xp + diff
def initialize(self, z0):
    z = self.opt_model[2]
    z.set_value(floatX(np.arctanh(z0)))
def invert_bfgs(gen_model, invert_model, ftr_model, im, z_predict=None, npx=64):
    _f, z = invert_model
    nz = gen_model.nz
    if z_predict is None:
        z_predict = np_rng.uniform(-1., 1., size=(1, nz))
    else:
        z_predict = floatX(z_predict)
    z_predict = np.arctanh(z_predict)
    im_t = gen_model.transform(im)
    ftr = ftr_model(im_t)

    prob = optimize.minimize(f_bfgs, z_predict, args=(_f, im_t, ftr),
                             tol=1e-6, jac=True, method='L-BFGS-B',
                             options={'maxiter': 200})
    print('n_iters = %3d, f = %.3f' % (prob.nit, prob.fun))
    z_opt = prob.x
    z_opt_n = floatX(z_opt[np.newaxis, :])
    [f_opt, g, gx] = _f(z_opt_n, im_t, ftr)
    gx = gen_model.inverse_transform(gx, npx=npx)
    z_opt = np.tanh(z_opt)
    return gx, z_opt, f_opt
def test_time():
    # Just run the timer...
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000,full_output=True)
    tx,tv, time_elapsed= next(g)
    assert time_elapsed < 1., 'More than 1 second elapsed for simple problem'
    return None
def itransform_define(transform):
    """
    This function links the user's choice of transformation with its inverse
    """
    if transform == 'tanh':
        return np.arctanh
    elif transform == 'exp':
        return np.log
    elif transform == 'logit':
        return Family.logit
    elif transform is None:
        return np.array
    else:
        return None
def itransform_name_define(transform):
    """
    This function is used for model results table, displaying any transformations performed
    """
    if transform == 'tanh':
        return 'arctanh'
    elif transform == 'exp':
        return 'log'
    elif transform == 'logit':
        return 'ilogit'
    elif transform is None:
        return ''
    else:
        return None
def atanh(v):
    return v.__class__(numpy.arctanh(v))
def inv_clipping_sigma(x, max_in):
    xx = x.clip(-0.99*max_in, 0.99*max_in)
    return (max_in * numpy.arctanh(xx / max_in)).clip(-max_in, max_in)
def fisher_z(data):
    """
    Fisher's z-transformation

    For a given dataset :math:`p` bound to :math:`[0.0, 1.0]`, we can use
    Fisher's z-transformation to normalize it in an approximately Gaussian
    distribution.

    This transformation is computed as follows:

    .. math::

        z_p := \\frac{1}{2} \\text{ln} \\left ( \\frac{1+p}{1-p} \\right ) = \\text{arctanh}(p)
    """
    return np.arctanh(data)
def test_numpy_method():
    # This type of code is used frequently by PyMC3 users
    x = tt.dmatrix('x')
    data = np.random.rand(5, 5)
    x.tag.test_value = data
    for fct in [np.arccos, np.arccosh, np.arcsin, np.arcsinh,
                np.arctan, np.arctanh, np.ceil, np.cos, np.cosh, np.deg2rad,
                np.exp, np.exp2, np.expm1, np.floor, np.log,
                np.log10, np.log1p, np.log2, np.rad2deg,
                np.sin, np.sinh, np.sqrt, np.tan, np.tanh, np.trunc]:
        y = fct(x)
        f = theano.function([x], y)
        utt.assert_allclose(np.nan_to_num(f(data)),
                            np.nan_to_num(fct(data)))
def impl(self, x):
    # If x is an int8 or uint8, numpy.arctanh will compute the result in
    # half-precision (float16), where we want float32.
    x_dtype = str(getattr(x, 'dtype', ''))
    if x_dtype in ('int8', 'uint8'):
        return numpy.arctanh(x, sig='f')
    return numpy.arctanh(x)
def calcAdimCtrl(self,alfa,beta):
    #u = numpy.empty((self.N,self.m))
    Nu = len(alfa)
    u = numpy.empty((Nu,2))

    restrictions = self.restrictions
    alpha_min = restrictions['alpha_min']
    alpha_max = restrictions['alpha_max']
    beta_min = restrictions['beta_min']
    beta_max = restrictions['beta_max']

    a1 = .5*(alpha_max + alpha_min)
    a2 = .5*(alpha_max - alpha_min)
    b1 = .5*(beta_max + beta_min)
    b2 = .5*(beta_max - beta_min)

    alfa -= a1
    alfa *= 1.0/a2

    beta -= b1
    beta *= 1.0/b2

    u[:,0] = alfa.copy()
    u[:,1] = beta.copy()

    # Basic saturation
    for j in range(2):
        for k in range(Nu):
            if u[k,j] > 0.99999:
                u[k,j] = 0.99999
            if u[k,j] < -0.99999:
                u[k,j] = -0.99999

    u = numpy.arctanh(u)
    return u
def arctanh(inp):
    if isinstance(inp, ooarray) and inp.dtype == object:
        return ooarray([arctanh(elem) for elem in inp])
    if not isinstance(inp, oofun):
        return np.arctanh(inp)

    # TODO: move it outside of arctanh definition
    def interval(arg_inf, arg_sup):
        # raising a plain string is invalid in Python 3; use a proper exception
        raise NotImplementedError('interval for arctanh is unimplemented yet')

    r = oofun(np.arctanh, inp,
              d = lambda x: FDmisc.Diag(1.0/(1 - x**2)),
              vectorized = True,
              interval = interval)
    return r
def confidence_interval(rho, N):
    """
    Give a 95% confidence interval for a Spearman correlation score, given
    the correlation and the number of cases.
    """
    z = np.arctanh(rho)
    interval = 1.96 / np.sqrt(N - 3)
    low = z - interval
    high = z + interval
    return pd.Series(
        [rho, np.tanh(low), np.tanh(high)],
        index=['acc', 'low', 'high']
    )
def test_numpy_ufuncs(self):
    # test ufuncs of numpy 1.9.2. see:
    # http://docs.scipy.org/doc/numpy/reference/ufuncs.html

    # some functions are skipped because it may return different result
    # for unicode input depending on numpy version

    for name, idx in compat.iteritems(self.indices):
        for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
                     np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,
                     np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,
                     np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,
                     np.rad2deg]:
            if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):
                # raise TypeError or ValueError (PeriodIndex)
                # PeriodIndex behavior should be changed in future version
                with tm.assertRaises(Exception):
                    func(idx)
            elif isinstance(idx, (Float64Index, Int64Index)):
                # coerces to float (e.g. np.sin)
                result = func(idx)
                exp = Index(func(idx.values), name=idx.name)
                self.assert_index_equal(result, exp)
                self.assertIsInstance(result, pd.Float64Index)
            else:
                # raise AttributeError or TypeError
                if len(idx) == 0:
                    continue
                else:
                    with tm.assertRaises(Exception):
                        func(idx)

        for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
            if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin):
                # raise TypeError or ValueError (PeriodIndex)
                with tm.assertRaises(Exception):
                    func(idx)
            elif isinstance(idx, (Float64Index, Int64Index)):
                # results in bool array
                result = func(idx)
                exp = func(idx.values)
                self.assertIsInstance(result, np.ndarray)
                tm.assertNotIsInstance(result, Index)
            else:
                if len(idx) == 0:
                    continue
                else:
                    with tm.assertRaises(Exception):
                        func(idx)