The following 48 code examples, collected from open source Python projects, illustrate how to use numpy.nextafter().
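numpy.nextafter(x1, x2) returns the next representable floating-point value after x1 in the direction of x2, which is why the examples below use it to tighten interval bounds and to avoid log(0). A minimal standalone sketch of that behaviour (not taken from any of the projects below):

import numpy as np

# One ULP above and below 1.0; the upward step equals the machine epsilon.
up = np.nextafter(1.0, 2.0)
down = np.nextafter(1.0, 0.0)
assert up - 1.0 == np.finfo(np.float64).eps
assert down < 1.0

# Smallest positive double: a common stand-in for "just above zero".
tiny = np.nextafter(0.0, 1.0)
assert tiny > 0.0 and np.isfinite(np.log(tiny))
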
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    a_f16 = a.view(dtype=float16)

    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])

    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])

    # switch to negatives
    a |= 0x8000

    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])

    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])

def _sample(self, n_samples):
    # samples must be sampled from (-1, 1) rather than [-1, 1)
    loc, scale = self.loc, self.scale
    if not self.is_reparameterized:
        loc = tf.stop_gradient(loc)
        scale = tf.stop_gradient(scale)
    shape = tf.concat([[n_samples], self.batch_shape], 0)
    uniform_samples = tf.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype)
    samples = loc - scale * tf.sign(uniform_samples) * \
        tf.log1p(-tf.abs(uniform_samples))
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(
        tf.TensorShape([static_n_samples]).concatenate(
            self.get_batch_shape()))
    return samples

def test_standard_scaler_trasform_with_partial_fit():
    # Check some postconditions after applying partial_fit and transform
    X = X_2d[:100, :]

    scaler_incr = StandardScaler()
    for i, batch in enumerate(gen_batches(X.shape[0], 1)):

        X_sofar = X[:(i + 1), :]
        chunks_copy = X_sofar.copy()
        scaled_batch = StandardScaler().fit_transform(X_sofar)

        scaler_incr = scaler_incr.partial_fit(X[batch])
        scaled_incr = scaler_incr.transform(X_sofar)

        assert_array_almost_equal(scaled_batch, scaled_incr)
        assert_array_almost_equal(X_sofar, chunks_copy)  # No change
        right_input = scaler_incr.inverse_transform(scaled_incr)
        assert_array_almost_equal(X_sofar, right_input)

        zero = np.zeros(X.shape[1])
        epsilon = np.nextafter(0, 1)
        assert_array_less(zero, scaler_incr.var_ + epsilon)  # as less or equal
        assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the Scaler has been already fitted
        assert_equal((i + 1), scaler_incr.n_samples_seen_)

def _sample_n(self, n, seed=None):
    sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
    logits = self.logits * array_ops.ones(sample_shape)
    if logits.get_shape().ndims == 2:
        logits_2d = logits
    else:
        logits_2d = array_ops.reshape(logits, [-1, self.event_size])
    np_dtype = self.dtype.as_numpy_dtype()
    # Draw from the open interval (0, 1) so that log(uniform) stays finite.
    minval = np.nextafter(np_dtype(0), np_dtype(1))
    uniform = random_ops.random_uniform(shape=array_ops.shape(logits_2d),
                                        minval=minval,
                                        maxval=1,
                                        dtype=self.dtype,
                                        seed=seed)
    # Gumbel-softmax relaxation: perturb the logits with Gumbel noise,
    # anneal with the temperature, then take a log-softmax.
    gumbel = -math_ops.log(-math_ops.log(uniform))
    noisy_logits = math_ops.div(gumbel + logits_2d, self.temperature)
    samples = nn_ops.log_softmax(noisy_logits)
    ret = array_ops.reshape(samples, sample_shape)
    return ret

def test_float_remainder_corner_cases(self):
    # Check remainder magnitude.
    for dt in np.typecodes['Float']:
        b = np.array(1.0, dtype=dt)
        a = np.nextafter(np.array(0.0, dtype=dt), -b)
        rem = np.remainder(a, b)
        assert_(rem <= b, 'dt: %s' % dt)
        rem = np.remainder(-a, -b)
        assert_(rem >= -b, 'dt: %s' % dt)

    # Check nans, inf
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        warnings.simplefilter('ignore', RuntimeWarning)
        for dt in np.typecodes['Float']:
            fone = np.array(1.0, dtype=dt)
            fzer = np.array(0.0, dtype=dt)
            finf = np.array(np.inf, dtype=dt)
            fnan = np.array(np.nan, dtype=dt)
            rem = np.remainder(fone, fzer)
            assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
            # MSVC 2008 returns NaN here, so disable the check.
            #rem = np.remainder(fone, finf)
            #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
            rem = np.remainder(fone, fnan)
            assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
            rem = np.remainder(finf, fone)
            assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))

def _test_nextafter(t):
    one = t(1)
    two = t(2)
    zero = t(0)
    eps = np.finfo(t).eps
    assert_(np.nextafter(one, two) - one == eps)
    assert_(np.nextafter(one, zero) - one < 0)
    assert_(np.isnan(np.nextafter(np.nan, one)))
    assert_(np.isnan(np.nextafter(one, np.nan)))
    assert_(np.nextafter(one, one) == one)

def test_nextafter_vs_spacing():
    # XXX: spacing does not handle long double yet
    for t in [np.float32, np.float64]:
        for _f in [1, 1e-5, 1000]:
            f = t(_f)
            f1 = t(_f + 1)
            assert_(np.nextafter(f, f1) - f == np.spacing(f))

def test_float_modulus_corner_cases(self):
    # Check remainder magnitude.
    for dt in np.typecodes['Float']:
        b = np.array(1.0, dtype=dt)
        a = np.nextafter(np.array(0.0, dtype=dt), -b)
        rem = self.mod(a, b)
        assert_(rem <= b, 'dt: %s' % dt)
        rem = self.mod(-a, -b)
        assert_(rem >= -b, 'dt: %s' % dt)

    # Check nans, inf
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        warnings.simplefilter('ignore', RuntimeWarning)
        for dt in np.typecodes['Float']:
            fone = np.array(1.0, dtype=dt)
            fzer = np.array(0.0, dtype=dt)
            finf = np.array(np.inf, dtype=dt)
            fnan = np.array(np.nan, dtype=dt)
            rem = self.mod(fone, fzer)
            assert_(np.isnan(rem), 'dt: %s' % dt)
            # MSVC 2008 returns NaN here, so disable the check.
            #rem = self.mod(fone, finf)
            #assert_(rem == fone, 'dt: %s' % dt)
            rem = self.mod(fone, fnan)
            assert_(np.isnan(rem), 'dt: %s' % dt)
            rem = self.mod(finf, fone)
            assert_(np.isnan(rem), 'dt: %s' % dt)

def loglikelihood(num_points, num_dims, clusters, distances_dict_values_cl):
    ll = 0
    variance = cluster_variance(num_points, clusters,
                                distances_dict_values_cl) or np.nextafter(0, 1)
    # print 'var', variance
    for cluster in clusters:
        fRn = len(cluster)
        t1 = fRn * np.log(fRn)
        t2 = fRn * np.log(num_points)
        t3 = ((fRn * num_dims) / 2.0) * np.log((2.0 * np.pi) * variance)
        t4 = (fRn - 1.0) / 2.0
        ll += t1 - t2 - t3 - t4
    return ll

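The `or np.nextafter(0, 1)` above guards against a degenerate clustering in which every point coincides with its centroid: a variance of exactly 0.0 is falsy, so the smallest positive double is substituted and `np.log(variance)` stays finite. A minimal sketch of just that guard (standalone, not from the project):

import numpy as np

variance = 0.0 or np.nextafter(0, 1)   # 0.0 is falsy, so the tiny value wins
print(variance)                        # ~5e-324, the smallest positive double
print(np.log(variance))                # about -744.4 instead of -inf
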
def _sample_n(self, n, seed=None):
    shape = array_ops.concat(0, ([n], self.batch_shape()))
    # Sample uniformly-at-random from the open-interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))

def _sample_n(self, n, seed=None):
    shape = array_ops.concat(0, ([n], array_ops.shape(self._lam)))
    # Sample uniformly-at-random from the open-interval (0, 1).
    sampled = random_ops.random_uniform(
        shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(0.),
                            self.dtype.as_numpy_dtype(1.)),
        maxval=array_ops.ones((), dtype=self.dtype),
        seed=seed,
        dtype=self.dtype)
    return -math_ops.log(sampled) / self._lam

def test_uniform_range_bounds(self):
    fmin = np.finfo('float').min
    fmax = np.finfo('float').max

    func = mt19937.uniform
    assert_raises(OverflowError, func, -np.inf, 0)
    assert_raises(OverflowError, func, 0, np.inf)
    assert_raises(OverflowError, func, fmin, fmax)
    assert_raises(OverflowError, func, [-np.inf], [0])
    assert_raises(OverflowError, func, [0], [np.inf])

    # (fmax / 1e17) - fmin is within range, so this should not throw
    # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
    # DBL_MAX by increasing fmin a bit
    mt19937.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)

def test_float_remainder_corner_cases(self):
    # Check remainder magnitude.
    for dt in np.typecodes['Float']:
        b = np.array(1.0, dtype=dt)
        a = np.nextafter(np.array(0.0, dtype=dt), -b)
        rem = np.remainder(a, b)
        assert_(rem <= b, 'dt: %s' % dt)
        rem = np.remainder(-a, -b)
        assert_(rem >= -b, 'dt: %s' % dt)

    # Check nans, inf
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in remainder")
        for dt in np.typecodes['Float']:
            fone = np.array(1.0, dtype=dt)
            fzer = np.array(0.0, dtype=dt)
            finf = np.array(np.inf, dtype=dt)
            fnan = np.array(np.nan, dtype=dt)
            rem = np.remainder(fone, fzer)
            assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
            # MSVC 2008 returns NaN here, so disable the check.
            #rem = np.remainder(fone, finf)
            #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
            rem = np.remainder(fone, fnan)
            assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
            rem = np.remainder(finf, fone)
            assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))

def test_float_modulus_corner_cases(self):
    # Check remainder magnitude.
    for dt in np.typecodes['Float']:
        b = np.array(1.0, dtype=dt)
        a = np.nextafter(np.array(0.0, dtype=dt), -b)
        rem = self.mod(a, b)
        assert_(rem <= b, 'dt: %s' % dt)
        rem = self.mod(-a, -b)
        assert_(rem >= -b, 'dt: %s' % dt)

    # Check nans, inf
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in remainder")
        for dt in np.typecodes['Float']:
            fone = np.array(1.0, dtype=dt)
            fzer = np.array(0.0, dtype=dt)
            finf = np.array(np.inf, dtype=dt)
            fnan = np.array(np.nan, dtype=dt)
            rem = self.mod(fone, fzer)
            assert_(np.isnan(rem), 'dt: %s' % dt)
            # MSVC 2008 returns NaN here, so disable the check.
            #rem = self.mod(fone, finf)
            #assert_(rem == fone, 'dt: %s' % dt)
            rem = self.mod(fone, fnan)
            assert_(np.isnan(rem), 'dt: %s' % dt)
            rem = self.mod(finf, fone)
            assert_(np.isnan(rem), 'dt: %s' % dt)

def _uniform_inclusive(loc=0.0, scale=1.0):
    # like scipy.stats.distributions but inclusive of `high`
    # XXX scale + 1. might not actually be a float after scale if
    # XXX scale is very large.
    return uniform(loc=loc, scale=np.nextafter(scale, scale + 1.))

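The one-ULP nudge is what makes the upper bound reachable: a half-open draw from [low, high) can never return high itself, but it can once the bound is moved to the next float above it. A standalone numpy sketch of the same idea (illustrative only, not the project's code):

import numpy as np

high = 1.0
high_inclusive = np.nextafter(high, high + 1.0)   # next float above 1.0
# np.random.uniform draws from the half-open interval [low, high), so with
# the nudged bound the original `high` is the largest value it can return.
samples = np.random.uniform(0.0, high_inclusive, size=1000)
assert samples.max() <= high
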
def test_real_bounds():
    # should give same answer as using check_limits() but this is easier
    # to read
    a = Real(1., 2.1)
    assert_false(0.99 in a)
    assert_true(1. in a)
    assert_true(2.09 in a)
    assert_true(2.1 in a)
    assert_false(np.nextafter(2.1, 3.) in a)

def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], self.batch_shape()), 0)
    # Sample uniformly-at-random from the open-interval (-1, 1).
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log(1. - math_ops.abs(uniform_samples)))

def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
    np_dtype = self.dtype.as_numpy_dtype()
    # Sample from the open interval (0, 1) so the nested logs stay finite.
    minval = np.nextafter(np_dtype(0), np_dtype(1))
    uniform = random_ops.random_uniform(shape=shape,
                                        minval=minval,
                                        maxval=1,
                                        dtype=self.dtype,
                                        seed=seed)
    # Inverse-CDF sampling of a standard Gumbel, then shift and scale.
    sampled = -math_ops.log(-math_ops.log(uniform))
    return sampled * self.scale + self.loc

def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
    np_dtype = self.dtype.as_numpy_dtype()
    # Sample from the open interval (0, 1) so both logs stay finite.
    minval = np.nextafter(np_dtype(0), np_dtype(1))
    uniform = random_ops.random_uniform(shape=shape,
                                        minval=minval,
                                        maxval=1,
                                        dtype=self.dtype,
                                        seed=seed)
    # logit(uniform) is a standard Logistic sample; shift and scale it.
    sampled = math_ops.log(uniform) - math_ops.log(1 - uniform)
    return sampled * self.scale + self.loc

def _sample_n(self, n, seed=None):
    shape = array_ops.concat(([n], array_ops.shape(self._lam)), 0)
    # Sample uniformly-at-random from the open-interval (0, 1).
    sampled = random_ops.random_uniform(
        shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(0.),
                            self.dtype.as_numpy_dtype(1.)),
        maxval=array_ops.ones((), dtype=self.dtype),
        seed=seed,
        dtype=self.dtype)
    return -math_ops.log(sampled) / self._lam

def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If ``normalize=True``, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: Vocabulary or array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values.
    normalize : bool, optional (Default: False)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo_spa.vocab import Vocabulary

    if isinstance(data, SemanticPointer):
        data = data.v

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        if isinstance(next(iter(vocab)), SemanticPointer):
            vocab = [p.v for p in vocab]
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary"
                              % (type(vocab).__name__), attr='vocab')

    dots = np.dot(vectors, data.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data.T, axis=0, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        if len(dots.shape) == 1:
            vnorm = np.squeeze(vnorm)

        dots /= dnorm
        dots /= vnorm

    return dots.T

def _compute_mi_cc(x, y, n_neighbors):
    """Compute mutual information between two continuous variables.

    Parameters
    ----------
    x, y : ndarray, shape (n_samples,)
        Samples of two continuous random variables, must have an identical
        shape.
    n_neighbors : int
        Number of nearest neighbors to search for each point, see [1]_.

    Returns
    -------
    mi : float
        Estimated mutual information. If it turned out to be negative it is
        replaced by 0.

    Notes
    -----
    True mutual information can't be negative. If its estimate by a numerical
    method is negative, it means (providing the method is adequate) that the
    mutual information is close to 0 and replacing it by 0 is a reasonable
    strategy.

    References
    ----------
    .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
           information". Phys. Rev. E 69, 2004.
    """
    n_samples = x.size

    x = x.reshape((-1, 1))
    y = y.reshape((-1, 1))
    xy = np.hstack((x, y))

    # Here we rely on NearestNeighbors to select the fastest algorithm.
    nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)

    nn.fit(xy)
    radius = nn.kneighbors()[0]
    radius = np.nextafter(radius[:, -1], 0)

    # Algorithm is selected explicitly to allow passing an array as radius
    # later (not all algorithms support this).
    nn.set_params(algorithm='kd_tree')

    nn.fit(x)
    ind = nn.radius_neighbors(radius=radius, return_distance=False)
    nx = np.array([i.size for i in ind])

    nn.fit(y)
    ind = nn.radius_neighbors(radius=radius, return_distance=False)
    ny = np.array([i.size for i in ind])

    mi = (digamma(n_samples) + digamma(n_neighbors) -
          np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)))

    return max(0, mi)

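In the Kraskov estimator above, `np.nextafter(radius[:, -1], 0)` shrinks each k-th neighbor distance by one ULP; since scikit-learn's radius queries include points lying on the boundary, this turns the subsequent neighbor counts into strictly-less-than counts, which is what the estimator requires. A tiny standalone illustration of the nudge itself (not scikit-learn's code):

import numpy as np

kth_distances = np.array([0.5, 1.0, 2.0])
radii = np.nextafter(kth_distances, 0)   # one ULP below each distance
assert np.all(radii < kth_distances)     # points exactly at the k-th distance now fall outside
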