We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.errstate().
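Before the examples, a minimal sketch of the basic pattern: np.errstate is a context manager that temporarily overrides NumPy's floating-point error handling (the divide, over, under, and invalid categories) and restores the previous settings on exit.

import numpy as np

# Inside the block, divide-by-zero is silenced; outside, the default
# behavior (a RuntimeWarning) applies again.
with np.errstate(divide='ignore'):
    result = np.array([1.0]) / np.array([0.0])  # -> array([inf]), no warning
print(result)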
def test_warnings(self):
    # test warning code path
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        with np.errstate(all="warn"):
            np.divide(1, 0.)
            self.assertEqual(len(w), 1)
            self.assertTrue("divide by zero" in str(w[0].message))
            np.array(1e300) * np.array(1e300)
            self.assertEqual(len(w), 2)
            self.assertTrue("overflow" in str(w[-1].message))
            np.array(np.inf) - np.array(np.inf)
            self.assertEqual(len(w), 3)
            self.assertTrue("invalid value" in str(w[-1].message))
            np.array(1e-300) * np.array(1e-300)
            self.assertEqual(len(w), 4)
            self.assertTrue("underflow" in str(w[-1].message))
def test_zero_division(self):
    with np.errstate(all="ignore"):
        for t in [np.complex64, np.complex128]:
            a = t(0.0)
            b = t(1.0)
            assert_(np.isinf(b/a))
            b = t(complex(np.inf, np.inf))
            assert_(np.isinf(b/a))
            b = t(complex(np.inf, np.nan))
            assert_(np.isinf(b/a))
            b = t(complex(np.nan, np.inf))
            assert_(np.isinf(b/a))
            b = t(complex(np.nan, np.nan))
            assert_(np.isnan(b/a))
            b = t(0.)
            assert_(np.isnan(b/a))
def test_signed_zeros(self):
    with np.errstate(all="ignore"):
        for t in [np.complex64, np.complex128]:
            # tupled (numerator, denominator, expected)
            # for testing as expected == numerator/denominator
            data = (
                (( 0.0, -1.0), ( 0.0,  1.0), (-1.0, -0.0)),
                (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)),
                (( 0.0, -1.0), (-0.0, -1.0), ( 1.0,  0.0)),
                (( 0.0, -1.0), (-0.0,  1.0), (-1.0,  0.0)),
                (( 0.0,  1.0), ( 0.0, -1.0), (-1.0,  0.0)),
                (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)),
                ((-0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)),
                ((-0.0,  1.0), ( 0.0, -1.0), (-1.0, -0.0))
            )
            for cases in data:
                n = cases[0]
                d = cases[1]
                ex = cases[2]
                result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
                # check real and imag parts separately to avoid comparison
                # in array context, which does not account for signed zeros
                assert_equal(result.real, ex[0])
                assert_equal(result.imag, ex[1])
def test_identity_equality_mismatch(self):
    a = np.array([np.nan], dtype=object)

    with warnings.catch_warnings():
        warnings.filterwarnings('always', '', FutureWarning)
        assert_warns(FutureWarning, np.equal, a, a)
        assert_warns(FutureWarning, np.not_equal, a, a)

    with warnings.catch_warnings():
        warnings.filterwarnings('error', '', FutureWarning)
        assert_raises(FutureWarning, np.equal, a, a)
        assert_raises(FutureWarning, np.not_equal, a, a)
        # And the others do not warn:
        with np.errstate(invalid='ignore'):
            np.less(a, a)
            np.greater(a, a)
            np.less_equal(a, a)
            np.greater_equal(a, a)
def gisfinite(x):
    """like isfinite, but always raise an error if type not supported instead
    of returning a TypeError object.

    Notes
    -----
    isfinite and other ufunc sometimes return a NotImplementedType object
    instead of raising any exception. This function is a wrapper to make sure
    an exception is always raised.

    This should be removed once this problem is solved at the Ufunc level."""
    from numpy.core import isfinite, errstate
    with errstate(invalid='ignore'):
        st = isfinite(x)
        if isinstance(st, type(NotImplemented)):
            raise TypeError("isfinite not supported for this type")
    return st
def gisinf(x):
    """like isinf, but always raise an error if type not supported instead
    of returning a TypeError object.

    Notes
    -----
    isinf and other ufunc sometimes return a NotImplementedType object
    instead of raising any exception. This function is a wrapper to make sure
    an exception is always raised.

    This should be removed once this problem is solved at the Ufunc level."""
    from numpy.core import isinf, errstate
    with errstate(invalid='ignore'):
        st = isinf(x)
        if isinstance(st, type(NotImplemented)):
            raise TypeError("isinf not supported for this type")
    return st
def __ipow__(self, other):
    """
    Raise self to the power other, in place.

    """
    other_data = getdata(other)
    other_mask = getmask(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    new_mask = mask_or(other_mask, invalid)
    self._mask = mask_or(self._mask, new_mask)
    return self
def get_inverse_distance_matrix(self):
    """Calculates the inverse distance matrix A defined as:

        A_ij = 1/|r_i - r_j|

    For periodic systems the distance of an atom from itself is the
    smallest displacement of an atom from one of its periodic copies, and
    the distance of two different atoms is the distance of the two closest
    copies.

    Returns:
        np.array: Symmetric 2D matrix containing the pairwise inverse
        distances.
    """
    if self._inverse_distance_matrix is None:
        distance_matrix = self.get_distance_matrix()
        with np.errstate(divide='ignore'):
            inv_distance_matrix = np.reciprocal(distance_matrix)
        self._inverse_distance_matrix = inv_distance_matrix
    return self._inverse_distance_matrix
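With divide='ignore', the zero self-distances on the diagonal silently become inf instead of triggering a RuntimeWarning. A standalone sketch of the same idea (the dmat values are made up for illustration):

import numpy as np

dmat = np.array([[0.0, 2.0],
                 [2.0, 0.0]])  # hypothetical pairwise distances
with np.errstate(divide='ignore'):
    inv = np.reciprocal(dmat)  # diagonal becomes inf, no warning emitted
print(inv)  # [[inf 0.5] [0.5 inf]]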
def plotImage(dta, saveFigName):
    plt.clf()
    dx, dy = 1, 1
    # generate 2 2d grids for the x & y bounds
    with np.errstate(invalid='ignore'):
        y, x = np.mgrid[
            slice(0, len(dta), dx),
            slice(0, len(dta[0]), dy)
        ]
        z = dta
    z_min, z_max = -np.abs(z).max(), np.abs(z).max()
    #try:
    c = plt.pcolormesh(x, y, z, cmap='hsv', vmin=z_min, vmax=z_max)
    #except ??? as err:  # data not regular?
    #    c = plt.pcolor(x, y, z, cmap='hsv', vmin=z_min, vmax=z_max)
    d = plt.colorbar(c, orientation='vertical')
    lx = plt.xlabel("index")
    ly = plt.ylabel("season length")
    plt.savefig(str(saveFigName))
def solve(self, verbose=True):
    """
    solves coupled PDEs

    Keyword Arguments:
        verbose {bool} -- if true, verbose output (default: {True})
                          with estimation of computational time etc.
    """
    self.reset()
    with np.errstate(invalid='raise'):
        for i in np.arange(1, len(np.linspace(0, self.tend,
                                              round(self.tend / self.dt) + 1))):
            # try:
            self.integrate_one_timestep(i)
            if verbose:
                self.estimate_time_of_computation(i)
            # except FloatingPointError as inst:
            #     print('\nABORT!!!: Numerical instability... Please, '
            #           'adjust dt and dx manually...')
            #     traceback.print_exc()
            #     sys.exit()
def _calc_orb_grid(self, orbital):
    """ Calculate grid of values for this orbital

    Args:
        orbital (moldesign.Orbital): orbital to calculate grid for

    Returns:
        VolumetricGrid: grid that amplitudes were computed on
        Vector[1/length**1.5]: list of orbital amplitudes at each point on grid
    """
    # NEWFEATURE: limit grid size based on the non-zero atomic centers. Useful
    # for localized orbitals, which otherwise require a high resolution grid
    grid = padded_grid(self.wfn.positions,
                       padding=3.0 * u.angstrom,
                       npoints=self.numpoints)
    with np.errstate(under='ignore'):
        values = orbital(grid.allpoints())
    return grid, values
def snr(vref, vcmp):
    """
    Compute Signal to Noise Ratio (SNR) of two images.

    Parameters
    ----------
    vref : array_like
        Reference image
    vcmp : array_like
        Comparison image

    Returns
    -------
    x : float
        SNR of `vcmp` with respect to `vref`
    """

    dv = np.var(vref)
    with np.errstate(divide='ignore'):
        rt = dv / mse(vref, vcmp)
    return 10.0 * np.log10(rt)
def isnr(vref, vdeg, vrst):
    """
    Compute Improvement Signal to Noise Ratio (ISNR) for reference,
    degraded, and restored images.

    Parameters
    ----------
    vref : array_like
        Reference image
    vdeg : array_like
        Degraded image
    vrst : array_like
        Restored image

    Returns
    -------
    x : float
        ISNR of `vrst` with respect to `vref` and `vdeg`
    """

    msedeg = mse(vref, vdeg)
    mserst = mse(vref, vrst)
    with np.errstate(divide='ignore'):
        rt = msedeg / mserst
    return 10.0 * np.log10(rt)
def bsnr(vblr, vnsy):
    """
    Compute Blurred Signal to Noise Ratio (BSNR) for a blurred and noisy
    image.

    Parameters
    ----------
    vblr : array_like
        Blurred noise free image
    vnsy : array_like
        Blurred image with additive noise

    Returns
    -------
    x : float
        BSNR of `vnsy` with respect to `vblr`
    """

    blrvar = np.var(vblr)
    nsevar = np.var(vnsy - vblr)
    with np.errstate(divide='ignore'):
        rt = blrvar / nsevar
    return 10.0 * np.log10(rt)
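The snr, isnr, and bsnr helpers above all call an mse function that is not part of this excerpt; a minimal sketch of the conventional mean-squared-error definition they appear to assume:

import numpy as np

def mse(vref, vcmp):
    """Mean squared error between two arrays (assumed definition)."""
    return np.mean(np.abs(vref - vcmp)**2)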
def zdivide(x, y):
    """
    Return x/y, with 0 instead of NaN where y is 0.

    Parameters
    ----------
    x : array_like
        Numerator
    y : array_like
        Denominator

    Returns
    -------
    z : ndarray
        Quotient `x`/`y`
    """

    with np.errstate(divide='ignore', invalid='ignore'):
        div = x / y
    div[np.logical_or(np.isnan(div), np.isinf(div))] = 0
    return div
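A quick usage check of zdivide as defined above; both the 0/0 (invalid) and 1/0 (divide) cases are mapped to 0:

import numpy as np

x = np.array([1.0, 0.0, 2.0])
y = np.array([0.0, 0.0, 4.0])
# zdivide as defined above
print(zdivide(x, y))  # -> [0.  0.  0.5]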
def zero_safe_divide(a, b, default_error_value=0.):
    """Element-wise division that accounts for floating point errors.

    Both invalid floating-point (e.g. 0. / 0.) and divide by zero errors
    are suppressed. Resulting values (NaN and Inf respectively) are
    replaced with `default_error_value`.
    """
    import numpy as np

    with np.errstate(invalid='ignore', divide='ignore'):
        quotient = np.true_divide(a, b)
        bad_value_indices = np.logical_or(
            np.isnan(quotient), np.isinf(quotient))
        quotient[bad_value_indices] = default_error_value

    return quotient
def pdf(cls, x, a, b):
    """Density function at `x`.

    Parameters
    ----------
    x : float or array-like
    a : float or array-like
    b : float or array-like

    Returns
    -------
    np.array

    """
    with np.errstate(divide='ignore'):
        p = np.where((x < np.exp(a)) | (x > np.exp(b)), 0, np.reciprocal(x))
    p /= (b - a)  # normalize
    return p
def get_sph_theta(coords, normal):
    # The angle (theta) with respect to the normal (J), is the arccos
    # of the dot product of the normal with the normalized coordinate
    # vector.

    res_normal = resize_vector(normal, coords)

    # check if the normal vector is normalized
    # since arccos requires the vector to be normalised
    res_normal = normalize_vector(res_normal)

    tile_shape = [1] + list(coords.shape)[1:]

    J = np.tile(res_normal, tile_shape)

    JdotCoords = np.sum(J * coords, axis=0)

    with np.errstate(invalid='ignore'):
        ret = np.arccos(JdotCoords / np.sqrt(np.sum(coords**2, axis=0)))

    ret[np.isnan(ret)] = 0

    return ret
def log_mat(x, n, g_coeff, c_1, const):
    with np.errstate(divide='ignore', invalid='ignore'):
        K = g_coeff.shape[0] - 1
        thres = 2 * c_1 * math.log(n) / n
        [T, X] = np.meshgrid(thres, x)
        ratio = np.clip(2*X/T - 1, 0, 1)
        # force MATLAB-esque behavior with NaN, inf
        ratio[T == 0] = 1.0
        ratio[X == 0] = 0.0
        q = np.reshape(np.arange(K), [1, 1, K])
        g = np.tile(np.reshape(g_coeff, [1, 1, K + 1]), [c_1.shape[1], 1])
        g[:, :, 0] = g[:, :, 0] + np.log(thres)
        MLE = np.log(X) + (1-X) / (2*X*n)
        MLE[X == 0] = -np.log(n) - const
        tmp = (n*X[:, :, np.newaxis] - q) / (T[:, :, np.newaxis]*(n - q))
        polyApp = np.sum(np.cumprod(np.dstack([np.ones(T.shape + (1,)),
                                               tmp]), axis=2) * g, axis=2)
        polyFail = np.logical_or(np.isnan(polyApp), np.isinf(polyApp))
        polyApp[polyFail] = MLE[polyFail]
        return ratio*MLE + (1-ratio)*polyApp
def log_mask_zero(a):
    """Computes the log of input probabilities masking divide by zero in log.

    Notes
    -----
    During the M-step of EM-algorithm, very small intermediate start
    or transition probabilities could be normalized to zero, causing a
    *RuntimeWarning: divide by zero encountered in log*.

    This function masks this harmless warning.
    """
    a = np.asarray(a)
    with np.errstate(divide="ignore"):
        a_log = np.log(a)
        a_log[a <= 0] = 0.0
        return a_log
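A quick check of log_mask_zero as defined above: zero probabilities come back as 0.0 instead of -inf, with no RuntimeWarning emitted:

import numpy as np

probs = np.array([0.5, 0.0, 0.25])
# log_mask_zero as defined above
print(log_mask_zero(probs))  # -> [-0.693...  0.       -1.386...]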
def medianThreshold(img, threshold=0.1, size=3, condition='>', copy=True):
    '''
    Set every pixel value of the given [img] to the median filtered one
    (of a given kernel [size]) in case the relative [threshold] is exceeded.

    condition = '>' OR '<'
    '''
    from scipy.ndimage import median_filter

    indices = None
    if threshold > 0:
        blur = np.asfarray(median_filter(img, size=size))
        with np.errstate(divide='ignore', invalid='ignore', over='ignore'):
            if condition == '>':
                indices = abs((img - blur) / blur) > threshold
            else:
                indices = abs((img - blur) / blur) < threshold

        if copy:
            img = img.copy()
        img[indices] = blur[indices]
    return img, indices
def get_bad_count(scene, algorithms, thresh, percentage=False):
    bad_count = np.zeros(scene.get_shape())
    gt = scene.get_gt()

    for algorithm in algorithms:
        algo_result = misc.get_algo_result(algorithm, scene)
        abs_diffs = np.abs(gt - algo_result)

        with np.errstate(invalid="ignore"):
            bad = abs_diffs > thresh
            bad += misc.get_mask_invalid(abs_diffs)

        bad_count += bad

    if percentage:
        bad_count = misc.percentage(len(algorithms), bad_count)

    return bad_count
def get_score(self, algo_result, gt, scene, with_visualization=False):
    diffs = np.abs(algo_result - gt) * self.factor
    mask = self.get_evaluation_mask(scene) * misc.get_mask_valid(diffs) * \
        misc.get_mask_valid(algo_result)

    sorted_diffs = np.sort(diffs[mask])
    idx = np.size(sorted_diffs) * self.percentage / 100.
    score = sorted_diffs[int(idx)]

    if not with_visualization:
        return score

    with np.errstate(invalid="ignore"):
        m_bad_pix = np.abs(diffs) > score

    vis = np.abs(diffs)
    vis[m_bad_pix] = -1
    vis = np.ma.masked_array(vis, mask=~mask)
    return score, vis
def get_drdX(self, X):
    "Derivative of radial coordinate with respect to compactified"
    dXdr = self.get_dXdr(X)
    with np.errstate(invalid='ignore'):
        drdX = 1./dXdr
    return drdX

def get_d2rdX2(self, X):
    "Second derivative of radial coordinate with respect to compactified"
    d2Xdr2 = self.get_d2Xdr2(X)
    with np.errstate(invalid='ignore'):
        d2rdX2 = 1./d2Xdr2
    return d2rdX2

def get_X_from_x(self, x):
    "X is x compactified"
    L = self.L
    with np.errstate(invalid='ignore'):
        X = (x-L)/(x+L)
    return X

def get_x_from_X(self, X):
    "X is x compactified"
    L = self.L
    with np.errstate(invalid='ignore'):
        x = L*(1+X)/(1-X)
    return x
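The last two methods implement the compactification X = (x - L)/(x + L) and its inverse x = L(1 + X)/(1 - X); invalid='ignore' silences the indeterminate cases at the endpoints. A standalone round-trip check (with L chosen arbitrarily for illustration):

import numpy as np

L = 2.0  # arbitrary scale for illustration
x = np.array([0.0, 1.0, 10.0, 1e6])
with np.errstate(invalid='ignore', divide='ignore'):
    X = (x - L) / (x + L)           # maps [0, inf) into [-1, 1)
    x_back = L * (1 + X) / (1 - X)  # inverse map
print(np.allclose(x, x_back))       # True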
def test_generic(self):
    """ Test that NaNs are replaced with a fill value """
    with np.errstate(divide='ignore', invalid='ignore'):
        vals = nan_to_num(np.array([0]) / 0., fill_value=14)
    self.assertEqual(vals[0], 14)
def f_score(Theta_true, Theta_est, beta=1, eps=1e-6, per_ts=False):
    """Compute f1 score in the same manner as `precision` and `recall`.
    Therefore see those two functions for the respective eps and per_ts
    explanation.

    Parameters
    ----------
    Theta_true : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    Theta_est : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    beta : float (default 1)
        beta value of the F score to be computed
    eps : float
    per_ts : bool
        whether to compute average or per timestep f score

    Returns
    -------
    ndarray or float
        f score per timestep or a single averaged f score
    """
    prec = precision(Theta_true, Theta_est, eps, per_ts=True)
    rec = recall(Theta_true, Theta_est, eps, per_ts=True)
    with np.errstate(divide='ignore', invalid='ignore'):
        nom = (1 + beta**2) * prec * rec
        den = beta**2 * prec + rec
        f = np.nan_to_num(np.true_divide(nom, den))
    return f if per_ts else np.sum(f) / len(Theta_true)
def global_f_score(Theta_true, Theta_est, beta=1, eps=1e-6):
    """In line with `global_precision` and `global_recall`, compute the
    global f score given true and estimated graphical structures. The
    f score takes a single parameter, beta.

    Parameters
    ----------
    Theta_true : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    Theta_est : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    beta : float (default 1)
        beta value of the F score to be computed
    eps : float

    Returns
    -------
    float
        f-beta score
    """
    assert Theta_est.shape == Theta_true.shape
    d = Theta_true.shape[1]
    n = len(Theta_est)
    tps = fps = fns = tns = 0
    for i in range(n):
        est_edges = set(get_edges(Theta_est[i], eps))
        gt_edges = set(get_edges(Theta_true[i], eps))
        n_joint = len(est_edges.intersection(gt_edges))
        tps += n_joint
        fps += len(est_edges) - n_joint
        fns += len(gt_edges) - n_joint
        tns += d**2 - d - tps - fps - fns
    nom = (1 + beta**2) * tps
    denom = nom + beta**2 * fns + fps
    with np.errstate(divide='ignore', invalid='ignore'):
        f = np.nan_to_num(np.true_divide(nom, denom))
    return f
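For orientation, both functions use the standard F-beta combination F_beta = (1 + beta^2)·P·R / (beta^2·P + R), which is algebraically equal to the count form (1 + beta^2)·TP / ((1 + beta^2)·TP + beta^2·FN + FP). A quick numeric check with made-up counts:

import numpy as np

tps, fps, fns = 8, 2, 4  # hypothetical edge counts
prec = tps / (tps + fps)  # 0.8
rec = tps / (tps + fns)   # 0.666...
beta = 1.0
f_from_pr = (1 + beta**2) * prec * rec / (beta**2 * prec + rec)
f_from_counts = (1 + beta**2) * tps / ((1 + beta**2) * tps
                                       + beta**2 * fns + fps)
print(np.isclose(f_from_pr, f_from_counts))  # True: the two forms agree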
def __entropy(self, pdf):
    """Calculate shannon entropy of posterior distribution.

    Arguments
    ---------
    pdf : ndarray (float64)
        posterior distribution of psychometric curve parameters for each
        stimulus

    Returns
    -------
    1D numpy array (float64) : Shannon entropy of posterior for each stimulus
    """
    # Marginalize out all nuisance parameters, i.e. all except alpha and sigma
    postDims = np.ndim(pdf)
    if self.marginalize == True:
        while postDims > 3:  # marginalize out second-to-last dim, last dim is x
            pdf = np.sum(pdf, axis=-2)
            postDims -= 1
    # find expected entropy, suppress divide-by-zero and invalid value warnings
    # as this is handled by the NaN redefinition to 0
    with np.errstate(divide='ignore', invalid='ignore'):
        entropy = np.multiply(pdf, np.log(pdf))
    entropy[np.isnan(entropy)] = 0  # define 0*log(0) to equal 0
    dimSum = tuple(range(postDims - 1))  # dimensions to sum over. also a Chinese dish
    entropy = -(np.sum(entropy, axis=dimSum))
    return entropy
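The 0·log(0) := 0 convention used above can be checked in isolation; with errstate suppressing the warnings, the NaN produced by 0 * log(0) is simply overwritten:

import numpy as np

pdf = np.array([0.5, 0.5, 0.0])
with np.errstate(divide='ignore', invalid='ignore'):
    terms = pdf * np.log(pdf)  # last entry is 0 * -inf -> nan
terms[np.isnan(terms)] = 0     # define 0*log(0) = 0
print(-terms.sum())            # 0.693... = ln(2), entropy of a fair coin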
def estimate_errors(self, gehrels=True):
    """
    Estimate the statistical errors of each spectral group (after
    applying grouping) for the source spectrum (and background spectrum).

    If ``gehrels=True``, the statistical error for a spectral group
    with N photons is given by ``1 + sqrt(N + 0.75)``; otherwise, the
    error is given by ``sqrt(N)``.

    Attributes
    ----------
    spec_err : `~numpy.ndarray`
        Estimated errors for the spectral data.
        NOTE: If the spectral data (in counts) have negative groups, the
        errors of those groups are set to 0.0!
    """
    with np.errstate(invalid="ignore"):
        if gehrels:
            self.spec_err = 1.0 + np.sqrt(self.spec_data + 0.75)
        else:
            self.spec_err = np.sqrt(self.spec_data)
    # Warn about and fix the invalid error values
    invalid = ~np.isfinite(self.spec_err)
    if np.sum(invalid) > 0:
        print("WARNING: invalid spectral errors are set to 0.0! " +
              "(due to negative spectral group counts)")
        self.spec_err[invalid] = 0.0
def norm_axesimage(ax, vmin, vmax):
    try:
        axim = ax.get_images()[0]
    except IndexError:
        return
    im = axim.get_array()
    if im.ndim == 3:  # RGB, custom norm
        if vmax - vmin > 0:
            # the masked array may give an underflow error here
            with np.errstate(under='ignore'):
                axim.set_array((im - vmin) / (vmax - vmin))
        axim.set_clim(0, 1)  # this is actually ignored for RGB by mpl
    else:
        # use built-in
        axim.set_clim(vmin, vmax)
    return axim
def rotate(points, rot_vecs):
    """Rotate points by given rotation vectors.

    Rodrigues' rotation formula is used.
    """
    theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]
    with np.errstate(invalid='ignore'):
        v = rot_vecs / theta
        v = np.nan_to_num(v)
    dot = np.sum(points * v, axis=1)[:, np.newaxis]
    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)

    return cos_theta * points + sin_theta * np.cross(v, points) + \
        dot * (1 - cos_theta) * v
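A quick sanity check of rotate as defined above: a quarter-turn about the z-axis should carry (1, 0, 0) to (0, 1, 0):

import numpy as np

points = np.array([[1.0, 0.0, 0.0]])
rot_vecs = np.array([[0.0, 0.0, np.pi / 2]])  # axis * angle
# rotate as defined above
print(np.round(rotate(points, rot_vecs), 6))  # -> [[0. 1. 0.]]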
def get_relative_normed_coordinates(self, molecule):
    # ignore divide by zero warning
    #with warnings.catch_warnings():
    #    warnings.filterwarnings("ignore", r'invalid value encountered in divide')
    with np.errstate(divide="ignore", invalid="ignore"):
        return (molecule.coordinates - self.coordinates[None, :]) / \
            self.distances[:, None]
def test_array_str_64bit(self, level=rlevel):
    # Ticket #501
    s = np.array([1, np.nan], dtype=np.float64)
    with np.errstate(all='raise'):
        np.array_str(s)  # Should succeed
def test_sign_for_complex_nan(self, level=rlevel):
    # Ticket 794.
    with np.errstate(invalid='ignore'):
        C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
        have = np.sign(C)
        want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
        assert_equal(have, want)
def test_errobj_reference_leak(self, level=rlevel):
    # Ticket #955
    with np.errstate(all="ignore"):
        z = int(0)
        p = np.int32(-1)

        gc.collect()
        n_before = len(gc.get_objects())
        z**p  # this shouldn't leak a reference to errobj
        gc.collect()
        n_after = len(gc.get_objects())
        assert_(n_before >= n_after, (n_before, n_after))
def test_signed_integer_division_overflow(self):
    # Ticket #1317.
    def test_type(t):
        min = np.array([np.iinfo(t).min])
        min //= -1

    with np.errstate(divide="ignore"):
        for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
            test_type(t)
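The overflow tested above comes from two's-complement asymmetry: for a signed type, the magnitude of the minimum value exceeds the maximum, so negating the minimum is not representable. A small illustration (the exact wrapped result can vary across platforms and NumPy versions):

import numpy as np

info = np.iinfo(np.int8)
print(info.min, info.max)  # -128 127
x = np.array([info.min], dtype=np.int8)
with np.errstate(over='ignore', divide='ignore'):
    y = x // -1            # -(-128) = 128 is unrepresentable in int8
print(y)                   # typically wraps around to [-128]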
def test_underlow(self):
    # Regression test for #759:
    # instantiating MachAr for dtype = np.float96 raises spurious warning.
    with errstate(all='raise'):
        try:
            self._run_machar_highprec()
        except FloatingPointError as e:
            self.fail("Caught %s exception, should not have been raised." % e)
def test_divide_err(self):
    with np.errstate(divide='raise'):
        try:
            np.array([1.]) / np.array([0.])
        except FloatingPointError:
            pass
        else:
            self.fail()
        np.seterr(divide='ignore')
        np.array([1.]) / np.array([0.])
def test_errobj(self):
    olderrobj = np.geterrobj()
    self.called = 0
    try:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with np.errstate(divide='warn'):
                np.seterrobj([20000, 1, None])
                np.array([1.]) / np.array([0.])
            self.assertEqual(len(w), 1)

        def log_err(*args):
            self.called += 1
            extobj_err = args
            assert_(len(extobj_err) == 2)
            assert_("divide" in extobj_err[0])

        with np.errstate(divide='ignore'):
            np.seterrobj([20000, 3, log_err])
            np.array([1.]) / np.array([0.])
        self.assertEqual(self.called, 1)

        np.seterrobj(olderrobj)
        with np.errstate(divide='ignore'):
            np.divide(1., 0., extobj=[20000, 3, log_err])
        self.assertEqual(self.called, 2)
    finally:
        np.seterrobj(olderrobj)
        del self.called