The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.seterr().
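Before the individual examples, here is a minimal sketch of the usage pattern that recurs throughout them (the array values are purely illustrative): np.seterr() returns the previous error settings, so callers typically save that return value and restore it afterwards, or use the np.errstate context manager to scope the change.

import numpy as np

# Silence divide-by-zero / invalid warnings, keeping the previous settings.
old_settings = np.seterr(divide='ignore', invalid='ignore')
ratio = np.array([1.0, 2.0]) / np.array([0.0, 4.0])  # no warning: [inf, 0.5]
np.seterr(**old_settings)  # restore the previous behaviour

# Equivalent, automatically scoped form using the errstate context manager.
with np.errstate(divide='ignore', invalid='ignore'):
    ratio = np.array([1.0, 2.0]) / np.array([0.0, 4.0])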
def setUp(self):
    # Base data definition.
    x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
    y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
    a10 = 10.
    m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
    m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
    xm = masked_array(x, mask=m1)
    ym = masked_array(y, mask=m2)
    z = np.array([-.5, 0., .5, .8])
    zm = masked_array(z, mask=[0, 1, 0, 0])
    xf = np.where(m1, 1e+20, x)
    xm.set_fill_value(1e+20)
    self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
    self.err_status = np.geterr()
    np.seterr(divide='ignore', invalid='ignore')
def _logcdf(self, samples):
    lower = np.full(2, -np.inf)
    upper = norm.ppf(samples)
    limit_flags = np.zeros(2)
    if upper.shape[0] > 0:

        def func1d(upper1d):
            '''
            Calculates the multivariate normal cumulative distribution
            function of a single sample.
            '''
            return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]

        vals = np.apply_along_axis(func1d, -1, upper)
    else:
        vals = np.empty((0, ))
    old_settings = np.seterr(divide='ignore')
    vals = np.log(vals)
    np.seterr(**old_settings)
    vals[np.any(samples == 0.0, axis=1)] = -np.inf
    vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
    vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
    return vals
def gaussian_function(y, dimension, mu, cov, log=False, standard=False):
    """Gaussian probability density.

    y is the sample (row vector), mu is the mean (row vector) and cov is the
    covariance matrix; log selects whether to return the log density,
    standard selects whether to standardize the sample first.
    """
    x = y - mu
    if standard:
        x = np.dot(x, np.linalg.inv(cov) ** 0.5)
        cov_ = np.eye(dimension)
    else:
        cov_ = cov
    np.seterr(all='ignore')  # ignore floating-point warnings
    if log:
        func = - (dimension / 2) * np.log(2 * math.pi) - 0.5 * np.log(np.linalg.det(cov_))
        exp = -0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T)
        return func + exp
    else:
        sigma = (2 * math.pi) ** (dimension / 2) * np.linalg.det(cov_) ** 0.5
        func = 1. / sigma
        exp = np.exp(-0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T))
        return func * exp
def with_error_settings(**new_settings):
    """
    Build a decorator that runs the wrapped function under the given numpy
    error settings and restores the previous settings afterwards.

    Arguments:
        **new_settings: keyword arguments accepted by np.seterr().

    Returns:
        A decorator applying the error settings around each call.
    """
    @decorator.decorator
    def dec(f, *args, **kwargs):
        old_settings = np.geterr()
        np.seterr(**new_settings)
        ret = f(*args, **kwargs)
        np.seterr(**old_settings)

        return ret

    return dec
def evaluate(img_col, args):
    numpy.seterr(all='ignore')
    assert isinstance(img_col, numpy.ndarray), 'img_col must be a numpy array'
    assert img_col.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img_col.ndim)
    assert isinstance(args, argparse.Namespace), 'args must be of type argparse.Namespace not {0}'.format(type(args))
    img_gry = cv2.cvtColor(img_col, cv2.COLOR_RGB2GRAY)
    rows, cols = img_gry.shape
    crow, ccol = rows/2, cols/2
    f = numpy.fft.fft2(img_gry)
    fshift = numpy.fft.fftshift(f)
    fshift[crow-75:crow+75, ccol-75:ccol+75] = 0
    f_ishift = numpy.fft.ifftshift(fshift)
    img_fft = numpy.fft.ifft2(f_ishift)
    img_fft = 20*numpy.log(numpy.abs(img_fft))
    if args.display and not args.testing:
        cv2.destroyAllWindows()
        scripts.display('img_fft', img_fft)
        scripts.display('img_col', img_col)
        cv2.waitKey(0)
    result = numpy.mean(img_fft)
    return img_fft, result, result < args.thresh
def pwdist_canberra(self, seq1idx, seq2idx):
    """Compute the Canberra distance between two vectors.

    References:
        1. http://scipy.org/

    Notes:
        When `u[i]` and `v[i]` are 0 for given i, then the fraction
        0/0 = 0 is used in the calculation.
    """
    u = self[seq1idx]
    v = self[seq2idx]
    olderr = np.seterr(invalid='ignore')
    try:
        d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
    finally:
        np.seterr(**olderr)
    return d
def Salton(MatrixAdjacency_Train):
    similarity_StartTime = time.clock()

    similarity = np.dot(MatrixAdjacency_Train, MatrixAdjacency_Train)

    deg_row = sum(MatrixAdjacency_Train)
    deg_row.shape = (deg_row.shape[0], 1)
    deg_row_T = deg_row.T
    tempdeg = np.dot(deg_row, deg_row_T)
    temp = np.sqrt(tempdeg)

    np.seterr(divide='ignore', invalid='ignore')
    Matrix_similarity = np.nan_to_num(similarity / temp)

    # print np.isnan(Matrix_similarity)
    # Matrix_similarity = np.nan_to_num(Matrix_similarity)
    # print np.isnan(Matrix_similarity)

    similarity_EndTime = time.clock()
    print " SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime)
    return Matrix_similarity
def feature(self, words):
    """average words' vectors"""
    feature_vec = np.zeros((self.dimension,), dtype="float32")
    retrieved_words = 0
    for token in words:
        try:
            feature_vec = np.add(feature_vec, self.embeddings[token])
            retrieved_words += 1
        except KeyError:
            pass  # if a word is not in the embeddings' vocabulary discard it

    np.seterr(divide='ignore', invalid='ignore')
    feature_vec = np.divide(feature_vec, retrieved_words)

    return feature_vec
def __init__(self, policy, mdp_info, params, features=None):
    self.__name__ = 'GPOMDP'

    super(GPOMDP, self).__init__(policy, mdp_info, params, features)

    self.sum_d_log_pi = None
    self.list_sum_d_log_pi = list()
    self.list_sum_d_log_pi_ep = list()

    self.list_reward = list()
    self.list_reward_ep = list()

    self.baseline_num = list()
    self.baseline_den = list()

    self.step_count = 0

    # Ignore divide by zero
    np.seterr(divide='ignore', invalid='ignore')
def main():
    with open(path, 'r') as data_file:
        movieList = json.load(data_file)
    count = 0
    np.seterr(divide='ignore', invalid='ignore')
    for movie in movieList:
        if movie["reviews"] and movie["critics"]:
            readMovie(movie)
            # reviewer_list[movie["movieTitle"]] = recommender(movie)
            # cluster[movie["movieTitle"]] = HAC(movie)
            score_list[movie["movieTitle"]] = newScore(movie)
            count += 1
            print count
            break
    with open("scoreList.json", 'w+') as outfile:
        json.dump(score_list, outfile, indent=4, separators=(',', ': '))
def get_base_means(raw_read_coverage, chrm_sizes):
    # ignore divide by zero errors that occur where there is no
    # coverage. Need to correct nan values after subtracting two sets of
    # coverage so leave as nan for now
    old_err_settings = np.seterr(all='ignore')
    # take the mean over all signal overlapping each base
    mean_base_signal = {}
    for chrm, strand in [(c, s) for c in chrm_sizes.keys()
                         for s in ('+', '-')]:
        if (chrm, strand) in raw_read_coverage:
            cs_base_means = get_reads_base_means(
                raw_read_coverage[(chrm, strand)], chrm_sizes[chrm],
                strand == '-')
        else:
            cs_base_means = np.empty(chrm_sizes[chrm])
            cs_base_means[:] = np.nan
        mean_base_signal[(chrm, strand)] = cs_base_means
    _ = np.seterr(**old_err_settings)

    return mean_base_signal
def __call__(self, y_true_proba, y_proba):
    """
    See Murphy (1973) A vector partition of the probability score
    """
    np.seterr(divide="ignore")
    pos_obs_freq = np.histogram(
        y_proba[y_true_proba == 1], bins=self.bins)[0]
    fore_freq = np.histogram(y_proba, bins=self.bins)[0]
    climo = y_true_proba.mean()
    unc = climo * (1 - climo)
    pos_obs_rel_freq = np.zeros(pos_obs_freq.size)
    for p in range(pos_obs_rel_freq.size):
        if fore_freq[p] > 0:
            pos_obs_rel_freq[p] = pos_obs_freq[p] / fore_freq[p]
        else:
            pos_obs_rel_freq[p] = np.nan
    score = np.nansum(fore_freq * (pos_obs_rel_freq - climo) ** 2)
    score /= float(y_proba.size)
    return score / unc
def dephasing(f):
    """
    Computes the dephasing time of a given function using optical response
    formalisms: S. Mukamel, Principles of Nonlinear Optical Spectroscopy, 1995.
    The implementation uses the 2nd order cumulant expansion.
    See also eq. (2) in: Kilina et al. Phys. Rev. Lett., 110, 180404, (2013).
    To calculate the dephasing time tau we fit the dephasing function to a
    gaussian of the type: exp(-0.5 * (-x / tau) ** 2)
    """
    ts = np.arange(f.shape[0])
    cumu_ii = np.stack(np.sum(f[0:i]) for i in range(ts.size)) / hbar
    cumu_i = np.stack(np.sum(cumu_ii[0:i]) for i in range(ts.size)) / hbar
    deph = np.exp(-cumu_i)
    np.seterr(over='ignore')
    popt = curve_fit(gauss_function, ts, deph)[0]
    xs = np.exp(-0.5 * (-ts / popt[0]) ** 2)
    deph = np.column_stack((deph, xs))
    rate = popt[0]
    return deph, rate
def run(self):
    numpy.seterr(all='raise')
    try:
        with self._pulseaudio_client:
            while not self._stop_event.is_set():
                with self.lock:
                    self._sample()
                    for callback in self._on_sample_callbacks:
                        callback()
    except Exception as e:
        self.exit_success = False
        raise e
    else:
        self.exit_success = True
def get_vlb(self):
    # return avg energy plus entropy, our contribution to the mean field
    # variational lower bound
    errs = np.seterr(invalid='ignore', divide='ignore')
    prod = self.r*np.log(self.r)
    prod[np.isnan(prod)] = 0.  # 0 * -inf = 0.
    np.seterr(**errs)

    logpitilde = self.weights.expected_log_likelihood(np.arange(len(self.components)))

    q_entropy = -prod.sum()
    p_avgengy = (self.r*logpitilde).sum()

    return p_avgengy + q_entropy

### EM
def _expected_durations(self,
        dur_potentials, cumulative_obs_potentials,
        alphastarl, betal, normalizer):
    logpmfs = -np.inf*np.ones((self.Tfull, alphastarl.shape[1]))
    errs = np.seterr(invalid='ignore')  # logaddexp(-inf,-inf)
    # TODO censoring not handled correctly here
    for tblock in xrange(self.Tblock):
        possible_durations = self.segmentlens[tblock:].cumsum()[:self.trunc]
        cB, offset = cumulative_obs_potentials(tblock)
        logpmfs[possible_durations - 1] = np.logaddexp(
            dur_potentials(tblock) + alphastarl[tblock]
            + betal[tblock:tblock+self.trunc if self.trunc is not None else None]
            + cB - (offset + normalizer),
            logpmfs[possible_durations - 1])
    np.seterr(**errs)
    return np.exp(logpmfs.T)


####################
#  separate trans  #
####################
def max_likelihood(self, stateseqs=None, expected_transcounts=None):
    trans_counts = sum(expected_transcounts) if stateseqs is None \
        else self._count_transitions(stateseqs)

    # NOTE: could just call max_likelihood on each trans row, but this way
    # it handles a few lazy-initialization cases (e.g. if _row_distns aren't
    # initialized)
    errs = np.seterr(invalid='ignore', divide='ignore')
    trans_matrix = np.nan_to_num(trans_counts / trans_counts.sum(1)[:, na])
    np.seterr(**errs)

    # all-zero rows get set to uniform
    trans_matrix[trans_matrix.sum(1) == 0] = 1./trans_matrix.shape[0]
    assert np.allclose(trans_matrix.sum(1), 1.)

    self.trans_matrix = trans_matrix

    return self
def max_likelihood(self, stateseqs=None, expected_transcounts=None):
    trans_counts = sum(expected_transcounts) if stateseqs is None \
        else self._count_transitions(stateseqs)

    # NOTE: we could just call max_likelihood on each trans row, but this
    # way it's a bit nicer
    errs = np.seterr(invalid='ignore', divide='ignore')
    trans_matrix = np.nan_to_num(trans_counts / trans_counts.sum(1)[:, na])
    np.seterr(**errs)

    # all-zero rows get set to uniform
    trans_matrix[trans_matrix.sum(1) == 0] = 1./(trans_matrix.shape[0]-1)
    trans_matrix.flat[::trans_matrix.shape[0]+1] = 0.

    self.trans_matrix = trans_matrix

    assert np.allclose(0., np.diag(self.trans_matrix))
    assert np.allclose(1., self.trans_matrix.sum(1))

    return self
def _messages_backwards_log_slow(trans_potential, init_potential,
                                 likelihood_log_potential, feature_weights,
                                 window_data):
    errs = np.seterr(over='ignore')
    Al = np.log(trans_potential)
    pil = np.log(init_potential)
    aBl = likelihood_log_potential
    nhs = trans_potential.shape[0]
    sequence_length = aBl.shape[0]

    betal = np.zeros((sequence_length, nhs * 2))
    giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs, 1)), Al)), (1, 2))

    for t in xrange(betal.shape[0]-2, -1, -1):
        temp_constant = np.sum(feature_weights[:-nhs-1] * window_data[t+1, :]) + feature_weights[-1]
        temp_exp = temp_constant + feature_weights[-nhs-1:-1]
        temp_logaddexp = np.logaddexp(0, temp_exp)
        temp_log_linear = np.tile(temp_exp, 2) * np.repeat([0, 1], nhs) - np.tile(temp_logaddexp, 2)
        np.logaddexp.reduce(giant_Al_pil + betal[t+1]
                            + np.hstack((aBl[t+1], aBl[t+1]))
                            + temp_log_linear,
                            axis=1, out=(betal[t]))

    np.seterr(**errs)
    return betal
def _messages_backwards_log_fast(trans_potential, init_potential,
                                 likelihood_log_potential_llt):
    errs = np.seterr(over='ignore')
    Al = np.log(trans_potential)
    pil = np.log(init_potential)
    aBl = likelihood_log_potential_llt
    nhs = trans_potential.shape[0]
    sequence_length = aBl.shape[0]

    betal = np.zeros((sequence_length, nhs * 2))
    giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs, 1)), Al)), (1, 2))

    for t in xrange(betal.shape[0]-2, -1, -1):
        np.logaddexp.reduce(giant_Al_pil + betal[t+1] + aBl[t+1],
                            axis=1, out=(betal[t]))

    np.seterr(**errs)
    return betal


### Gibbs sampling
def _sample_forwards_log(self, betal, trans_matrix, init_state_distn,
                         log_likelihoods_loglinear):
    errs = np.seterr(over='ignore')
    Al = trans_matrix
    aBl = log_likelihoods_loglinear
    T = aBl.shape[0]
    pil = init_state_distn
    nhs = trans_matrix.shape[0]
    giant_Al_pil = np.tile(np.vstack((np.tile(pil, (nhs, 1)), Al)), (1, 2))

    stateseq = np.empty(T, dtype=np.int32)
    true_segmentation = np.ones(T, dtype=np.int32)

    nextstate_unsmoothed = np.tile(init_state_distn, 2)

    for idx in xrange(T):
        # check this for the initial and last state and compare with the
        # forward message
        logdomain = betal[idx] + aBl[idx]
        logdomain[nextstate_unsmoothed == 0] = -np.inf

        if np.any(np.isfinite(logdomain)):
            stateseq[idx] = sample_discrete(nextstate_unsmoothed * np.exp(logdomain - np.amax(logdomain)))
        else:
            stateseq[idx] = sample_discrete(nextstate_unsmoothed)

        if stateseq[idx] < nhs:
            true_segmentation[idx] = 0

        nextstate_unsmoothed = giant_Al_pil[stateseq[idx]]

    return stateseq, true_segmentation
def evaluate(self, n, features, stack_float, stack_bool, labels=None):
    """evaluate node in program"""
    np.seterr(all='ignore')
    if len(stack_float) >= n.arity['f'] and len(stack_bool) >= n.arity['b']:
        if n.out_type == 'f':
            stack_float.append(
                self.safe(self.eval_dict[n.name](n, features, stack_float,
                                                 stack_bool, labels)))
            if (np.isnan(stack_float[-1]).any() or
                    np.isinf(stack_float[-1]).any()):
                print("problem operator:", n)
        else:
            stack_bool.append(self.safe(self.eval_dict[n.name](n, features,
                                                               stack_float,
                                                               stack_bool,
                                                               labels)))
            if np.isnan(stack_bool[-1]).any() or np.isinf(stack_bool[-1]).any():
                print("problem operator:", n)
def test_zero_precision_recall():
    # Check that pathological cases do not bring NaNs

    old_error_settings = np.seterr(all='raise')

    try:
        y_true = np.array([0, 1, 2, 0, 1, 2])
        y_pred = np.array([2, 0, 1, 1, 2, 0])

        assert_almost_equal(precision_score(y_true, y_pred,
                                            average='weighted'), 0.0, 2)
        assert_almost_equal(recall_score(y_true, y_pred,
                                         average='weighted'), 0.0, 2)
        assert_almost_equal(f1_score(y_true, y_pred,
                                     average='weighted'), 0.0, 2)
    finally:
        np.seterr(**old_error_settings)
def transform(value, left_scale, right_scale, scale=0):
    if left_scale is None or right_scale is None:
        raise Exception('Left or Right scales cannot be None.')
    if scale not in [0, 1]:
        raise Exception('Scale must be 0 or 1.')

    invalid_err = np.geterr().get('invalid')
    np.seterr(invalid='ignore')

    if scale == 0:
        range_ = np.absolute(right_scale - left_scale)
        translated_value = np.abs(value - left_scale)
        ret_val = (translated_value / range_)
    else:
        if left_scale <= 0.0:
            raise Exception()
        ls = np.log10(left_scale)
        rs = np.log10(right_scale)
        range_ = rs - ls
        translated_value = np.log10(value) - ls
        ret_val = (translated_value / range_)

    np.seterr(invalid=invalid_err)
    return ret_val
def numpy_seterr():
    np.seterr(divide='raise', invalid='raise')
def _compose_alpha(img_in, img_layer, opacity: float=1.0):
    """
    Calculate alpha composition ratio between two images.
    """
    comp_alpha = np.minimum(img_in[:, :, 3], img_layer[:, :, 3]) * opacity
    new_alpha = img_in[:, :, 3] + (1.0 - img_in[:, :, 3]) * comp_alpha
    np.seterr(divide='ignore', invalid='ignore')
    ratio = comp_alpha / new_alpha
    ratio[ratio == np.NAN] = 0.0
    return ratio
def _marginal_densities(self, samples):
    '''
    Evaluate marginal densities and cumulative distribution functions.

    Parameters
    ----------
    samples : array_like
        n-by-d matrix of samples where n is the number of samples and d
        is the number of marginals.

    Returns
    -------
    dout : dictionary
        The densities and cumulative distribution functions. Keys:
        'logpdf': Equal to first element of 'logp'.
        'logp': Log of the probability density function.
        'cdfp': Upper cumulative distribution functions.
        'cdfm': Lower cumulative distribution functions.
        'is_continuous': List of booleans where element i is `True` if
            output element i is continuous.
    '''
    logp = np.zeros(samples.shape)
    cdfp = np.zeros(samples.shape)
    cdfm = np.zeros(samples.shape)
    is_continuous = np.zeros(len(self.marginals), dtype=bool)
    for k, marginal in enumerate(self.marginals):
        is_continuous[k] = marginal.is_continuous
        cdfp[:, k] = marginal.cdf(samples[:, k])
        if marginal.is_continuous:
            logp[:, k] = marginal.logpdf(samples[:, k])
        else:
            cdfm[:, k] = marginal.cdf(samples[:, k] - 1)
            old_settings = np.seterr(divide='ignore')
            logp[:, k] = np.log(np.maximum(0, cdfp[:, k] - cdfm[:, k]))
            np.seterr(**old_settings)
    logpdf = logp[:, self.output_layer.input_indices[0][0]]
    dout = {'logpdf': logpdf, 'logp': logp, 'cdfp': cdfp, 'cdfm': cdfm,
            'is_continuous': is_continuous}
    return dout
def logcdf(self, samples):
    '''
    Calculates the log of the cumulative distribution function.

    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.

    Returns
    -------
    vals : ndarray
        Log of the cumulative distribution function evaluated at
        `samples`.
    '''
    samples = np.copy(np.asarray(samples))
    samples = self.__crop_input(samples)
    samples = self.__rotate_input(samples)
    vals = self._logcdf(samples)
    # Transform according to rotation, but take `__rotate_input` into
    # account.
    if self.rotation == '90°':
        old_settings = np.seterr(divide='ignore')
        vals = np.log(np.maximum(0, samples[:, 0] - np.exp(vals)))
        np.seterr(**old_settings)
    elif self.rotation == '180°':
        old_settings = np.seterr(divide='ignore')
        vals = np.log(np.maximum(0, (1 - samples[:, 0]) + (1 - samples[:, 1])
                                 - 1.0 + np.exp(vals)))
        np.seterr(**old_settings)
    elif self.rotation == '270°':
        old_settings = np.seterr(divide='ignore')
        vals = np.log(np.maximum(0, samples[:, 1] - np.exp(vals)))
        np.seterr(**old_settings)
    return vals
def _logcdf(self, samples):
    old_settings = np.seterr(divide='ignore')
    vals = np.sum(np.log(samples), axis=1)
    np.seterr(**old_settings)
    return vals
def _logcdf(self, samples):
    if self.theta == 0:
        vals = np.sum(np.log(samples), axis=1)
    else:
        old_settings = np.seterr(divide='ignore')
        vals = (-1 / self.theta) \
            * np.log(np.maximum(samples[:, 0]**(-self.theta)
                                + samples[:, 1]**(-self.theta) - 1, 0))
        np.seterr(**old_settings)
    return vals
def viterbi(self, data, data_time, unit_train_time=unit_time):
    """
    One step of the Viterbi algorithm.
    :param data: observation data
    :param data_time: time of the observation data
    :param unit_train_time: unit training time
    :return:
    """
    def info(p):
        """Return the highest probability and the state that attains it."""
        point_ = p.max()
        mark_ = np.where(p == point_)
        mark_state = mark_[0][0]
        return point_, mark_state

    self.__cal_data(data, data_time, unit_train_time)
    complex_states, complex_observation, complex_A, complex_B, complex_pi = self.__embedded_list

    # Viterbi recursion
    np.seterr(divide='ignore')
    if self.__p_list is None:
        self.__p_list = np.log(complex_pi) + complex_B[:, 0]
        point, mark = info(self.__p_list)
        self.score += point
        self.mark = mark
        if mark == len(complex_states) - 1:
            return True
    else:
        p_ = np.zeros_like(self.__p_list)
        for j in range(len(complex_states)):
            tmp = self.__p_list + np.log(complex_A[:, j])
            max_p = tmp.max()
            p_[j] = max_p
        self.__p_list = p_ + complex_B[:, 0]
        point, mark = info(self.__p_list)
        self.score += point
        self.mark = mark
        if len(complex_states) - mark <= 1:
            return True
def test_set(self):
    with np.errstate():
        err = np.seterr()
        old = np.seterr(divide='print')
        self.assertTrue(err == old)
        new = np.seterr()
        self.assertTrue(new['divide'] == 'print')
        np.seterr(over='raise')
        self.assertTrue(np.geterr()['over'] == 'raise')
        self.assertTrue(new['divide'] == 'print')
        np.seterr(**old)
        self.assertTrue(np.geterr() == old)
def test_divide_err(self):
    with np.errstate(divide='raise'):
        try:
            np.array([1.]) / np.array([0.])
        except FloatingPointError:
            pass
        else:
            self.fail()
        np.seterr(divide='ignore')
        np.array([1.]) / np.array([0.])
def setUp(self):
    self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
    np.seterr(**self.olderr)
def setUp(self):
    # Base data definition.
    self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
              array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
    self.err_status = np.geterr()
    np.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
    np.seterr(**self.err_status)
def test_testUfuncRegression(self):
    f_invalid_ignore = [
        'sqrt', 'arctanh', 'arcsin', 'arccos',
        'arccosh', 'arctanh', 'log', 'log10', 'divide',
        'true_divide', 'floor_divide', 'remainder', 'fmod']
    for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
              'sin', 'cos', 'tan',
              'arcsin', 'arccos', 'arctan',
              'sinh', 'cosh', 'tanh',
              'arcsinh', 'arccosh', 'arctanh',
              'absolute', 'fabs', 'negative',
              'floor', 'ceil',
              'logical_not',
              'add', 'subtract', 'multiply',
              'divide', 'true_divide', 'floor_divide',
              'remainder', 'fmod', 'hypot', 'arctan2',
              'equal', 'not_equal', 'less_equal', 'greater_equal',
              'less', 'greater',
              'logical_and', 'logical_or', 'logical_xor']:
        try:
            uf = getattr(umath, f)
        except AttributeError:
            uf = getattr(fromnumeric, f)
        mf = getattr(np.ma, f)
        args = self.d[:uf.nin]
        with np.errstate():
            if f in f_invalid_ignore:
                np.seterr(invalid='ignore')
            if f in ['arctanh', 'log', 'log10']:
                np.seterr(divide='ignore')
            ur = uf(*args)
            mr = mf(*args)
        self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
        self.assertTrue(eqmask(ur.mask, mr.mask))
def set_fp_err():
    # np.seterr(all='raise')
    np.seterr(all='warn')