The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.finfo().
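Before the project examples, here is a minimal stand-alone sketch (not taken from any of the projects below) of what numpy.finfo() reports for a floating-point dtype; the attributes shown are part of NumPy's public API:

import numpy as np

# np.finfo describes the machine limits of a floating-point type.
info = np.finfo(np.float32)
print(info.eps)         # spacing between 1.0 and the next representable float (machine epsilon)
print(info.max)         # largest representable finite value
print(info.min)         # most negative representable finite value
print(info.tiny)        # smallest positive normalized value
print(info.resolution)  # approximate decimal resolution of the type

Most of the examples below use one of these attributes, typically eps to guard against division by zero or max/min as sentinel extreme values.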
def matrix_rank(M, tol=None):
    """Return matrix rank of array using SVD method

    Args:
        M (cupy.ndarray): Input array. Its `ndim` must be less than or equal
            to 2.
        tol (None or float): Threshold of singular value of `M`. When `tol`
            is `None`, and `eps` is the epsilon value for datatype of `M`,
            then `tol` is set to `S.max() * max(M.shape) * eps`, where `S`
            is the singular value of `M`. It obeys
            :func:`numpy.linalg.matrix_rank`.

    Returns:
        cupy.ndarray: Rank of `M`.

    .. seealso:: :func:`numpy.linalg.matrix_rank`
    """
    if M.ndim < 2:
        return (M != 0).any().astype('l')
    S = decomposition.svd(M, compute_uv=False)
    if tol is None:
        tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *
               numpy.finfo(S.dtype).eps)
    return (S > tol).sum(axis=-1)

def test_eos_masking(self):
    probs = tf.constant([[-.2, -.2, -.2, -.2, -.2],
                         [-.3, -.3, -.3, 3, 0],
                         [5, 6, 0, 0, 0]])
    eos_token = 0
    previously_finished = tf.constant([0, 1, 0], dtype=tf.float32)
    masked = beam_search.mask_probs(probs, eos_token, previously_finished)

    with self.test_session() as sess:
        probs = sess.run(probs)
        masked = sess.run(masked)

        np.testing.assert_array_equal(probs[0], masked[0])
        np.testing.assert_array_equal(probs[2], masked[2])
        np.testing.assert_equal(masked[1][0], 0)
        np.testing.assert_approx_equal(masked[1][1], np.finfo('float32').min)
        np.testing.assert_approx_equal(masked[1][2], np.finfo('float32').min)
        np.testing.assert_approx_equal(masked[1][3], np.finfo('float32').min)
        np.testing.assert_approx_equal(masked[1][4], np.finfo('float32').min)

def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
        diff = self.X - np.tile(self.TY[i, :], (self.N, 1))
        diff = np.multiply(diff, diff)
        P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps

    self.P = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1 = np.sum(self.P, axis=1)
    self.Np = np.sum(self.P1)

def mean_variance_normalisation(h5f, mvn_h5f, vad=None):
    """Do mean variance normalization. Optionally use a vad.

    Parameters:
    ----------
    h5f: str. h5features file name
    mvn_h5f: str, h5features output name
    """
    dset = h5py.File(h5f).keys()[0]
    if vad is not None:
        raise NotImplementedError
    else:
        data = h5py.File(h5f)[dset]['features'][:]
        features = data
    epsilon = np.finfo(data.dtype).eps
    mean = np.mean(data)
    std = np.std(data)
    mvn_features = (features - mean) / (std + epsilon)
    shutil.copy(h5f, mvn_h5f)
    h5py.File(mvn_h5f)[dset]['features'][:] = mvn_features

def test_two_factors(self):
    """Tests the alias list for two identical factors."""
    factor_data = [
        [-1, -1],
        [-1, -1],
        [1, 1],
        [1, 1],
        [0, 0]
    ]
    factor_names = design.get_factor_names(len(factor_data[0]))
    factor_data = pd.DataFrame(factor_data, columns=factor_names)

    aliases, alias_coefs = alias_list("X1+X2", factor_data)

    answer = [[1, 0, 0], [0, 1, 1]]
    np.testing.assert_allclose(alias_coefs, answer, rtol=1e-4,
                               atol=np.finfo(float).eps)

    answer_list = ["X1 = X2"]
    self.assertEqual(answer_list, aliases)

def __init__(self, mX, sTarget, nResidual, psTarget=[], pnResidual=[],
             alpha=1.2, method='Wiener'):
    self._mX = mX
    self._eps = np.finfo(np.float).eps
    self._sTarget = sTarget
    self._nResidual = nResidual
    self._pTarget = psTarget
    self._pY = pnResidual
    self._mask = []
    self._Out = []
    self._alpha = alpha
    self._method = method
    self._iterations = 200
    self._lr = 1.5e-3  # 2e-3
    self._hetaplus = 1.1
    self._hetaminus = 0.1
    self._amountiter = 0

def xover(rate):
    """
    This is a mimic of a fwdpp recombination policy.

    We return a sorted list of breakpoints on the interval [0, 1).
    The list is capped with the max value of a float (C/C++ double),
    which is a trick fwdpp uses.

    It happens that we generate the exact same value from time to time.
    Internally, fwdpp doesn't care, and recognizes that as a
    "double x-over".  However, msprime cares, because it results in an
    edge with left == right and an Exception gets raised.  So, we purge
    out double x-overs via np.unique.
    """
    nbreaks = np.random.poisson(rate)
    if nbreaks == 0:
        return np.empty([0], dtype=np.float)
    rv = np.random.random_sample(nbreaks)
    rv = np.unique(rv)
    rv = np.insert(rv, len(rv), np.finfo(np.float).max)
    return rv

def test_against_cmath(self):
    import cmath

    points = [-1 - 1j, -1 + 1j, +1 - 1j, +1 + 1j]
    name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
                'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
    atol = 4 * np.finfo(np.complex).eps
    for func in self.funcs:
        fname = func.__name__.split('.')[-1]
        cname = name_map.get(fname, fname)
        try:
            cfunc = getattr(cmath, cname)
        except AttributeError:
            continue
        for p in points:
            a = complex(func(np.complex_(p)))
            b = cfunc(p)
            assert_(abs(a - b) < atol,
                    "%s %s: %s; cmath: %s" % (fname, p, a, b))

def _test_type_repr(self, t):
    finfo = np.finfo(t)
    last_fraction_bit_idx = finfo.nexp + finfo.nmant
    last_exponent_bit_idx = finfo.nexp
    storage_bytes = np.dtype(t).itemsize * 8
    # could add some more types to the list below
    for which in ['small denorm', 'small norm']:
        # Values from http://en.wikipedia.org/wiki/IEEE_754
        constr = np.array([0x00] * storage_bytes, dtype=np.uint8)
        if which == 'small denorm':
            byte = last_fraction_bit_idx // 8
            bytebit = 7 - (last_fraction_bit_idx % 8)
            constr[byte] = 1 << bytebit
        elif which == 'small norm':
            byte = last_exponent_bit_idx // 8
            bytebit = 7 - (last_exponent_bit_idx % 8)
            constr[byte] = 1 << bytebit
        else:
            raise ValueError('hmm')
        val = constr.view(t)[0]
        val_repr = repr(val)
        val2 = t(eval(val_repr))
        if not (val2 == 0 and val < 1e-100):
            assert_equal(val, val2)

def test_complex128_pass(self):
    nulp = 5
    x = np.linspace(-20, 20, 50, dtype=np.float64)
    x = 10**x
    x = np.r_[-x, x]
    xi = x + x*1j

    eps = np.finfo(x.dtype).eps
    y = x + x*eps*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    # The test condition needs to be at least a factor of sqrt(2) smaller
    # because the real and imaginary parts both change
    y = x + x*eps*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

    epsneg = np.finfo(x.dtype).epsneg
    y = x - x*epsneg*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    y = x - x*epsneg*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

def negativeLogLikelihoodWeighted(self, y, weightPerClass):
    # Weighting the cost of the different classes in the cost-function,
    # in order to counter class imbalance.
    e1 = np.finfo(np.float32).tiny
    addTinyProbMatrix = T.lt(self.p_y_given_x_train, 4 * e1) * e1

    weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
    log_p_y_given_x_train = T.log(self.p_y_given_x_train + addTinyProbMatrix)
    weighted_log_probs = log_p_y_given_x_train * weights

    wShape = weighted_log_probs.shape

    # Re-arrange
    idx0 = T.arange(wShape[0]).dimshuffle(0, 'x', 'x', 'x')
    idx2 = T.arange(wShape[2]).dimshuffle('x', 0, 'x', 'x')
    idx3 = T.arange(wShape[3]).dimshuffle('x', 'x', 0, 'x')
    idx4 = T.arange(wShape[4]).dimshuffle('x', 'x', 'x', 0)

    return -T.mean(weighted_log_probs[idx0, y, idx2, idx3, idx4])

def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
    fcoefs, f = make_erb_filters(sr, channel_number, 50)
    fcoefs = np.flipud(fcoefs)
    xf = erb_frilter_bank(xx, fcoefs)

    if win_type == 'hanning':
        window = np.hanning(channel_number)
    elif win_type == 'hamming':
        window = np.hamming(channel_number)
    elif win_type == 'triangle':
        window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
    else:
        window = np.ones(channel_number)
    window = window.reshape((channel_number, 1))

    xe = np.power(xf, 2.0)
    frames = 1 + ((np.size(xe, 1) - win_len) // shift_len)
    cochleagram = np.zeros((channel_number, frames))
    for i in range(frames):
        one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len],
                                np.repeat(window, win_len, 1))
        cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))

    cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
    return cochleagram

def rasta_plp_extractor(x, sr, plp_order=0, do_rasta=True):
    spec = log_power_spectrum_extractor(x, int(sr*0.02), int(sr*0.01), 'hamming', False)
    bark_filters = int(np.ceil(freq2bark(sr // 2)))
    wts = get_fft_bark_mat(sr, int(sr*0.02), bark_filters)
    bark_spec = np.matmul(wts, spec)
    if do_rasta:
        bark_spec = np.where(bark_spec == 0.0, np.finfo(float).eps, bark_spec)
        log_bark_spec = np.log(bark_spec)
        rasta_log_bark_spec = rasta_filt(log_bark_spec)
        bark_spec = np.exp(rasta_log_bark_spec)
    post_spec = postaud(bark_spec, sr / 2.)
    if plp_order > 0:
        lpcas = do_lpc(post_spec, plp_order)
    else:
        lpcas = post_spec
    return lpcas

def __init__(self, agent, env, n_runs, eval_interval,
             outdir, max_episode_len=None, explorer=None,
             step_offset=0, logger=None):
    self.agent = agent
    self.env = env
    self.max_score = np.finfo(np.float32).min
    self.start_time = time.time()
    self.n_runs = n_runs
    self.eval_interval = eval_interval
    self.outdir = outdir
    self.max_episode_len = max_episode_len
    self.explorer = explorer
    self.step_offset = step_offset
    self.prev_eval_t = (self.step_offset -
                        self.step_offset % self.eval_interval)
    self.logger = logger or logging.getLogger(__name__)

    # Write a header line first
    with open(os.path.join(self.outdir, 'scores.txt'), 'w') as f:
        custom_columns = tuple(t[0] for t in self.agent.get_statistics())
        column_names = _basic_columns + custom_columns
        print('\t'.join(column_names), file=f)

def __init__(self, n_runs, eval_interval,
             outdir, max_episode_len=None, explorer=None,
             step_offset=0, logger=None):
    self.start_time = time.time()
    self.n_runs = n_runs
    self.eval_interval = eval_interval
    self.outdir = outdir
    self.max_episode_len = max_episode_len
    self.explorer = explorer
    self.step_offset = step_offset
    self.logger = logger or logging.getLogger(__name__)

    # Values below are shared among processes
    self.prev_eval_t = mp.Value(
        'l', self.step_offset - self.step_offset % self.eval_interval)
    self._max_score = mp.Value('f', np.finfo(np.float32).min)
    self.wrote_header = mp.Value('b', False)

    # Create scores.txt
    with open(os.path.join(self.outdir, 'scores.txt'), 'a'):
        pass

def _calculate_new_weights(self, instance_probabilites, bag_probabilities):
    weights = []
    for p_ij, p_i, Y_i in zip(self._bag_split(instance_probabilites),
                              bag_probabilities, self._bag_labels):
        if Y_i > 0:
            if p_i == 0.0:
                p_i = np.finfo(float).resolution
            term_1 = (2 * p_ij * (1 - p_ij)) / p_i
        else:
            if p_i == 1.0:
                p_i = 1 - np.finfo(float).resolution
            term_1 = -((2 * p_ij * (1 - p_ij)) / (1 - p_i))

        weights += (term_1 * self.softmax_fcn.d_dt(p_ij)).tolist()

    return np.array(weights) / np.sum(np.abs(weights))

def randn_abs_clip(self, axes, clip_min=0, clip_max=0, dtype=None):
    """
    Returns a tensor initialized with the absolute value of a normal
    distribution with mean 0 and std 1, clipped to the given range.

    Arguments:
        axes: The axes of the tensor.
        clip_min: If supplied, numbers below this value are clipped to it.
        clip_max: If supplied, numbers above this value are clipped to it.
        dtype: If supplied, the type of the values.

    Returns:
        The initialized tensor.
    """
    if dtype is None:
        dtype = self.dtype
    if clip_max == 0:
        clip_max = np.finfo(dtype).max
    return np.clip(np.absolute(np.random.randn(*axes.lengths)),
                   clip_min, clip_max)

def __init__(
        self, initial_params=None, variance=1.0, covariance=None,
        n_samples_per_update=None, active=False, bounds=None, maximize=True,
        min_variance=2 * np.finfo(np.float).eps ** 2,
        min_fitness_dist=2 * np.finfo(np.float).eps, max_condition=1e7,
        log_to_file=False, log_to_stdout=False, random_state=None):
    self.initial_params = initial_params
    self.variance = variance
    self.covariance = covariance
    self.n_samples_per_update = n_samples_per_update
    self.active = active
    self.bounds = bounds
    self.maximize = maximize
    self.min_variance = min_variance
    self.min_fitness_dist = min_fitness_dist
    self.max_condition = max_condition
    self.log_to_file = log_to_file
    self.log_to_stdout = log_to_stdout
    self.random_state = random_state

def logistic_regression_cost_gradient(parameters, input, output):
    """
    Cost and gradient for logistic regression
    :param parameters: weight vector
    :param input: feature vector
    :param output: binary label (0 or 1)
    :return: cost and gradient for the input and output
    """
    prediction = expit(np.dot(input, parameters))
    if output:
        inside_log = prediction
    else:
        inside_log = 1.0 - prediction

    if inside_log != 0.0:
        cost = -np.log(inside_log)
    else:
        cost = np.finfo(float).min

    gradient = (prediction - output) * input
    return cost, gradient

def compute_overlap(a, b):
    """
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float

    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)

    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - \
         np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0]) + 1
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - \
         np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1]) + 1

    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)

    ua = np.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1),
                        axis=1) + area - iw * ih
    ua = np.maximum(ua, np.finfo(float).eps)

    intersection = iw * ih

    return intersection / ua

def configureActions(self, discrete_actions):
    # true if action space is discrete; 3 values; no push, left, right
    # false if action space is continuous; fx, both (-action_force, action_force)
    self.discrete_actions = discrete_actions

    # 3 discrete actions: no push, left, right
    # 1 continuous action element: fx
    if self.discrete_actions:
        self.action_space = spaces.Discrete(3)
    else:
        self.action_space = spaces.Box(-1.0, 1.0, shape=(1, 1))

    # Our observations can be within this box
    float_max = np.finfo(np.float32).max
    self.observation_space = gym.spaces.Box(-float_max, float_max, self.state_shape)

def configureActions(self, discrete_actions):
    # true if action space is discrete; 5 values; no push, left, right, up & down
    # false if action space is continuous; fx, fy both (-action_force, action_force)
    self.discrete_actions = discrete_actions

    # 5 discrete actions: no push, left, right, up, down
    # 2 continuous action elements: fx & fy
    if self.discrete_actions:
        self.action_space = spaces.Discrete(5)
    else:
        self.action_space = spaces.Box(-1.0, 1.0, shape=(2,))

    # Our observations can be within this box
    float_max = np.finfo(np.float32).max
    self.observation_space = gym.spaces.Box(-float_max, float_max, self.state_shape)

def __init__(self, model_xml, robot_name, timestep, frame_skip,
             action_dim, obs_dim, repeats):
    self.action_space = gym.spaces.Box(-1.0, 1.0, shape=(action_dim,))

    float_max = np.finfo(np.float32).max
    # obs space for problem is (R, obs_dim)
    #  R = number of repeats
    #  obs_dim d tuple
    self.state_shape = (repeats, obs_dim)
    self.observation_space = gym.spaces.Box(-float_max, float_max,
                                            shape=self.state_shape)

    # no state until reset.
    self.state = np.empty(self.state_shape, dtype=np.float32)

    self.frame_skip = frame_skip
    self.timestep = timestep
    self.model_xml = model_xml
    self.parts, self.joints = self.getScene(p.loadMJCF(model_xml))
    self.robot_name = robot_name
    self.dt = timestep * frame_skip
    self.metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': int(np.round(1.0 / timestep / frame_skip))
    }
    self._seed()

def configureActions(self, discrete_actions):
    # if it is possible to switch actions, do this here

    # true if action space is discrete
    # false if action space is continuous
    self.discrete_actions = discrete_actions

    # if self.discrete_actions:
    #     self.action_space = spaces.Discrete(3)
    # else:
    #     self.action_space = spaces.Box(-1.0, 1.0, shape=(1, 1))
    #
    # # Our observations can be within this box
    # float_max = np.finfo(np.float32).max
    # self.observation_space = gym.spaces.Box(-float_max, float_max, self.state_shape)

def _ncrs_python(self, Delta, delta, d, R, G):
    if R == 0 or R < np.finfo(float).eps:
        return 0

    GAMMA = 267.5987E6
    alpha_roots = jnp_zeros(1, 20) / R

    sum = 0
    for i in range(20):
        alpha = alpha_roots[i]

        num = (2 * d * alpha**2 * delta
               - 2
               + 2 * np.exp(-d * alpha**2 * delta)
               + 2 * np.exp(-d * alpha**2 * Delta)
               - np.exp(-d * alpha**2 * (Delta - delta))
               - np.exp(-d * alpha**2 * (Delta + delta)))

        dem = d**2 * alpha**6 * (R**2 * alpha**2 - 1)

        sum += (num / dem)

    return -2 * GAMMA**2 * G**2 * sum

def _sample_discrete_actions(batch_probs):
    """Sample a batch of actions from a batch of action probabilities.

    Args:
        batch_probs (ndarray): batch of action probabilities BxA

    Returns:
        List consisting of sampled actions
    """
    action_indices = []

    # Subtract a tiny value from probabilities in order to avoid
    # "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
    batch_probs = batch_probs - np.finfo(np.float32).epsneg

    for i in range(batch_probs.shape[0]):
        histogram = np.random.multinomial(1, batch_probs[i])
        action_indices.append(int(np.nonzero(histogram)[0]))
    return action_indices

def make_2d_gaussian(self, center=(0, 0)):
    '''Makes a 2D Gaussian filter with arbitrary mean and variance.

    Args:
        center (tuple): The coordinates of the center of the Gaussian,
            specified as :data:`(row, col)`. The center of the image is
            :data:`(0, 0)`.

    Returns:
        numpy array: The Gaussian mask.
    '''
    sigma = self.sigma
    n_rows = (self.patch_size - 1.) / 2.
    n_cols = (self.patch_size - 1.) / 2.

    y, x = np.ogrid[-n_rows: n_rows + 1, -n_cols: n_cols + 1]
    y0, x0 = center[1], center[0]
    gaussian_mask = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2. * sigma ** 2))
    gaussian_mask[gaussian_mask <
                  np.finfo(gaussian_mask.dtype).eps * gaussian_mask.max()] = 0
    gaussian_mask = 1. / gaussian_mask.max() * gaussian_mask
    return gaussian_mask

def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
    # We have nan checks in here because occasionally we have fields that get
    # weighted without non-zero weights.  I'm looking at you, particle fields!
    if isinstance(a1, np.ndarray):
        assert(a1.size == a2.size)
        # Mask out NaNs
        assert((np.isnan(a1) == np.isnan(a2)).all())
        a1[np.isnan(a1)] = 1.0
        a2[np.isnan(a2)] = 1.0
        # Mask out 0
        ind1 = np.array(np.abs(a1) < np.finfo(a1.dtype).eps)
        ind2 = np.array(np.abs(a2) < np.finfo(a2.dtype).eps)
        assert((ind1 == ind2).all())
        a1[ind1] = 1.0
        a2[ind2] = 1.0
    elif np.any(np.isnan(a1)) and np.any(np.isnan(a2)):
        return True
    if not isinstance(a1, np.ndarray) and a1 == a2 == 0.0:
        # NANS!
        a1 = a2 = 1.0
    return assert_almost_equal(np.array(a1)/np.array(a2), 1.0, decimals,
                               err_msg=err_msg, verbose=verbose)

def _initialize_projected_units(self, fields, chunk):
    for field in self.data_source._determine_fields(fields):
        finfo = self.ds._get_field_info(*field)
        if finfo.units is None:
            # First time calling a units="auto" field, infer units and cache
            # for future field accesses.
            finfo.units = str(chunk[field].units)
        field_unit = Unit(finfo.units, registry=self.ds.unit_registry)
        if self.method == "mip" or self._sum_only:
            path_length_unit = Unit(registry=self.ds.unit_registry)
        else:
            ax_name = self.ds.coordinates.axis_name[self.axis]
            path_element_name = ("index", "path_element_%s" % (ax_name))
            path_length_unit = self.ds.field_info[path_element_name].units
            path_length_unit = Unit(path_length_unit,
                                    registry=self.ds.unit_registry)
            # Only convert to appropriate unit system for path
            # elements that aren't angles
            if not path_length_unit.is_dimensionless:
                path_length_unit = path_length_unit.get_base_equivalent(
                    unit_system=self.ds.unit_system)
        if self.weight_field is None:
            self._projected_units[field] = field_unit*path_length_unit
        else:
            self._projected_units[field] = field_unit

def _split_fields(self, fields_to_get):
    fill, gen = self.index._split_fields(fields_to_get)
    particles = []
    alias = {}
    for field in gen:
        finfo = self.ds._get_field_info(*field)
        if finfo._function.__name__ == "_TranslationFunc":
            alias[field] = finfo
            continue
        try:
            finfo.check_available(self)
        except NeedsOriginalGrid:
            fill.append(field)
    for field in fill:
        finfo = self.ds._get_field_info(*field)
        if finfo.particle_type:
            particles.append(field)
    gen = [f for f in gen if f not in fill and f not in alias]
    fill = [f for f in fill if f not in particles]
    return fill, gen, particles, alias

def __init__(self, samples, increment):
    """Class constructor.

    Args:
        samples: Number of samples in the axis.
        increment: Increment between samples.
    """
    self.samples = int(samples)
    self.increment = increment
    self.snap_radius = np.finfo(float).eps * 10

def invcheck(x):
    eps2 = 2 * np.finfo(np.float).eps
    if x > eps2:
        x = 1 / x
    else:
        x = 0
        warnings.warn(
            "Ill-conditioning encountered, result accuracy may be poor")
    return x

def eStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
        diff = self.X - np.tile(self.TY[i, :], (self.N, 1))
        diff = np.multiply(diff, diff)
        P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps

    self.P = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1 = np.sum(self.P, axis=1)
    self.Np = np.sum(self.P1)

def null(self):
    return self.length() <= 10 * np.finfo(np.float32).eps

def dice(im1, im2):
    """
    Computes the Dice coefficient, a measure of set similarity.

    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.

    Returns
    -------
    dice : float
        Dice coefficient as a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0

    Notes
    -----
    The order of inputs for `dice` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    """
    im1 = np.asarray(im1).astype(np.bool)
    im2 = np.asarray(im2).astype(np.bool)

    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")

    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)

    return (2. * intersection.sum() + np.finfo('float').eps) / \
           (im1.sum() + im2.sum() + np.finfo('float').eps)

def dice(im1, im2):
    """
    Computes the Dice coefficient, a measure of set similarity.

    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.

    Returns
    -------
    dice : float
        Dice coefficient as a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0

    Notes
    -----
    The order of inputs for `dice` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    """
    im1 = np.asarray(im1).astype(np.bool)
    im2 = np.asarray(im2).astype(np.bool)

    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")

    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)

    return 2. * (intersection.sum() + np.finfo('float').eps) / \
           (im1.sum() + im2.sum() + 2 * np.finfo('float').eps)

def dice(im1, im2):
    """
    Computes the Dice coefficient, a measure of set similarity.

    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.

    Returns
    -------
    dice : float
        Dice coefficient as a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0

    Notes
    -----
    The order of inputs for `dice` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    """
    im1 = np.asarray(im1).astype(np.bool)
    im2 = np.asarray(im2).astype(np.bool)

    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")

    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)

    return 2. * (intersection.sum() + np.finfo('float').eps) / \
           (im1.sum() + im2.sum() + 2 * np.finfo('float').eps)

def dice_core(y, y_pred):
    y_core = np.zeros(y.shape)
    y_core[np.where((y == 1) | (y == 3) | (y == 4))[0]] = 1
    y_pred_core = np.zeros(y_pred.shape)
    y_pred_core[np.where((y_pred == 1) | (y_pred == 3) | (y_pred == 4))[0]] = 1
    return (2. * np.sum(y_pred_core * y_core) + np.finfo(float).eps) / \
           (np.sum(y_core) + np.sum(y_pred_core) + np.finfo(float).eps)

def dice_enhance(y, y_pred):
    y_enhance = np.zeros(y.shape)
    y_enhance[np.where(y == 4)[0]] = 1
    y_pred_enhance = np.zeros(y_pred.shape)
    y_pred_enhance[np.where(y_pred == 4)[0]] = 1
    return (2. * np.sum(y_pred_enhance * y_enhance) + np.finfo(float).eps) / \
           (np.sum(y_enhance) + np.sum(y_pred_enhance) + np.finfo(float).eps)

def split_train_val(self, subject_list):
    L = len(subject_list)
    np.random.seed(42)
    np.random.shuffle(subject_list)

    L_train = int(np.round(self.train_size * L))
    L_val = int(np.round((1 - self.train_size) * L - np.finfo(float).eps))
    if L_val == 0:
        return subject_list, subject_list
    else:
        return subject_list[:L_train], subject_list[L_train:L_train + L_val]

def __condjumpprobability(self, expstate, observation):
    # Note that scipy.stats.norm.pdf takes the standard deviation (rather
    # than variance) as one of its arguments
    if self.__params.jumpintensity < np.finfo(float).eps:
        return np.zeros(np.shape(expstate))
    if self.__oneminusjumpintensity < np.finfo(float).eps:
        return np.ones(np.shape(expstate))
    numerator = scipy.stats.norm.pdf(
        observation, 0.,
        np.sqrt(expstate + self.__jumpvar)) * self.__params.jumpintensity
    denominator = numerator + scipy.stats.norm.pdf(
        observation, 0., np.sqrt(expstate)) * self.__oneminusjumpintensity
    return numerator / denominator

def add_mip_starts(mip, indices, pool, max_mip_starts=float('inf'),
                   mip_start_effort_level=4):
    """

    Parameters
    ----------
    mip - RiskSLIM surrogate MIP
    indices - indices of RiskSLIM surrogate MIP
    pool - solution pool
    max_mip_starts - max number of mip starts to add (optional; default is add all)
    mip_start_effort_level - effort that CPLEX will spend trying to fix (optional; default is 4)

    Returns
    -------

    """
    try:
        obj_cutoff = mip.parameters.mip.tolerances.uppercutoff.get()
    except:
        obj_cutoff = np.inf

    n_added = 0
    for k in range(0, len(pool)):
        if n_added < max_mip_starts:
            if pool.objvals[0] <= (obj_cutoff + np.finfo('float').eps):
                mip_start_name = "mip_start_" + str(n_added)
                mip_start_obj, _ = convert_to_risk_slim_cplex_solution(
                    rho=pool.solutions[k, ], indices=indices,
                    objval=pool.objvals[k])
                mip.MIP_starts.add(mip_start_obj, mip_start_effort_level,
                                   mip_start_name)
                n_added += 1
        else:
            break

    return mip

# Data-Related Computation