The following code examples, extracted from Python open-source projects, illustrate how to use numpy.fmax().
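Before the project examples, a minimal standalone sketch of the behaviour that motivates most of these uses: np.fmax compares element-wise like np.maximum, but ignores a NaN whenever the other operand is a valid number (the values below are illustrative only):

import numpy as np

a = np.array([1.0, np.nan, 3.0])
b = np.array([2.0, 2.0, np.nan])

# np.fmax ignores a NaN when the other operand is a number...
print(np.fmax(a, b))      # [2. 2. 3.]
# ...while np.maximum propagates NaNs.
print(np.maximum(a, b))   # [ 2. nan nan]
# Only when both operands are NaN does np.fmax return NaN.
print(np.fmax(np.nan, np.nan))  # nan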
def imax(arrays, axis, ignore_nan=False):
    """
    Maximum of a stream of arrays along an axis.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axis : int or None, optional
        Axis along which the maximum is found. The default is to find the
        maximum along the 'stream axis', as if all arrays in ``arrays`` were
        stacked along a new dimension. If ``axis = None``, arrays in ``arrays``
        are flattened before reduction.
    ignore_nan : bool, optional
        If True, NaNs are ignored. Default is propagation of NaNs.

    Yields
    ------
    online_max : ndarray
        Cumulative maximum.
    """
    ufunc = np.fmax if ignore_nan else np.maximum
    yield from ireduce_ufunc(arrays, ufunc, axis)
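The generator above relies on ireduce_ufunc from its host project, which is not shown here. A self-contained sketch of the same cumulative-maximum idea, using np.fmax directly in a plain loop (the helper name running_fmax and the sample stream are hypothetical, not from the original project):

import numpy as np

def running_fmax(arrays):
    """Yield the cumulative element-wise maximum of a stream of arrays,
    ignoring NaNs via np.fmax."""
    iterator = iter(arrays)
    acc = np.asarray(next(iterator), dtype=float)
    yield acc
    for arr in iterator:
        acc = np.fmax(acc, arr)
        yield acc

stream = [np.array([1.0, np.nan]),
          np.array([0.5, 2.0]),
          np.array([3.0, 1.0])]
for cumulative in running_fmax(stream):
    print(cumulative)
# [ 1. nan] -> [1. 2.] -> [3. 2.]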
def test_reduce(self):
    dflt = np.typecodes['AllFloat']
    dint = np.typecodes['AllInteger']
    seq1 = np.arange(11)
    seq2 = seq1[::-1]
    func = np.fmax.reduce
    for dt in dint:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 10)
        assert_equal(func(tmp2), 10)
    for dt in dflt:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 10)
        assert_equal(func(tmp2), 10)
        tmp1[::2] = np.nan
        tmp2[::2] = np.nan
        assert_equal(func(tmp1), 9)
        assert_equal(func(tmp2), 9)
def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, np.fmin,
        np.fmod, np.hypot, np.logaddexp, np.logaddexp2, np.logical_and,
        np.logical_or, np.logical_xor, np.maximum, np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
def fea_plot(xg_model, feature, label, type='weight', max_num_features=None):
    fig, AX = plt.subplots(nrows=1, ncols=2)
    xgb.plot_importance(xg_model, xlabel=type, importance_type='weight',
                        ax=AX[0], max_num_features=max_num_features)

    fscore = xg_model.get_score(importance_type=type)
    fscore = sorted(fscore.items(), key=itemgetter(1), reverse=True)  # sort scores
    fea_index = get_fea_index(fscore, max_num_features)
    feature = feature[:, fea_index]
    dimension = len(fea_index)
    X = range(1, dimension + 1)
    Yp = np.mean(feature[np.where(label == 1)[0]], axis=0)
    Yn = np.mean(feature[np.where(label != 1)[0]], axis=0)
    for i in range(0, dimension):
        param = np.fmax(Yp[i], Yn[i])
        Yp[i] /= param
        Yn[i] /= param
    p1 = AX[1].bar(X, +Yp, facecolor='#ff9999', edgecolor='white')
    p2 = AX[1].bar(X, -Yn, facecolor='#9999ff', edgecolor='white')
    AX[1].legend((p1, p2), ('Malware', 'Normal'))
    AX[1].set_title('Comparison of selected features by their means')
    AX[1].set_xlabel('Feature Index')
    AX[1].set_ylabel('Mean Value')
    AX[1].set_ylim(-1.1, 1.1)
    plt.xticks(X, fea_index + 1, rotation=80)
    plt.suptitle('Feature Selection results')
def test_obj_value_points_correctly_class_far_from_hyperplane(self):
    bias = 0.0
    w = np.array([-1, 1, bias])
    l = self.svm.l2reg
    X = np.array([[5, 0.3], [1, -0.8], [1, 6], [-0.6, 3]])
    Y = np.array([-1, -1, 1, 1])
    # compute loss for all X -> 1 - yi * (xi * w + b)
    out = np.fmax(0, 1 - Y * (X.dot(w[0:-1]) + w[-1]))

    expectedObj = 1.0
    result, _ = self.svm._obj_func(w, X, Y, out)
    self.assertAlmostEqual(expectedObj, result)
def variability_prob(ndvi, ndsi, whiteness):
    """Use the probability of the spectral variability to identify clouds
    over land.

    Equation 15 (Zhu and Woodcock, 2012)

    Parameters
    ----------
    ndvi: ndarray
    ndsi: ndarray
    whiteness: ndarray

    Output
    ------
    ndarray :
        probability of cloud over land based on variability
    """
    ndi_max = np.fmax(np.absolute(ndvi), np.absolute(ndsi))
    f_max = 1.0 - np.fmax(ndi_max, whiteness)
    return f_max


# Eq 16, land_cloud_prob
# lCloud_Prob = lTemperature_Prob x Variability_Prob
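A quick hand check of the nested fmax composition in variability_prob, using small made-up arrays (the values are illustrative only):

import numpy as np

ndvi = np.array([0.2, -0.5])
ndsi = np.array([-0.1, 0.3])
whiteness = np.array([0.4, 0.1])

ndi_max = np.fmax(np.absolute(ndvi), np.absolute(ndsi))  # [0.2, 0.5]
prob = 1.0 - np.fmax(ndi_max, whiteness)                 # [0.6, 0.5]
print(prob)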
def test_reduce_complex(self):
    assert_equal(np.fmax.reduce([1, 2j]), 1)
    assert_equal(np.fmax.reduce([1 + 3j, 2j]), 1 + 3j)
def test_float_nans(self):
    nan = np.nan
    arg1 = np.array([0, nan, nan])
    arg2 = np.array([nan, 0, nan])
    out = np.array([0, 0, nan])
    assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
    nan = np.nan
    for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
        arg1 = np.array([0, cnan, cnan], dtype=np.complex)
        arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
        out = np.array([0, 0, nan], dtype=np.complex)
        assert_equal(np.fmax(arg1, arg2), out)
def _obj_func(self, w, X, Y, out):
    """
    Computes primal value and gradient.

    Parameters
    ----------
    w : {array-like} - hyperplane normal vector
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    Y : array-like, shape = [n_samples]
        Target vector relative to X
    out : loss function values

    Returns
    -------
    (obj, grad) : tuple, obj - function value, grad - gradient
    """
    l2reg = self.l2reg

    # we remember bias, to recover it after gradient computation
    bias = w[-1]
    # set bias to zero, don't penalize b
    w[-1] = 0

    max_out = np.fmax(0, out)
    obj = np.sum(max_out ** 2) / 2 + l2reg * w.dot(w) / 2
    grad = l2reg * w - np.append([np.dot(max_out * Y, X)], [np.sum(max_out * Y)])

    w[-1] = bias
    return (obj, grad)
def test_obj_value_points_correctly_class_close_to_hyperplane(self):
    bias = 0.0
    w = np.array([-1, 1, bias])
    l = self.svm.l2reg
    X = np.array([[0.5, 0.3], [1, 0.8], [1, 1.4], [0.6, 0.9]])
    Y = np.array([-1, -1, 1, 1])
    # compute loss for all X -> 1 - yi * (xi * w + b)
    out = np.fmax(0, 1 - Y * (X.dot(w[0:-1]) + w[-1]))

    expectedObj = 2.0650000000000004
    result, _ = self.svm._obj_func(w, X, Y, out)
    self.assertAlmostEqual(expectedObj, result)
def test_obj_grad_points(self):
    bias = 0.0
    w = np.array([-1, 1, bias])
    l = self.svm.l2reg
    X = np.array([[0.5, 0.3], [1, 0.8], [1, 1.4], [0.6, 0.9]])
    Y = np.array([-1, -1, 1, 1])
    # compute loss for all X -> 1 - yi * (xi * w + b)
    out = np.fmax(0, 1 - Y * (X.dot(w[0:-1]) + w[-1]))

    expected = np.array([-0.82, 0.41, 0.3])
    (obj, grad) = self.svm._obj_func(w, X, Y, out)
    np.testing.assert_array_almost_equal(expected, grad)
def _scale_cosine_similarity(x, metric='cosine', inverse=False):
    """Given a cosine similarity on L2 normalized data,
    approximately convert it to Jaccard similarity, and/or
    normalize it to the [0, 1] interval

    Parameters
    ----------
    x : {float, ndarray}
      the cosine similarity value
    metric : str
      the conversion to apply one of ['cosine', 'jaccard']
    inverse : bool
      perform the inverse de-normalization operation
    """
    valid_metrics = ['cosine', 'jaccard', 'cosine_norm', 'jaccard_norm',
                     'cosine-positive']
    if metric not in valid_metrics:
        raise ValueError('metric {} not supported, must be in {}'
                         .format(metric, valid_metrics))
    if metric == 'cosine':
        return x
    elif metric == 'cosine-positive':
        if isinstance(x, (int, float)):
            return max(x, 0.0)
        else:
            return np.fmax(x, 0.0)

    if metric.startswith('jaccard'):
        if not inverse:
            x = cosine2jaccard_similarity(x)
        else:
            x = jaccard2cosine_similarity(x)

    if metric.endswith('norm'):
        x = _normalize_similarity(x, metric=metric.split('_')[0],
                                  inverse=inverse)

    return x
def forward_cpu(self, inputs):
    x, = inputs
    # y = log(1 + exp(beta * x)) / beta
    bx = self.beta * x
    y = (numpy.fmax(bx, 0) +
         numpy.log1p(numpy.exp(-numpy.fabs(bx)))) * self.beta_inv
    return utils.force_array(y.astype(x.dtype)),
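The fmax term above is the usual numerically stable rewrite of softplus, log(1 + exp(z)) = max(z, 0) + log1p(exp(-|z|)). A small sketch, assuming plain NumPy and illustrative function names, comparing it with the naive formula on moderate inputs and showing that the stable form does not overflow for large arguments:

import numpy as np

def softplus_naive(z, beta=1.0):
    return np.log1p(np.exp(beta * z)) / beta

def softplus_stable(z, beta=1.0):
    # max(bz, 0) + log1p(exp(-|bz|)) avoids overflow in exp for large bz
    bz = beta * z
    return (np.fmax(bz, 0) + np.log1p(np.exp(-np.fabs(bz)))) / beta

z = np.array([-5.0, 0.0, 5.0])
print(np.allclose(softplus_naive(z), softplus_stable(z)))  # True
print(softplus_stable(np.array([1000.0])))                 # [1000.] -- no overflow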
def twoFactorGaussianExample(inds, t_max=1.0, tau_max=3.0, b0=0.0759, b1=-0.0439,
                             k=0.4454, a2=0.5, s1=0.02, s2=0.01, K=0.5,
                             verbose=False):
    '''
    Compute the two factor Gaussian Example in Beck-Tempone-Szepessy-Zouraris
    '''
    f0 = lambda tau: b0 + b1 * np.exp(-1.0 * k * tau)
    F = lambda x: np.exp(-1.0 * x)
    # np.fmax is binary: clamp the call payoff exp(-x) - K at zero
    G = lambda x: np.fmax(np.exp(-1.0 * x) - K, 0.0)
    Psi = lambda x: 1.0 * x
    U = lambda x: 0.0 * x
    d1 = lambda s: s1 * s1 * s
    d20 = lambda s: np.exp(-0.5 * a2 * s)
    d2 = lambda s: 2 * s2 * s2 / a2 * d20(s) * (1.0 - d20(s))
    drift = lambda s: d1(s) + d2(s)
    v1 = lambda s: s1 * np.ones(np.shape(s))
    v2 = lambda s: s2 * d20(s)
    vols = [v1, v2]
    identifierString = 'Evaluating the Two Factor Gaussian example.\n'
    identifierString += 's1: %f, s2: %f, b0: %f, tau_max: %f, t_max: %f\n' % (s1, s2, b0, tau_max, t_max)
    identifierString += 'k: %f, a2: %f, K: %f, b1: %f' % (k, a2, K, b1)
    return multiLevelHjmModel(inds, F, G, U, Psi, drift, vols, f0,
                              t_max=t_max, tau_max=tau_max,
                              identifierString=identifierString, verbose=verbose)
def clip_to_window(boxlist, window):
    """Clip bounding boxes to a window.

    This op clips input bounding boxes (represented by bounding box
    corners) to a window, optionally filtering out boxes that do not
    overlap at all with the window.

    Args:
      boxlist: BoxList holding M_in boxes
      window: a numpy array of shape [4] representing the
          [y_min, x_min, y_max, x_max] window to which the op should clip
          boxes.

    Returns:
      a BoxList holding M_out boxes where M_out <= M_in
    """
    y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
    win_y_min = window[0]
    win_x_min = window[1]
    win_y_max = window[2]
    win_x_max = window[3]
    y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
    y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
    x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
    x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
    clipped = np_box_list.BoxList(
        np.hstack([y_min_clipped, x_min_clipped, y_max_clipped,
                   x_max_clipped]))
    clipped = _copy_extra_fields(clipped, boxlist)
    areas = area(clipped)
    nonzero_area_indices = np.reshape(
        np.nonzero(np.greater(areas, 0.0)), [-1]).astype(np.int32)
    return gather(clipped, nonzero_area_indices)
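The clipping above is the fmin/fmax clamp pattern applied per coordinate. A stripped-down sketch with plain arrays (no BoxList, no area filtering; the box values are made up) shows the core idea:

import numpy as np

# Boxes as [y_min, x_min, y_max, x_max] rows; window in the same layout.
boxes = np.array([[-1.0, 0.2, 0.5, 1.3],
                  [ 0.1, 0.1, 0.4, 0.4]])
window = np.array([0.0, 0.0, 1.0, 1.0])

# Clamp every coordinate into the window: fmin against the upper bound,
# then fmax against the lower bound.
y = np.fmax(np.fmin(boxes[:, [0, 2]], window[2]), window[0])
x = np.fmax(np.fmin(boxes[:, [1, 3]], window[3]), window[1])
clipped = np.stack([y[:, 0], x[:, 0], y[:, 1], x[:, 1]], axis=1)
print(clipped)
# [[0.  0.2 0.5 1. ]
#  [0.1 0.1 0.4 0.4]]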
def load_contour_data(fpath, normalize=True):
    """Load contour data from vamp output csv file.

    Initializes DataFrame to have all future columns.

    Parameters
    ----------
    fpath : str
        Path to vamp output csv file.

    Returns
    -------
    contour_data : DataFrame
        Pandas data frame with all contour data.
    """
    try:
        contour_data = pd.read_csv(fpath, header=None, index_col=None,
                                   delimiter=',').astype(float)
        del contour_data[0]  # all zeros
        del contour_data[1]  # just an unnecessary index
        headers = contour_data.columns.values.astype('str')
        headers[0:12] = ['onset', 'offset', 'duration', 'pitch mean',
                         'pitch std', 'salience mean', 'salience std',
                         'salience tot', 'vibrato', 'vib rate', 'vib extent',
                         'vib coverage']
        contour_data.columns = headers
    except:
        contour_data = loadpickle(fpath)  # trying to load with pickle

    # Check if there is any column with all nans... it should not be considered
    df = contour_data.isnull().all()
    if np.where(df)[0].size:
        contour_data = contour_data.drop(
            contour_data.columns[np.where(df)[0][0]], axis=1)

    # To ensure the contour has a duration > 0
    contour_data['duration'] = np.fmax(contour_data['duration'].values, 0.001)

    contour_data.num_end_cols = 0
    contour_data['overlap'] = -1  # overlaps are unset
    contour_data['labels'] = -1  # all labels are unset
    contour_data['melodiness'] = ""
    contour_data['mel prob'] = -1
    contour_data.num_end_cols = 4

    if normalize:
        contour_data = normalize_features(contour_data)

    return contour_data
def test_datetime_minmax(self):
    # The metadata of the result should become the GCD
    # of the operand metadata
    a = np.array('1999-03-12T13', dtype='M8[2m]')
    b = np.array('1999-03-12T12', dtype='M8[s]')
    assert_equal(np.minimum(a, b), b)
    assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.fmin(a, b), b)
    assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.maximum(a, b), a)
    assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.fmax(a, b), a)
    assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
    # Viewed as integers, the comparison is opposite because
    # of the units chosen
    assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))

    # Interaction with NaT
    a = np.array('1999-03-12T13', dtype='M8[2m]')
    dtnat = np.array('NaT', dtype='M8[h]')
    assert_equal(np.minimum(a, dtnat), a)
    assert_equal(np.minimum(dtnat, a), a)
    assert_equal(np.maximum(a, dtnat), a)
    assert_equal(np.maximum(dtnat, a), a)

    # Also do timedelta
    a = np.array(3, dtype='m8[h]')
    b = np.array(3*3600 - 3, dtype='m8[s]')
    assert_equal(np.minimum(a, b), b)
    assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.fmin(a, b), b)
    assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.maximum(a, b), a)
    assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.fmax(a, b), a)
    assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
    # Viewed as integers, the comparison is opposite because
    # of the units chosen
    assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))

    # should raise between datetime and timedelta
    #
    # TODO: Allowing unsafe casting by
    #       default in ufuncs strikes again... :(
    a = np.array(3, dtype='m8[h]')
    b = np.array('1999-03-12T12', dtype='M8[s]')
    #assert_raises(TypeError, np.minimum, a, b)
    #assert_raises(TypeError, np.maximum, a, b)
    #assert_raises(TypeError, np.fmin, a, b)
    #assert_raises(TypeError, np.fmax, a, b)
    assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
    assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
    assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
    assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def gen_anscombe_forward(signal, gauss_std, gauss_mean=0, poisson_multi=1):
    """
    Applies the generalized Anscombe variance-stabilization transform
    assuming a mixed Poisson-Gaussian noise model as:

    signal = poisson_multi*Poisson{signal0} + Gauss{gauss_mean, gauss_std},

    where Poisson{} and Gauss{} are generalized descriptions of Poisson and
    Gaussian noise.

    Parameters
    ----------
    signal : ndarray
        Noisy signal (1-,2-,3D)

    gauss_std : float, int
        Standard deviation of Gaussian noise

    poisson_multi : float or int, optional (default = 1)
        Effectively a multiplier that scales the effect of the Poisson noise

    gauss_mean : float or int, optional (default = 0)
        Mean Gaussian noise level

    Returns
    -------
    fsignal : ndarray (matched to signal shape)
        "Anscombe-transformed" signal with an approximate unity standard
        deviation/variance (~ 1)

    Note
    ----
    This software is a direct translation (with minor alterations) of the
    original MATLAB software created by Alessandro Foi and Markku Mäkitalo
    (Tampere University of Technology - 2011-2012). Please cite the
    references below if using this software. http://www.cs.tut.fi/~foi/

    References
    ----------
    [1] J.L. Starck, F. Murtagh, and A. Bijaoui, Image Processing and Data
        Analysis, Cambridge University Press, Cambridge, 1998)
    """
    SMALL_VAL = 1

    fsignal = 2 / poisson_multi * _np.sqrt(
        _np.fmax(SMALL_VAL,
                 poisson_multi * signal + (3 / 8) * poisson_multi ** 2 +
                 gauss_std ** 2 - poisson_multi * gauss_mean))

#    fsignal = _ne.evaluate('2/poisson_multi * sqrt(where(poisson_multi*signal + (3/8)*poisson_multi**2 +\
#                           gauss_std**2 - poisson_multi*gauss_mean > SMALL_VAL,\
#                           poisson_multi*signal + (3/8)*poisson_multi**2 +\
#                           gauss_std**2 - poisson_multi*gauss_mean, SMALL_VAL))')
#    fsignal = 2/poisson_multi * _np.sqrt(_np.fmax(SMALL_VAL,fsignal))

    return fsignal
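The fmax call in gen_anscombe_forward floors the argument of the square root so that noisy inputs cannot drive it negative. A minimal sketch of that guard pattern on its own (the expression is simplified and the values are illustrative, not the full generalized transform):

import numpy as np

signal = np.array([-3.0, 0.5, 10.0])  # a noisy reading can dip below zero
SMALL_VAL = 1.0

# Without the clamp, sqrt would produce NaNs for negative arguments;
# np.fmax floors the argument at SMALL_VAL first.
safe = np.sqrt(np.fmax(SMALL_VAL, signal + 3.0 / 8.0))
print(safe)  # [1.  1.  3.2210...]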