The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.iscomplexobj().
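As a quick primer before the examples: np.iscomplexobj() tests the array's dtype, not its values, so it returns True for any complex-typed input even when every imaginary part is zero (np.iscomplex() is the element-wise, value-based counterpart). A minimal sketch:

import numpy as np

np.iscomplexobj(1)                      # False: Python int
np.iscomplexobj(1 + 0j)                 # True: complex scalar
np.iscomplexobj(np.array([1.0, 2.0]))   # False: float64 array
np.iscomplexobj(np.array([1, 2], dtype=complex))  # True, even though all
                                                  # imaginary parts are 0
np.iscomplex(np.array([1, 2], dtype=complex))     # array([False, False]):
                                                  # value-wise check instead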
def dct(a, b, type=2, axis=0, overwrite_input=False, threads=1,
        planner_effort="FFTW_EXHAUSTIVE"):
    global dct_object
    key = (a.shape, a.dtype, overwrite_input, axis, type)
    if key not in dct_object:
        if iscomplexobj(a):
            ac = a.real.copy()
        else:
            ac = a
        dct_object[key] = pyfftw.builders.dct(
            ac, axis=axis, type=type, overwrite_input=overwrite_input,
            threads=threads, planner_effort=planner_effort)
    dobj = dct_object[key]
    c = dobj.get_output_array()
    if iscomplexobj(a):
        dobj(a.real, c)
        b.real[:] = c
        dobj(a.imag, c)
        b.imag[:] = c
    else:
        dobj(a)
        b[:] = c
    return b
def _sort(group_idx, a, size, fill_value, dtype=None, reversed_=False):
    if np.iscomplexobj(a):
        raise NotImplementedError("a must be real, could use np.lexsort or "
                                  "sort with recarray for complex.")
    if not (np.isscalar(fill_value) or len(fill_value) == 0):
        raise ValueError("fill_value must be scalar or an empty sequence")
    if reversed_:
        order_group_idx = np.argsort(group_idx + -1j * a, kind='mergesort')
    else:
        order_group_idx = np.argsort(group_idx + 1j * a, kind='mergesort')
    counts = np.bincount(group_idx, minlength=size)
    if np.ndim(a) == 0:
        a = np.full(size, a, dtype=type(a))
    ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
    ret = np.asarray(ret, dtype=object)
    if np.isscalar(fill_value):
        fill_untouched(group_idx, ret, fill_value)
    return ret
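The argsort call above relies on NumPy ordering complex values lexicographically (real part first, then imaginary part), so group_idx + 1j*a sorts primarily by group and secondarily by value; negating a reverses the within-group order. A small demonstration with toy data:

import numpy as np

group_idx = np.array([1, 0, 1, 0])
a = np.array([30.0, 10.0, 20.0, 40.0])

# Complex values sort by real part first, then imaginary part, so this
# orders primarily by group and secondarily by value within each group.
order = np.argsort(group_idx + 1j * a, kind='mergesort')
print(group_idx[order])  # [0 0 1 1]
print(a[order])          # [10. 40. 20. 30.]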
def _sum(group_idx, a, size, fill_value, dtype=None):
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    if np.ndim(a) == 0:
        ret = np.bincount(group_idx, minlength=size).astype(dtype)
        if a != 1:
            ret *= a
    else:
        if np.iscomplexobj(a):
            ret = np.empty(size, dtype=dtype)
            ret.real = np.bincount(group_idx, weights=a.real, minlength=size)
            ret.imag = np.bincount(group_idx, weights=a.imag, minlength=size)
        else:
            ret = np.bincount(group_idx, weights=a,
                              minlength=size).astype(dtype)
    if fill_value != 0:
        fill_untouched(group_idx, ret, fill_value)
    return ret
def _mean(group_idx, a, size, fill_value, dtype=np.dtype(np.float64)):
    if np.ndim(a) == 0:
        raise ValueError("cannot take mean with scalar a")
    counts = np.bincount(group_idx, minlength=size)
    if np.iscomplexobj(a):
        dtype = a.dtype  # TODO: this is a bit clumsy
        sums = np.empty(size, dtype=dtype)
        sums.real = np.bincount(group_idx, weights=a.real, minlength=size)
        sums.imag = np.bincount(group_idx, weights=a.imag, minlength=size)
    else:
        sums = np.bincount(group_idx, weights=a, minlength=size).astype(dtype)
    with np.errstate(divide='ignore'):
        ret = sums.astype(dtype) / counts
    if not np.isnan(fill_value):
        ret[counts == 0] = fill_value
    return ret
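Both _sum and _mean split complex input into real and imaginary passes because np.bincount only accepts real-valued weights; each part is accumulated separately and recombined. The same pattern in isolation, with made-up data:

import numpy as np

group_idx = np.array([0, 0, 1, 2, 1])
a = np.array([1+2j, 3+4j, 5+6j, 7+8j, 9+10j])
size = 3

# np.bincount rejects complex weights, so accumulate each part separately.
sums = np.empty(size, dtype=a.dtype)
sums.real = np.bincount(group_idx, weights=a.real, minlength=size)
sums.imag = np.bincount(group_idx, weights=a.imag, minlength=size)
print(sums)  # [ 4. +6.j 14.+16.j  7. +8.j]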
def score(self, Y, type="loglikelihood", method="BIC"):
    if type == "loglikelihood":
        score = log_likelihood = self.compute_loglikelihood(Y)
    if type == "penalized_loglikelihood":
        N = np.prod(Y.shape)
        if np.iscomplexobj(Y):
            N = N * 2
        log_likelihood = self.compute_loglikelihood(Y)
        nb_free_parameters = self.get_nb_free_parameters()
        penalty_term = penalty_term_IC(nb_free_parameters, N, method=method)
        score = -2 * log_likelihood + penalty_term
    return score

## method for plotting
def test_poly(self):
    assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
                              [1, -3, -2, 6])

    # From matlab docs
    A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
    assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])

    # Should produce real output for perfect conjugates
    assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))
    assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j,
                                  1-2j, 1.+3.5j, 1-3.5j])))
    assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j])))
    assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j])))
    assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
    assert_(np.isrealobj(np.poly([1j, -1j])))
    assert_(np.isrealobj(np.poly([1, -1])))

    assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))

    np.random.seed(42)
    a = np.random.randn(100) + 1j*np.random.randn(100)
    assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
def _augmented_orthonormal_cols(x, k):
    # extract the shape of the x array
    n, m = x.shape
    # create the expanded array and copy x into it
    y = np.empty((n, m+k), dtype=x.dtype)
    y[:, :m] = x
    # do some modified gram schmidt to add k random orthonormal vectors
    for i in range(k):
        # sample a random initial vector
        v = np.random.randn(n)
        if np.iscomplexobj(x):
            v = v + 1j*np.random.randn(n)
        # subtract projections onto the existing unit length vectors
        for j in range(m+i):
            u = y[:, j]
            v -= (np.dot(v, u.conj()) / np.dot(u, u.conj())) * u
        # normalize v
        v /= np.sqrt(np.dot(v, v.conj()))
        # add v into the output array
        y[:, m+i] = v
    # return the expanded array
    return y
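A natural sanity check for this routine is that the returned columns are orthonormal, i.e. y.conj().T @ y is the identity. A short test sketch, assuming _augmented_orthonormal_cols as defined above:

import numpy as np

np.random.seed(0)
# Start from 2 orthonormal complex columns (Q factor of a random matrix).
x = np.linalg.qr(np.random.randn(6, 2) + 1j*np.random.randn(6, 2))[0]
y = _augmented_orthonormal_cols(x, 3)

# y.conj().T @ y should be (close to) the identity if the added columns
# are orthonormal to each other and to the original ones.
assert np.allclose(y.conj().T @ y, np.eye(5))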
def dct(a, b, type=2, axis=0, **kw):
    if iscomplexobj(a):
        b.real[:] = dct1(a.real, type=type, axis=axis)
        b.imag[:] = dct1(a.imag, type=type, axis=axis)
        return b
    else:
        b[:] = dct1(a, type=type, axis=axis)
        return b

# Define functions taking both input array and output array
def rms(x, axis=None):
    'calculate the root-mean-square of a signal; if axis is not None, the reduction is only along the given axis/axes'
    if np.iscomplexobj(x):
        x = abs(x)
    return np.sqrt(np.mean(np.square(x), axis))
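Note that for complex input rms operates on the magnitude |x|, so the result is the square root of the mean power. For example, assuming rms as defined above:

import numpy as np

x = np.array([3+4j, 0+0j])   # |x| = [5, 0]
print(rms(x))                # sqrt((25 + 0) / 2) = 3.5355...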
def fwd(self, X):
    'forward linear operator'
    assert np.iscomplexobj(X) == self.cpx, 'wrong value for cpx in constructor'
    return np.einsum('...jk,mj->...mk', X, self.S)
def adj(self, X):
    'adjoint linear operator'
    # Note: checks X (the argument); the original referenced an undefined Y.
    assert np.iscomplexobj(X) == self.cpx, 'wrong value for cpx in constructor'
    return np.einsum('...jk,mj->...mk', X, self.S.T.conj())
def _test_awgn(self, cpx):
    snr = np.random.uniform(3, 20)
    p = Problem(cpx=cpx, SNR_dB=snr)
    X = p.genX(5)
    self.assertEqual(np.iscomplexobj(X), cpx)
    Y0 = p.fwd(X)
    self.assertEqual(np.iscomplexobj(Y0), cpx)
    Y, wvar = p.add_noise(Y0)
    self.assertEqual(np.iscomplexobj(Y), cpx)
    snr_obs = -20 * np.log10(la.norm(Y - Y0) / la.norm(Y0))
    self.assertTrue(abs(snr - snr_obs) < 1.0, 'gross error in add_noise')
    wvar_obs = la.norm(Y0 - Y)**2 / Y.size
    self.assertTrue(.5 < wvar_obs/wvar < 1.5, 'gross error in add_noise wvar')
def var(self, axis=None, dtype=None, out=None, ddof=0):
    ""
    # Easy case: nomask, business as usual
    if self._mask is nomask:
        return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
    # Some data are masked, yay!
    cnt = self.count(axis=axis) - ddof
    danom = self.anom(axis=axis, dtype=dtype)
    if iscomplexobj(self):
        danom = umath.absolute(danom) ** 2
    else:
        danom *= danom
    dvar = divide(danom.sum(axis), cnt).view(type(self))
    # Apply the mask if it's not a scalar
    if dvar.ndim:
        dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
        dvar._update_from(self)
    elif getattr(dvar, '_mask', False):
        # Make sure that masked is returned when the scalar is masked.
        dvar = masked
        if out is not None:
            if isinstance(out, MaskedArray):
                out.flat = 0
                out.__setmask__(True)
            elif out.dtype.kind in 'biu':
                errmsg = "Masked data information would be lost in one or "\
                         "more location."
                raise MaskError(errmsg)
            else:
                out.flat = np.nan
            return out
    # In case we have an explicit output
    if out is not None:
        # Set the data
        out.flat = dvar
        # Set the mask if needed
        if isinstance(out, MaskedArray):
            out.__setmask__(dvar.mask)
        return out
    return dvar
def analysis(self, **kwargs):
    """ Decompose a real or complex signal using ISAP.

    Fill the instance 'analysis_data' and 'analysis_header' parameters.

    Parameters
    ----------
    kwargs: dict (optional)
        the parameters that will be passed to
        'pisap.extensions.mr_transform'.
    """
    # Checks
    if self._data is None:
        raise ValueError("Please specify first the input data.")

    # Update ISAP parameters
    kwargs["type_of_multiresolution_transform"] = self.isap_transform_id
    kwargs["number_of_scales"] = self.nb_scale

    # Analysis
    if numpy.iscomplexobj(self._data):
        analysis_data_real, self.analysis_header = self._analysis(
            self._data.real, **kwargs)
        analysis_data_imag, _ = self._analysis(
            self._data.imag, **kwargs)
        if isinstance(analysis_data_real, numpy.ndarray):
            self._analysis_data = (
                analysis_data_real + 1.j * analysis_data_imag)
        else:
            self._analysis_data = [
                re + 1.j * ima
                for re, ima in zip(analysis_data_real, analysis_data_imag)]
    else:
        self._analysis_data, self._analysis_header = self._analysis(
            self._data, **kwargs)
def iscomplex(self):
    """Returns True if any part of the signal is complex."""
    return np.any(np.iscomplex(self._ydata))
    # return np.iscomplexobj(self._ydata)
def __init__(self, data, img_shape, mask=None, use_imag=True, img_all=None,
             spect_all=None, parent=None):
    super(DialogSVD, self).__init__(parent=parent)  ### EDIT ###
    self.setup(parent=parent)
    self.setupData(img_shape=img_shape)
    self.ui_changes()

    self.U = data[0]
    self.s = data[1]
    self.Vh = data[2]

    self._n_factors = self.s.size
    self.mask = mask
    self._use_imag = use_imag  # By default, use imag portion of complex data

    if (img_all is None) and (spect_all is None):
        cube_all = self.combiner(selections=_np.arange(self._n_factors))
        self.img_all = self.mean_spatial(cube_all)
        self.spect_all = self.mean_spectral(cube_all)
    else:
        self.img_all = img_all.real
        if _np.iscomplexobj(spect_all):
            if self._use_imag:
                self.spect_all = spect_all.imag
            else:
                self.spect_all = spect_all.real
        else:
            self.spect_all = spect_all

    self.updatePlots(0)
    self.updateCurrentRemainder()
def mean_spatial(self, cube):
    ret = cube.mean(axis=-1)
    ret = ret.reshape((self._n_y, self._n_x))
    if _np.iscomplexobj(ret):
        if self._use_imag:
            return _np.imag(ret)
        else:
            return _np.real(ret)
    else:
        return ret
def mean_spectral(self, cube):
    ret = cube.mean(axis=0)
    if _np.iscomplexobj(ret):
        if self._use_imag:
            return _np.imag(ret)
        else:
            return _np.real(ret)
    else:
        return ret
def get_spatial_slice(self, num):
    img = self.U[..., num].reshape((self._n_y, self._n_x))
    return _np.real(img)
    # Used to return complex, but the SVD of complex numbers tends to
    # shove everything in U into the real component
    # if _np.iscomplexobj(img):
    #     if self._use_imag:
    #         return _np.imag(img)
    #     else:
    #         return _np.real(img)
    # else:
    #     return img
def get_spectral_slice(self, num):
    spect = self.Vh[num, :]
    if _np.iscomplexobj(spect):
        if self._use_imag:
            return _np.imag(spect)
        else:
            return _np.real(spect)
    else:
        return spect
def errorCorrectScale(self):
    """ Error Correction: Scale """
    rand_spectra = self.hsi.get_rand_spectra(5, pt_sz=3, quads=True,
                                             full=False)
    if _np.iscomplexobj(rand_spectra):
        rand_spectra = rand_spectra.real

    rng = self.hsi.freq.op_range_pix

    plugin = _widgetSG(window_length=601, polyorder=2)
    winPlotEffect = _DialogPlotEffect.dialogPlotEffect(
        rand_spectra, x=self.hsi.f, plugin=plugin, parent=self)

    if winPlotEffect is not None:
        win_size = winPlotEffect.parameters['window_length']
        order = winPlotEffect.parameters['polyorder']

        scale_err_correct_sg = _ScaleErrCorrectSG(win_size=win_size,
                                                  order=order, rng=rng)
        scale_err_correct_sg.transform(self.hsi.data)

        # Backup for Undo
        self.bcpre.add_step(['ScaleErrorCorrectSG', 'win_size', win_size,
                             'order', order])
        if self.ui.actionUndo_Backup_Enabled.isChecked():
            try:
                _BCPre.backup_pickle(self.hsi, self.bcpre.id_list[-1])
            except:
                print('Error in pickle backup (Undo functionality)')
            else:
                self.bcpre.backed_up()

        self.changeSlider()
def data_imag_over_real(self):
    if _np.iscomplexobj(self._data):
        if isinstance(self._data, _np.ndarray):
            return self._data.imag
        else:
            return _np.imag(self._data)
    else:
        return self._data
def data_real_over_imag(self):
    if _np.iscomplexobj(self._data):
        if isinstance(self._data, _np.ndarray):
            return self._data.real
        else:
            return _np.real(self._data)
    else:
        return self._data
def _calc(self, data, ret_obj, **kwargs):
    self._inst_als = _AlsCvxopt(**kwargs)
    try:
        # Get the subarray shape
        shp = data.shape[0:-2]
        total_num = _np.array(shp).prod()

        # Iterate over the sub-array -- super slick way of doing it
        for num, idx in enumerate(_np.ndindex(shp)):
            print('Detrended iteration {} / {}'.format(num + 1, total_num))
            # Imaginary portion set
            if self.use_imag and _np.iscomplexobj(data):
                if self.rng is None:
                    ret_obj[idx] -= 1j*self._inst_als.calculate(data[idx].imag)
                else:
                    ret_obj[idx][..., self.rng] -= \
                        1j*self._inst_als.calculate(
                            data[idx][..., self.rng].imag)
            else:  # Real portion set or real object
                if self.rng is None:
                    ret_obj[idx] -= self._inst_als.calculate(data[idx].real)
                else:
                    ret_obj[idx][..., self.rng] -= \
                        self._inst_als.calculate(
                            data[idx][..., self.rng].real)
    except:
        return False
    else:
        # print(self._inst_als.__dict__)
        return True
def _maybe_null_out(result, axis, mask):
    if axis is not None and getattr(result, 'ndim', False):
        null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
        if np.any(null_mask):
            if np.iscomplexobj(result):
                result = result.astype('c16')
            else:
                result = result.astype('f8')
            result[null_mask] = np.nan
    elif result is not tslib.NaT:
        null_mask = mask.size - mask.sum()
        if null_mask == 0:
            result = np.nan
    return result
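The astype promotions above exist because an integer result array cannot hold np.nan, while a complex result needs a complex dtype ('c16') to keep its NaN representable; e.g.:

import numpy as np

counts = np.array([3, 0, 5])        # integer result of some reduction
# counts[1] = np.nan                # ValueError: cannot store NaN in an int
as_float = counts.astype('f8')
as_float[1] = np.nan                # fine after promoting to float64
print(as_float)                     # [ 3. nan  5.]

cplx = np.array([1+2j, 0j]).astype('c16')
cplx[1] = np.nan                    # stored as (nan+0j)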
def get_nb_free_parameters(self):
    nb_free_parameters = 0
    for parameter_name in self.estimated_parameters:
        parameter_value = np.ravel(getattr(self, parameter_name))
        if np.iscomplexobj(parameter_value):
            nb_free_parameters = nb_free_parameters + 2*len(parameter_value)
        else:
            nb_free_parameters = nb_free_parameters + len(parameter_value)
    return nb_free_parameters
def rvs(self):
    S = self.S
    N, M = S.shape
    iscomplex = self.iscomplex
    if iscomplex is None:
        iscomplex = np.iscomplexobj(S)
    if iscomplex:
        # Split the noise power equally between real and imaginary parts
        noise = (self.noise.rvs(scale=np.sqrt(self.sigma2/2), size=(N, M))
                 + 1j*self.noise.rvs(scale=np.sqrt(self.sigma2/2),
                                     size=(N, M)))
    else:
        noise = self.noise.rvs(scale=np.sqrt(self.sigma2), size=(N, M))
    return S + noise
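Drawing the real and imaginary parts with scale sqrt(sigma2/2) keeps the total complex noise power at sigma2, because the two independent parts each contribute half the variance. A quick numerical check (using scipy.stats.norm in place of self.noise):

import numpy as np
from scipy.stats import norm

sigma2 = 4.0
n = (norm.rvs(scale=np.sqrt(sigma2/2), size=100000)
     + 1j*norm.rvs(scale=np.sqrt(sigma2/2), size=100000))

# E[|n|^2] = var(real) + var(imag) = sigma2/2 + sigma2/2 = sigma2
print(np.mean(np.abs(n)**2))  # ~4.0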
def plot(self, **kwargs):
    """ Plot a realisation of the signal waveform """
    y = self.rvs()
    if np.iscomplexobj(y):
        plt.plot(np.arange(self.N)/self.Fe, np.real(y))
        plt.ylabel("Signal (Real part)")
    else:
        plt.plot(np.arange(self.N)/self.Fe, y)
        plt.ylabel("Signal")
    plt.xlabel("Time")
def combine_xyz(vec, square=False):
    """Compute the three Cartesian components of a vector or matrix together.

    Parameters
    ----------
    vec : 2d array of shape [3 n x p]
        Input [x1 y1 z1 ... x_n y_n z_n] where x1 ... z_n can be vectors.

    Returns
    -------
    comb : array
        Output vector [sqrt(x1^2+y1^2+z1^2), ..., sqrt(x_n^2+y_n^2+z_n^2)].
    """
    if vec.ndim != 2:
        raise ValueError('Input must be 2D')
    if (vec.shape[0] % 3) != 0:
        raise ValueError('Input must have 3N rows')

    n, p = vec.shape
    if np.iscomplexobj(vec):
        vec = np.abs(vec)
    comb = vec[0::3] ** 2
    comb += vec[1::3] ** 2
    comb += vec[2::3] ** 2
    if not square:
        comb = np.sqrt(comb)
    return comb
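A small usage sketch, assuming combine_xyz as defined above: each group of three rows is collapsed to one row of Euclidean norms (complex input is reduced to magnitudes first):

import numpy as np

vec = np.array([[3.0, 0.0],
                [4.0, 1.0],
                [0.0, 2.0]])   # one xyz triplet, two columns
print(combine_xyz(vec))        # [[5.  2.23606798]]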
def __init__(self, Nr=64, C=7, Nu=64, Ns=64, beta=.01, L=5, ang=10,
             rice_k_dB=10, ple=4, SNR_dB=10.0, ambig=False, scramble=False,
             S=None, cpx=False, mmv2d=False, normS=None):
    """
    Nr : number of Rx antennas
    C : number of cells (>1 indicates there are "far" users)
    Nu : max # users per cell
    Ns : spreading code length
    beta : user load (i.e., expected active / total user ratio)
    L : paths per cluster
    ang : angular spread within cluster (in degrees)
    rice_k_dB : rice k parameter in dB
    ple : path-loss exponent: gain = 1/(1+d^ple) for distance d
    S : set of spreading codes, shape=(Ns,C*Nu)
    """
    if S is None:
        S = random_qpsk(Ns, C*Nu)
    self.Nr = Nr
    self.C = C
    self.Nu = Nu
    self.Ns = Ns
    self.beta = beta
    self.L = L
    self.ang = ang
    self.rice_k_dB = rice_k_dB
    self.ple = ple
    self.SNR_dB = SNR_dB
    self.ambig = ambig
    self.scramble = scramble
    self.cpx = cpx
    self.mmv2d = mmv2d
    if self.cpx == np.iscomplexobj(S):
        self.S = S
    else:
        if not self.cpx:
            # lift the complex codes to an equivalent real-valued form
            top = np.concatenate((S.real, -S.imag), axis=1)
            btm = np.concatenate((S.imag, S.real), axis=1)
            self.S = np.concatenate((top, btm), axis=0)
        else:
            assert False, 'WHY!?'
    if self.cpx:
        assert self.S.shape == (Ns, C*Nu)
    else:
        assert self.S.shape == (2*Ns, 2*C*Nu)
    if normS is not None:
        dnorm = np.asarray(normS) / np.sqrt(np.square(self.S).sum(axis=0))
        self.S = self.S * dnorm
    self.timegen = 0  # time spent waiting for generation of YX (does NOT
                      # count subprocess cpu time if nsubprocs>0)
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    This is a relatively robust method to compare two arrays whose amplitude
    is variable.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance (see Notes).
        Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is larger
        than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.
    spacing : Return the distance between x and the nearest adjacent number.

    Notes
    -----
    An assertion is raised if the following condition is not met::

        abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))

    Examples
    --------
    >>> x = np.array([1., 1e-10, 1e-20])
    >>> eps = np.finfo(x.dtype).eps
    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)

    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
    Traceback (most recent call last):
      ...
    AssertionError: X and Y are not equal to 1 ULP (max is 2)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    ax = np.abs(x)
    ay = np.abs(y)
    ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
    if not np.all(np.abs(x-y) <= ref):
        if np.iscomplexobj(x) or np.iscomplexobj(y):
            msg = "X and Y are not equal to %d ULP" % nulp
        else:
            max_nulp = np.max(nulp_diff(x, y))
            msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp,
                                                                   max_nulp)
        raise AssertionError(msg)
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    points between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert `x` and `y` to if given. Default is None.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Examples
    --------
    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
    1.0
    """
    import numpy as np
    if dtype:
        x = np.array(x, dtype=dtype)
        y = np.array(y, dtype=dtype)
    else:
        x = np.array(x)
        y = np.array(y)

    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    x = np.array(x, dtype=t)
    y = np.array(y, dtype=t)

    if not x.shape == y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" %
                         (x.shape, y.shape))

    def _diff(rx, ry, vdt):
        diff = np.array(rx - ry, dtype=vdt)
        return np.abs(diff)

    rx = integer_repr(x)
    ry = integer_repr(y)
    return _diff(rx, ry, t)
def synthesis(self):
    """ Reconstruct a real or complex signal from the wavelet coefficients
    using ISAP.

    Returns
    -------
    data: pisap.Image
        the reconstructed data/signal.
    """
    # Checks
    if self._analysis_data is None:
        raise ValueError("Please specify first the decomposition "
                         "coefficients array.")
    if self.use_wrapping and self._analysis_header is None:
        raise ValueError("Please specify first the decomposition "
                         "coefficients header.")

    # Message
    if self.verbose > 1:
        print("[info] Synthesis header:")
        pprint(self._analysis_header)

    # Reorganize the coefficients with ISAP convention
    # TODO: do not backup the list of bands
    if self.use_wrapping:
        analysis_buffer = numpy.zeros(
            self._analysis_buffer_shape, dtype=self.analysis_data[0].dtype)
        for scale, nb_bands in enumerate(self.nb_band_per_scale):
            for band in range(nb_bands):
                self._set_linear_band(scale, band, analysis_buffer,
                                      self.band_at(scale, band))
        _saved_analysis_data = self._analysis_data
        self._analysis_data = analysis_buffer
        self._analysis_data = [self.unflatten_fct(self)]

    # Synthesis
    if numpy.iscomplexobj(self._analysis_data[0]):
        data_real = self._synthesis(
            [arr.real for arr in self._analysis_data],
            self._analysis_header)
        data_imag = self._synthesis(
            [arr.imag for arr in self._analysis_data],
            self._analysis_header)
        data = data_real + 1.j * data_imag
    else:
        data = self._synthesis(
            self._analysis_data, self._analysis_header)

    # TODO: remove this code asap
    if self.use_wrapping:
        self._analysis_data = _saved_analysis_data

    return pisap.Image(data=data, metadata=self._image_metadata)
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
    """
    Compute the spectrogram for a real signal.
    The parameters follow the naming convention of
    matplotlib.mlab.specgram

    Args:
        samples (1D array): input audio signal
        fft_length (int): number of elements in fft window
        sample_rate (scalar): sample rate
        hop_length (int): hop length (relative offset between neighboring
            fft windows).

    Returns:
        x (2D array): spectrogram [frequency x time]
        freq (1D array): frequency of each row in x

    Note:
        This is a truncating computation e.g. if fft_length=10,
        hop_length=5 and the signal has 23 elements, then the
        last 3 elements will be truncated.
    """
    assert not np.iscomplexobj(samples), "Must not pass in complex numbers"

    window = np.hanning(fft_length)[:, None]
    window_norm = np.sum(window ** 2)

    # The scaling below follows the convention of
    # matplotlib.mlab.specgram which is the same as
    # matlab's specgram.
    scale = window_norm * sample_rate

    trunc = (len(samples) - fft_length) % hop_length
    x = samples[:len(samples) - trunc]

    # "stride trick" reshape to include overlap
    nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
    nstrides = (x.strides[0], x.strides[0] * hop_length)
    x = as_strided(x, shape=nshape, strides=nstrides)

    # window stride sanity check
    assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])

    # broadcast window, compute the real-input FFT over columns,
    # then take the squared modulus
    x = np.fft.rfft(x * window, axis=0)
    x = np.absolute(x) ** 2

    # scale, 2.0 for everything except dc and fft_length/2
    x[1:-1, :] *= (2.0 / scale)
    x[(0, -1), :] /= scale

    freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])

    return x, freqs
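A short usage sketch, assuming the spectrogram function above (with its as_strided import): with a pure 440 Hz tone, the strongest row should land near the 440 Hz bin, since the bin spacing is sample_rate / fft_length:

import numpy as np

fs = 8000                      # sample rate in Hz
t = np.arange(fs) / fs         # one second of samples
tone = np.sin(2 * np.pi * 440 * t)

x, freqs = spectrogram(tone, fft_length=256, sample_rate=fs, hop_length=128)
print(x.shape)                           # (129, 61): rfft bins x frames
print(freqs[np.argmax(x.mean(axis=1))])  # ~437.5, the bin nearest 440 Hz
                                         # (bin spacing 8000/256 = 31.25 Hz)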
def var(self, axis=None, dtype=None, out=None, ddof=0,
        keepdims=np._NoValue):
    """
    Returns the variance of the array elements along given axis.

    Masked entries are ignored, and result elements which are not
    finite will be masked.

    Refer to `numpy.var` for full documentation.

    See Also
    --------
    ndarray.var : corresponding function for ndarrays
    numpy.var : Equivalent function
    """
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}

    # Easy case: nomask, business as usual
    if self._mask is nomask:
        return self._data.var(axis=axis, dtype=dtype, out=out,
                              ddof=ddof, **kwargs)

    # Some data are masked, yay!
    cnt = self.count(axis=axis, **kwargs) - ddof
    danom = self - self.mean(axis, dtype, keepdims=True)
    if iscomplexobj(self):
        danom = umath.absolute(danom) ** 2
    else:
        danom *= danom
    dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
    # Apply the mask if it's not a scalar
    if dvar.ndim:
        dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
        dvar._update_from(self)
    elif getattr(dvar, '_mask', False):
        # Make sure that masked is returned when the scalar is masked.
        dvar = masked
        if out is not None:
            if isinstance(out, MaskedArray):
                out.flat = 0
                out.__setmask__(True)
            elif out.dtype.kind in 'biu':
                errmsg = "Masked data information would be lost in one or "\
                         "more location."
                raise MaskError(errmsg)
            else:
                out.flat = np.nan
            return out
    # In case we have an explicit output
    if out is not None:
        # Set the data
        out.flat = dvar
        # Set the mask if needed
        if isinstance(out, MaskedArray):
            out.__setmask__(dvar.mask)
        return out
    return dvar
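The iscomplexobj branch matches the convention of np.var itself: for complex data the variance is the mean squared magnitude of the deviations, abs(x - mean)**2, which yields a real, non-negative result:

import numpy as np

x = np.array([1+2j, 3-1j, -2+0.5j])
manual = np.mean(np.abs(x - x.mean())**2)
assert np.isclose(np.var(x), manual)
print(np.var(x))  # real-valued, ~5.722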