The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.double().
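Before the project examples, a minimal sketch of the two common uses of numpy.double, as a scalar constructor and as a dtype argument (not taken from any of the projects below):

import numpy as np

x = np.double('1.5')              # scalar constructor: parses to a 64-bit float (numpy.float64)
a = np.zeros(3, dtype=np.double)  # dtype argument: double-precision array storage
print(x, a.dtype)                 # 1.5 float64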
def logp_trace(model):
    """
    return a trace of logp for model
    """
    # init
    db = model.db
    n_samples = db.trace('deviance').length()
    logp = np.empty(n_samples, np.double)

    # loop over all samples
    for i_sample in range(n_samples):
        # set the value of all stochastics to their 'i_sample' value
        for stochastic in model.stochastics:
            try:
                value = db.trace(stochastic.__name__)[i_sample]
                stochastic.value = value
            except KeyError:
                print("No trace available for %s. " % stochastic.__name__)

        # get logp
        logp[i_sample] = model.logp

    return logp
def score_mod(gold, prediction, method):
    """
    Computes correlation coefficient for two lists of values.
    :param gold: list of gold values
    :param prediction: list of predicted values
    :param method: string, can be either of "pearson", "spearman" or "auc" (area under curve)
    :return: correlation coefficient and p-value
    """
    if len(gold) != len(prediction):
        raise ValueError("The two arrays must have the same length!")
    gold = np.array(gold, dtype=np.double)
    prediction = np.array(prediction, dtype=np.double)
    if method == "pearson":
        return pearson(gold, prediction)
    elif method == "spearman":
        return spearman(gold, prediction)
    elif method == "auc":
        return auc(gold, prediction)
    else:
        raise NotImplementedError("Unknown scoring measure:%s" % method)
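The pearson, spearman and auc helpers are not shown in the snippet above; a hedged usage sketch, assuming the first two wrap scipy.stats (both return a (coefficient, p-value) pair, matching the docstring):

from scipy.stats import pearsonr, spearmanr

def pearson(gold, prediction):
    # stand-in for the project's helper, assumed to wrap scipy.stats.pearsonr
    return pearsonr(gold, prediction)

def spearman(gold, prediction):
    return spearmanr(gold, prediction)

r, p = score_mod([1.0, 2.0, 3.0], [1.1, 1.9, 3.2], "pearson")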
def test_basic(self):
    dts = [np.bool, np.int16, np.int32, np.int64, np.double,
           np.complex128, np.longdouble, np.clongdouble]
    for dt in dts:
        c = np.ones(53, dtype=np.bool)
        assert_equal(np.where(c, dt(0), dt(1)), dt(0))
        assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
        assert_equal(np.where(True, dt(0), dt(1)), dt(0))
        assert_equal(np.where(False, dt(0), dt(1)), dt(1))
        d = np.ones_like(c).astype(dt)
        e = np.zeros_like(d)
        r = d.astype(dt)
        c[7] = False
        r[7] = e[7]
        assert_equal(np.where(c, e, e), e)
        assert_equal(np.where(c, d, e), r)
        assert_equal(np.where(c, d, e[0]), r)
        assert_equal(np.where(c, d[0], e), r)
        assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
        assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
        assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
        assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
        assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
        assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
        assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def do(self, a, b):
    d = linalg.det(a)
    (s, ld) = linalg.slogdet(a)
    if asarray(a).dtype.type in (single, double):
        ad = asarray(a).astype(double)
    else:
        ad = asarray(a).astype(cdouble)
    ev = linalg.eigvals(ad)
    assert_almost_equal(d, multiply.reduce(ev, axis=-1))
    assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

    s = np.atleast_1d(s)
    ld = np.atleast_1d(ld)
    m = (s != 0)
    assert_almost_equal(np.abs(s[m]), 1)
    assert_equal(ld[~m], -inf)
def test_UPLO(self):
    Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
    Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
    tgt = np.array([-1, 1], dtype=np.double)
    rtol = get_rtol(np.double)

    # Check default is 'L'
    w = np.linalg.eigvalsh(Klo)
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'L'
    w = np.linalg.eigvalsh(Klo, UPLO='L')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'l'
    w = np.linalg.eigvalsh(Klo, UPLO='l')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'U'
    w = np.linalg.eigvalsh(Kup, UPLO='U')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'u'
    w = np.linalg.eigvalsh(Kup, UPLO='u')
    assert_allclose(w, tgt, rtol=rtol)
def test_UPLO(self):
    Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
    Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
    tgt = np.array([-1, 1], dtype=np.double)
    rtol = get_rtol(np.double)

    # Check default is 'L'
    w, v = np.linalg.eigh(Klo)
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'L'
    w, v = np.linalg.eigh(Klo, UPLO='L')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'l'
    w, v = np.linalg.eigh(Klo, UPLO='l')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'U'
    w, v = np.linalg.eigh(Kup, UPLO='U')
    assert_allclose(w, tgt, rtol=rtol)
    # Check 'u'
    w, v = np.linalg.eigh(Kup, UPLO='u')
    assert_allclose(w, tgt, rtol=rtol)
def test_mode_raw(self):
    # The factorization is not unique and varies between libraries,
    # so it is not possible to check against known values. Functional
    # testing is a possibility, but awaits the exposure of more
    # of the functions in lapack_lite. Consequently, this test is
    # very limited in scope. Note that the results are in FORTRAN
    # order, hence the h arrays are transposed.
    a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)

    # Test double
    h, tau = linalg.qr(a, mode='raw')
    assert_(h.dtype == np.double)
    assert_(tau.dtype == np.double)
    assert_(h.shape == (2, 3))
    assert_(tau.shape == (2,))

    h, tau = linalg.qr(a.T, mode='raw')
    assert_(h.dtype == np.double)
    assert_(tau.dtype == np.double)
    assert_(h.shape == (3, 2))
    assert_(tau.shape == (2,))
def __init__(self, env, shape, clip=10.0, update_freq=100):
    self.env = env
    self.clip = clip
    self.update_freq = update_freq
    self.count = 0
    self.sum = 0.0
    self.sum_sqr = 0.0
    self.mean = np.zeros(shape, dtype=np.double)
    self.std = np.ones(shape, dtype=np.double)
def prepare_default(N=100, dtype=np.double):
    return (np.asarray(np.random.rand(N, N), dtype=dtype),)
    # return toc/trials, (4/3)*N*N*N*1e-9, times
def prepare_eig(N=100, dtype=np.double):
    N /= 4
    return (np.asarray(np.random.rand(int(N), int(N)), dtype=dtype),)
def prepare_svd(N=100, dtype=np.double):
    N /= 2
    return (np.asarray(np.random.rand(int(N), int(N)), dtype=dtype), False)
    # det: return toc/trials, N*N*N*1e-9, times
def prepare_dot(N=100, dtype=np.double):
    N = N * N * 10
    A = np.asarray(np.random.rand(int(N)), dtype=dtype)
    return (A, A)
    # return 1.0*toc/(trials), 2*N*N*N*1e-9, times
def prepare_ivi(N=100, dtype=np.double):
    A = np.random.rand(3260, 3260)
    B = np.random.rand(3260, 3000)
    return (A, B)
    # return 1.0*toc/(trials), 2*N*N*N*1e-9, times
def prepare_cholesky(N=100, dtype=np.double):
    N = int(N * 2)
    A = np.asarray(np.random.rand(N, N), dtype=dtype)
    return (A * A.transpose() + N * np.eye(N),)
    # return toc/trials, N*N*N/3.0*1e-9, times
    # inv: return toc/trials, 2*N*N*N*1e-9, times

#####################################################################################
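The commented-out return lines suggest these prepare_* functions feed a timing harness; a minimal sketch of such a driver (the bench name and structure are hypothetical, not from the original project):

import time
import numpy as np

def bench(kernel, prepare, N=100, trials=3):
    args = prepare(N)          # prepare_* returns the kernel's argument tuple
    tic = time.time()
    for _ in range(trials):
        kernel(*args)
    return (time.time() - tic) / trials

# e.g. average seconds per Cholesky factorization
print(bench(np.linalg.cholesky, prepare_cholesky))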
def TermProgress_nocb(*args, **kwargs):
    """TermProgress_nocb(double dfProgress, char const * pszMessage=None, void * pData=None) -> int"""
    return _gdal_array.TermProgress_nocb(*args, **kwargs)
def BandRasterIONumPy(*args, **kwargs):
    """
    BandRasterIONumPy(Band band, int bWrite, double xoff, double yoff, double xsize, double ysize,
                      PyArrayObject * psArray, int buf_type, GDALRIOResampleAlg resample_alg,
                      GDALProgressFunc callback=0, void * callback_data=None) -> CPLErr
    """
    return _gdal_array.BandRasterIONumPy(*args, **kwargs)
def RATWriteArray(rat, array, field, start=0):
    """
    Pure Python implementation of writing a chunk of the RAT
    from a numpy array. Type of array is coerced to one of the types
    (int, double, string) supported. Called from RasterAttributeTable.WriteArray
    """
    if array is None:
        raise ValueError("Expected array of dim 1")

    # if not the array type convert it to handle lists etc
    if not isinstance(array, numpy.ndarray):
        array = numpy.array(array)

    if array.ndim != 1:
        raise ValueError("Expected array of dim 1")

    if (start + array.size) > rat.GetRowCount():
        raise ValueError("Array too big to fit into RAT from start position")

    if numpy.issubdtype(array.dtype, numpy.integer):
        # is some type of integer - coerce to standard int
        # TODO: must check this is fine on all platforms
        # confusingly, numpy.int is 64 bit even if the native int type is 32 bit
        array = array.astype(numpy.int32)
    elif numpy.issubdtype(array.dtype, numpy.floating):
        # is some type of floating point - coerce to double
        array = array.astype(numpy.double)
    elif numpy.issubdtype(array.dtype, numpy.character):
        # cast away any kind of Unicode etc
        array = array.astype(numpy.character)
    else:
        raise ValueError("Array not of a supported type (integer, double or string)")

    return RATValuesIONumPyWrite(rat, field, start, array)
def test_signals_write(self):
    block = Block()
    seg = Segment()
    block.segments.append(seg)

    asig = AnalogSignal(signal=self.rquant((10, 3), pq.mV),
                        sampling_rate=pq.Quantity(10, "Hz"))
    seg.analogsignals.append(asig)
    self.write_and_compare([block])

    anotherblock = Block("ir signal block")
    seg = Segment("ir signal seg")
    anotherblock.segments.append(seg)
    irsig = IrregularlySampledSignal(signal=np.random.random((20, 3)),
                                     times=self.rquant(20, pq.ms, True),
                                     units=pq.A)
    seg.irregularlysampledsignals.append(irsig)
    self.write_and_compare([anotherblock])

    block.segments[0].analogsignals.append(
        AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,
                     sampling_period=pq.Quantity(3, "s"),
                     dtype=np.double, name="signal42",
                     description="this is an analogsignal",
                     t_start=45 * pq.ms),
    )
    self.write_and_compare([block, anotherblock])

    block.segments[0].irregularlysampledsignals.append(
        IrregularlySampledSignal(times=np.random.random(10),
                                 signal=np.random.random((10, 3)),
                                 units="mV", time_units="s",
                                 dtype=np.float,
                                 name="some sort of signal",
                                 description="the signal is described")
    )
    self.write_and_compare([block, anotherblock])
def __read_comment(self):
    """
    Read a single comment.

    The comment is stored as an Event in Segment 0, which is
    specifically for comments.

    ----------------------
    Returns an empty list.

    The returned object is already added to the Block.

    No ID number: always called from another method
    """
    # float64 -- timestamp (number of days since dec 30th 1899)
    time = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

    # int16 -- length of next string
    numchars1 = np.asscalar(np.fromfile(self._fsrc, dtype=np.int16, count=1))

    # char * numchars -- the one who sent the comment
    sender = self.__read_str(numchars1)

    # int16 -- length of next string
    numchars2 = np.asscalar(np.fromfile(self._fsrc, dtype=np.int16, count=1))

    # char * numchars -- comment text
    text = self.__read_str(numchars2, utf=False)

    comment = Event(times=pq.Quantity(time, units=pq.d), labels=text,
                    sender=sender, file_origin=self._file_origin)

    self._seg0.events.append(comment)

    return []
def __read_spiketrain_timestamped(self):
    """
    Read a SpikeTrain

    This SpikeTrain contains a time stamp for when it was recorded

    The timestamp is stored as an annotation in the SpikeTrain.

    -------------------------------------------------
    Returns a SpikeTrain object with multiple spikes.

    The returned object must be added to the Block.

    ID: 29110
    """
    # float64 -- timeStamp (number of days since dec 30th 1899)
    timestamp = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

    # convert to datetime object
    timestamp = self._convert_timestamp(timestamp)

    # seq_list -- spike list
    # combine the spikes into a single SpikeTrain
    spiketrain = self._combine_spiketrains(self.__read_list())

    # add the timestamp
    spiketrain.annotations['timestamp'] = timestamp

    return spiketrain
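These readers pass the "days since dec 30th 1899" stamps through a private _convert_timestamp that is not shown; a self-contained sketch of what such a conversion looks like for the OLE/Delphi date convention (the function name here is hypothetical):

from datetime import datetime, timedelta

def ole_days_to_datetime(days):
    # OLE/Delphi timestamps count fractional days since 1899-12-30
    return datetime(1899, 12, 30) + timedelta(days=float(days))

print(ole_days_to_datetime(43831.5))  # 2020-01-01 12:00:00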
def __read_unit_list_timestamped(self):
    """
    A list of a list of Units.

    This is the same as __read_unit_list, except that it also has a
    timestamp. This is added as an annotation to all Units.

    -----------------------------------------------
    Returns a list of Units modified in the method.

    The returned objects are already added to the Block.

    ID: 29119
    """
    # double -- time zero (number of days since dec 30th 1899)
    timestamp = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

    # convert to days since the UNIX epoch
    timestamp = self._convert_timestamp(timestamp)

    # sorter -- this is based off a sorter
    units = self.__read_unit_list()

    for unit in units:
        unit.annotations['timestamp'].append(timestamp)

    return units
def computeallcpus(self):
    """ overall stats for all cores on the nodes """
    ratios = numpy.empty((self._ncpumetrics, self._totalcores), numpy.double)

    coreindex = 0
    for host, last in self._last.iteritems():
        try:
            elapsed = last - self._first[host]
            if numpy.amin(numpy.sum(elapsed, 0)) < 1.0:
                # typically happens if the job was very short and the datapoints are too close together
                return {"error": ProcessingError.JOB_TOO_SHORT}
            coresperhost = len(last[0, :])
            ratios[:, coreindex:(coreindex + coresperhost)] = 1.0 * elapsed / numpy.sum(elapsed, 0)
            coreindex += coresperhost
        except ValueError:
            # typically happens if the linux pmda crashes during the job
            return {"error": ProcessingError.INSUFFICIENT_DATA}

    results = {}
    for i, name in enumerate(self._outnames):
        results[name] = calculate_stats(ratios[i, :])

    results['all'] = {"cnt": self._totalcores}

    return results
def computejobcpus(self):
    """ stats for the cores on the nodes that were assigned to the job (if available) """
    proc = self._job.getdata('proc')

    if proc is None:
        return {"error": ProcessingError.CPUSET_UNKNOWN}, {"error": ProcessingError.CPUSET_UNKNOWN}

    cpusallowed = self._job.getdata('proc')['cpusallowed']

    ratios = numpy.empty((self._ncpumetrics, self._totalcores), numpy.double)

    coreindex = 0
    for host, last in self._last.iteritems():
        elapsed = last - self._first[host]
        if host in cpusallowed and 'error' not in cpusallowed[host]:
            elapsed = elapsed[:, cpusallowed[host]]
        else:
            return {"error": ProcessingError.CPUSET_UNKNOWN}, {"error": ProcessingError.CPUSET_UNKNOWN}

        coresperhost = len(elapsed[0, :])
        ratios[:, coreindex:(coreindex + coresperhost)] = 1.0 * elapsed / numpy.sum(elapsed, 0)
        coreindex += coresperhost

    allowedcores = numpy.array(ratios[:, :coreindex])

    results = {}
    for i, name in enumerate(self._outnames):
        results[name] = calculate_stats(allowedcores[i, :])

    results['all'] = {"cnt": coreindex}

    effective = numpy.compress(allowedcores[1, :] < 0.95, allowedcores, axis=1)
    effectiveresults = {'all': len(effective[i, :])}
    if effectiveresults['all'] > 0:
        for i, name in enumerate(self._outnames):
            effectiveresults[name] = calculate_stats(effective[i, :])

    return results, effectiveresults
def quantize_data(x, y, wc=None, continuous_rate=0.1, separate=False):
    if isinstance(x, list):
        xt = map(list, zip(*x))
    else:
        xt = x.T
    features = [set(feat) for feat in xt]
    if wc is None:
        wc = np.array([len(feat) >= int(continuous_rate * len(y)) for feat in features])
    else:
        wc = np.asarray(wc)
    feat_dics = [{_l: i for i, _l in enumerate(feats)} if not wc[i] else None
                 for i, feats in enumerate(features)]
    if not separate:
        if np.all(~wc):
            dtype = np.int
        else:
            dtype = np.double
        x = np.array([[feat_dics[i][_l] if not wc[i] else _l for i, _l in enumerate(sample)]
                      for sample in x], dtype=dtype)
    else:
        x = np.array([[feat_dics[i][_l] if not wc[i] else _l for i, _l in enumerate(sample)]
                      for sample in x], dtype=np.double)
        x = (x[:, ~wc].astype(np.int), x[:, wc])
    label_dic = {_l: i for i, _l in enumerate(set(y))}
    y = np.array([label_dic[yy] for yy in y], dtype=np.int8)
    label_dic = {i: _l for _l, i in label_dic.items()}
    return x, y, wc, features, feat_dics, label_dic
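A toy call, assuming quantize_data above is in scope; wc is passed explicitly (as booleans) so the third, floating-point column is kept continuous while the first two are mapped to integer codes:

x = [[0, "red", 1.2], [1, "blue", 3.4], [0, "red", 5.6], [1, "blue", 7.8]]
y = ["yes", "no", "yes", "no"]
qx, qy, wc, feats, fdics, ldic = quantize_data(x, y, wc=[False, False, True])
# qx is a double array: discrete columns hold dictionary codes, the last column is unchanged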
def test_element_size(self):
    byte = torch.ByteStorage().element_size()
    char = torch.CharStorage().element_size()
    short = torch.ShortStorage().element_size()
    int = torch.IntStorage().element_size()
    long = torch.LongStorage().element_size()
    float = torch.FloatStorage().element_size()
    double = torch.DoubleStorage().element_size()

    self.assertEqual(byte, torch.ByteTensor().element_size())
    self.assertEqual(char, torch.CharTensor().element_size())
    self.assertEqual(short, torch.ShortTensor().element_size())
    self.assertEqual(int, torch.IntTensor().element_size())
    self.assertEqual(long, torch.LongTensor().element_size())
    self.assertEqual(float, torch.FloatTensor().element_size())
    self.assertEqual(double, torch.DoubleTensor().element_size())

    self.assertGreater(byte, 0)
    self.assertGreater(char, 0)
    self.assertGreater(short, 0)
    self.assertGreater(int, 0)
    self.assertGreater(long, 0)
    self.assertGreater(float, 0)
    self.assertGreater(double, 0)

    # These tests are portable, not necessarily strict for your system.
    self.assertEqual(byte, 1)
    self.assertEqual(char, 1)
    self.assertGreaterEqual(short, 2)
    self.assertGreaterEqual(int, 2)
    self.assertGreaterEqual(int, short)
    self.assertGreaterEqual(long, 4)
    self.assertGreaterEqual(long, int)
    self.assertGreaterEqual(double, float)
def test_from_numpy(self):
    dtypes = [
        np.double,
        np.float,
        np.int64,
        np.int32,
        np.uint8
    ]
    for dtype in dtypes:
        array = np.array([1, 2, 3, 4], dtype=dtype)
        self.assertEqual(torch.from_numpy(array), torch.Tensor([1, 2, 3, 4]))
def box_to_affine(xc, yc, angle, scalex, aspect):
    m = np.zeros((2, 3), np.double)
    m[0, 0] = scalex * math.cos(angle)
    m[1, 0] = scalex * math.sin(angle)
    m[0, 1] = -scalex * math.sin(angle) * aspect
    m[1, 1] = scalex * math.cos(angle) * aspect
    m[0, 2] = xc
    m[1, 2] = yc
    return m
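A quick sanity check of box_to_affine (a sketch, not from the original project): with angle 0 the matrix reduces to scaling by (scalex, scalex * aspect) plus the (xc, yc) translation:

import math
import numpy as np

m = box_to_affine(xc=10.0, yc=20.0, angle=0.0, scalex=2.0, aspect=0.5)
pt = m @ np.array([1.0, 1.0, 1.0])  # apply to the homogeneous point (1, 1)
print(pt)                           # [12. 21.]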
def from_pb_tile(tile, no_data_value=None, data_type=None):
    """Creates a ``Tile`` from ``ProtoTile``.

    Args:
        tile (ProtoTile): The ``ProtoTile`` instance to be converted.

    Returns:
        :class:`~geopyspark.geotrellis.Tile`
    """
    if not data_type:
        data_type = _mapped_data_types[tile.cellType.dataType]

    if data_type == 'BIT':
        cells = np.int8(tile.uint32Cells[:])
    elif data_type == 'BYTE':
        cells = np.int8(tile.sint32Cells[:])
    elif data_type == 'UBYTE':
        cells = np.uint8(tile.uint32Cells[:])
    elif data_type == 'SHORT':
        cells = np.int16(tile.sint32Cells[:])
    elif data_type == 'USHORT':
        cells = np.uint16(tile.uint32Cells[:])
    elif data_type == 'INT':
        cells = np.int32(tile.sint32Cells[:])
    elif data_type == 'FLOAT':
        cells = np.float32(tile.floatCells[:])
    else:
        cells = np.double(tile.doubleCells[:])

    return cells.reshape(tile.rows, tile.cols)
def get_coef(self, X):
    qr, qraux = self.qr, self.qraux
    n, p = qr.shape

    # sanity check
    assert isinstance(qr, np.ndarray), 'internal error: QR should be a np.ndarray but got %s' % type(qr)
    assert isinstance(qraux, np.ndarray), 'internal error: qraux should be a np.ndarray but got %s' % type(qraux)

    # validate input array
    X = check_array(X, dtype='numeric', copy=True, order='F')
    nx, ny = X.shape
    if nx != n:
        raise ValueError('qr and X must have same number of rows')

    # check on size
    _validate_matrix_size(n, p)

    # get the rank of the decomposition
    k = self.rank

    # get ix vector
    # if p > n:
    #     ix = np.ones(n + (p - n)) * np.nan
    #     ix[:n] = np.arange(n)  # i.e., array([0, 1, 2, nan, nan, nan])
    # else:
    #     ix = np.arange(n)

    # set up the structures to alter
    coef, info = (np.zeros((k, ny), dtype=np.double, order='F'),
                  np.zeros(1, dtype=np.int, order='F'))

    # call the fortran module IN PLACE
    _safecall(dqrsl.dqrcf, qr, n, k, qraux, X, ny, coef, 0)

    # post-processing
    # if k < p:
    #     cf = np.ones((p, ny)) * np.nan
    #     cf[self.pivot[np.arange(k)], :] = coef
    return coef if not k < p else coef[self.pivot[np.arange(k)], :]
def _prep_X_Y_for_cython(X, Y):
    X, Y = check_pairwise_arrays(X, Y)
    X, Y = X.astype(np.double, order='C'), Y.astype(np.double, order='C').T  # transposing Y here!
    res = np.zeros((X.shape[0], Y.shape[1]), dtype=X.dtype)
    return X, Y, res

# Cython proxies
def _hilbert_dot(x, y, scalar=1.0):
    # return ``2 * safe_sparse_dot(x, y) - safe_sparse_dot(x, x.T) - safe_sparse_dot(y, y.T)``
    x, y = x.astype(np.double, order='C'), y.astype(np.double, order='C')
    return _hilbert_dot_fast(x, y, scalar)
def _hilbert_matrix(X, Y=None, scalar=1.0):
    X, Y, res = _prep_X_Y_for_cython(X, Y)
    _hilbert_matrix_fast(X, Y, res, np.double(scalar))
    return res
def test_floats_from_string(self, level=rlevel):
    # Ticket #640, floats from string
    fsingle = np.single('1.234')
    fdouble = np.double('1.234')
    flongdouble = np.longdouble('1.234')
    assert_almost_equal(fsingle, 1.234)
    assert_almost_equal(fdouble, 1.234)
    assert_almost_equal(flongdouble, 1.234)
def test_byteswap_complex_scalar(self):
    # Ticket #1259 and gh-441
    for dtype in [np.dtype('<' + t) for t in np.typecodes['Complex']]:
        z = np.array([2.2 - 1.1j], dtype)
        x = z[0]  # always native-endian
        y = x.byteswap()
        if x.dtype.byteorder == z.dtype.byteorder:
            # little-endian machine
            assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder()))
        else:
            # big-endian machine
            assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype))
        # double check real and imaginary parts:
        assert_equal(x.real, y.real.byteswap())
        assert_equal(x.imag, y.imag.byteswap())
def test_float_types():
    """ Check formatting.

    This is only for the str function, and only for simple types.
    The precision of np.float and np.longdouble aren't the same as the
    python float precision.
    """
    for t in [np.float32, np.double, np.longdouble]:
        yield check_float_type, t
def test_nan_inf_float():
    """ Check formatting of nan & inf.

    This is only for the str function, and only for simple types.
    The precision of np.float and np.longdouble aren't the same as the
    python float precision.
    """
    for t in [np.float32, np.double, np.longdouble]:
        yield check_nan_inf_float, t
def test_float_type_print():
    """ Check formatting when using print """
    for t in [np.float32, np.double, np.longdouble]:
        yield check_float_type_print, t
def test_export_record(self):
    dt = [('a', 'b'),
          ('b', 'h'),
          ('c', 'i'),
          ('d', 'l'),
          ('dx', 'q'),
          ('e', 'B'),
          ('f', 'H'),
          ('g', 'I'),
          ('h', 'L'),
          ('hx', 'Q'),
          ('i', np.single),
          ('j', np.double),
          ('k', np.longdouble),
          ('ix', np.csingle),
          ('jx', np.cdouble),
          ('kx', np.clongdouble),
          ('l', 'S4'),
          ('m', 'U4'),
          ('n', 'V3'),
          ('o', '?'),
          ('p', np.half),
          ]
    x = np.array(
        [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
        dtype=dt)
    y = memoryview(x)
    assert_equal(y.shape, (1,))
    assert_equal(y.ndim, 1)
    assert_equal(y.suboffsets, EMPTY)

    sz = sum([np.dtype(b).itemsize for a, b in dt])
    if np.dtype('l').itemsize == 4:
        assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
    else:
        assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
    # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
    if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
        assert_equal(y.strides, (sz,))
    assert_equal(y.itemsize, sz)
def test_1darray(self):
    array = np.arange(24, dtype=np.double)
    from_c = test_as_c_array(array, 3)
    assert_equal(array[3], from_c)
def test_2darray(self):
    array = np.arange(24, dtype=np.double).reshape(3, 8)
    from_c = test_as_c_array(array, 2, 4)
    assert_equal(array[2, 4], from_c)
def test_3darray(self):
    array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
    from_c = test_as_c_array(array, 1, 2, 3)
    assert_equal(array[1, 2, 3], from_c)
def test_matrix_multiply(self):
    self.compare_matrix_multiply_results(np.long)
    self.compare_matrix_multiply_results(np.double)
def test_float_repr(self):
    # long double test cannot work, because eval goes through a python
    # float
    for t in [np.float32, np.float64]:
        yield self._test_type_repr, t
def mirr(values, finance_rate, reinvest_rate):
    """
    Modified internal rate of return.

    Parameters
    ----------
    values : array_like
        Cash flows (must contain at least one positive and one negative
        value) or nan is returned. The first value is considered a sunk
        cost at time zero.
    finance_rate : scalar
        Interest rate paid on the cash flows
    reinvest_rate : scalar
        Interest rate received on the cash flows upon reinvestment

    Returns
    -------
    out : float
        Modified internal rate of return

    """
    values = np.asarray(values, dtype=np.double)
    n = values.size
    pos = values > 0
    neg = values < 0
    if not (pos.any() and neg.any()):
        return np.nan
    numer = np.abs(npv(reinvest_rate, values * pos))
    denom = np.abs(npv(finance_rate, values * neg))
    return (numer / denom) ** (1.0 / (n - 1)) * (1 + reinvest_rate) - 1
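mirr relies on an npv helper that is not part of the snippet; a usage sketch with a stand-in that follows old numpy's npv convention (cash flows discounted from t = 0):

import numpy as np

def npv(rate, values):
    # present value with flows indexed from t = 0, as in numpy's npv
    values = np.asarray(values)
    return (values / (1.0 + rate) ** np.arange(len(values))).sum()

flows = [-1000.0, 300.0, 400.0, 500.0]
print(mirr(flows, finance_rate=0.08, reinvest_rate=0.12))  # about 0.098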
def test_right_left_behavior(self):
    # Needs range of sizes to test different code paths.
    # size == 1 is special cased, 1 < size < 5 is linear search, and
    # size >= 5 goes through local search and possibly binary search.
    for size in range(1, 10):
        xp = np.arange(size, dtype=np.double)
        yp = np.ones(size, dtype=np.double)
        incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
        decpts = incpts[::-1]

        incres = interp(incpts, xp, yp)
        decres = interp(decpts, xp, yp)
        inctgt = np.array([1, 1, 1, 1], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)

        incres = interp(incpts, xp, yp, left=0)
        decres = interp(decpts, xp, yp, left=0)
        inctgt = np.array([0, 1, 1, 1], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)

        incres = interp(incpts, xp, yp, right=2)
        decres = interp(decpts, xp, yp, right=2)
        inctgt = np.array([1, 1, 1, 2], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)

        incres = interp(incpts, xp, yp, left=0, right=2)
        decres = interp(decpts, xp, yp, left=0, right=2)
        inctgt = np.array([0, 1, 1, 2], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)
def get_complex_dtype(dtype):
    return {single: csingle, double: cdouble,
            csingle: csingle, cdouble: cdouble}[dtype]