The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.intp().
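Before the individual examples, a minimal sketch of what np.intp is and why index arrays use it (the output comments assume a 64-bit platform):

import numpy as np

# np.intp is the integer type NumPy uses for indexing: it is large enough
# to hold a pointer, i.e. 32 bits on 32-bit platforms, 64 bits on 64-bit.
print(np.dtype(np.intp).itemsize)   # 8 on a 64-bit platform

# Index arrays for fancy indexing are conventionally built with dtype=np.intp:
a = np.arange(10) ** 2
idx = np.array([2, 5, 7], dtype=np.intp)
print(a[idx])                       # [ 4 25 49]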
def reset_index(self):
    """Reset index to range based"""
    dfs = self.to_delayed()
    sizes = np.asarray(compute(*map(delayed(len), dfs)))
    prefixes = np.zeros_like(sizes)
    prefixes[1:] = np.cumsum(sizes[:-1])

    @delayed
    def fix_index(df, startpos):
        return df.set_index(np.arange(start=startpos,
                                      stop=startpos + len(df),
                                      dtype=np.intp))

    outdfs = [fix_index(df, startpos)
              for df, startpos in zip(dfs, prefixes)]
    return from_delayed(outdfs)
def __init__(self, data, bucket_size=128):
    if bucket_size < 1:
        raise ValueError("A minimum bucket size of 1 is expected.")
    self._data = data
    self._n, self._k = self._data.shape
    self._nodes = None
    self._buckets = []
    self._bucket_size = bucket_size
    self._node_dtype = numpy.dtype([
        ('size', numpy.intp),
        ('bucket', numpy.intp),
        ('lower_bounds', (numpy.float_, self._k)),
        ('upper_bounds', (numpy.float_, self._k)),
    ])
    self._neighbour_dtype = numpy.dtype([
        ('squared_distance', numpy.float_),
        ('index', numpy.intp),
    ])
    self._build()
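The `_node_dtype` above is a NumPy structured dtype. A reduced standalone sketch of how such record arrays behave:

import numpy

# Two np.intp fields per record, as in the node records above.
node_dtype = numpy.dtype([('size', numpy.intp), ('bucket', numpy.intp)])
nodes = numpy.zeros(3, dtype=node_dtype)
nodes[0] = (5, -1)        # assign one record as a tuple
print(nodes['size'])      # [5 0 0] -- field access yields a plain array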
def test_reverse_strides_and_subspace_bufferinit(self):
    # This tests that the strides are not reversed for simple and
    # subspace fancy indexing.
    a = np.ones(5)
    b = np.zeros(5, dtype=np.intp)[::-1]
    c = np.arange(5)[::-1]
    a[b] = c
    # If the strides are not reversed, the 0 in the arange comes last.
    assert_equal(a[0], 0)

    # This also tests that the subspace buffer is initialized:
    a = np.ones((5, 2))
    c = np.arange(10).reshape(5, 2)[::-1]
    a[b, :] = c
    assert_equal(a[0], [0, 1])
def test_unaligned(self):
    v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
    d = v.view(np.dtype("S8"))
    # unaligned source
    x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
    x = x.view(np.dtype("S8"))
    x[...] = np.array("b" * 8, dtype="S")
    b = np.arange(d.size)
    # trivial
    assert_equal(d[b], d)
    d[b] = x
    # nontrivial
    # unaligned index array
    b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
    b = b.view(np.intp)[:d.size]
    b[...] = np.arange(d.size)
    assert_equal(d[b.astype(np.int16)], d)
    d[b.astype(np.int16)] = x
    # boolean
    d[b % 2 == 0]
    d[b % 2 == 0] = x[::2]
def test_count_func(self):
    # Tests count
    assert_equal(1, count(1))
    assert_equal(0, array(1, mask=[1]))

    ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
    res = count(ott)
    self.assertTrue(res.dtype.type is np.intp)
    assert_equal(3, res)

    ott = ott.reshape((2, 2))
    res = count(ott)
    assert_(res.dtype.type is np.intp)
    assert_equal(3, res)
    res = count(ott, 0)
    assert_(isinstance(res, ndarray))
    assert_equal([1, 2], res)
    assert_(getmask(res) is nomask)

    ott = array([0., 1., 2., 3.])
    res = count(ott, 0)
    assert_(isinstance(res, ndarray))
    assert_(res.dtype.type is np.intp)

    assert_raises(IndexError, ott.count, 1)
def check_function(self, function, sz):
    from threading import Thread

    out1 = np.empty((len(self.seeds),) + sz)
    out2 = np.empty((len(self.seeds),) + sz)

    # threaded generation
    t = [Thread(target=function, args=(np.random.RandomState(s), o))
         for s, o in zip(self.seeds, out1)]
    [x.start() for x in t]
    [x.join() for x in t]

    # the same serial
    for s, o in zip(self.seeds, out2):
        function(np.random.RandomState(s), o)

    # these platforms change x87 fpu precision mode in threads
    if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
        np.testing.assert_array_almost_equal(out1, out2)
    else:
        np.testing.assert_array_equal(out1, out2)
def csc_matvec(mat_csc, vec, dense_output=True, dtype=None):
    v_nnz = vec.indices
    v_val = vec.data

    m_val = mat_csc.data
    m_ind = mat_csc.indices
    m_ptr = mat_csc.indptr

    res_dtype = dtype or np.result_type(mat_csc.dtype, vec.dtype)
    if dense_output:
        res = np.zeros((mat_csc.shape[0],), dtype=res_dtype)
        matvec2dense(m_ptr, m_ind, m_val, v_nnz, v_val, res)
    else:
        sizes = m_ptr.take(v_nnz+1) - m_ptr.take(v_nnz)
        sizes = np.concatenate(([0], np.cumsum(sizes)))
        n = sizes[-1]
        data = np.empty((n,), dtype=res_dtype)
        indices = np.empty((n,), dtype=np.intp)
        indptr = np.array([0, n], dtype=np.intp)
        matvec2sparse(m_ptr, m_ind, m_val, v_nnz, v_val, sizes, indices, data)
        res = sp.sparse.csr_matrix((data, indices, indptr),
                                   shape=(1, mat_csc.shape[0]),
                                   dtype=res_dtype)
        res.sum_duplicates()  # expensive operation
    return res
def to_coo(self, tensor_mode=False):
    userid, itemid, feedback = self.fields
    user_item_data = self.training[[userid, itemid]].values

    if tensor_mode:
        # TODO this recomputes feedback data every new function call,
        # but if data has not changed - no need for this, make a property
        new_feedback, feedback_transform = self.reindex(self.training, feedback,
                                                        inplace=False)
        self.index = self.index._replace(feedback=feedback_transform)

        idx = np.hstack((user_item_data, new_feedback[:, np.newaxis]))
        idx = np.ascontiguousarray(idx)
        val = np.ones(self.training.shape[0],)
    else:
        idx = user_item_data
        val = self.training[feedback].values

    shp = tuple(idx.max(axis=0) + 1)
    idx = idx.astype(np.intp)
    val = np.ascontiguousarray(val)
    return idx, val, shp
def test_to_coo(self, tensor_mode=False):
    userid, itemid, feedback = self.fields
    test_data = self.test.testset

    user_idx = test_data[userid].values.astype(np.intp)
    item_idx = test_data[itemid].values.astype(np.intp)
    fdbk_val = test_data[feedback].values

    if tensor_mode:
        fdbk_idx = self.index.feedback.set_index('old').loc[fdbk_val, 'new'].values
        if np.isnan(fdbk_idx).any():
            raise NotImplementedError('Not all values of feedback are present in training data')
        else:
            fdbk_idx = fdbk_idx.astype(np.intp)
        test_coo = (user_idx, item_idx, fdbk_idx)
    else:
        test_coo = (user_idx, item_idx, fdbk_val)
    return test_coo
def _compile_and_prepare_functions(self, **kwargs):
    module_text = _module_reader(find_kernel('lomb'), self._cpp_defs)
    self.module = SourceModule(module_text, options=self.module_options)

    self.dtypes = dict(
        lomb=[np.intp, np.intp, np.intp, np.intp, np.int32,
              self.real_type, self.real_type, np.int32, np.int32],
        lomb_dirsum=[np.intp, np.intp, np.intp, np.intp, np.intp,
                     np.int32, np.int32, self.real_type, self.real_type,
                     self.real_type, self.real_type, np.int32]
    )
    self.nfft_proc._compile_and_prepare_functions(**kwargs)
    for fname, dtype in self.dtypes.items():
        func = self.module.get_function(fname)
        self.prepared_functions[fname] = func.prepare(dtype)
    self.function_tuple = tuple(self.prepared_functions[fname]
                                for fname in sorted(self.dtypes.keys()))
def default(self, obj):
    # convert dates and numpy objects in a json serializable format
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    elif type(obj) in (np.int_, np.intc, np.intp, np.int8, np.int16,
                       np.int32, np.int64, np.uint8, np.uint16,
                       np.uint32, np.uint64):
        return int(obj)
    elif type(obj) in (np.bool_,):
        return bool(obj)
    elif type(obj) in (np.float_, np.float16, np.float32, np.float64,
                       np.complex_, np.complex64, np.complex128):
        return float(obj)

    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
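The enclosing encoder class is not shown in the example above, so here is a hypothetical usage sketch (NumpyJSONEncoder is an assumed name; isinstance checks against the np.integer/np.floating base classes are a common shorthand for the type lists above):

import json
from datetime import datetime

import numpy as np

class NumpyJSONEncoder(json.JSONEncoder):
    """Hypothetical encoder class wrapping a default() like the one above."""
    def default(self, obj):
        if isinstance(obj, np.integer):    # covers np.intp and friends
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        return json.JSONEncoder.default(self, obj)

print(json.dumps({'n': np.intp(42), 'when': datetime(2020, 1, 1)},
                 cls=NumpyJSONEncoder))
# {"n": 42, "when": "2020-01-01T00:00:00Z"}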
def nn_vec_basic(arr1, arr2, topn, sort=True, return_sims=False, nthreads=8):
    """
    For each row in arr1 (m1 x d) find the topn most similar rows from arr2 (m2 x d).
    Similarity is defined as dot product. Note that if the rows of arr1 and arr2
    are normalized, the dot product equals the cosine similarity and is a
    monotonically decreasing function of Euclidean distance.
    :param arr1: array of vectors to find nearest neighbours for
    :param arr2: array of vectors to search for nearest neighbours in
    :param topn: number of nearest neighbours
    :param sort: if True, indices in the i-th row of the returned array sort the
        corresponding rows of arr2 in descending order of similarity to the
        i-th row of arr1
    :param return_sims: return similarities along with indices of nearest neighbours
    :param nthreads: number of threads to use
    :return: array (m1 x topn) where the i-th row contains indices of the rows in
        arr2 most similar to the i-th row of arr1, and, if return_sims=True,
        an array (m1 x topn) of the corresponding similarities.
    """
    sims = np.dot(arr1, arr2.T)
    best_inds = argmaxk_rows(sims, topn, sort=sort, nthreads=nthreads)
    if not return_sims:
        return best_inds

    # generate row indices corresponding to best_inds (just current row id in each row) (m x k)
    rows = np.arange(best_inds.shape[0], dtype=np.intp)[:, np.newaxis].repeat(best_inds.shape[1], axis=1)
    return best_inds, sims[rows, best_inds]
def argmaxk_rows_opt1(arr, k=10, sort=False):
    """
    Optimized implementation. When sort=False it is equal to argmaxk_rows_basic.
    When sort=True and k << arr.shape[1], it should be faster, because we argsort
    only the subarray of the k max elements from each row of arr (arr.shape[0] x k)
    instead of the whole array arr (arr.shape[0] x arr.shape[1]).
    """
    # column indices of k max elements in each row (m x k)
    best_inds = np.argpartition(arr, kth=-k, axis=1)[:, -k:]
    if not sort:
        return best_inds

    # generate row indices corresponding to best_inds (just current row id in each row) (m x k)
    rows = np.arange(best_inds.shape[0], dtype=np.intp)[:, np.newaxis].repeat(best_inds.shape[1], axis=1)
    # select k max elements from each row using advanced indexing (m x k)
    best_elems = arr[rows, best_inds]
    # indices which sort each row of best_elems in descending order (m x k)
    best_elems_inds = np.argsort(best_elems, axis=1)[:, ::-1]
    # reorder best_inds so that arr[i, sorted_best_inds[i, :]] is sorted in descending order
    sorted_best_inds = best_inds[rows, best_elems_inds]
    return sorted_best_inds
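A small worked example of the argpartition-based top-k selection, assuming argmaxk_rows_opt1 as defined above is in scope:

import numpy as np

arr = np.array([[0.1, 0.9, 0.5, 0.3],
                [0.8, 0.2, 0.7, 0.4]])
print(argmaxk_rows_opt1(arr, k=2, sort=True))
# [[1 2]
#  [0 2]]  -- column indices of the two largest values per row, descending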
def check_function(self, function, sz):
    from threading import Thread

    out1 = np.empty((len(self.seeds),) + sz)
    out2 = np.empty((len(self.seeds),) + sz)

    # threaded generation
    t = [Thread(target=function, args=(mt19937.RandomState(s), o))
         for s, o in zip(self.seeds, out1)]
    [x.start() for x in t]
    [x.join() for x in t]

    # the same serial
    for s, o in zip(self.seeds, out2):
        function(mt19937.RandomState(s), o)

    # these platforms change x87 fpu precision mode in threads
    if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
        assert_array_almost_equal(out1, out2)
    else:
        assert_array_equal(out1, out2)
def test_count_func(self):
    # Tests count
    assert_equal(1, count(1))
    assert_equal(0, array(1, mask=[1]))

    ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
    res = count(ott)
    self.assertTrue(res.dtype.type is np.intp)
    assert_equal(3, res)

    ott = ott.reshape((2, 2))
    res = count(ott)
    assert_(res.dtype.type is np.intp)
    assert_equal(3, res)
    res = count(ott, 0)
    assert_(isinstance(res, ndarray))
    assert_equal([1, 2], res)
    assert_(getmask(res) is nomask)

    ott = array([0., 1., 2., 3.])
    res = count(ott, 0)
    assert_(isinstance(res, ndarray))
    assert_(res.dtype.type is np.intp)

    assert_raises(ValueError, ott.count, axis=1)
def default(self, obj):
    # convert dates and numpy objects in a json serializable format
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    elif type(obj) in [np.int_, np.intc, np.intp, np.int8, np.int16,
                       np.int32, np.int64, np.uint8, np.uint16,
                       np.uint32, np.uint64]:
        return int(obj)
    elif type(obj) in [np.bool_]:
        return bool(obj)
    elif type(obj) in [np.float_, np.float16, np.float32, np.float64,
                       np.complex_, np.complex64, np.complex128]:
        return float(obj)

    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
def test_big_indices(self):
    # ravel_multi_index for big indices (issue #7546)
    if np.intp == np.int64:
        arr = ([1, 29], [3, 5], [3, 117], [19, 2],
               [2379, 1284], [2, 2], [0, 1])
        assert_equal(
            np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
            [5627771580, 117259570957])

    # test overflow checking for too big array (issue #7546)
    dummy_arr = ([0], [0])
    half_max = np.iinfo(np.intp).max // 2
    assert_equal(
        np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
    assert_raises(ValueError,
                  np.ravel_multi_index, dummy_arr, (half_max+1, 2))
    assert_equal(
        np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
    assert_raises(ValueError,
                  np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F')
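For context, a minimal illustration of what np.ravel_multi_index computes; the overflow check above guards the resulting flat index against exceeding np.intp:

import numpy as np

# Multi-dimensional indices -> flat (C-order) indices into a (5, 6) array:
flat = np.ravel_multi_index(([1, 2], [3, 4]), (5, 6))
print(flat)  # [ 9 16]  i.e. 1*6 + 3 and 2*6 + 4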
def check_function(self, function, sz):
    from threading import Thread

    out1 = np.empty((len(self.seeds),) + sz)
    out2 = np.empty((len(self.seeds),) + sz)

    # threaded generation
    t = [Thread(target=function, args=(np.random.RandomState(s), o))
         for s, o in zip(self.seeds, out1)]
    [x.start() for x in t]
    [x.join() for x in t]

    # the same serial
    for s, o in zip(self.seeds, out2):
        function(np.random.RandomState(s), o)

    # these platforms change x87 fpu precision mode in threads
    if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
        assert_array_almost_equal(out1, out2)
    else:
        assert_array_equal(out1, out2)
def predict(self, X):
    if not hasattr(self, "classes_"):
        raise ValueError('fit')
    if self.normalize_:
        X = self._sc_X.fit_transform(X)

    X_ = self.transform(X)
    y_pred = self.estimator.predict(X_)
    return self.classes_.take(np.asarray(y_pred, dtype=np.intp))

    # elif self.predict_with == 'all':
    #     predict_ = []
    #     for mask in self.mask_:
    #         self.estimator.fit(X=self.transform(self.X_, mask=mask), y=self.y_)
    #         X_ = self.transform(X, mask=mask)
    #         y_pred = self.estimator.predict(X_)
    #         predict_.append(self.classes_.take(np.asarray(y_pred, dtype=np.intp)))
    #     return np.asarray(predict_)
def get_curand_int_func():
    code = """
    #include "curand_kernel.h"
    extern "C" {
    __global__ void rand_setup(curandStateXORWOW_t* state, int size,
                               unsigned long long seed)
    {
        int tid = threadIdx.x + blockIdx.x * blockDim.x;
        int total_threads = blockDim.x * gridDim.x;
        for (int i = tid; i < size; i += total_threads)
        {
            curand_init(seed, i, 0, &state[i]);
        }
    }
    }
    """
    mod = SourceModule(code, no_extern_c=True)
    func = mod.get_function("rand_setup")
    func.prepare('PiL')  # [np.intp, np.int32, np.uint64]
    return func
def get_fill_function(dtype, pitch=True):
    type_dst = dtype_to_ctype(dtype)
    name = "fill"
    if pitch:
        func = SourceModule(
            fill_pitch_template % {
                "name": name,
                "type_dst": type_dst
            },
            options=["--ptxas-options=-v"]).get_function(name)
        func.prepare('iiPi' + np.dtype(dtype).char)
        # [np.int32, np.int32, np.intp, np.int32, _get_type(dtype)]
    else:
        func = SourceModule(
            fill_nonpitch_template % {
                "name": name,
                "type_dst": type_dst
            },
            options=["--ptxas-options=-v"]).get_function(name)
        func.prepare('iP' + np.dtype(dtype).char)
        # [np.int32, np.intp, _get_type(dtype)]
    return func
def get_transpose_function(dtype, conj=False):
    src_type = dtype_to_ctype(dtype)
    name = "trans"
    operation = ""

    if conj:
        if dtype == np.complex128:
            operation = "pycuda::conj"
        elif dtype == np.complex64:
            operation = "pycuda::conj"

    func = SourceModule(
        transpose_template % {
            "name": name,
            "type": src_type,
            "operation": operation
        },
        options=["--ptxas-options=-v"]).get_function(name)
    func.prepare('iiPiPi')
    # [np.int32, np.int32, np.intp, np.int32, np.intp, np.int32]
    return func
def npy2py_type(npy_type):
    int_types = [
        np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
        np.int64, np.uint8, np.uint16, np.uint32, np.uint64
    ]
    float_types = [np.float_, np.float16, np.float32, np.float64]
    bytes_types = [np.str_, np.string_]

    if npy_type in int_types:
        return int
    if npy_type in float_types:
        return float
    if npy_type in bytes_types:
        return bytes
    if hasattr(npy_type, 'char'):
        if npy_type.char in ['S', 'a']:
            return bytes
        raise TypeError
    return npy_type
def test_multinomial_binary():
    # Test multinomial LR on a binary problem.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]

    for solver in ['lbfgs', 'newton-cg', 'sag']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial',
                                 random_state=42, max_iter=2000)
        clf.fit(iris.data, target)

        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)

        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 random_state=42, fit_intercept=False)
        mlr.fit(iris.data, target)
        pred = mlr.classes_[np.argmax(mlr.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_int_float_dict():
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))

    d = IntFloatDict(keys, values)
    for key, value in zip(keys, values):
        assert_equal(d[key], value)
    assert_equal(len(d), len(keys))

    d.append(120, 3.)
    assert_equal(d[120], 3.0)
    assert_equal(len(d), len(keys) + 1)
    for i in xrange(2000):
        d.append(i + 1000, 4.0)
    assert_equal(d[1100], 4.0)
def get_indices(self, i):
    """Row and column indices of the i'th bicluster.

    Only works if ``rows_`` and ``columns_`` attributes exist.

    Returns
    -------
    row_ind : np.array, dtype=np.intp
        Indices of rows in the dataset that belong to the bicluster.
    col_ind : np.array, dtype=np.intp
        Indices of columns in the dataset that belong to the bicluster.
    """
    rows = self.rows_[i]
    columns = self.columns_[i]
    return np.nonzero(rows)[0], np.nonzero(columns)[0]
def predict(self, X):
    """Perform classification on samples in X.

    For a one-class model, +1 or -1 is returned.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        For kernel="precomputed", the expected shape of X is
        [n_samples_test, n_samples_train]

    Returns
    -------
    y_pred : array, shape (n_samples,)
        Class labels for samples in X.
    """
    y = super(BaseSVC, self).predict(X)
    return self.classes_.take(np.asarray(y, dtype=np.intp))

# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def fit(self, X, y, **kwargs):
    # Determine output settings
    n_samples, self.n_features_ = X.shape

    if self.max_features is None:
        self.max_features = 'auto'

    y = np.atleast_1d(y)
    if y.ndim == 1:
        # reshape is necessary to preserve the data contiguity,
        # which [:, np.newaxis] does not.
        y = np.reshape(y, (-1, 1))

    self.n_outputs_ = y.shape[1]

    self.classes_ = [None] * self.n_outputs_
    self.n_classes_ = [1] * self.n_outputs_
    self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

    if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
        y = np.ascontiguousarray(y, dtype=DOUBLE)

    if len(y) != n_samples:
        raise ValueError("Number of labels=%d does not match "
                         "number of samples=%d" % (len(y), n_samples))

    # Build tree
    self.tree_ = ExtraTree(self.max_features, self.min_samples_split,
                           self.n_classes_, self.n_outputs_,
                           self.classification)
    self.tree_.build(X, y)

    if self.n_outputs_ == 1:
        self.n_classes_ = self.n_classes_[0]
        self.classes_ = self.classes_[0]

    return self
def test_intp(self, level=rlevel):
    # Ticket #99
    i_width = np.int_(0).nbytes * 2 - 1
    np.intp('0x' + 'f' * i_width, 16)
    self.assertRaises(OverflowError, np.intp, '0x' + 'f' * (i_width+1), 16)
    self.assertRaises(ValueError, np.intp, '0x1', 32)
    assert_equal(255, np.intp('0xFF', 16))
    assert_equal(1024, np.intp(1024))