The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.long().
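For context, on Python 3 np.long is simply an alias for the built-in int (and for long on Python 2). The alias was deprecated in NumPy 1.20 and removed in NumPy 1.24 (NumPy 2.0 later reintroduced np.long, but as NumPy's C-long scalar type rather than the Python int alias), so the short sketch below only illustrates its behaviour on an older NumPy release; on current releases use int or an explicit dtype such as np.int64 instead.

import numpy as np

# Minimal usage sketch (assumes an older NumPy where np.long still exists;
# on NumPy >= 1.24 replace np.long with int or np.int64).
x = np.long(3.7)                           # same as int(3.7) -> 3
arr = np.array([1, 2, 3], dtype=np.long)   # platform-default integer dtype

print(x, type(x))    # 3 <class 'int'> on Python 3
print(arr.dtype)     # e.g. int64 on most 64-bit Linux builds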
def _is_integer(x):
    """Determine whether some object ``x`` is an integer type (int, long, etc).
    This is part of the ``fixes`` module, since Python 3 removes the long
    datatype, we have to check the version major.

    Parameters
    ----------
    x : object
        The item to assess whether is an integer.

    Returns
    -------
    bool
        True if ``x`` is an integer type
    """
    return (not isinstance(x, (bool, np.bool))) and \
        isinstance(x, (numbers.Integral, int, np.int, np.long, long))  # no long type in python 3
def test_object_array_self_reference(self):
    # Object arrays with references to themselves can cause problems
    a = np.array(0, dtype=object)
    a[()] = a
    assert_raises(TypeError, int, a)
    assert_raises(TypeError, long, a)
    assert_raises(TypeError, float, a)
    assert_raises(TypeError, oct, a)
    assert_raises(TypeError, hex, a)

    # Test the same for a circular reference.
    b = np.array(a, dtype=object)
    a[()] = b
    assert_raises(TypeError, int, a)
    # Numpy has no tp_traverse currently, so circular references
    # cannot be detected. So resolve it:
    a[()] = 0

    # This was causing a to become like the above
    a = np.array(0, dtype=object)
    a[...] += 1
    assert_equal(a, 1)
def test_attribute_wrapper():
    def attribute_value_test(attribute_value):
        node = make_node('Abs', ['X'], [], name='test_node',
                         test_attribute=attribute_value)
        model = make_model(make_graph([node], 'test_graph', [
            make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 2]),
        ], []), producer_name='ngraph')
        wrapped_attribute = ModelWrapper(model).graph.node[0].get_attribute('test_attribute')
        return wrapped_attribute.get_value()

    tensor = make_tensor('test_tensor', onnx.TensorProto.FLOAT, [1], [1])

    assert attribute_value_test(1) == 1
    assert type(attribute_value_test(1)) == np.long
    assert attribute_value_test(1.0) == 1.0
    assert type(attribute_value_test(1.0)) == np.float
    assert attribute_value_test('test') == 'test'
    assert attribute_value_test(tensor)._proto == tensor
    assert attribute_value_test([1, 2, 3]) == [1, 2, 3]
    assert attribute_value_test([1.0, 2.0, 3.0]) == [1.0, 2.0, 3.0]
    assert attribute_value_test(['test1', 'test2']) == ['test1', 'test2']
    assert attribute_value_test([tensor, tensor])[1]._proto == tensor
def test_respect_dtype_singleton(self):
    # See gh-7203
    for dt in self.itype:
        lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
        ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1

        sample = self.rfunc(lbnd, ubnd, dtype=dt)
        self.assertEqual(sample.dtype, np.dtype(dt))

    for dt in (np.bool, np.int, np.long):
        lbnd = 0 if dt is np.bool else np.iinfo(dt).min
        ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1

        # gh-7284: Ensure that we get Python data types
        sample = self.rfunc(lbnd, ubnd, dtype=dt)
        self.assertFalse(hasattr(sample, 'dtype'))
        self.assertEqual(type(sample), dt)
def _extract_field_names(self, event):
    # extract field names from sids (price, volume etc), make sure
    # every sid has the same fields.
    sid_keys = []
    for sid in itervalues(event.data):
        keys = set([name for name, value in sid.items()
                    if isinstance(value,
                                  (int, float, numpy.integer,
                                   numpy.float, numpy.long))
                    ])
        sid_keys.append(keys)

    # with CUSTOM data events, there may be different fields
    # per sid. So the allowable keys are the union of all events.
    union = set.union(*sid_keys)
    unwanted_fields = {
        'portfolio',
        'sid',
        'dt',
        'type',
        'source_id',
        '_initial_len',
    }
    return union - unwanted_fields
def fit(self, X):
    _X = self.__aggregate_dataset(X)
    self.polynomial = np.polyfit(_X['expenses'].astype(np.long),
                                 _X['distance_traveled'].astype(np.long),
                                 3)
    self._polynomial_fn = np.poly1d(self.polynomial)
    return self
def test_is_numeric():
    assert is_numeric(1)
    assert is_numeric(1.)
    assert is_numeric(np.long(1))
    assert is_numeric(np.int(1.0))
    assert is_numeric(np.float(1))
    assert is_numeric(1e-12)
    assert not is_numeric('a')
    assert not is_numeric(True)
def score(self, X, y=None):
    """Returns the score on the given data, if the estimator has been refit.
    This uses the score defined by ``scoring`` where provided, and the
    ``best_estimator_.score`` method otherwise.

    Parameters
    ----------
    X : array-like or pandas DataFrame, shape = [n_samples, n_features]
        Input data, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples] or [n_samples, n_output], optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    Returns
    -------
    score : float

    Notes
    -----
    * The long-standing behavior of this method changed in version 0.16.
    * It no longer uses the metric provided by ``estimator.score`` if the
      ``scoring`` parameter was set when fitting.
    """
    X = _validate_X(X)
    y = _validate_y(y)

    if not hasattr(self, 'scorer_') or self.scorer_ is None:
        raise ValueError("No score function explicitly defined, "
                         "and the estimator doesn't provide one %s"
                         % self.best_estimator_)

    # we've already fit, and we have a scorer
    if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
        warnings.warn("The long-standing behavior to use the estimator's "
                      "score function in {0}.score has changed. The "
                      "scoring parameter is now used."
                      "".format(self.__class__.__name__), UserWarning)

    return self.scorer_(self.best_estimator_, X, y)
def is_integer(x):
    """Determine whether some object ``x`` is an integer type (int, long, etc).

    Parameters
    ----------
    x : object
        The item to assess

    Returns
    -------
    bool
        True if ``x`` is an integer type
    """
    try:
        python_major_version = sys.version_info.major
        assert(python_major_version == 2 or python_major_version == 3)
        if python_major_version == 2:
            return (not isinstance(x, (bool, np.bool))) and \
                isinstance(x, (numbers.Integral, int, long, np.int, np.long))
        elif python_major_version == 3:
            return (not isinstance(x, (bool, np.bool))) and \
                isinstance(x, (numbers.Integral, int, np.int, np.long))
    except AssertionError:
        _, _, tb = sys.exc_info()
        traceback.print_tb(tb)  # Fixed format
        tb_info = traceback.extract_tb(tb)
        filename, line, func, text = tb_info[-1]
        print('An error occurred on line {} in statement {}'.format(line, text))
        exit(1)
    return _is_integer(x)
def __init__(self, weights=None, size_average=None):
    super(ClassNLLCriterion, self).__init__()
    if size_average:
        self.size_average = size_average
    else:
        self.size_average = True
    if weights:
        # assert(weights:dim() == 1, "weights input should be 1-D Tensor")
        self.weights = weights

    self.output_tensor = np.zeros(1)
    self.total_weight_tensor = np.ones(1)
    self.target = np.zeros(1)  # , dtype=np.long)
def test_signed_integer_division_overflow(self):
    # Ticket #1317.
    def test_type(t):
        min = np.array([np.iinfo(t).min])
        min //= -1

    with np.errstate(divide="ignore"):
        for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
            test_type(t)
def test_array_side_effect(self):
    # The second use of itemsize was throwing an exception because in
    # ctors.c, discover_itemsize was calling PyObject_Length without
    # checking the return code. This failed to get the length of the
    # number 2, and the exception hung around until something checked
    # PyErr_Occurred() and returned an error.
    assert_equal(np.dtype('S10').itemsize, 10)
    np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
    assert_equal(np.dtype('S10').itemsize, 10)
def test_sequence_long(self):
    assert_equal(np.array([long(4), long(4)]).dtype, np.long)
    assert_equal(np.array([long(4), 2**80]).dtype, np.object)
    assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
    assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_matrix_multiply(self):
    self.compare_matrix_multiply_results(np.long)
    self.compare_matrix_multiply_results(np.double)
def test_random_integers_max_int(self):
    # Tests whether random_integers can generate the
    # maximum allowed Python int that can be converted
    # into a C long. Previous implementations of this
    # method have thrown an OverflowError when attempting
    # to generate this integer.
    actual = np.random.random_integers(np.iinfo('l').max,
                                       np.iinfo('l').max)
    desired = np.iinfo('l').max
    np.testing.assert_equal(actual, desired)
def get_landmarks(self, img, box=None, left=None, top=None, right=None, bottom=None):
    if box is not None:
        left, top, right, bottom = box

    left = np.long(left)
    top = np.long(top)
    right = np.long(right)
    bottom = np.long(bottom)

    bb = dlib.rectangle(left, top, right, bottom)
    landmarks = self.align_tool.findLandmarks(img, bb)
    npLandmarks = np.float32(landmarks)
    npLandmarkIndices = np.array(self.landmarkIndices)

    return npLandmarks[npLandmarkIndices]
def test_random_integers_max_int(self):
    # Tests whether random_integers can generate the
    # maximum allowed Python int that can be converted
    # into a C long. Previous implementations of this
    # method have thrown an OverflowError when attempting
    # to generate this integer.
    with suppress_warnings() as sup:
        w = sup.record(DeprecationWarning)
        actual = mt19937.random_integers(np.iinfo('l').max,
                                         np.iinfo('l').max)
        assert_(len(w) == 1)

    desired = np.iinfo('l').max
    assert_equal(actual, desired)
def test_random_integers_max_int(self):
    # Tests whether random_integers can generate the
    # maximum allowed Python int that can be converted
    # into a C long. Previous implementations of this
    # method have thrown an OverflowError when attempting
    # to generate this integer.
    with suppress_warnings() as sup:
        w = sup.record(DeprecationWarning)
        actual = np.random.random_integers(np.iinfo('l').max,
                                           np.iinfo('l').max)
        assert_(len(w) == 1)

    desired = np.iinfo('l').max
    assert_equal(actual, desired)
def assert_valid_percent(x, eq_lower=False, eq_upper=False):
    # these are all castable to float
    assert_is_type(x, (float, np.float, np.int, int, long, np.long))
    x = float(x)

    # test lower bound:
    if not ((eq_lower and 0. <= x) or ((not eq_lower) and 0. < x)):
        raise ValueError('Expected 0. %s x, but got x=%r'
                         % ('<=' if eq_lower else '<', x))
    if not ((eq_upper and x <= 1.) or ((not eq_upper) and x < 1.)):
        raise ValueError('Expected x %s 1., but got x=%r'
                         % ('<=' if eq_upper else '<', x))
    return x
def get_random_state(random_state):
    # if it's a seed, return a new seeded RandomState
    if isinstance(random_state, (int, np.int, long, np.long, NoneType)):
        return RandomState(random_state)
    # if it's a RandomState, it's been initialized
    elif isinstance(random_state, RandomState):
        return random_state
    else:
        raise TypeError('cannot seed new RandomState with type=%s'
                        % type(random_state))