The following code examples, extracted from open-source Python projects, illustrate how to use numpy.generic().
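numpy.generic is the abstract base class from which all NumPy scalar types (np.int64, np.float32, ...) derive, which is why the isinstance(x, (np.ndarray, np.generic)) test recurs throughout the examples below: it matches both arrays and NumPy scalars. A minimal sketch of the distinction (variable names here are illustrative):

import numpy as np

x = np.float64(1.5)                 # a NumPy scalar, not a 0-d array
print(isinstance(x, np.generic))    # True
print(isinstance(x, np.ndarray))    # False

# .item() (and .tolist() on arrays) converts NumPy values to plain
# Python objects -- the pattern most of the JSON/YAML encoders below use
print(type(x.item()))               # <class 'float'>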
def write_metadata(path, meta='.meta.yaml', **params):
    """Writes metadata for a dataset.

    Args:
        path (str): path to **dataset** (not meta file) whose metadata
            is to be written. If the meta file already exists, it will
            be overwritten.
        meta (str): suffix identifying the dataset's meta file
        **params: all other keyword arguments are treated as dataset
            attributes, and added to the meta file
    """
    if 'n_channels' in params:
        del params['n_channels']
    if 'n_samples' in params:
        del params['n_samples']

    if os.path.isdir(path):
        metafile = os.path.join(path, meta[1:])
    else:
        metafile = path + meta

    for k, v in params.items():
        if isinstance(v, (np.ndarray, np.generic)):
            params[k] = v.tolist()

    with codecs.open(metafile, 'w', encoding='utf-8') as yaml_file:
        yaml_file.write(yaml.safe_dump(params, default_flow_style=False))
def json_conversion(obj):
    """Encode additional objects to JSON."""
    try:
        # numpy isn't an explicit dependency of bowtie
        # so we can't assume it's available
        import numpy as np
        if isinstance(obj, (np.ndarray, np.generic)):
            return obj.tolist()
    except ImportError:
        pass

    try:
        # pandas isn't an explicit dependency of bowtie
        # so we can't assume it's available
        import pandas as pd
        if isinstance(obj, pd.Index):
            return obj.tolist()
    except ImportError:
        pass

    if isinstance(obj, (datetime, time, date)):
        return obj.isoformat()

    raise TypeError('Not sure how to serialize {} of type {}'.format(obj, type(obj)))
def encoders(obj):
    """Convert Python object to msgpack encodable ones."""
    try:
        # numpy isn't an explicit dependency of bowtie
        # so we can't assume it's available
        import numpy as np
        if isinstance(obj, (np.ndarray, np.generic)):
            # https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html
            return obj.tolist()
    except ImportError:
        pass

    try:
        # pandas isn't an explicit dependency of bowtie
        # so we can't assume it's available
        import pandas as pd
        if isinstance(obj, pd.Index):
            return obj.tolist()
    except ImportError:
        pass

    if isinstance(obj, (datetime, time, date)):
        return obj.isoformat()

    return obj
def fmt(obj, nest_level=0):
    """ Format any common object """
    if nest_level > 10:
        return ""
    if isinstance(obj, float):
        return "{0:.3f}".format(obj)
    if isinstance(obj, list):
        return "(" + ",".join(map(lambda x: fmt(x, nest_level + 1), obj)) + ")"
    if isinstance(obj, (numpy.ndarray, numpy.generic)):
        return fmt(obj.tolist(), nest_level + 1)
    if isinstance(obj, dict):
        # format each (key, value) pair, then format the list of pairs
        # (rewritten as a comprehension: the original two-argument lambda
        # over dict.items() is not valid in Python 3)
        pairs = ["(" + fmt(k) + "," + fmt(v, nest_level + 1) + ")"
                 for k, v in obj.items()]
        return fmt(pairs, nest_level + 1)
    if isinstance(obj, Vector3):
        return "({},{},{})".format(fmt(obj.x), fmt(obj.y), fmt(obj.z))
    if isinstance(obj, Quaternion):
        return "({},{},{},{})".format(fmt(obj.x), fmt(obj.y), fmt(obj.z), fmt(obj.w))
    # print " obj " + str(obj) + " is of type " + str(type(obj))
    return str(obj)
def np2gdal_dtype(d):
    """
    Get GDAL RasterBand datatype that corresponds with NumPy datatype
    Input should be numpy array or numpy dtype
    """
    dt_dict = gdal_array.codes
    if isinstance(d, (np.ndarray, np.generic)):
        d = d.dtype
    # This creates dtype from another built-in type
    # d = np.dtype(d)
    if isinstance(d, np.dtype):
        if d.name == 'int8':
            gdal_dt = 1
        elif d.name == 'bool':
            # Write out as Byte
            gdal_dt = 1
        else:
            gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)]
    else:
        print("Input must be NumPy array or NumPy dtype")
        gdal_dt = None
    return gdal_dt
def test_describe_typefiltering_category_bool(self):
    df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
                    'B_str': ['a', 'b', 'c', 'd'] * 6,
                    'C_bool': [True] * 12 + [False] * 12,
                    'D_num': np.arange(24.) + .5,
                    'E_ts': tm.makeTimeSeries()[:24].index})

    desc = df.describe()
    expected_cols = ['D_num']
    expected = DataFrame(dict((k, df[k].describe()) for k in expected_cols),
                         columns=expected_cols)
    assert_frame_equal(desc, expected)

    desc = df.describe(include=["category"])
    self.assertTrue(desc.columns.tolist() == ["A_cat"])

    # 'all' includes numpy-dtypes + category
    desc1 = df.describe(include="all")
    desc2 = df.describe(include=[np.generic, "category"])
    assert_frame_equal(desc1, desc2)
def __setitem__(self, in_slice, value):
    """Set ndarray value"""
    if not self.writable:
        raise ValueError('trying to assign to a readonly NDArray')
    if isinstance(in_slice, int):
        sliced_arr = self._at(in_slice)
        sliced_arr[:] = value
        return
    if not isinstance(in_slice, slice) or in_slice.step is not None:
        raise ValueError('NDArray only support continuous slicing on axis 0')
    if in_slice.start is not None or in_slice.stop is not None:
        sliced_arr = self._slice(in_slice.start, in_slice.stop)
        sliced_arr[:] = value
        return
    if isinstance(value, NDArray):
        if value.handle is not self.handle:
            value.copyto(self)
    elif isinstance(value, numeric_types):
        NDArray._set_value(float(value), out=self)
    elif isinstance(value, (np.ndarray, np.generic)):
        self._sync_copyfrom(value)
    else:
        raise TypeError('type %s not supported' % str(type(value)))
def read_attr(self, path, attr_name, default=False):
    """Read an attribute of an HDF5 group."""
    _check_hdf5_path(self._h5py_file, path)
    attrs = self._h5py_file[path].attrs
    if attr_name in attrs:
        try:
            out = attrs[attr_name]
            if (isinstance(out, (np.ndarray, np.generic)) and
                    out.dtype.kind == 'S'):
                out = out.tostring().decode('UTF-8')
                out = out.replace('\x00', '')
            return out
        except (TypeError, IOError):
            logger.debug("Unable to read attribute `%s` at `%s`.",
                         attr_name, path)
            return
    elif default is False:
        raise KeyError("The attribute '{0:s}' ".format(attr_name) +
                       "at `{}` doesn't exist.".format(path))
    return default
def capture_frame(self, frame):
    if not isinstance(frame, (np.ndarray, np.generic)):
        raise error.InvalidFrame(
            'Wrong type {} for {} (must be np.ndarray or np.generic)'.format(
                type(frame), frame))
    if frame.shape != self.frame_shape:
        raise error.InvalidFrame(
            "Your frame has shape {}, but the VideoRecorder is configured for shape {}.".format(
                frame.shape, self.frame_shape))
    if frame.dtype != np.uint8:
        raise error.InvalidFrame(
            "Your frame has data type {}, but we require uint8 (i.e. RGB values from 0-255).".format(
                frame.dtype))

    if distutils.version.LooseVersion(np.__version__) >= distutils.version.LooseVersion('1.9.0'):
        self.proc.stdin.write(frame.tobytes())
    else:
        self.proc.stdin.write(frame.tostring())
def get_generic_name():
    # may need to make this a search for the first non-kdllib reference
    # make generic name from highest calling context
    prev_function_name = None
    for i in range(len(inspect.stack())):
        (frame, filename, line_number,
         function_name, lines, index) = inspect.stack()[i]
        # print(frame, filename, line_number, function_name, lines, index)
        # Use stack to get easier function name than parsing the code itself
        if i > 0:
            _, _, _, prev_function_name, _, _ = inspect.stack()[i - 1]
        else:
            prev_function_name = function_name
        script_name = filename.split(os.sep)[-1]
        lib_location = os.path.realpath(__file__)
        lib_name = lib_location.split(os.sep)[-1]
        # cover .py and .pyc
        if script_name != lib_name and script_name != lib_name[:-1]:
            name = script_name + "_" + prev_function_name
            # print(frame, filename, line_number, function_name, lines, index)
            return name
    raise ValueError("Issue in generic name getter")

# Many of these from Ishaan G.
def default(self, obj):
    if isinstance(obj, np.ndarray) and obj.ndim == 1:
        return obj.tolist()
    elif isinstance(obj, np.generic):
        return obj.item()
    return json.JSONEncoder.default(self, obj)
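A default method like this is meant to live on a json.JSONEncoder subclass. A usage sketch under that assumption; the wrapper class name NumpyJSONEncoder is hypothetical and not part of the extracted example:

import json
import numpy as np

class NumpyJSONEncoder(json.JSONEncoder):
    # hypothetical wrapper; the extracted example above shows only default()
    def default(self, obj):
        if isinstance(obj, np.ndarray) and obj.ndim == 1:
            return obj.tolist()
        elif isinstance(obj, np.generic):
            return obj.item()
        return json.JSONEncoder.default(self, obj)

print(json.dumps({'a': np.int64(3)}, cls=NumpyJSONEncoder))  # {"a": 3}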
def test_oddfeatures_3(self):
    # Tests some generic features
    atest = array([10], mask=True)
    btest = array([20])
    idx = atest.mask
    atest[idx] = btest[idx]
    assert_equal(atest, [20])
def test_tolist_specialcase(self):
    # Test mvoid.tolist: make sure we return a standard Python object
    a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
    # w/o mask: each entry is a np.void whose elements are standard Python
    for entry in a:
        for item in entry.tolist():
            assert_(not isinstance(item, np.generic))
    # w/ mask: each entry is a ma.void whose elements should be
    # standard Python
    a.mask[0] = (0, 1)
    for entry in a:
        for item in entry.tolist():
            assert_(not isinstance(item, np.generic))
def test_masked_where_oddities(self):
    # Tests some generic features.
    atest = ones((10, 10, 10), dtype=float)
    btest = zeros(atest.shape, MaskType)
    ctest = masked_where(btest, atest)
    assert_equal(atest, ctest)
def _call(self, *args, **kwargs):
    axis = kwargs['axis'] if 'axis' in kwargs else None
    if len(args) == 0:
        raise Exception('number of arguments must be more than 0')
    if builtins.any(
            not isinstance(_, (core.ndarray, numpy.ndarray, numpy.generic))
            for _ in args):
        raise TypeError('Invalid argument type for \'{}\': ({})'.format(
            self.name,
            ', '.join(repr(type(_)) for _ in args)))

    def is_cupy_data(a):
        return isinstance(a, (core.ndarray, numpy.generic))

    if builtins.all(is_cupy_data(_) for _ in args):
        types = [_.dtype for _ in args]
        key = tuple(types)
        if key not in self._memo:
            if self.input_num is not None:
                nin = self.input_num
            else:
                nin = len(args)
            f = _get_fusion(self.func, nin, self.reduce,
                            self.post_map, self.identity, types, self.name)
            self._memo[key] = f
        f = self._memo[key]
        if self.reduce is None:
            return f(*args)
        else:
            return f(*args, axis=axis)
    else:
        if builtins.any(type(_) is core.ndarray for _ in args):
            types = '.'.join(repr(type(_)) for _ in args)
            message = "Can't fuse \n %s(%s)" % (self.name, types)
            warnings.warn(message)
        if self.reduce is None:
            return self.func(*args)
        elif axis is None:
            return self.post_map(self.reduce(self.func(*args)))
        else:
            return self.post_map(self.reduce(self.func(*args), axis=axis))
def tensor_to_protobuf(tensor):
    pb_tensor = ops_pb.Tensor()
    pb_tensor.info.dtype = dtype_to_protobuf(tensor.dtype)
    pb_tensor.info.shape.extend(tensor.shape)
    if isinstance(tensor, (np.ndarray, np.generic)):
        pb_tensor.data = tensor.tobytes()
    else:
        raise ValueError("Unknown tensor value of {}".format(tensor))
    return pb_tensor
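Note that NumPy scalars expose the same buffer-oriented API as arrays, which is why the (np.ndarray, np.generic) check suffices here: both kinds of object support .dtype, .shape, and .tobytes(). A quick illustration:

import numpy as np

s = np.float32(1.0)
print(s.dtype, s.shape)   # float32 ()
print(s.tobytes())        # b'\x00\x00\x80?' on a little-endian machine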
def is_scalar_type(value):
    return value is None or \
        isinstance(value, (str, six.text_type, float, bool, Axis, AxesMap,
                           dict, slice, np.generic) + six.integer_types)
def assign_scalar(message, value):
    """
    Adds the appropriate scalar type of value to the protobuf message
    """
    if value is None:
        message.null_val = True
    elif isinstance(value, np.generic):
        # np.asscalar was removed in NumPy 1.23; value.item() is the
        # modern equivalent
        assign_scalar(message, np.asscalar(value))
    elif isinstance(value, (str, six.text_type)):
        message.string_val = value
    elif isinstance(value, np.dtype):
        message.dtype_val = dtype_to_protobuf(value)
    elif isinstance(value, float):
        message.double_val = value
    elif isinstance(value, bool):
        message.bool_val = value
    elif isinstance(value, six.integer_types):
        message.int_val = value
    elif isinstance(value, slice):
        slice_val = ops_pb.Slice()
        if value.start is not None:
            slice_val.start.value = value.start
        if value.step is not None:
            slice_val.step.value = value.step
        if value.stop is not None:
            slice_val.stop.value = value.stop
        message.slice_val.CopyFrom(slice_val)
    elif isinstance(value, dict):
        for key in value:
            assign_scalar(message.map_val.map[key], value[key])
        # This encodes an empty dict for deserialization
        assign_scalar(message.map_val.map['_ngraph_map_sentinel_'], '')
    elif isinstance(value, Axis):
        message.axis.CopyFrom(axis_to_protobuf(value))
    elif isinstance(value, AxesMap):
        message.axes_map.CopyFrom(axes_map_to_protobuf(value))
    else:
        raise unhandled_scalar_value(value)
def protobuf_to_op(pb_op):
    """
    This will convert a protobuf Op object into its corresponding Python
    object. But this cannot setup links to other ops (such as args,
    control_deps) since those ops may not exist yet.

    We have to wait until all ops are created before connecting them back
    up together in a second pass, so args, etc will be uninitialized.
    """
    cls = get_ngraph_op_cls(pb_op.op_type)

    # Skip the class constructor but we'll use the generic op constructor
    # because it sets a lot of helpful defaults
    py_op = cls.__new__(cls)
    op_graph.Op.__init__(py_op)
    py_op.name = str(pb_op.name)

    if 'valfun_value' in pb_op.attrs:
        valfun_value = pb_to_tensor(pb_op.attrs['valfun_value'].tensor)
        py_op.valfun = lambda x: valfun_value

    # op.uuid
    py_op.uuid = uuid.UUID(bytes=pb_op.uuid.uuid)

    # op.metadata and remaining keys
    ignored_keys = {'valfun_value', 'dtype', 'metadata'}
    remaining_keys = set(pb_op.attrs.keys()).difference(ignored_keys)

    for key in remaining_keys:
        if key == '_ngraph_ser_handle':
            py_op._ngraph_ser_handle = True
        if key.startswith('_ngraph_metadata_'):
            value = pb_op.attrs[key]
            py_op.metadata[key[17:]] = protobuf_attr_to_python(value)
        elif not key.startswith('_is_') and key not in EXCEPTION_ATTRIBUTES and \
                key.startswith('_'):
            continue
        else:
            value = pb_op.attrs[key]
            setattr(py_op, key, protobuf_attr_to_python(value))
    return py_op
def test_similarity():
    UserDataRow = Row(
        "normalized_channel", "geo_city", "subsession_length",
        "os", "locale", "active_addons", "bookmark_count",
        "tab_open_count", "total_uri", "unique_tlds"
    )
    test_user_1 = UserDataRow("release", "Boston", 10, "Windows", "en-US",
                              [], 1, 2, 3, 4)
    test_user_2 = UserDataRow("release", "notsoB", 10, "swodniW", "SU-ne",
                              [], 1, 2, 3, 4)
    test_user_3 = UserDataRow("release", "Boston", 0, "Windows", "en-US",
                              [], 0, 0, 0, 0)
    test_user_4 = UserDataRow("release", "notsoB", 0, "swodniW", "SU-ne",
                              [], 0, 0, 0, 0)
    # The following user contains a None value for "total_uri" and geo_city
    # (categorical feature). The latter should never be possible, but let's
    # be cautious.
    test_user_5 = UserDataRow("release", None, 10, "swodniW", "SU-ne",
                              [], 1, None, 3, 4)

    # Identical users should be very close (0 distance) and the result
    # must not be a Numpy number.
    similarity_result = taar_similarity.similarity_function(test_user_1, test_user_1)
    assert not isinstance(similarity_result, np.generic)
    assert np.isclose(similarity_result, 0.0)
    # Users with completely different categorical features but identical
    # continuous features should be slightly different.
    assert np.isclose(taar_similarity.similarity_function(test_user_1, test_user_2), 0.001)
    # Users with completely different continuous features but identical
    # categorical features should be very close.
    assert np.isclose(taar_similarity.similarity_function(test_user_1, test_user_3), 0.0)
    # Completely different users should be far away.
    assert taar_similarity.similarity_function(test_user_1, test_user_4) >= 1.0
    # Partial user information should not break the similarity function.
    assert taar_similarity.similarity_function(test_user_1, test_user_5)
def set_theta(self, theta):
    """
    Function sets theta. Can be called from constructor or outside.
    """
    if isinstance(theta, (np.ndarray, np.generic)):
        self.theta = theta
    elif isinstance(theta, list):
        self.theta = np.array(theta)
    else:
        self.theta = None
    self.history = self.set_list(
        self.history, 0, (np.copy(self.theta), 0, None))
    self.step_no = 0
def filter_invalid_json_values(self, dict):
    for k, v in six.iteritems(dict):
        if isinstance(v, (np.ndarray, np.generic)):
            dict[k] = v.tolist()
        # guard the NaN/inf check so non-numeric values (e.g. the lists
        # produced above) don't raise a TypeError
        if isinstance(v, float) and (math.isnan(v) or math.isinf(v)):
            dict[k] = -1
def invalid_json_values(obj):
    if isinstance(obj, np.generic):
        return obj.item()
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, bytes):
        return obj.decode('cp437')
    if isinstance(map, type) and isinstance(obj, map):
        # python 3 map
        return list(obj)
    raise TypeError('Invalid data type passed to json encoder: ' + type(obj).__name__)
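Hooks like this one are typically wired in through json.dumps's default= parameter, which is consulted only for objects the stock encoder cannot serialize. A small usage sketch under that assumption, with the function trimmed to its NumPy branches:

import json
import numpy as np

def invalid_json_values(obj):
    # trimmed copy of the example above: NumPy branches only
    if isinstance(obj, np.generic):
        return obj.item()
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    raise TypeError('Invalid data type passed to json encoder: ' + type(obj).__name__)

# default= is only called for types json can't handle natively
print(json.dumps({'w': np.arange(3), 'b': np.float32(0.5)},
                 default=invalid_json_values))
# {"w": [0, 1, 2], "b": 0.5}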
def contains(self, x):
    if isinstance(x, int):
        as_int = x
    elif isinstance(x, (np.generic, np.ndarray)) and (
            x.dtype.kind in np.typecodes['AllInteger'] and x.shape == ()):
        as_int = int(x)
    else:
        return False
    return as_int >= 0 and as_int < self.n
def feature_names(self, names):
    "Stores the text labels for features"
    if len(names) != self.num_features:
        raise ValueError("Number of names do not match the number of features!")
    if not isinstance(names, (Sequence, np.ndarray, np.generic)):
        raise ValueError("Input is not a sequence. "
                         "Ensure names are in the same order and length as features.")
    self.__feature_names = np.array(names)
def is_np_scalar(x):
    """
    Check np types like np.int64
    """
    return isinstance(x, np.generic)
def contains(self, x):
    if not isinstance(x, (tuple, list, np.generic, np.ndarray)):
        return False
    return np.shape(x) == self.shape and np.sum(x) == 1 and np.max(x) == 1
def __setitem__(self, in_slice, value):
    """Set ndarray value"""
    if (not isinstance(in_slice, slice) or
            in_slice.start is not None or
            in_slice.stop is not None):
        raise ValueError('Array only support set from numpy array')
    if isinstance(value, NDArray):
        if value.handle is not self.handle:
            value.copyto(self)
    elif isinstance(value, (np.ndarray, np.generic)):
        self._sync_copyfrom(value)
    else:
        raise TypeError('type %s not supported' % str(type(value)))
def _validate_date_like_dtype(dtype):
    try:
        typ = np.datetime_data(dtype)[0]
    except ValueError as e:
        raise TypeError('%s' % e)
    if typ != 'generic' and typ != 'ns':
        raise ValueError('%r is too specific of a frequency, try passing %r'
                         % (dtype.name, dtype.type.__name__))
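For context, np.datetime_data returns the (unit, count) pair of a datetime-like dtype, and a unit-less datetime64 reports the unit 'generic', which is what the check above accepts alongside 'ns':

import numpy as np

print(np.datetime_data(np.dtype('datetime64')))      # ('generic', 1)
print(np.datetime_data(np.dtype('datetime64[ns]')))  # ('ns', 1)
print(np.datetime_data(np.dtype('datetime64[D]')))   # ('D', 1) -- rejected above as "too specific"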
def _get_dtype_from_object(dtype):
    """Get a numpy dtype.type-style object.

    This handles the datetime64[ns] and datetime64[ns, TZ] compat

    Notes
    -----
    If nothing can be found, returns ``object``.
    """
    # type object from a dtype
    if isinstance(dtype, type) and issubclass(dtype, np.generic):
        return dtype
    elif is_categorical(dtype):
        return CategoricalDtype().type
    elif is_datetimetz(dtype):
        return DatetimeTZDtype(dtype).type
    elif isinstance(dtype, np.dtype):  # dtype object
        try:
            _validate_date_like_dtype(dtype)
        except TypeError:
            # should still pass if we don't have a datelike
            pass
        return dtype.type
    elif isinstance(dtype, compat.string_types):
        if dtype == 'datetime' or dtype == 'timedelta':
            dtype += '64'
        try:
            return _get_dtype_from_object(getattr(np, dtype))
        except (AttributeError, TypeError):
            # handles cases like _get_dtype(int)
            # i.e., python objects that are valid dtypes (unlike
            # user-defined types, in general)
            # TypeError handles the float16 typecode of 'e'
            # further handle internal types
            pass

    return _get_dtype_from_object(np.dtype(dtype))
def to_numpy(var):
    # if ia.is_numpy_array(var):
    if isinstance(var, (np.ndarray, np.generic)):
        return var
    else:
        return var.data.cpu().numpy()