The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.uint64().
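Before the project code, a minimal sketch (not taken from any of the projects below) of the numpy.uint64 behaviour that motivates many of these examples: arithmetic is fixed-width and wraps modulo 2**64, and on pre-NEP 50 numpy mixing a uint64 scalar with a plain Python int can promote to float64, which is why several examples cast shift amounts to np.uint64 explicitly.

import numpy as np

x = np.uint64(2**64 - 1)                     # largest representable value
print(int(x))                                # 18446744073709551615
print(np.uint64(2**64 - 1) + np.uint64(1))   # 0: wraps modulo 2**64 (numpy may warn)
print(np.uint64(8) >> np.uint64(2))          # 2: shift amount cast to uint64 on purpose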
def get_table_and_array_for_set_of_dicts(dicts):
    for j in range(len(dicts)):
        if not j:
            all_keys = np.array([i for i in dicts[j].keys()], dtype=np.uint64)
        else:
            all_keys = np.concatenate([all_keys, np.array([i for i in dicts[j].keys()], dtype=np.uint64)])

    unique_keys = sorted(set(all_keys))  # The sorted is so that the index of 0 will always be 0

    index_lookup_table = create_index_table(
        np.array(sorted([np.uint64(key) for key in unique_keys]), dtype=np.uint64))

    array = np.zeros(shape=[len(dicts), len(unique_keys)], dtype=np.uint64)
    for square_num, dict in enumerate(dicts):
        for key, value in dict.items():
            array[square_num][
                khash_get(ffi.cast("void *", index_lookup_table), np.uint64(key), np.uint64(0))] = np.uint64(value)

    return index_lookup_table, array
def attacks_mask(board_state, square):
    bb_square = BB_SQUARES[square]

    if bb_square & board_state.pawns:
        if bb_square & board_state.occupied_w:
            return BB_PAWN_ATTACKS[WHITE][square]
        else:
            return BB_PAWN_ATTACKS[BLACK][square]
    elif bb_square & board_state.knights:
        return BB_KNIGHT_ATTACKS[square]
    elif bb_square & board_state.kings:
        return BB_KING_ATTACKS[square]
    else:
        attacks = np.uint64(0)
        if bb_square & board_state.bishops or bb_square & board_state.queens:
            attacks = DIAG_ATTACK_ARRAY[square][
                khash_get(DIAG_ATTACK_INDEX_LOOKUP_TABLE, BB_DIAG_MASKS[square] & board_state.occupied, 0)]
        if bb_square & board_state.rooks or bb_square & board_state.queens:
            attacks |= (RANK_ATTACK_ARRAY[square][
                khash_get(RANK_ATTACK_INDEX_LOOKUP_TABLE, BB_RANK_MASKS[square] & board_state.occupied, 0)] |
                FILE_ATTACK_ARRAY[square][
                khash_get(FILE_ATTACK_INDEX_LOOKUP_TABLE, BB_FILE_MASKS[square] & board_state.occupied, 0)])
        return attacks
def _get_dtype_maps():
    """ Get dictionaries to map numpy data types to ITK types and the
    other way around.
    """
    # Define pairs
    tmp = [(np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
           (np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
           (np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
           (np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
           (np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG')]

    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__

    # Done
    return map1, map2
def __init__(self, *args, **kwds):
    import numpy
    self.dst_types = [numpy.uint8, numpy.uint16, numpy.uint32]
    try:
        self.dst_types.append(numpy.uint64)
    except AttributeError:
        pass
    pygame.display.init()
    try:
        unittest.TestCase.__init__(self, *args, **kwds)
        self.sources = [self._make_src_surface(8),
                        self._make_src_surface(16),
                        self._make_src_surface(16, srcalpha=True),
                        self._make_src_surface(24),
                        self._make_src_surface(32),
                        self._make_src_surface(32, srcalpha=True)]
    finally:
        pygame.display.quit()
def _check_valid_data(self, data):
    """Checks that the incoming data is a 2 x #elements ndarray of ints.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to verify.

    Raises
    ------
    ValueError
        If the data is not of the correct shape or type.
    """
    if data.dtype.type != np.int8 and data.dtype.type != np.int16 \
            and data.dtype.type != np.int32 and data.dtype.type != np.int64 \
            and data.dtype.type != np.uint8 and data.dtype.type != np.uint16 \
            and data.dtype.type != np.uint32 and data.dtype.type != np.uint64:
        raise ValueError('Must initialize image coords with a numpy int ndarray')
    if data.shape[0] != 2:
        raise ValueError('Illegal data array passed to image coords. Must have 2 coordinates')
    if len(data.shape) > 2:
        raise ValueError('Illegal data array passed to point cloud. Must have 1 or 2 dimensions')
def DecodeValues(block, values, encoded_values, bz, by, bx, nbits):
    # get the number of values per 8 byte uint64
    if (nbits > 0):
        values_per_uint64 = 64 // nbits  # integer division; plain / breaks range() on Python 3
        ie = 0
        for value in encoded_values:
            for i in range(0, values_per_uint64):
                lower_bits_to_remove = ((values_per_uint64 - i - 1) * nbits)
                values[ie] = ((value >> lower_bits_to_remove) % 2**nbits)
                ie += 1

    ii = 0
    # get the lookup table
    for iw in range(0, bz):
        for iv in range(0, by):
            for iu in range(0, bx):
                block[iw, iv, iu] = values[ii]
                ii += 1

    return block, values
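To make the bit-unpacking concrete, a hypothetical round trip for the decoder above (the 1x1x2 block shape and the packed values are made up): with nbits=32, each 64-bit word holds two values, most significant first.

import numpy as np

encoded = [(7 << 32) | 9]                    # one 64-bit word packing the values 7 and 9
block = np.zeros((1, 1, 2), dtype=np.uint64)
values = [0, 0]
block, values = DecodeValues(block, values, encoded, 1, 1, 2, 32)
print(values)                                # [7, 9]
print(block)                                 # [[[7 9]]]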
def to_best_type(array):
    '''Convert array to lowest possible bitrate.
    '''
    ui8 = np.iinfo(np.uint8)
    ui8 = ui8.max
    ui16 = np.iinfo(np.uint16)
    ui16 = ui16.max
    ui32 = np.iinfo(np.uint32)
    ui32 = ui32.max
    ui64 = np.iinfo(np.uint64)
    ui64 = ui64.max

    if array.max() <= ui64:
        new_type = np.uint64
    if array.max() <= ui32:
        new_type = np.uint32
    if array.max() <= ui16:
        new_type = np.uint16
    if array.max() <= ui8:
        new_type = np.uint8

    return array.astype(new_type)
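A quick usage note for to_best_type above (the sample values are made up): because the checks run from widest to narrowest dtype, the last one that fits wins, so the array is downcast to the smallest unsigned type that can hold its maximum.

import numpy as np

labels = np.array([0, 7, 250], dtype=np.uint64)
print(to_best_type(labels).dtype)           # uint8: the maximum, 250, fits in 8 bits
print(to_best_type(labels + 70000).dtype)   # uint32: 70250 needs more than 16 bits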
def load_data(name='ac3', N=-1, prefix=None, gold=False):
    '''Load data
    '''
    if not 'mri' in name:
        if gold:
            filename = '~/compresso/data/' + name + '/gold/' + name + '_gold.h5'
        else:
            filename = '~/compresso/data/' + name + '/rhoana/' + name + '_rhoana.h5'
        with h5py.File(os.path.expanduser(filename), 'r') as hf:
            output = np.array(hf['main'], dtype=np.uint64)
    else:
        filename = '~/compresso/data/MRI/' + name + '.h5'
        with h5py.File(os.path.expanduser(filename), 'r') as hf:
            output = np.array(hf['main'], dtype=np.uint64)

    if (not N == -1):
        output = output[0:N, :, :]

    return output
def test_int(self):
    for st, ut, s in [(np.int8, np.uint8, 8), (np.int16, np.uint16, 16),
                      (np.int32, np.uint32, 32), (np.int64, np.uint64, 64)]:
        for i in range(1, s):
            assert_equal(hash(st(-2**i)), hash(-2**i),
                         err_msg="%r: -2**%d" % (st, i))
            assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (st, i - 1))
            assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (st, i))

            i = max(i - 1, 1)
            assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (ut, i - 1))
            assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (ut, i))
def _write(self, samples, keyvals):
    """Write new metadata to the Digital Metadata channel.

    This function does no input checking, see `write` for that.

    Parameters
    ----------
    samples : 1-D numpy array of type uint64 sorted in ascending order
        An array of sample indices, given in the number of samples since
        the epoch (time_since_epoch*sample_rate).

    keyvals : iterable of iterables same length as `samples`
        Each element of this iterable corresponds to a sample in `samples`
        and should be another iterable that produces (key, value) pairs to
        write for that sample.

    """
    grp_iter = self._sample_group_generator(samples)
    for grp, keyval in zip(grp_iter, keyvals):
        for key, val in keyval:
            if val is not None:
                grp.create_dataset(key, data=val)
def seed(self, seed=None):
    """Resets the state of the random number generator with a seed.

    .. seealso::
        :func:`cupy.random.seed` for full documentation,
        :meth:`numpy.random.RandomState.seed`

    """
    if seed is None:
        try:
            seed_str = binascii.hexlify(os.urandom(8))
            seed = numpy.uint64(int(seed_str, 16))
        except NotImplementedError:
            seed = numpy.uint64(time.clock() * 1000000)
    else:
        seed = numpy.asarray(seed).astype(numpy.uint64, casting='safe')

    curand.setPseudoRandomGeneratorSeed(self._generator, seed)
    curand.setGeneratorOffset(self._generator, 0)
def get_random_state():
    """Gets the state of the random number generator for the current device.

    If the state for the current device is not created yet, this function
    creates a new one, initializes it, and stores it as the state for the
    current device.

    Returns:
        RandomState: The state of the random number generator for the
        device.

    """
    dev = cuda.Device()
    rs = _random_states.get(dev.id, None)
    if rs is None:
        seed = os.getenv('CUPY_SEED')
        if seed is None:
            seed = os.getenv('CHAINER_SEED')
        if seed is not None:
            seed = numpy.uint64(int(seed))
        rs = RandomState(seed)
        rs = _random_states.setdefault(dev.id, rs)
    return rs
def test_dtype2(self, dtype):
    dtype = numpy.dtype(dtype)

    # randint does not support 64 bit integers
    if dtype in (numpy.int64, numpy.uint64):
        return

    iinfo = numpy.iinfo(dtype)
    size = (10000,)
    x = random.randint(iinfo.min, iinfo.max + 1, size, dtype)
    self.assertEqual(x.dtype, dtype)
    self.assertLessEqual(iinfo.min, min(x))
    self.assertLessEqual(max(x), iinfo.max)

    # Lower bound check
    with self.assertRaises(ValueError):
        random.randint(iinfo.min - 1, iinfo.min + 10, size, dtype)

    # Upper bound check
    with self.assertRaises(ValueError):
        random.randint(iinfo.max - 10, iinfo.max + 2, size, dtype)
def _all_day_bars_of(self, instrument):
    i = self._index_of(instrument)
    mongo_data = self._day_bars[i][instrument.order_book_id].find({}, {"_id": 0})
    fields = mongo_data[0].keys()
    fields.remove('date')
    result = []
    dtype = np.dtype(getType(i))
    result = np.empty(shape=(mongo_data.count(),), dtype=dtype)

    for f in fields:
        bar_attr = []
        mongo_data = self._day_bars[i][instrument.order_book_id].find({}, {"_id": 0})
        for bar in mongo_data:
            bar_attr.append(bar[f])
        result[f] = np.array(bar_attr)

    bar_attr = []
    mongo_data = self._day_bars[i][instrument.order_book_id].find({}, {"_id": 0})
    for bar in mongo_data:
        bar_attr.append(np.array(bar['date']).astype(np.uint64) * 1000000)
    result['datetime'] = np.array(bar_attr)

    return result
def contains(self, order_book_id, dates):
    try:
        s, e = self._index[order_book_id]
    except KeyError:
        return [False] * len(dates)

    def _to_dt_int(d):
        if isinstance(d, (int, np.int64, np.uint64)):
            if d > 100000000:
                return int(d // 1000000)
        else:
            return d.year*10000 + d.month*100 + d.day

    date_set = self._get_set(s, e)

    return [(_to_dt_int(d) in date_set) for d in dates]
def numpy2string(dtype):
    if dtype == np.int8:
        return 'i8'
    elif dtype == np.int16:
        return 'i16'
    elif dtype == np.int32:
        return 'i32'
    elif dtype == np.int64:
        return 'i64'
    elif dtype == np.uint8:
        return 'u8'
    elif dtype == np.uint16:
        return 'u16'
    elif dtype == np.uint32:
        return 'u32'
    elif dtype == np.uint64:
        return 'u64'
    elif dtype == np.float16:
        return 'f16'
    elif dtype == np.float32:
        return 'f32'
    elif dtype == np.float64:
        return 'f64'
    elif dtype == np.float128:
        return 'f128'
    elif dtype == np.complex64:
        return 'cf32'
    elif dtype == np.complex128:
        return 'cf64'
    elif dtype == np.complex256:
        return 'cf128'
    else:
        raise TypeError("Unsupported dtype: " + str(dtype))
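A quick check of the mapping above (hypothetical calls; both a scalar type and a numpy.dtype instance compare equal against the type objects here):

import numpy as np

print(numpy2string(np.uint64))             # 'u64'
print(numpy2string(np.dtype('float32')))   # 'f32'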
def make_array(shape=(1,), dtype=np.float32, shared=False, fill_val=None):
    np_type_to_ctype = {np.float32: ctypes.c_float,
                        np.float64: ctypes.c_double,
                        np.bool: ctypes.c_bool,
                        np.uint8: ctypes.c_ubyte,
                        np.uint64: ctypes.c_ulonglong}

    if not shared:
        np_arr = np.empty(shape, dtype=dtype)
    else:
        numel = np.prod(shape)
        arr_ctypes = sharedctypes.RawArray(np_type_to_ctype[dtype], numel)
        np_arr = np.frombuffer(arr_ctypes, dtype=dtype, count=numel)
        np_arr.shape = shape

    if fill_val is not None:
        np_arr[...] = fill_val

    return np_arr
def dhash(img):
    """Compute a perceptual hash of an image.

    Algo explained here:
    https://blog.bearstech.com/2014/07/numpy-par-lexemple-une-implementation-de-dhash.html

    :param img: an image
    :type img: numpy.ndarray
    :return: a perceptual hash of img coded on 64 bits
    :rtype: int
    """
    TWOS = np.array([2 ** n for n in range(7, -1, -1)])
    BIGS = np.array([256 ** n for n in range(7, -1, -1)], dtype=np.uint64)
    img = rgb2grey(resize(img, (9, 8)))
    h = np.array([0] * 8, dtype=np.uint8)
    for i in range(8):
        h[i] = TWOS[img[i] > img[i + 1]].sum()
    return (BIGS * h).sum()
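A usage sketch for dhash above: near-duplicate images should produce hashes that differ in only a few bits, so a Hamming distance over the 64-bit values makes a reasonable similarity test. Here image_a and image_b are placeholder arrays and the threshold of 10 is an arbitrary assumption to tune per application.

h1, h2 = int(dhash(image_a)), int(dhash(image_b))    # image_a/image_b: any RGB arrays
distance = bin(h1 ^ h2).count('1')                   # number of differing bits
print('similar' if distance <= 10 else 'different')  # threshold is an assumption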
def get_rgb_mask(img, debug=False):
    assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
    assert img.ndim == 3, 'skin detection can only work on color images'
    logger.debug('getting rgb mask')

    lower_thresh = numpy.array([45, 52, 108], dtype=numpy.uint8)
    upper_thresh = numpy.array([255, 255, 255], dtype=numpy.uint8)

    mask_a = cv2.inRange(img, lower_thresh, upper_thresh)
    mask_b = 255 * ((img[:, :, 2] - img[:, :, 1]) / 20)
    mask_c = 255 * ((numpy.max(img, axis=2) - numpy.min(img, axis=2)) / 20)

    mask_d = numpy.bitwise_and(numpy.uint64(mask_a), numpy.uint64(mask_b))
    # mask = numpy.zeros_like(mask_d, dtype=numpy.uint8)
    msk_rgb = numpy.bitwise_and(numpy.uint64(mask_c), numpy.uint64(mask_d))
    # msk_rgb = cv2.fromarray(mask_rgb)

    msk_rgb[msk_rgb < 128] = 0
    msk_rgb[msk_rgb >= 128] = 1

    if debug:
        scripts.display('input', img)
        scripts.display('mask_rgb', msk_rgb)

    return msk_rgb.astype(float)
def default(self, obj):
    # convert dates and numpy objects to a JSON-serializable format
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    elif type(obj) in (np.int_, np.intc, np.intp, np.int8, np.int16,
                       np.int32, np.int64, np.uint8, np.uint16,
                       np.uint32, np.uint64):
        return int(obj)
    elif type(obj) in (np.bool_,):
        return bool(obj)
    elif type(obj) in (np.float_, np.float16, np.float32, np.float64,
                       np.complex_, np.complex64, np.complex128):
        return float(obj)

    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
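A hypothetical use of the encoder method above, assuming it is defined on a json.JSONEncoder subclass called NumpyJSONEncoder (the class name is made up): json.dumps only calls default() for objects it cannot serialize natively, such as numpy scalars.

import json
import numpy as np

# NumpyJSONEncoder: a made-up name for a json.JSONEncoder subclass using default() above
print(json.dumps({'count': np.uint64(7)}, cls=NumpyJSONEncoder))   # '{"count": 7}'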
def scan_genotypes(self, genotypes, sub_ids=None, db=None):
    """Pass through all genotypes and return only the indices of those that pass the filter.

    :param genotypes: np.ndarray[uint64, dim=2]
    :rtype: np.ndarray[uint64]
    """
    if self.shortcut:
        return np.zeros(0)
    N = len(genotypes)
    if sub_ids is not None:
        variant_ids = sub_ids
    elif self.val == 'x_linked' and db:
        variant_ids = genotypes_service(db).chrX
    else:
        variant_ids = np.asarray(range(1, N+1), dtype=np.uint64)
    active_idx = np.asarray(self.ss.active_idx, dtype=np.uint16)
    conditions = self.conditions_vector
    is_and = self.merge_op == AND
    if len(conditions) == 0:
        passing = variant_ids
    else:
        passing = self.parallel_apply_bitwise(genotypes, variant_ids, conditions, active_idx, is_and)
    return passing
def scan_genotypes_compound(self, genotypes, batches, parallel=True):
    """Scan the *genotypes* array for compounds. Variant ids are treated in
    batches, one list of variant_ids per gene."""
    if self.shortcut:
        passing, sources, pairs = np.zeros(0), {}, []
    else:
        N = len(genotypes)
        active_idx = np.asarray(self.ss.active_idx, dtype=np.uint16)
        batches = list(batches.items())
        if parallel:
            passing, sources, pairs = self.parallel_batches(genotypes, batches, active_idx, N)
        else:
            passing, sources, pairs = self.process_batches(genotypes, batches, active_idx, N)
        passing = np.array(list(passing), dtype=np.uint64)
        passing.sort()
    return passing, sources, pairs
def enhchr(indexes):
    """chromosome length vs. enhancer numbers on it"""
    lens = np.array([len(sequences[keys[i]]) for i in range(24)], dtype=float)
    nums = np.zeros((24,))
    for index in indexes:
        chrkey = index[0]
        nums[keys.index(chrkey)] += 1
    print("The length of 24 Chromosomes are \n{}".format(np.array(lens, dtype=np.uint64)))
    print("The number of enhancers on each chromosome are \n{}".format(np.array(nums, dtype=np.uint64)))
    ind = np.arange(24)
    w = 0.35
    fig, ax = plt.subplots()
    rects1 = ax.bar(ind, lens / np.sum(lens), w, color='r')
    rects2 = ax.bar(ind + w, nums / np.sum(nums), w, color='y')
    ax.set_ylabel('Chrom Length & #Enhancers')
    ax.set_xticks(ind + w)
    ax.set_xticklabels(keys)
    ax.legend((rects1[0], rects2[0]), ('Chrom Length (%)', '#Enhancers (%)'))
    plt.show()
def _getchunk(self, z, y, x, zsize, ysize, xsize):
    """Internal function to retrieve data.
    """
    data = None

    # interface is the same for labels and raw arrays but the function is stateless
    # and can benefit from extra compression possible in labels in some use cases
    if self.dtype == ArrayDtype.uint8:
        data = self.ns.get_array8bit3D(self.instancename, (zsize, ysize, xsize), (z, y, x), self.islabel3D)
    elif self.dtype == ArrayDtype.uint16:
        data = self.ns.get_array16bit3D(self.instancename, (zsize, ysize, xsize), (z, y, x), self.islabel3D)
    elif self.dtype == ArrayDtype.uint32:
        data = self.ns.get_array32bit3D(self.instancename, (zsize, ysize, xsize), (z, y, x), self.islabel3D)
    elif self.dtype == ArrayDtype.uint64:
        data = self.ns.get_array64bit3D(self.instancename, (zsize, ysize, xsize), (z, y, x), self.islabel3D)
    else:
        raise DicedException("Invalid datatype for array")

    return data
def seed(self, seed=None):
    """Resets the state of the random number generator with a seed.

    .. seealso::
        :func:`cupy.random.seed` for full documentation,
        :meth:`numpy.random.RandomState.seed`

    """
    if seed is None:
        try:
            seed_str = binascii.hexlify(os.urandom(8))
            seed = numpy.uint64(int(seed_str, 16))
        except NotImplementedError:
            seed = numpy.uint64(time.clock() * 1000000)
    else:
        seed = numpy.uint64(seed)

    curand.setPseudoRandomGeneratorSeed(self._generator, seed)
    curand.setGeneratorOffset(self._generator, 0)
def test_constructor_overflow_int64(self):
    values = np.array([2 ** 64 - i for i in range(1, 10)],
                      dtype=np.uint64)

    result = DataFrame({'a': values})
    self.assertEqual(result['a'].dtype, object)

    # #2355
    data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
                   (8921811264899370420, 45),
                   (long(17019687244989530680), 270),
                   (long(9930107427299601010), 273)]
    dtype = [('uid', 'u8'), ('score', 'u8')]
    data = np.zeros((len(data_scores),), dtype=dtype)
    data[:] = data_scores
    df_crawls = DataFrame(data)
    self.assertEqual(df_crawls['uid'].dtype, object)
def centroid(im, labels, return_mm3=True):
    """
    Centroid (center of mass, barycenter) of a list of labels.
    :param im:
    :param labels: list of labels, e.g. [3] or [2, 3, 45]
    :param return_mm3: if True the answer is in mm, if False in voxel indexes.
    :return: list of centroids, one for each label in the input order.
    """
    centers_of_mass = centroid_array(im.get_data(), labels)
    ans = []
    if return_mm3:
        for cm in centers_of_mass:
            if isinstance(cm, np.ndarray):
                ans += [im.affine[:3, :3].dot(cm.astype(np.float64))]
            else:
                ans += [cm]
    else:
        for cm in centers_of_mass:
            if isinstance(cm, np.ndarray):  # else it is np.nan.
                ans += [np.round(cm).astype(np.uint64)]
            else:
                ans += [cm]
    return ans
def execute(self, query, params=[]):
    if len(params) > 0 and len(query.split(';')) > 1:
        raise Exception("Multiple queries with parameters is unsupported")

    # Expand lists in parameters
    prev = -1
    new_params = []
    for p in params:
        prev = query.find('?', prev+1)
        if type(p) in [np.uint16, np.uint32, np.uint64]:
            new_params.append(np.int64(p))  # sqlite is really fussy about this
        elif type(p) in [list, tuple]:
            rep = "(" + ",".join("?"*len(p)) + ")"
            query = query[:prev] + rep + query[prev+1:]
            prev += len(rep)
            new_params.extend(p)
        else:
            new_params.append(p)

    for q in query.split(';'):
        self.cur.execute(q, tuple(new_params))

    return self.cur
def ctz(val, _type):
    cnt = int()
    power = int()
    if _type == 'uint32':
        bits = np.uint32(val)
        while power < 32:
            if val & 2**power == 0:
                cnt += 1
            else:
                break
            power += 1
    elif _type == 'uint64':
        bits = bin(np.uint64(val))
        while power < 64:
            if val & 2**power == 0:
                cnt += 1
            else:
                break
            power += 1
    else:
        raise Exception(Colors.red + "unsupported type passed to ctz." + Colors.ENDC)
    return cnt
def pop_cnt(val, _type):
    cnt = int()
    power = int()
    if _type == 'uint32':
        bits = np.uint32(val)
        while power < 32:
            if val & 2**power != 0:
                cnt += 1
            power += 1
    elif _type == 'uint64':
        bits = bin(np.uint64(val))
        while power < 64:
            if val & 2**power != 0:
                cnt += 1
            power += 1
    else:
        raise Exception(Colors.red + "unsupported type passed to pop_cnt." + Colors.ENDC)
    return cnt
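A brief sanity check for the two bit-counting helpers above: ctz counts trailing zero bits below the lowest set bit, pop_cnt counts all set bits; both test val one bit position at a time, so they are O(width) rather than O(set bits).

print(ctz(0b10100, 'uint32'))       # 2: two zero bits below the lowest set bit
print(pop_cnt(0b10100, 'uint64'))   # 2: two bits set in total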
def test_int64_uint64_corner_case(self):
    # When stored in Numpy arrays, `lbnd` is cast
    # as np.int64, and `ubnd` is cast as np.uint64.
    # Checking whether `lbnd` >= `ubnd` used to be
    # done solely via direct comparison, which is incorrect
    # because when Numpy tries to compare both numbers,
    # it casts both to np.float64 because there is
    # no integer superset of np.int64 and np.uint64. However,
    # `ubnd` is too large to be represented in np.float64,
    # causing it to be rounded down to np.iinfo(np.int64).max,
    # leading to a ValueError because `lbnd` now equals
    # the new `ubnd`.

    dt = np.int64
    tgt = np.iinfo(np.int64).max

    lbnd = np.int64(np.iinfo(np.int64).max)
    ubnd = np.uint64(np.iinfo(np.int64).max + 1)

    # None of these function calls should
    # generate a ValueError now.
    actual = mt19937.randint(lbnd, ubnd, dtype=dt)
    assert_equal(actual, tgt)
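The pitfall those comments describe can be shown in isolation (a minimal sketch): once both bounds pass through float64, they become indistinguishable, because float64 has only 53 bits of mantissa.

import numpy as np

lbnd = np.int64(np.iinfo(np.int64).max)        # 2**63 - 1
ubnd = np.uint64(np.iinfo(np.int64).max + 1)   # 2**63
print(float(lbnd) == float(ubnd))              # True: both round to 9.223372036854776e+18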
def _call_nms_kernel(bbox, thresh):
    n_bbox = bbox.shape[0]
    threads_per_block = 64
    col_blocks = np.ceil(n_bbox / threads_per_block).astype(np.int32)
    blocks = (col_blocks, col_blocks, 1)
    threads = (threads_per_block, 1, 1)

    mask_dev = cp.zeros((n_bbox * col_blocks,), dtype=np.uint64)
    bbox = cp.ascontiguousarray(bbox, dtype=np.float32)
    kern = _load_kernel('nms_kernel', _nms_gpu_code)
    kern(blocks, threads, args=(cp.int32(n_bbox), cp.float32(thresh),
                                bbox, mask_dev))

    mask_host = mask_dev.get()
    selection, n_selec = _nms_gpu_post(
        mask_host, n_bbox, threads_per_block, col_blocks)
    return selection, n_selec
def dump(result, fname, no_prefix=False):
    """Save result to file."""
    result = result.eval() if hasattr(result, "eval") else result
    result = np.asarray(result)
    if result.shape == ():  # savetxt has problems with scalars
        result = np.expand_dims(result, 0)
    if no_prefix:
        location = os.getcwd()+"/"+fname
    else:
        location = os.getcwd()+"/data/"+fname
    # special handling for integer datatypes
    if (
            result.dtype == np.uint8 or result.dtype == np.int8 or
            result.dtype == np.uint16 or result.dtype == np.int16 or
            result.dtype == np.uint32 or result.dtype == np.int32 or
            result.dtype == np.uint64 or result.dtype == np.int64
    ):
        np.savetxt(location, result, fmt="%d", delimiter=',')
    else:
        np.savetxt(location, result, delimiter=',')
    print(location)
def time_seconds(tc_array, year):
    """Return the time object from the timecodes
    """
    tc_array = np.array(tc_array, copy=True)
    word = tc_array[:, 0]
    day = word >> 1
    word = tc_array[:, 1].astype(np.uint64)
    msecs = ((127) & word) * 1024
    word = tc_array[:, 2]
    msecs += word & 1023
    msecs *= 1024
    word = tc_array[:, 3]
    msecs += word & 1023
    return (np.datetime64(
        str(year) + '-01-01T00:00:00Z', 's') +
        msecs[:].astype('timedelta64[ms]') +
        (day - 1)[:].astype('timedelta64[D]'))
def test_channel_uint64_wrong_dimensions(self):
    """ Test posting with the wrong xyz dims"""

    test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
    test_mat = test_mat.astype(np.uint64)
    h = test_mat.tobytes()
    bb = blosc.compress(h, typesize=64)

    # Create request
    factory = APIRequestFactory()
    request = factory.post('/' + version + '/cutout/col1/exp1/layer1/0/0:100/0:128/0:16/', bb,
                           content_type='application/blosc')
    # log in user
    force_authenticate(request, user=self.user)

    # Make request
    response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='layer1',
                                resolution='0', x_range='0:100', y_range='0:128', z_range='0:16',
                                t_range=None)
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_channel_uint64_wrong_dimensions_numpy(self):
    """ Test posting with the wrong xyz dims using the numpy interface"""

    test_mat = np.random.randint(1, 2 ** 16 - 1, (16, 128, 128))
    test_mat = test_mat.astype(np.uint64)
    bb = blosc.pack_array(test_mat)

    # Create request
    factory = APIRequestFactory()
    request = factory.post('/' + version + '/cutout/col1/exp1/layer1/0/0:100/0:128/0:16/', bb,
                           content_type='application/blosc-python')
    # log in user
    force_authenticate(request, user=self.user)

    # Make request
    response = Cutout.as_view()(request, collection='col1', experiment='exp1', channel='layer1',
                                resolution='0', x_range='0:100', y_range='0:128', z_range='0:16',
                                t_range=None)
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def _is_class_a_primitive(cls):
    '''
    Check if class is a number or string including numpy numbers
    :param cls: any class
    :return: True if class is a primitive class, else False
    '''
    primitives = [
        np.float16, np.float32, np.float64, np.float128,
        np.int8, np.int16, np.int32, np.int64,
        bool, str,
        np.uint8, np.uint16, np.uint32, np.uint64,
        int, float
    ]
    return cls in primitives
def decompress_seq(x, length, bits=64):
    x = np.uint64(x)
    assert length <= (bits/2 - 1)
    if x & (1L << (bits-1)):
        return 'N' * length
    result = bytearray(length)
    for i in xrange(length):
        result[(length-1)-i] = tk_seq.NUCS[x & np.uint64(0b11)]
        x = x >> np.uint64(2)
    return str(result)
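A hypothetical round trip for decompress_seq above, assuming tk_seq.NUCS is the list ['A', 'C', 'G', 'T'] (an assumption about the library's 2-bit encoding, with the first base in the highest bits and the top bit reserved as an all-'N' sentinel); the function itself is Python 2 code (1L, xrange).

# 0b00_01_10_11 packs A, C, G, T from high bits to low (assuming NUCS order above):
print(decompress_seq(0b00011011, 4))   # 'ACGT'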
def __init__(self, buf, offset = 0, idmap = None, idmap_size = 1024):
    if idmap is None:
        idmap = Cache(idmap_size)
    self.offset = offset
    if offset != 0:
        self.buf = buf = buffer(buf, offset)
    else:
        self.buf = buf

    self.total_size, self.index_offset, self.index_elements = self._Header.unpack_from(buf, 0)
    self.index = numpy.frombuffer(buf,
                                  offset = self.index_offset,
                                  dtype = numpy.uint64,
                                  count = self.index_elements)
    self.idmap = idmap

    if self.index_elements > 0 and self.index[0] >= (self._Header.size + self._NewHeader.size):
        # New version, most likely
        self.version, min_reader_version, self.schema_offset, self.schema_size = self._NewHeader.unpack_from(
            buf, self._Header.size)
        if self._CURRENT_VERSION < min_reader_version:
            raise ValueError((
                "Incompatible buffer, this buffer needs a reader with support for version %d at least, "
                "this reader supports up to version %d") % (
                    min_reader_version,
                    self._CURRENT_VERSION
                ))
        if self.schema_offset and self.schema_size:
            if self.schema_offset > len(buf) or (self.schema_size + self.schema_offset) > len(buf):
                raise ValueError("Corrupted input - bad schema location")
            stored_schema = cPickle.loads(bytes(buffer(buf, self.schema_offset, self.schema_size)))
            if not isinstance(stored_schema, Schema):
                raise ValueError("Corrupted input - unrecognizable schema")
            if self.schema is None or not self.schema.compatible(stored_schema):
                self.schema = stored_schema
        elif self.schema is None:
            raise ValueError("Cannot map schema-less buffer without specifying schema")
    elif self.index_elements > 0:
        raise ValueError("Cannot reliably map version-0 buffers")
def normalise_data(self, timestamp, data):
    """ Convert the data if needed """
    if self._passthrough:
        return

    i = 0
    for datum in data:
        if self.needsfixup[i] is None:
            i += 1
            continue

        if len(datum) == 0:
            # Ignore entries with no data - this typically occurs when the
            # plugin requests multiple metrics and the metrics do not all appear
            # at every timestep
            i += 1
            continue

        if self.accumulator[i] is None:
            self.accumulator[i] = numpy.array(datum)
            self.last[i] = numpy.array(datum)
        else:
            self.accumulator[i] += (datum - self.last[i]) % numpy.uint64(1L << self.needsfixup[i]['range'])
            numpy.copyto(self.last[i], datum)
            numpy.copyto(datum, self.accumulator[i])

        i += 1
def batch(self):
    """Return a batch of samples sampled uniformly from the database.

    Returns
    -------
    (numpy.ndarray, ...)
        The sample values are returned in a tuple in the order of the `keys`
        specified by the user.
    """
    # Count the number of keys (i.e. data objects)
    nb_keys = len(self.keys)

    data = []
    for key in self.keys:
        data.append(np.zeros((self.batch_size,) + self.spec[key]['shape'],
                             dtype=self.spec[key]['dtype']))

    while True:
        # Sample indices uniformly
        batch_idxs = self.rng.randint(self.db.nb_samples, size=self.batch_size, dtype=np.uint64)

        for i, v in enumerate(batch_idxs):
            sample = self.db.get_sample(v)
            for k in range(nb_keys):
                data[k][i] = sample[self.keys[k]]

        # Account for batches with only one key
        if 1 == len(data):
            yield tuple(data)[0]
        else:
            yield tuple(data)
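A hypothetical way to drive the generator above, assuming `sampler` is an instance of the class the method belongs to, with `keys`, `spec`, `batch_size`, `rng`, and `db` already configured (all names here are placeholders):

gen = sampler.batch()   # despite the name, batch() returns an infinite generator
first = next(gen)       # one ndarray if a single key was given, else a tuple of arrays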