The following 49 code examples, extracted from open-source Python projects, illustrate how to use numpy.int_().
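Before the harvested examples, a minimal orientation sketch (the literal values below are illustrative, not taken from any project on this page): np.int_ is NumPy's default integer scalar type, historically the platform C long and, since NumPy 2.0, an alias of np.intp. It works both as a dtype argument and as a callable that casts scalars and array-likes to integers.

import numpy as np

# As a dtype, np.int_ selects NumPy's default integer type.
a = np.zeros(3, dtype=np.int_)

# As a callable, it casts scalars and array-likes, truncating toward zero
# like the builtin int().
print(np.int_(3.9))               # 3
print(np.int_([1.2, 2.7]))        # [1 2]  (element-wise cast to an ndarray)

# Scalars drawn from an int_-dtype array pass isinstance checks against it,
# which is the pattern most of the type-dispatch examples below rely on.
print(isinstance(a[0], np.int_))  # True
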
def render_lane(image, corners, ploty, fitx):
    _, src, dst = perspective_transform(image, corners)
    Minv = cv2.getPerspectiveTransform(dst, src)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(image[:, :, 0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts = np.vstack((fitx, ploty)).astype(np.int32).T

    # Draw the lane onto the warped blank image
    #plt.plot(left_fitx, ploty, color='yellow')
    cv2.polylines(color_warp, [pts], False, (0, 255, 0), 10)
    #cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))

    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)
    return result

def df_type_to_str(i):
    '''
    Convert into simple datatypes from pandas/numpy types
    '''
    if isinstance(i, np.bool_):
        return bool(i)
    if isinstance(i, np.int_):
        return int(i)
    # np.float (removed in NumPy 1.24) was an alias for the builtin float
    if isinstance(i, np.float):
        if np.isnan(i):
            return 'NaN'
        elif np.isinf(i):
            return str(i)
        return float(i)
    if isinstance(i, np.uint):
        return int(i)
    if type(i) == bytes:
        return i.decode('UTF-8')
    if isinstance(i, (tuple, list)):
        return str(i)
    if i is pd.NaT:  # not identified as a float null
        return 'NaN'
    return str(i)

def console_fill_foreground(con, r, g, b):
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
            isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)

def console_fill_background(con, r, g, b):
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
            isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_background(con, cr, cg, cb)

def pickle_transitions_matrix_data():
    # Python 2 code (dict.iteritems() and the print statement)
    transitions = pickle.load(open("/ssd/ddimitrov/pickle/transitions", "rb"))
    vocab = pickle.load(open("/ssd/ddimitrov/pickle/vocab", "rb"))

    i_indices = array.array(str("l"))
    j_indices = array.array(str("l"))
    values = array.array(str("d"))

    for s, targets in transitions.iteritems():
        for t, v in targets.iteritems():
            i_indices.append(vocab[s])
            j_indices.append(vocab[t])
            values.append(v)

    i_indices = np.frombuffer(i_indices, dtype=np.int_)
    j_indices = np.frombuffer(j_indices, dtype=np.int_)
    values = np.frombuffer(values, dtype=np.float64)

    transition_matrix = [i_indices, j_indices, values]
    pickle.dump(transition_matrix,
                open("/ssd/ddimitrov/pickle/transition_matrix", "wb"),
                protocol=pickle.HIGHEST_PROTOCOL)
    print "transition_matrix"

def test_empty_tuple_index(self):
    # Empty tuple index creates a view
    a = np.array([1, 2, 3])
    assert_equal(a[()], a)
    assert_(a[()].base is a)
    a = np.array(0)
    assert_(isinstance(a[()], np.int_))

    # Regression, it needs to fall through integer and fancy indexing
    # cases, so need the with statement to ignore the non-integer error.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', '', DeprecationWarning)
        a = np.array([1.])
        assert_(isinstance(a[0.], np.float_))
        a = np.array([np.array(1)], dtype=object)
        assert_(isinstance(a[0.], np.ndarray))

def test_output_shape(self):
    # see also gh-616
    a = np.ones((10, 5))

    # Check some simple shape mismatches
    out = np.ones(11, dtype=np.int_)
    assert_raises(ValueError, a.argmax, -1, out)

    out = np.ones((2, 5), dtype=np.int_)
    assert_raises(ValueError, a.argmax, -1, out)

    # these could be relaxed possibly (used to allow even the previous)
    out = np.ones((1, 10), dtype=np.int_)
    assert_raises(ValueError, a.argmax, -1, out)

    out = np.ones(10, dtype=np.int_)
    a.argmax(-1, out=out)
    assert_equal(out, a.argmax(-1))

def test_output_shape(self):
    # see also gh-616
    a = np.ones((10, 5))

    # Check some simple shape mismatches
    out = np.ones(11, dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    out = np.ones((2, 5), dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    # these could be relaxed possibly (used to allow even the previous)
    out = np.ones((1, 10), dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    out = np.ones(10, dtype=np.int_)
    a.argmin(-1, out=out)
    assert_equal(out, a.argmin(-1))

def test_allclose(self):
    # Tests allclose on arrays
    a = np.random.rand(10)
    b = a + np.random.rand(10) * 1e-8
    self.assertTrue(allclose(a, b))

    # Test allclose w/ infs
    a[0] = np.inf
    self.assertTrue(not allclose(a, b))
    b[0] = np.inf
    self.assertTrue(allclose(a, b))

    # Test allclose w/ masked
    a = masked_array(a)
    a[-1] = masked
    self.assertTrue(allclose(a, b, masked_equal=True))
    self.assertTrue(not allclose(a, b, masked_equal=False))

    # Test comparison w/ scalar
    a *= 1e-8
    a[0] = 0
    self.assertTrue(allclose(a, 0, masked_equal=True))

    # Test that the function works for MIN_INT integer typed arrays
    a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
    self.assertTrue(allclose(a, a))

def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2):
    '''
    Calculate the number of frames to be correlated for a given X-ray exposure dose

    Parameters:
        exp_dose: a list, the exposure dose, e.g., in units of
                  exp_time(ms) * N(frame number) * att(attenuation)
        exp_time: float, the exposure time for an XPCS time series
        dead_time: dead time for the fast-shutter response time, CHX = 2 ms
    Return:
        noframes: the number of frames to be correlated,
                  exp_dose / ((exp_time + dead_time) * att)

    e.g.,
        no_dose_fra = get_fra_num_by_dose(
            exp_dose=[3.34*20, 3.34*50, 3.34*100, 3.34*502, 3.34*505],
            exp_time=1.34, dead_time=2)
        --> no_dose_fra will be array([ 20,  50, 100, 502, 504])
    '''
    return np.int_(np.array(exp_dose) / (exp_time + dead_time) / att)

def create_time_slice(N, slice_num, slice_width, edges=None):
    '''create ROI time regions'''
    if edges is not None:
        time_edge = edges
    else:
        if slice_num == 1:
            time_edge = [[0, N]]
        else:
            tstep = N // slice_num
            te = np.arange(0, slice_num + 1) * tstep
            tc = np.int_((te[:-1] + te[1:]) / 2)[1:-1]
            if slice_width % 2:
                sw = slice_width // 2 + 1
                time_edge = ([[0, slice_width]] +
                             [[s - sw + 1, s + sw] for s in tc] +
                             [[N - slice_width, N]])
            else:
                sw = slice_width // 2
                time_edge = ([[0, slice_width]] +
                             [[s - sw, s + sw] for s in tc] +
                             [[N - slice_width, N]])
    return np.array(time_edge)

def get_his_std_qi(data_pixel_qi, max_cts=None):
    '''
    YG. Dec 16, 2016
    Calculate the photon histogram for one q, given:
    Parameters:
        data_pixel_qi: 2-D array (frame, pixel) of photon counts
        max_cts: for bin max, bins will be [0, 1, 2, ..., max_cts]
    Return:
        bins, his, std, kmean
    '''
    if max_cts is None:
        max_cts = np.max(data_pixel_qi) + 1
    bins = np.arange(max_cts)
    dqn, dqm = data_pixel_qi.shape
    # get histogram here
    H = np.apply_along_axis(np.bincount, 1, np.int_(data_pixel_qi),
                            minlength=max_cts) / dqm
    # do average for different frame
    his = np.average(H, axis=0)
    std = np.std(H, axis=0)
    # cal average photon counts
    kmean = np.average(data_pixel_qi)
    return bins, his, std, kmean

def default(self, obj):
    # convert dates and numpy objects in a json serializable format
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    elif type(obj) in (np.int_, np.intc, np.intp, np.int8, np.int16,
                       np.int32, np.int64, np.uint8, np.uint16,
                       np.uint32, np.uint64):
        return int(obj)
    elif type(obj) in (np.bool_,):
        return bool(obj)
    elif type(obj) in (np.float_, np.float16, np.float32, np.float64,
                       np.complex_, np.complex64, np.complex128):
        return float(obj)

    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)

def __tiledoutput__(self, net_op, batch_size, num_cols=8, net_recon_const=None):
    num_rows = np.int_(np.ceil((batch_size * 1.) / num_cols))
    out_img = np.zeros((num_rows * self.outshape[0],
                        num_cols * self.outshape[1], 3), dtype='uint8')
    img_lab = np.zeros((self.outshape[0], self.outshape[1], 3), dtype='uint8')
    c = 0
    r = 0
    for i in range(batch_size):
        if i % num_cols == 0 and i > 0:
            r = r + 1
            c = 0
        img_lab[..., 0] = self.__decodeimg__(net_recon_const[i, 0, :, :].reshape(
            self.outshape[0], self.outshape[1]))
        img_lab[..., 1] = self.__decodeimg__(net_op[i, 0, :, :].reshape(
            self.shape[0], self.shape[1]))
        img_lab[..., 2] = self.__decodeimg__(net_op[i, 1, :, :].reshape(
            self.shape[0], self.shape[1]))
        img_rgb = cv2.cvtColor(img_lab, cv2.COLOR_LAB2BGR)
        out_img[r*self.outshape[0]:(r+1)*self.outshape[0],
                c*self.outshape[1]:(c+1)*self.outshape[1], ...] = img_rgb
        c = c + 1
    return out_img

def test_constructors():
    from pybind11_tests.array import default_constructors, converting_constructors

    defaults = default_constructors()
    for a in defaults.values():
        assert a.size == 0
    assert defaults["array"].dtype == np.array([]).dtype
    assert defaults["array_t<int32>"].dtype == np.int32
    assert defaults["array_t<double>"].dtype == np.float64

    results = converting_constructors([1, 2, 3])
    for a in results.values():
        np.testing.assert_array_equal(a, [1, 2, 3])
    assert results["array"].dtype == np.int_
    assert results["array_t<int32>"].dtype == np.int32
    assert results["array_t<double>"].dtype == np.float64

def maybe_convert_indices(indices, n):
    """
    If we have negative indices, translate to positive here;
    if we have indices that are out-of-bounds, raise an IndexError.
    """
    if isinstance(indices, list):
        indices = np.array(indices)
        if len(indices) == 0:
            # If list is empty, np.array will return float and cause indexing
            # errors.
            return np.empty(0, dtype=np.int_)

    mask = indices < 0
    if mask.any():
        indices[mask] += n
    mask = (indices >= n) | (indices < 0)
    if mask.any():
        raise IndexError("indices are out-of-bounds")
    return indices

def test_empty_fancy(self):
    empty_farr = np.array([], dtype=np.float_)
    empty_iarr = np.array([], dtype=np.int_)
    empty_barr = np.array([], dtype=np.bool_)

    # pd.DatetimeIndex is excluded, because it overrides getitem and should
    # be tested separately.
    for idx in [self.strIndex, self.intIndex, self.floatIndex]:
        empty_idx = idx.__class__([])

        self.assertTrue(idx[[]].identical(empty_idx))
        self.assertTrue(idx[empty_iarr].identical(empty_idx))
        self.assertTrue(idx[empty_barr].identical(empty_idx))

        # np.ndarray only accepts ndarray of int & bool dtypes, so should
        # Index.
        self.assertRaises(IndexError, idx.__getitem__, empty_farr)

def test_output_shape(self):
    # see also gh-616
    a = np.ones((10, 5))

    # Check some simple shape mismatches
    out = np.ones(11, dtype=np.int_)
    assert_raises(ValueError, a.argmax, -1, out)

    out = np.ones((2, 5), dtype=np.int_)
    assert_raises(ValueError, a.argmax, -1, out)

    # these could be relaxed possibly (used to allow even the previous)
    out = np.ones((1, 10), dtype=np.int_)
    assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))

    out = np.ones(10, dtype=np.int_)
    a.argmax(-1, out=out)
    assert_equal(out, a.argmax(-1))

def test_output_shape(self):
    # see also gh-616
    a = np.ones((10, 5))

    # Check some simple shape mismatches
    out = np.ones(11, dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    out = np.ones((2, 5), dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, out)

    # these could be relaxed possibly (used to allow even the previous)
    out = np.ones((1, 10), dtype=np.int_)
    assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))

    out = np.ones(10, dtype=np.int_)
    a.argmin(-1, out=out)
    assert_equal(out, a.argmin(-1))

def test_allclose(self):
    # Tests allclose on arrays
    a = np.random.rand(10)
    b = a + np.random.rand(10) * 1e-8
    self.assertTrue(allclose(a, b))

    # Test allclose w/ infs
    a[0] = np.inf
    self.assertTrue(not allclose(a, b))
    b[0] = np.inf
    self.assertTrue(allclose(a, b))

    # Test allclose w/ masked
    a = masked_array(a)
    a[-1] = masked
    self.assertTrue(allclose(a, b, masked_equal=True))
    self.assertTrue(not allclose(a, b, masked_equal=False))

    # Test comparison w/ scalar
    a *= 1e-8
    a[0] = 0
    self.assertTrue(allclose(a, 0, masked_equal=True))

    # Test that the function works for MIN_INT integer typed arrays
    a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
    self.assertTrue(allclose(a, a))

def test_get_all_route_shapes(self):
    res = self.gtfs.get_all_route_shapes()
    self.assertTrue(isinstance(res, list))

    el = res[0]
    keys = u"name type agency lats lons".split()
    for key in keys:
        self.assertTrue(key in el)

    for el in res:
        self.assertTrue(isinstance(el[u"name"], string_types), type(el[u"name"]))
        self.assertTrue(isinstance(el[u"type"], (int, numpy.int_)), type(el[u"type"]))
        self.assertTrue(isinstance(el[u"agency"], string_types))
        self.assertTrue(isinstance(el[u"lats"], list), type(el[u"lats"]))
        self.assertTrue(isinstance(el[u"lons"], list))
        self.assertTrue(isinstance(el[u"lats"][0], float))
        self.assertTrue(isinstance(el[u"lons"][0], float))

def test_get_stop_count_data(self):
    dt_start_query = datetime.datetime(2007, 1, 1, 7, 59, 59)
    dt_end_query = datetime.datetime(2007, 1, 1, 10, 2, 1)
    start_query = self.gtfs.unlocalized_datetime_to_ut_seconds(dt_start_query)
    end_query = self.gtfs.unlocalized_datetime_to_ut_seconds(dt_end_query)

    df = self.gtfs.get_stop_count_data(start_query, end_query)
    self.assertTrue(isinstance(df, pandas.DataFrame))
    columns = ["stop_I", "count", "lat", "lon", "name"]
    for c in columns:
        self.assertTrue(c in df.columns)
        el = df[c].iloc[0]
        if c in ["stop_I", "count"]:
            self.assertTrue(isinstance(el, (int, numpy.int_)))
        if c in ["lat", "lon"]:
            self.assertTrue(isinstance(el, float))
        if c in ["name"]:
            self.assertTrue(isinstance(el, string_types), type(el))
    self.assertTrue((df['count'].values > 0).any())

def get_reads_base_sds(chrm_strand_reads, chrm_len, rev_strand):
    base_sd_sums = np.zeros(chrm_len)
    base_cov = np.zeros(chrm_len, dtype=np.int_)
    for r_data in chrm_strand_reads:
        # extract read means data so data across all chrms is not
        # in RAM at one time
        try:
            read_data = h5py.File(r_data.fn, 'r')
        except IOError:
            # probably truncated file
            continue
        events_slot = '/'.join(('/Analyses', r_data.corr_group, 'Events'))
        if events_slot not in read_data:
            continue
        read_sds = read_data[events_slot]['norm_stdev']
        if rev_strand:
            read_sds = read_sds[::-1]
        base_sd_sums[r_data.start:r_data.start + len(read_sds)] += read_sds
        base_cov[r_data.start:r_data.start + len(read_sds)] += 1
    return base_sd_sums / base_cov

def get_reads_base_lengths(chrm_strand_reads, chrm_len, rev_strand):
    base_length_sums = np.zeros(chrm_len)
    base_cov = np.zeros(chrm_len, dtype=np.int_)
    for r_data in chrm_strand_reads:
        # extract read means data so data across all chrms is not
        # in RAM at one time
        try:
            read_data = h5py.File(r_data.fn, 'r')
        except IOError:
            # probably truncated file
            continue
        events_slot = '/'.join(('/Analyses', r_data.corr_group, 'Events'))
        if events_slot not in read_data:
            continue
        read_lengths = read_data[events_slot]['length']
        if rev_strand:
            read_lengths = read_lengths[::-1]
        base_length_sums[
            r_data.start:r_data.start + len(read_lengths)] += read_lengths
        base_cov[r_data.start:r_data.start + len(read_lengths)] += 1
    return base_length_sums / base_cov

def default(self, obj):
    # convert dates and numpy objects in a json serializable format
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    elif type(obj) in [np.int_, np.intc, np.intp, np.int8, np.int16,
                       np.int32, np.int64, np.uint8, np.uint16,
                       np.uint32, np.uint64]:
        return int(obj)
    elif type(obj) in [np.bool_]:
        return bool(obj)
    elif type(obj) in [np.float_, np.float16, np.float32, np.float64,
                       np.complex_, np.complex64, np.complex128]:
        return float(obj)

    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)

def test_int_subclassing(self):
    # Regression test for https://github.com/numpy/numpy/pull/3526
    numpy_int = np.int_(0)

    if sys.version_info[0] >= 3:
        # On Py3k int_ should not inherit from int, because it's not
        # fixed-width anymore
        assert_equal(isinstance(numpy_int, int), False)
    else:
        # Otherwise, it should inherit from int...
        assert_equal(isinstance(numpy_int, int), True)

        # ... and fast-path checks on C-API level should also work
        from numpy.core.multiarray_tests import test_int_subclass
        assert_equal(test_int_subclass(numpy_int), True)

def data_style_func(df):
    '''
    Default value that can be used as a callback for data_style_func

    Args:
        df: the dataframe that will be used to build the presentation model

    Returns:
        a function that takes idx, col as arguments and returns a dictionary
        of html style attributes
    '''
    def _style_func(r, c):
        if isinstance(df.at[r, c], (np.int_, np.float, np.uint)):
            return td_style_to_str(default_numeric_td_style)
        return td_style_to_str(default_td_style)
    return _style_func