The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.take().
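Before the project snippets, a minimal sketch of the call itself, using made-up values: numpy.take(a, indices, axis=None, out=None, mode='raise') gathers elements of a at the given indices along axis; with no axis it indexes the flattened array.

import numpy as np

a = np.array([[10, 20, 30],
              [40, 50, 60]])

# Without an axis, take() indexes into the flattened array.
print(np.take(a, [0, 4]))          # [10 50]

# With axis=1 it selects whole columns, equivalent to a[:, [2, 0]].
print(np.take(a, [2, 0], axis=1))  # [[30 10]
                                   #  [60 40]]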
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):
    self._open()
    t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)

    if self.time_axis == 0:
        local_chunk = self.data[t_start:t_stop, :]
    elif self.time_axis == 1:
        local_chunk = self.data[:, t_start:t_stop].T
    self._close()

    if nodes is not None:
        if not numpy.all(nodes == numpy.arange(self.nb_channels)):
            local_chunk = numpy.take(local_chunk, nodes, axis=1)

    return self._scale_data_to_float32(local_chunk)
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):
    t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
    local_shape = t_stop - t_start
    local_chunk = numpy.zeros((self.nb_channels, local_shape), dtype=self.data_dtype)
    data_slice = self._get_slice_(t_start, t_stop)

    self._open()
    count = 0
    for s in data_slice:
        t_slice = len(s) // self.nb_channels
        local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels, len(s) // self.nb_channels)
        count += t_slice
    local_chunk = local_chunk.T
    self._close()

    if nodes is not None:
        if not numpy.all(nodes == numpy.arange(self.nb_channels)):
            local_chunk = numpy.take(local_chunk, nodes, axis=1)

    return self._scale_data_to_float32(local_chunk)
def load(shape, vertex_array):
    destination = vertex_array[gx.VA_PTNMTXIDX.name]
    vertex_index = 0
    matrix_table = numpy.zeros(10, numpy.uint32)

    for batch in shape.batches:
        source = numpy.concatenate([primitive.vertices[gx.VA_PTNMTXIDX.name] for primitive in batch.primitives])
        source //= 3

        for i, index in enumerate(batch.matrix_table):
            if index == 0xFFFF:
                continue
            matrix_table[i] = index

        length = sum(len(primitive.vertices) for primitive in batch.primitives)
        numpy.take(matrix_table, source, 0, destination[vertex_index:vertex_index + length])
        vertex_index += length

    glEnableVertexAttribArray(MATRIX_INDEX_ATTRIBUTE_LOCATION)
    vertex_type = vertex_array.dtype
    stride = vertex_type.itemsize
    offset = vertex_type.fields[gx.VA_PTNMTXIDX.name][1]
    glVertexAttribIPointer(MATRIX_INDEX_ATTRIBUTE_LOCATION, 1, GL_UNSIGNED_INT, stride, GLvoidp(offset))
def get_caption_batch(loaded_data, data_dir, dataset='flowers', batch_size=64):
    captions = np.zeros((batch_size, loaded_data['max_caps_len']))
    batch_idx = np.random.randint(0, loaded_data['data_length'], size=batch_size)
    image_ids = np.take(loaded_data['image_list'], batch_idx)
    image_files = []
    image_caps = []
    image_caps_ids = []
    for idx, image_id in enumerate(image_ids):
        image_file = join(data_dir, dataset, 'jpg' + image_id)
        random_caption = random.randint(0, 4)
        image_caps_ids.append(random_caption)
        captions[idx, :] = \
            loaded_data['captions'][image_id][random_caption][
                0:loaded_data['max_caps_len']]
        image_caps.append(loaded_data['captions'][image_id][random_caption])
        image_files.append(image_file)

    return captions, image_files, image_caps, image_ids, image_caps_ids
def get_val_caps_batch(batch_size, loaded_data, data_set, data_dir):
    if data_set == 'flowers':
        captions = np.zeros((batch_size, loaded_data['max_caps_len']))
        batch_idx = np.random.randint(0, loaded_data['val_data_len'], size=batch_size)
        image_ids = np.take(loaded_data['val_img_list'], batch_idx)
        image_files = []
        image_caps = []
        for idx, image_id in enumerate(image_ids):
            image_file = join(data_dir, 'flowers/jpg/' + image_id)
            random_caption = random.randint(0, 4)
            captions[idx, :] = \
                loaded_data['val_captions'][image_id][random_caption][
                    0:loaded_data['max_caps_len']]
            image_caps.append(loaded_data['str_captions'][image_id][random_caption])
            image_files.append(image_file)

        return captions, image_files, image_caps, image_ids
    else:
        raise Exception('Dataset not found')
def label_ranking_reciprocal_rank(label,   # [sent_num]
                                  preds):  # [sent_num]
    """
    Calculating the reciprocal rank according to definition.
    """
    rank = np.argsort(preds)[::-1]
    # pos_rank = np.take(rank, np.where(label == 1)[0])
    # return np.mean(1.0 / pos_rank)
    if_find = False
    pos = 0
    for r in rank:
        pos += 1
        if label[r] == 1:
            first_pos_r = pos
            if_find = True
            break
    assert if_find
    return 1.0 / first_pos_r
def sample(self, n):
    """
    Sample n elements uniformly from the memory.
    """
    indices = np.random.choice(self.cur_size, n, replace=False)
    s1 = np.take(self.S1, indices, axis=0)
    a = np.take(self.A, indices)
    r = np.take(self.R, indices)
    s2 = np.take(self.S2, indices, axis=0)
    t = np.take(self.T, indices)
    return s1, a, r, s2, t
    # sample_elements = []
    # for _ in range(n):
    #     sample_elements.append(self.memory[random.randint(0, len(self.memory) - 1)])
    #
    # return sample_elements
def test_TakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
    assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
    assert_equal(np.inner(filled(x, 0), filled(y, 0)), inner(x, y))
    assert_equal(np.outer(filled(x, 0), filled(y, 0)), outer(x, y))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def test_generic_methods(self):
    # Tests some MaskedArray methods.
    a = array([1, 3, 2])
    assert_equal(a.any(), a._data.any())
    assert_equal(a.all(), a._data.all())
    assert_equal(a.argmax(), a._data.argmax())
    assert_equal(a.argmin(), a._data.argmin())
    assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
    assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
    assert_equal(a.conj(), a._data.conj())
    assert_equal(a.conjugate(), a._data.conjugate())
    m = array([[1, 2], [3, 4]])
    assert_equal(m.diagonal(), m._data.diagonal())
    assert_equal(a.sum(), a._data.sum())
    assert_equal(a.take([1, 2]), a._data.take([1, 2]))
    assert_equal(m.transpose(), m._data.transpose())
def test_testTakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
    assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
    assert_(eq(np.inner(filled(x, 0), filled(y, 0)), inner(x, y)))
    assert_(eq(np.outer(filled(x, 0), filled(y, 0)), outer(x, y)))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def test_testArrayMethods(self):
    a = array([1, 3, 2])
    self.assertTrue(eq(a.any(), a._data.any()))
    self.assertTrue(eq(a.all(), a._data.all()))
    self.assertTrue(eq(a.argmax(), a._data.argmax()))
    self.assertTrue(eq(a.argmin(), a._data.argmin()))
    self.assertTrue(eq(a.choose(0, 1, 2, 3, 4),
                       a._data.choose(0, 1, 2, 3, 4)))
    self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
    self.assertTrue(eq(a.conj(), a._data.conj()))
    self.assertTrue(eq(a.conjugate(), a._data.conjugate()))
    m = array([[1, 2], [3, 4]])
    self.assertTrue(eq(m.diagonal(), m._data.diagonal()))
    self.assertTrue(eq(a.sum(), a._data.sum()))
    self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2])))
    self.assertTrue(eq(m.transpose(), m._data.transpose()))
def test_4(self):
    """
    Test of take, transpose, inner, outer products.
    """
    x = self.arange(24)
    y = np.arange(24)
    x[5:6] = self.masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
    assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
    assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
                         self.inner(x, y))
    assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
                         self.outer(x, y))
    y = self.array(['abc', 1, 'def', 2, 3], object)
    y[2] = self.masked
    t = self.take(y, [0, 3, 4])
    assert t[0] == 'abc'
    assert t[1] == 2
    assert t[2] == 3
def take(self, indices, axis=None, out=None, mode='raise'):
    """
    """
    (_data, _mask) = (self._data, self._mask)
    cls = type(self)
    # Make sure the indices are not masked
    maskindices = getattr(indices, '_mask', nomask)
    if maskindices is not nomask:
        indices = indices.filled(0)
    # Get the data
    if out is None:
        out = _data.take(indices, axis=axis, mode=mode).view(cls)
    else:
        np.take(_data, indices, axis=axis, mode=mode, out=out)
    # Get the mask
    if isinstance(out, MaskedArray):
        if _mask is nomask:
            outmask = maskindices
        else:
            outmask = _mask.take(indices, axis=axis, mode=mode)
            outmask |= maskindices
        out.__setmask__(outmask)
    return out

# Array methods
def recalculate_objects(pred_dict, image):
    proposals = pred_dict['rpn_prediction']['proposals']
    proposals_prob = pred_dict['classification_prediction']['rcnn']['cls_prob']
    proposals_target = proposals_prob.argmax(axis=1) - 1
    bbox_offsets = pred_dict['classification_prediction']['rcnn']['bbox_offsets']

    bbox_offsets = bbox_offsets[proposals_target >= 0]
    proposals = proposals[proposals_target >= 0]
    proposals_target = proposals_target[proposals_target >= 0]

    bbox_offsets_idx_pairs = np.stack(
        np.array([
            proposals_target * 4,
            proposals_target * 4 + 1,
            proposals_target * 4 + 2,
            proposals_target * 4 + 3]),
        axis=1)
    bbox_offsets = np.take(bbox_offsets, bbox_offsets_idx_pairs.astype(np.int))

    bboxes = decode(proposals, bbox_offsets)
    return bboxes, proposals_target
def recollect(self, w):
    if w is None:
        self.w = w
        return
    k = w['kernel']
    b = w['biases']
    k = np.take(k, self.inp, 2)
    k = np.take(k, self.out, 3)
    b = np.take(b, self.out)
    assert1 = k.shape == tuple(self.wshape['kernel'])
    assert2 = b.shape == tuple(self.wshape['biases'])
    assert assert1 and assert2, \
        'Dimension not matching in {} recollect'.format(self._signature)
    self.w['kernel'] = k
    self.w['biases'] = b
def recollect(self, w):
    if w is None:
        self.w = w
        return
    idx = self.keep_idx
    k = w['kernel']
    b = w['biases']
    self.w['kernel'] = np.take(k, idx, 3)
    self.w['biases'] = np.take(b, idx)
    if self.batch_norm:
        m = w['moving_mean']
        v = w['moving_variance']
        g = w['gamma']
        self.w['moving_mean'] = np.take(m, idx)
        self.w['moving_variance'] = np.take(v, idx)
        self.w['gamma'] = np.take(g, idx)
def sample_from_histogram(p, n_samples=1):
    """
    Returns the indices of bins sampled according to the histogram p.

    @param p: histogram
    @type p: numpy.array
    @param n_samples: number of samples to generate
    @type n_samples: integer
    """
    from numpy import add, less, argsort, take, arange
    from numpy.random import random

    indices = argsort(p)
    indices = take(indices, arange(len(p) - 1, -1, -1))

    c = add.accumulate(take(p, indices)) / add.reduce(p)

    return indices[add.reduce(less.outer(c, random(n_samples)), 0)]
def load_dataset():
    if not os.path.exists("./dataset/training.csv"):
        print("dataset does not exist")
        raise Exception

    # load dataset
    labeled_image = pd.read_csv("./dataset/training.csv")

    # preprocessing dataframe
    image = np.array(labeled_image["Image"].values).reshape(-1, 1)
    image = np.apply_along_axis(lambda img: (img[0].split()), 1, image)
    image = image.astype(np.int32)  # because train_img elements are string before preprocessing
    image = image.reshape(-1, 96 * 96)  # data 96 * 96 size image

    label = labeled_image.values[:, :-1]
    label = label.astype(np.float32)

    # nan value to mean value
    col_mean = np.nanmean(label, axis=0)
    indices = np.where(np.isnan(label))
    label[indices] = np.take(col_mean, indices[1])

    return image, label
def process_data(coords, nbr_idx, elements):
    num_atoms = len(nbr_idx)

    # truncates off zero padding at the end and maps atomic numbers to atom types
    coords = coords[:num_atoms, :]
    elements = np.array([atom_dictionary[elements[i]] for i in range(num_atoms)], dtype=np.int32)

    # pad the neighbor indices with zeros if not enough neighbors
    elements = np.append(elements, 0)
    for i in range(num_atoms):
        if len(nbr_idx[i]) < 12:
            nbr_idx[i].extend(np.ones([12 - len(nbr_idx[i])], dtype=np.int32) * num_atoms)
    nbr_idx = np.array([nbr_idx[i] for i in range(num_atoms)], dtype=np.int32)

    # creates neighboring atom type matrix - 0 = nonexistent atom
    nbr_atoms = np.take(elements, nbr_idx)
    np.place(nbr_idx, nbr_idx >= num_atoms, 0)
    elements = elements[:-1]

    return (coords.astype(np.float32), nbr_idx.astype(np.int32),
            elements.astype(np.int32), nbr_atoms.astype(np.int32))
def test_take(self):
    def assert_take_ok(mgr, axis, indexer):
        mat = mgr.as_matrix()
        taken = mgr.take(indexer, axis)
        assert_almost_equal(np.take(mat, indexer, axis), taken.as_matrix())
        assert_almost_equal(mgr.axes[axis].take(indexer), taken.axes[axis])

    for mgr in self.MANAGERS:
        for ax in range(mgr.ndim):
            # take/fancy indexer
            yield assert_take_ok, mgr, ax, []
            yield assert_take_ok, mgr, ax, [0, 0, 0]
            yield assert_take_ok, mgr, ax, lrange(mgr.shape[ax])

            if mgr.shape[ax] >= 3:
                yield assert_take_ok, mgr, ax, [0, 1, 2]
                yield assert_take_ok, mgr, ax, [-1, -2, -3]
def check_bool(self, func, value, correct, *args, **kwargs):
    while getattr(value, 'ndim', True):
        try:
            res0 = func(value, *args, **kwargs)
            if correct:
                self.assertTrue(res0)
            else:
                self.assertFalse(res0)
        except BaseException as exc:
            exc.args += ('dim: %s' % getattr(value, 'ndim', value),)
            raise
        if not hasattr(value, 'ndim'):
            break
        try:
            value = np.take(value, 0, axis=-1)
        except ValueError:
            break
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
    if len(join_keys) > 1:
        if not ((isinstance(right_ax, MultiIndex) and
                 len(join_keys) == right_ax.nlevels)):
            raise AssertionError("If more than one join key is given then "
                                 "'right_ax' must be a MultiIndex and the "
                                 "number of join keys must be the number of "
                                 "levels in right_ax")

        left_indexer, right_indexer = \
            _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        jkey = join_keys[0]

        left_indexer, right_indexer = \
            _get_single_indexer(jkey, right_ax, sort=sort)

    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer

    # left frame preserves order & length of its index
    return left_ax, None, right_indexer
def _sort_labels(uniques, left, right):
    if not isinstance(uniques, np.ndarray):
        # tuplesafe
        uniques = Index(uniques).values

    sorter = uniques.argsort()

    reverse_indexer = np.empty(len(sorter), dtype=np.int64)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    new_left = reverse_indexer.take(com._ensure_platform_int(left))
    np.putmask(new_left, left == -1, -1)

    new_right = reverse_indexer.take(com._ensure_platform_int(right))
    np.putmask(new_right, right == -1, -1)

    return new_left, new_right
def get_patch_values(point, target_image, radius=5, shape='circle', morfo_mask=None):
    """
    To obtain the list of the values below a mask.
    :param point:
    :param target_image:
    :param radius:
    :param shape:
    :param morfo_mask: to avoid computing the morphological mask at each iteration
    if this method is called in a loop, this can be provided as input.
    :return:
    """
    if morfo_mask is None:
        morfo_mask = get_morphological_mask(point, target_image.shape, radius=radius, shape=shape)
    coord = np.nonzero(morfo_mask.flatten())[0]
    return np.take(target_image.flatten(), coord)


# def midpoint_circle_algorithm(center=(0, 0, 0), radius=4):
#     x, y, z = center
#     # TODO generalise the midpoint circle algorithm and use it for get_shell_for_given_radius
#     pass
# Note: this snippet is Python 2 (xrange, print statement).
def glassoBonaFidePartial(gl, X, TrueCov):
    # take a
    ep = EmpiricalCovariance().fit(X)
    emp_cov = ep.covariance_
    _, precs = graph_lasso_path(X, gl.cv_alphas_)
    best_score = -np.inf
    best_ind = 0
    for i in xrange(len(gl.cv_alphas_)):
        try:
            this_score = log_likelihood(TrueCov, precs[i])
            if this_score >= .1 / np.finfo(np.float64).eps:
                this_score = np.nan
            if this_score > best_score:
                best_score = this_score
                best_ind = i
        except:
            print 'exited:', best_score
            continue
    covariance_, precision_, n_iter_ = graph_lasso(
        emp_cov, alpha=gl.cv_alphas_[best_ind], mode=gl.mode,
        tol=gl.tol * 5., max_iter=gl.max_iter, return_n_iter=True)
    return np.abs(toPartialCorr(precision_))
def __init__(self, x, y, ival=0., sorted=False, side='left'):
    if side.lower() not in ['right', 'left']:
        msg = "side can take the values 'right' or 'left'"
        raise ValueError(msg)
    self.side = side

    _x = np.asarray(x)
    _y = np.asarray(y)

    if _x.shape != _y.shape:
        msg = "x and y do not have the same shape"
        raise ValueError(msg)
    if len(_x.shape) != 1:
        msg = 'x and y must be 1-dimensional'
        raise ValueError(msg)

    self.x = np.r_[-np.inf, _x]
    self.y = np.r_[ival, _y]

    if not sorted:
        asort = np.argsort(self.x)
        self.x = np.take(self.x, asort, 0)
        self.y = np.take(self.y, asort, 0)
    self.n = self.x.shape[0]
def project_radii(radii, spacing, r_min, r_max):
    """ Projects given radii to values between r_min and r_max; good spacing ~ 1000 """
    radii_norm = radii / np.max(radii)  # Normalize radii

    # Determine min and max of array and generate spacing
    radii_to_proj = np.around(np.linspace(np.min(radii_norm), np.max(radii_norm), spacing), 3)
    values_to_proj = np.around(np.linspace(r_min, r_max, spacing), 3)

    # Determine respective array positions
    pos = np.array([np.argmin(np.abs(radii_to_proj - radii_norm[entry]))
                    for entry in range(len(radii_norm))], dtype=np.int)

    # Determine new radii
    return np.take(values_to_proj, pos)


###############################################################################
# HUNGARIAN (MUNKRES) ALGORITHM - TAKEN FROM SCIPY
###############################################################################
def get_wigner_seitz_radii(self, calc_for='mol1'):
    """ Calculate Wigner-Seitz radii from nuclear charges """
    if calc_for == 'mol1':
        chg = self.chg_mol1
    else:
        chg = self.chg_mol2

    # Wigner-Seitz radius in A
    w_s_r = (((3.0 * chg) / (4.0 * np.pi * np.take(pse_mass_dens, chg - 1) * NA)) **
             (1.0 / 3.0) * 0.01) / 1.0E-10

    # Return result(s)
    return w_s_r
def get_xsf_stored(self, logger, charge, xsf_type='MoKa'):
    # Set up dictionary of the prestored scattering factors
    xsf_dict = {'MoKa': (pse_mo_xsf_1, pse_mo_xsf_2),
                'CuKa': (pse_cu_xsf_1, pse_cu_xsf_2),
                'CoKa': (pse_co_xsf_1, pse_co_xsf_2),
                'FeKa': (pse_fe_xsf_1, pse_fe_xsf_2),
                'CrKa': (pse_cr_xsf_1, pse_cr_xsf_2)}

    if xsf_type not in ['MoKa', 'CuKa', 'CoKa', 'FeKa', 'CrKa']:
        # Check for valid user input
        logger.pt_xsf_wrong_source()
        xsf_type = 'MoKa'

    # Get scattering factors from nuclear charge
    chosen_xsf_1, chosen_xsf_2 = xsf_dict[xsf_type]
    xsf1, xsf2 = np.take(chosen_xsf_1, charge - 1), np.take(chosen_xsf_2, charge - 1)

    # Return value(s)
    return xsf1, xsf2
def get_test_set(self):
    """ Return the test set (the same for each inc. batch). """
    scen = self.scenario
    run = self.run
    test_idx_list = self.LUP[scen][run][-1]

    if self.preload:
        test_x = np.take(self.x, test_idx_list, axis=0).astype(np.float32)
    else:
        # test paths
        test_paths = []
        for idx in test_idx_list:
            test_paths.append(os.path.join(self.root, self.paths[idx]))

        # test imgs
        test_x = self.get_batch_from_paths(test_paths).astype(np.float32)

    test_y = self.labels[scen][run][-1]
    test_y = np.asarray(test_y, dtype=np.float32)

    return test_x, test_y
def imcrop_tosquare(img):
    """Make any image a square image.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d.

    Returns
    -------
    crop : np.ndarray
        Cropped image.
    """
    size = np.min(img.shape[:2])
    extra = img.shape[:2] - size
    crop = img
    for i in np.flatnonzero(extra):
        crop = np.take(crop, extra[i] // 2 + np.r_[:size], axis=i)
    return crop
def _get_labels(self, mask):
    """Transform a mask of class indices into a mask containing actual classification labels.

    Parameters
    ----------
    mask: ndarray (shape: [width, height])
        A NumPy representation of a segmentation mask. Each pixel should be a class
        index (see `SemanticSegmenter.segment` function docstring).

    Returns
    -------
    mask: ndarray (shape: [width, height])
        A NumPy representation of the mask containing the true labels of the image.

    Raises
    ------
    ValueError: if the true labels were not defined
    """
    if self.classes is None:
        raise ValueError("Class labels are not defined.")
    return np.take(self.classes, mask)