The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.logical_not().
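Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what numpy.logical_not itself does: it computes the elementwise truth-value negation of its input and returns a boolean array, which for boolean input is equivalent to the `~` operator.

```python
import numpy as np

a = np.array([0, 1, 2, 0, -3])
print(np.logical_not(a))      # [ True False False  True False] -- True where falsy

b = np.array([True, False, True])
# For boolean arrays, logical_not agrees with the bitwise ~ operator:
assert np.array_equal(np.logical_not(b), ~b)
```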
```python
def reg2bin_vector(begin, end):
    '''Vectorized tabix reg2bin -- much faster than reg2bin'''
    result = np.zeros(begin.shape)

    # Entries filled
    done = np.zeros(begin.shape, dtype=bool)

    # rev_bit_bins is defined elsewhere in the source module
    for (bits, bins) in rev_bit_bins:
        begin_shift = begin >> bits
        new_done = (begin >> bits) == (end >> bits)
        mask = np.logical_and(new_done, np.logical_not(done))
        offset = ((1 << (29 - bits)) - 1) // 7
        result[mask] = offset + begin_shift[mask]
        done = new_done

    return result.astype(np.int32)
```
```python
def get_max_q_values(
    self,
    next_states: np.ndarray,
    possible_next_actions: Optional[np.ndarray] = None,
    use_target_network: Optional[bool] = True,
) -> np.ndarray:
    q_values = self.get_q_values_all_actions(next_states, use_target_network)
    if possible_next_actions is not None:
        # Push the Q-values of impossible actions far down so that
        # np.max never selects them.
        mask = np.multiply(
            np.logical_not(possible_next_actions),
            self.ACTION_NOT_POSSIBLE_VAL,
        )
        q_values += mask
    return np.max(q_values, axis=1, keepdims=True)
```
```python
def get_training_data_page(self, num_samples):
    """
    Returns a TrainingDataPage with shuffled, transformed transitions from
    replay memory.

    :param num_samples: Number of transitions to sample from replay memory.
    """
    (states, actions, rewards, next_states, next_actions, terminals,
     possible_next_actions) = self.sample_memories(num_samples)
    return TrainingDataPage(
        np.array(states, dtype=np.float32),
        np.array(actions, dtype=np.float32),
        np.array(rewards, dtype=np.float32),
        np.array(next_states, dtype=np.float32),
        np.array(next_actions, dtype=np.float32),
        np.array(possible_next_actions, dtype=np.float32),
        None,
        None,
        np.logical_not(terminals),  # logical_not already returns a bool array
    )
```
```python
def foldsplitter(taskcolumn, train_set_sizes):
    '''
    For each task id (in the passed taskcolumn), hold out rows of that task
    from index train_set_size onward for testing, and use all other rows for
    training (so the training set consists of all other task ids plus the
    first train_set_size rows of the held-out task).
    '''
    folds = sorted(list(set(taskcolumn)))
    for fold in folds:
        for train_set_size in train_set_sizes:
            testfold2train = taskcolumn == fold
            cnt = 0
            for (i, x) in enumerate(testfold2train):
                if testfold2train[i]:
                    cnt += 1
                    if cnt > train_set_size:
                        testfold2train[i] = False
            remaining_train = taskcolumn != fold
            x = np.logical_or.reduce([testfold2train, remaining_train])
            yield (x, np.logical_not(x))
```
```python
def CVsplitter(taskcolumn, K):
    '''
    Divide tasks into K roughly equal sets, and do CV over those K sets.
    '''
    tasks = sorted(list(set(taskcolumn)))
    tasks_splitted = [[] for _ in range(K)]
    for (ind, task) in enumerate(tasks):
        tasks_splitted[ind % K].append(task)
    for fold in range(K):
        print('fold:', fold, 'testtasks:', tasks_splitted[fold])
        test = np.logical_or.reduce(
            [taskcolumn == taskid for taskid in tasks_splitted[fold]])
        yield (np.logical_not(test), test)
```
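Both splitters above follow the same pattern: build a boolean test mask, then obtain the training mask as its complement with np.logical_not. A minimal standalone sketch of that pattern, with made-up task ids:

```python
import numpy as np

taskcolumn = np.array([0, 0, 1, 1, 2, 2])
test = np.isin(taskcolumn, [1])   # rows belonging to the held-out task(s)
train = np.logical_not(test)      # the complement is the training mask
print(train)                      # [ True  True False False  True  True]
```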
```python
def random_points_in_circle(n, xx, yy, rr):
    """
    Get n random points in a circle.
    """
    # Note: relies on names imported elsewhere in the source module
    # (e.g. random, zeros, logical_not, cos, sin from numpy) and the
    # constant TWOPI.
    rnd = random(size=(n, 3))
    t = TWOPI * rnd[:, 0]
    u = rnd[:, 1:].sum(axis=1)
    r = zeros(n, 'float')
    mask = u > 1.
    xmask = logical_not(mask)
    r[mask] = 2. - u[mask]
    r[xmask] = u[xmask]
    xyp = reshape(rr * r, (n, 1)) * column_stack((cos(t), sin(t)))
    dartsxy = xyp + array([xx, yy])
    return dartsxy
```
```python
def reset(self):
    self.cur = 0
    if self.shuffle:
        if config.TRAIN.ASPECT_GROUPING:
            widths = np.array([r['width'] for r in self.roidb])
            heights = np.array([r['height'] for r in self.roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            inds = np.hstack((np.random.permutation(horz_inds),
                              np.random.permutation(vert_inds)))
            if inds.shape[0] % 2:
                inds_ = np.reshape(inds[:-1], (-1, 2))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-1] = np.reshape(inds_[row_perm, :], (-1,))
            else:
                inds = np.reshape(inds, (-1, 2))
                row_perm = np.random.permutation(np.arange(inds.shape[0]))
                inds = np.reshape(inds[row_perm, :], (-1,))
            self.index = inds
        else:
            np.random.shuffle(self.index)
```
```python
def _shuffle_roidb_inds(self):
    """Randomly permute the training roidb."""
    if cfg.TRAIN.ASPECT_GROUPING:
        widths = np.array([r['width'] for r in self._roidb])
        heights = np.array([r['height'] for r in self._roidb])
        horz = (widths >= heights)
        vert = np.logical_not(horz)
        horz_inds = np.where(horz)[0]
        vert_inds = np.where(vert)[0]
        inds = np.hstack((
            np.random.permutation(horz_inds),
            np.random.permutation(vert_inds)))
        inds = np.reshape(inds, (-1, 2))
        row_perm = np.random.permutation(np.arange(inds.shape[0]))
        inds = np.reshape(inds[row_perm, :], (-1,))
        self._perm = inds
    else:
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
    self._cur = 0
```
```python
def _create_drop_path_choices(self):
    if not self._drop_path:
        # Drop path was turned off.
        return np.zeros(shape=[len(self._choices)], dtype='int32')
    elif np.random.uniform() < self._p_local_drop_path:
        # Local drop-path (make each choice independently at random).
        choices = np.random.uniform(size=[len(self._choices)])
        drop_base = choices < self._p_drop_base_case
        drop_recursive = np.logical_and(
            choices < (self._p_drop_base_case + self._p_drop_recursive_case),
            np.logical_not(drop_base))
        return (np.int32(drop_base) * self._JUST_RECURSE +
                np.int32(drop_recursive) * self._JUST_BASE)
    else:
        # Global drop-path (pick a single column).
        column = np.random.randint(self._fractal_block_depth)
        return np.array(
            [self._JUST_RECURSE if len(binary_seq) < column
             else self._JUST_BASE
             for _, binary_seq in self._choices],
            dtype='int32')
```
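The drop_recursive mask above means "below the combined drop probability, but not already dropped as a base case"; combining np.logical_and with np.logical_not in this way yields mutually exclusive masks. A toy sketch with made-up probabilities:

```python
import numpy as np

choices = np.array([0.05, 0.2, 0.5, 0.9])
p_base, p_recursive = 0.1, 0.3
drop_base = choices < p_base
# In the recursive band, but excluding anything already dropped as base:
drop_recursive = np.logical_and(choices < (p_base + p_recursive),
                                np.logical_not(drop_base))
print(drop_base)       # [ True False False False]
print(drop_recursive)  # [False  True False False]
```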
```python
def random_points_in_circle(n, xx, yy, rr):
    """
    Get n random points in a circle.
    """
    # Note: relies on names imported elsewhere in the source module
    # (e.g. random, zeros, logical_not, cos, sin from numpy) and the
    # constant PI.
    rnd = random(size=(n, 3))
    t = 2. * PI * rnd[:, 0]
    u = rnd[:, 1:].sum(axis=1)
    r = zeros(n, 'float')
    mask = u > 1.
    xmask = logical_not(mask)
    r[mask] = 2. - u[mask]
    r[xmask] = u[xmask]
    xyp = reshape(rr * r, (n, 1)) * column_stack((cos(t), sin(t)))
    dartsxy = xyp + array([xx, yy])
    return dartsxy
```
```python
def test_object_logical(self):
    a = np.array([3, None, True, False, "test", ""], dtype=object)
    assert_equal(np.logical_or(a, None),
                 np.array([x or None for x in a], dtype=object))
    assert_equal(np.logical_or(a, True),
                 np.array([x or True for x in a], dtype=object))
    assert_equal(np.logical_or(a, 12),
                 np.array([x or 12 for x in a], dtype=object))
    assert_equal(np.logical_or(a, "blah"),
                 np.array([x or "blah" for x in a], dtype=object))

    assert_equal(np.logical_and(a, None),
                 np.array([x and None for x in a], dtype=object))
    assert_equal(np.logical_and(a, True),
                 np.array([x and True for x in a], dtype=object))
    assert_equal(np.logical_and(a, 12),
                 np.array([x and 12 for x in a], dtype=object))
    assert_equal(np.logical_and(a, "blah"),
                 np.array([x and "blah" for x in a], dtype=object))

    assert_equal(np.logical_not(a),
                 np.array([not x for x in a], dtype=object))

    assert_equal(np.logical_or.reduce(a), 3)
    assert_equal(np.logical_and.reduce(a), None)
```
```python
def test_2d_w_missing(self):
    # Test cov on 2D variable w/ missing value
    x = self.data
    x[-1] = masked
    x = x.reshape(3, 4)
    valid = np.logical_not(getmaskarray(x)).astype(int)
    frac = np.dot(valid, valid.T)
    xf = (x - x.mean(1)[:, None]).filled(0)
    assert_almost_equal(cov(x),
                        np.cov(xf) * (x.shape[1] - 1) / (frac - 1.))
    assert_almost_equal(cov(x, bias=True),
                        np.cov(xf, bias=True) * x.shape[1] / frac)
    frac = np.dot(valid.T, valid)
    xf = (x - x.mean(0)).filled(0)
    assert_almost_equal(cov(x, rowvar=False),
                        (np.cov(xf, rowvar=False) *
                         (x.shape[0] - 1) / (frac - 1.)))
    assert_almost_equal(cov(x, rowvar=False, bias=True),
                        (np.cov(xf, rowvar=False, bias=True) *
                         x.shape[0] / frac))
```
```python
def __ipow__(self, other):
    """
    Raise self to the power other, in place.
    """
    other_data = getdata(other)
    other_mask = getmask(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    new_mask = mask_or(other_mask, invalid)
    self._mask = mask_or(self._mask, new_mask)
    return self
```
```python
def knn_masked_data(trX, trY, missing_data_dir, input_shape, k):
    raw_im_data = np.loadtxt(join(script_dir, missing_data_dir, 'index.txt'),
                             delimiter=' ', dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir, missing_data_dir, 'index_mask.txt'),
                               delimiter=' ', dtype=str)
    # Using the 'brute' method since we only want to do one query per
    # classifier, so this will be quicker as it avoids the overhead of
    # creating a search tree
    knn_m = KNeighborsClassifier(algorithm='brute', n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0], int(np.max(trY) + 1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(
        widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '),
                 progressbar.Bar()],
        maxval=total_images, term_width=50).start()
    for i in range(total_images):
        mask_im = load_image(join(script_dir, missing_data_dir, raw_mask_data[i][0]),
                             input_shape, 1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps)  # since mask is 1 at missing locations
        v_im = load_image(join(script_dir, missing_data_dir, raw_im_data[i][0]),
                          input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask, (trX.shape[0], 1))
        # Corrupt the whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i, :] = knn_m.predict_proba(v_im.reshape(1, -1))
        pbar.update(i)
    pbar.finish()
    return prob_Y_hat
```
```python
def getControls(self):
    '''
    Calculate consumption for each agent this period.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    employed = self.eStateNow == 1.0
    unemployed = np.logical_not(employed)
    cLvlNow = np.zeros(self.AgentCount)
    cLvlNow[employed] = self.solution[0].cFunc(self.mLvlNow[employed])
    cLvlNow[unemployed] = self.solution[0].cFunc_U(self.mLvlNow[unemployed])
    self.cLvlNow = cLvlNow
```
```python
def _zscore(a):
    """ Calculate the z-score of data along the first axis.

    If the numbers in any column are all equal, scipy.stats.zscore will
    return NaN for that column. We correct those entries to all zeros.

    Parameters
    ----------
    a: numpy array

    Returns
    -------
    zscore: numpy array
        The z-scores of the input "a", with any columns containing
        non-finite numbers replaced by all zeros.
    """
    assert a.ndim > 1, 'a must have more than one dimension'
    zscore = scipy.stats.zscore(a, axis=0)
    zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
    return zscore
```
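The indexing line in _zscore packs three steps into one expression: np.isfinite flags good entries, np.all reduces over rows, and np.logical_not selects the offending columns. The same step unrolled, as a small sketch with toy data:

```python
import numpy as np

z = np.array([[1.0, np.nan],
              [2.0, np.nan]])
finite_cols = np.all(np.isfinite(z), axis=0)   # [ True False]
z[:, np.logical_not(finite_cols)] = 0          # zero out the bad columns
print(z)                                       # [[1. 0.] [2. 0.]]
```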
```python
def ft_autocorrelation_function(self, k):
    """Compute the 3D Fourier transform of the isotropic correlation
    function for an independent sphere for a given magnitude k of the
    3D wave vector (float).
    """
    X = self.radius * np.asarray(k)
    volume_sphere = 4.0 / 3 * np.pi * self.radius**3
    bessel_term = np.empty_like(X)
    zero_X = np.isclose(X, 0)
    non_zero_X = np.logical_not(zero_X)
    X_non_zero = X[non_zero_X]
    bessel_term[non_zero_X] = (9 * ((np.sin(X_non_zero) -
                                     X_non_zero * np.cos(X_non_zero)) /
                                    X_non_zero**3)**2)
    bessel_term[zero_X] = 1.0
    return self.corr_func_at_origin * volume_sphere * bessel_term
```
```python
def reset(self):
    self.cur = 0
    if self.shuffle:
        if self.aspect_grouping:
            widths = np.array([r['width'] for r in self.roidb])
            heights = np.array([r['height'] for r in self.roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            inds = np.hstack((np.random.permutation(horz_inds),
                              np.random.permutation(vert_inds)))
            extra = inds.shape[0] % self.batch_size
            # Guard against extra == 0, where inds[:-0] would be empty.
            if extra:
                inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
            else:
                inds_ = np.reshape(inds, (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds = np.reshape(inds_[row_perm, :], (-1,))
            self.index = inds
        else:
            np.random.shuffle(self.index)
```
```python
def reset(self):
    self.cur = 0
    if self.shuffle:
        # not needed
        """if self.aspect_grouping:
            widths = np.array([r['width'] for r in self.roidb])
            heights = np.array([r['height'] for r in self.roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            inds = np.hstack((np.random.permutation(horz_inds),
                              np.random.permutation(vert_inds)))
            extra = inds.shape[0] % self.batch_size
            inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
            row_perm = np.random.permutation(np.arange(inds_.shape[0]))
            inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
            self.index = inds
        else:"""
        np.random.shuffle(self.index)
```
```python
def findSignificantContours(img, sobel_8u, sobel):
    image, contours, heirarchy = cv2.findContours(sobel_8u,
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    mask = np.ones(image.shape[:2], dtype="uint8") * 255

    # Find level-1 contours (those with no parent)
    level1 = []
    for i, tupl in enumerate(heirarchy[0]):
        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)

    # Keep only contours covering more than 10% of the image area
    significant = []
    tooSmall = sobel_8u.size * 10 / 100
    for tupl in level1:
        contour = contours[tupl[0]]
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(mask, [contour], 0, (0, 255, 0),
                             2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])

    significant.sort(key=lambda x: x[1])
    significant = [x[0] for x in significant]

    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)

    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255, 0)
    # Invert the mask: True everywhere outside the significant contours
    mask = np.logical_not(mask)
    img[mask] = 0
    return img
```
```python
def replay(self):
    """Memory management and training of the agent."""
    if len(self.memory) < self.batch_size:
        return

    state, action, reward, next_state, done = self._get_batches()
    # Zero the bootstrapped term for terminal transitions
    reward += (self.gamma
               * np.logical_not(done)
               * np.amax(self.model.predict(next_state), axis=1))
    q_target = self.target_model.predict(state)

    _ = pd.Series(action)
    one_hot = pd.get_dummies(_).to_numpy()  # as_matrix() was removed in pandas 1.0
    action_batch = np.where(one_hot == 1)
    q_target[action_batch] = reward
    return self.model.fit(state, q_target,
                          batch_size=self.batch_size,
                          epochs=1,
                          verbose=False)
```
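The reward update above relies on np.logical_not(done) to zero the bootstrapped term for terminal transitions, so a terminal target is just the immediate reward. A minimal numeric sketch of that masking, independent of any model:

```python
import numpy as np

gamma = 0.99
reward = np.array([1.0, 0.5, 2.0])
done = np.array([False, True, False])
max_next_q = np.array([10.0, 10.0, 10.0])
# Terminal transitions contribute only their immediate reward.
target = reward + gamma * np.logical_not(done) * max_next_q
print(target)   # [10.9  0.5 11.9]
```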
```python
def to_mask(self, x_size, y_size):
    """
    This function ...
    :param x_size:
    :param y_size:
    :return:
    """

    base = self.base.to_mask(x_size, y_size)
    exclude = self.exclude.to_mask(x_size, y_size)

    # Return the mask
    return base * np.logical_not(exclude)

# -----------------------------------------------------------------
```
```python
def masked_outside(region, header, x_size, y_size, expand_factor=1.0):
    """
    This function ...
    :param region:
    :param header:
    :param x_size:
    :param y_size:
    :param expand_factor:
    :return:
    """

    # Create a new region ...
    region = regions.expand(region, factor=expand_factor)

    # Create a mask from the region
    mask = np.logical_not(regions.create_mask(region, header, x_size, y_size))

    # Return the mask
    return mask

# -----------------------------------------------------------------
```
```python
def predictiveQQ(simulations, targets, bands):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        bands = toCustomLogSpace(np.array(bands)[::-1])
        pValues = np.empty_like(targets)
        for i0 in range(pValues.shape[0]):
            sims, idxs = np.unique(simulations[i0, :], return_index=True)
            try:
                pValues[i0] = interp1d(sims, bands[idxs], kind='linear',
                                       assume_sorted=True)(targets[i0])
            except np.linalg.LinAlgError:
                pValues[i0] = np.nan
            except ValueError:
                # TODO: handle extrapolations better
                if targets[i0] < sims[0]:
                    pValues[i0] = (bands[0] + (bands[0] - bands[1])
                                   / (sims[0] - sims[1])
                                   * (targets[i0] - sims[0]))
                else:
                    pValues[i0] = (bands[-1] + (bands[-1] - bands[-2])
                                   / (sims[-1] - sims[-2])
                                   * (targets[i0] - sims[-1]))
        pValues = fromCustomLogSpace(pValues)
        pValues[pValues < 0] = 0
        pValues[pValues > 1] = 1
        pValues = np.sort(1 - pValues[np.logical_not(np.isnan(pValues))])
        return (np.linspace(0, 1, pValues.shape[0]), pValues)
```
```python
def old_viz():
    objects = np.load(sys.argv[1])
    if len(objects.shape) == 3:
        objects = [objects]
    for ind, voxels in enumerate(objects):
        print(voxels.shape)
        if connect > 0:
            voxels_keep = (voxels >= threshold)
            voxels_keep = max_connected(voxels_keep, connect)
            voxels[np.logical_not(voxels_keep)] = 0
        if downsample_factor > 1:
            print("==> Performing downsample: factor: " + str(downsample_factor)
                  + " method: " + downsample_method, end=' ')
            voxels = downsample(voxels, downsample_factor,
                                method=downsample_method)
            print("Done")
        visualization(voxels, threshold, title=str(ind + 1),
                      uniform_size=uniform_size, use_colormap=use_colormap)
```
```python
def unstick_contour(edgepoints, unstick_coeff):
    """
    Removes edgepoints near previously discarded points.

    @type edgepoints: list[bool]
    @param edgepoints: current edgepoint list
    @type unstick_coeff: float
    @param unstick_coeff: fraction of each discarded run's size to
        additionally remove on both of its sides
    @return: filtered edgepoints
    """
    (n, init, end) = loop_connected_components(np.logical_not(edgepoints))
    filtered = np.copy(edgepoints)
    n_edgepoint = len(edgepoints)
    for size, s, e in zip(n, init, end):
        for j in range(1, int(size * unstick_coeff + 0.5) + 1):
            filtered[(e + j) % n_edgepoint] = 0
            filtered[(s - j) % n_edgepoint] = 0
    return filtered
```
```python
def _get_limits(nifti_file, only_plot_noise=False):
    from builtins import bytes, str  # pylint: disable=W0622

    if isinstance(nifti_file, (str, bytes)):
        nii = nb.as_closest_canonical(nb.load(nifti_file))
        data = nii.get_data()
    else:
        data = nifti_file

    data_mask = np.logical_not(np.isnan(data))

    if only_plot_noise:
        data_mask = np.logical_and(data_mask, data != 0)
        vmin = np.percentile(data[data_mask], 0)
        vmax = np.percentile(data[data_mask], 61)
    else:
        vmin = np.percentile(data[data_mask], 0.5)
        vmax = np.percentile(data[data_mask], 99.5)

    return vmin, vmax
```
```python
def setUp(self, m, m_lsp, m_lspet):
    # prepare mock.
    joints = np.array([[[50, 80, 0], [50, 80, 1], [150, 260, 1], [150, 260, 0]],
                       [[100, 200, 1], [100, 200, 0], [120, 280, 0], [120, 280, 1]],
                       [[40, 10, 0], [40, 10, 1], [120, 290, 1], [120, 290, 0]]])
    m_lsp_instance = m_lsp.return_value
    m_lsp_instance.name = 'lsp_dataset'
    m_lsp_instance.__len__.return_value = 2
    lsp_joints = joints.copy()
    lsp_joints[:, :, 2] = np.logical_not(joints[:, :, 2]).astype(int)
    m_lsp_instance.get_data = lambda i: ('train', lsp_joints[i],
                                         'im{0:04d}.jpg'.format(i + 1),
                                         np.zeros((300, 200, 3)))
    m_lspet_instance = m_lspet.return_value
    m_lspet_instance.name = 'lspet_dataset'
    m_lspet_instance.__len__.return_value = 2
    lspet_joints = joints.copy()
    m_lspet_instance.get_data = lambda i: ('train', lspet_joints[i],
                                           'im{0:05d}.jpg'.format(i + 1),
                                           np.zeros((300, 200, 3)))
    # initialize.
    self.path = 'test_orig_data'
    self.output = 'test_data'
    self.generator = DatasetGenerator(path=self.path, output=self.output)
```
```python
def _features_in_class(self, X, y_one_hot):
    '''
    Compute complement feature counts

    Parameters
    ----------
    X: numpy array (n_samples, n_features)
        Matrix of input samples
    y_one_hot: numpy array (n_samples, n_classes)
        Binary matrix encoding input
    '''
    if not self.is_fitted:
        self.complement_features_ = X.T.dot(np.logical_not(y_one_hot))
        self.features_ = X.T.dot(y_one_hot)
    else:
        self.complement_features_ += X.T.dot(np.logical_not(y_one_hot))
        self.features_ += X.T.dot(y_one_hot)
```
```python
def _features_in_class(self, X, y_one_hot):
    '''
    Compute complement feature counts

    Parameters
    ----------
    X: numpy array (n_samples, n_features)
        Matrix of input samples
    y_one_hot: numpy array (n_samples, n_classes)
        Binary matrix encoding input
    '''
    if not self.is_fitted:
        self.complement_features = X.T.dot(np.logical_not(y_one_hot))
    else:
        self.complement_features += X.T.dot(np.logical_not(y_one_hot))
```
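Both variants above accumulate complement counts by multiplying the transposed feature matrix with the negated one-hot labels: entry (f, c) of the result sums feature f over all samples that do not belong to class c. A small sketch with toy values:

```python
import numpy as np

X = np.array([[1, 0], [2, 1], [0, 3]])          # 3 samples, 2 features
y_one_hot = np.array([[1, 0], [0, 1], [0, 1]])  # 3 samples, 2 classes
# Feature counts summed over the samples NOT in each class:
complement = X.T.dot(np.logical_not(y_one_hot))
print(complement)
# [[2 1]
#  [4 0]]
```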