Python numpy module: argwhere() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.argwhere().
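As a quick reference before the examples: np.argwhere(a) returns the indices of a's non-zero (truthy) elements as an array of shape (N, a.ndim), one row per matching element, so it is most often applied to a boolean condition. A minimal sketch of the behavior the snippets below rely on:

import numpy as np

a = np.array([[0, 3], [5, 0]])
print(np.argwhere(a))        # indices of the non-zero entries: [[0 1], [1 0]]
print(np.argwhere(a > 4))    # any boolean condition works: [[1 0]]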

Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def timeIndex(self, slider):
        ## Return the time and frame index indicated by a slider
        if self.image is None:
            return (0,0)

        t = slider.value()

        xv = self.tVals
        if xv is None:
            ind = int(t)
        else:
            if len(xv) < 2:
                return (0,0)
            totTime = xv[-1] + (xv[-1]-xv[-2])
            inds = np.argwhere(xv < t)
            if len(inds) < 1:
                return (0,t)
            ind = inds[-1,0]
        return ind, t
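
The inds[-1, 0] pattern above grabs the last index whose time value lies below the slider time; a standalone sketch of the same idiom, with made-up values:

import numpy as np

xv = np.array([0.0, 0.5, 1.0, 1.5])     # hypothetical time axis
t = 1.2
inds = np.argwhere(xv < t)              # (N, 1) column of matching indices
ind = inds[-1, 0] if len(inds) else 0   # last time point before t, with fallback
print(ind)                              # -> 2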
Project: ANN-PONR-Python3    Author: anon-42    | project source | file source
def chooseErrorData(self, game, lesson=None):
        ''' 
        Choose saved error function data by lesson and game name in 
        history database.
        '''
        self.history.setGame(game)
        self.load()
        if lesson is not None:
            self.error_data_training = np.split(self.data[0,:], 
                np.argwhere(self.data[0,:] == -1))[lesson][1:]
            self.error_data_test = np.split(self.data[1,:], 
                np.argwhere(self.data[1,:] == -1))[lesson][1:]
        else:
            self.error_data_training = np.delete(self.data[0,:], 
                np.argwhere(self.data[0,:]==-1))
            self.error_data_test = np.delete(self.data[1,:], 
                np.argwhere(self.data[1,:]==-1))

# ---------------- for testing and demonstration purposes only ----------------
Project: cache-leak-detector    Author: falsecurity    | project source | file source
def getSection(self, Address):

        # find
        idx = numpy.argwhere(self._SectionsFast[:]['Start'] <= Address).flatten()
        if len(idx) == 0:
            return None

        # check
        if Address < self._SectionsFast[idx[-1]]['Start'] + \
           self._SectionsFast[idx[-1]]['Size']:
            return (self._Sections[self._SectionsFast[idx[-1]]['Start']])
        else:
            return None

    ##
    # Get symbol from given address.
    #
    #   @param Address address within image
    #   @return the symbol of the address (None if error)
    #
Project: cache-leak-detector    Author: falsecurity    | project source | file source
def getSymbol(self, Address):

        # find
        idx = numpy.argwhere(self._SymbolsFast[:]['Start'] <= Address).flatten()
        if len(idx) == 0:
            return None

        # check
        if Address < self._SymbolsFast[idx[-1]]['Start'] + \
           self._SymbolsFast[idx[-1]]['Size']:
            return (self._Symbols[self._SymbolsFast[idx[-1]]['Start']])
        else:
            return None

    ##
    # Get instruction from given address.
    #
    #   @param Address address within image
    #   @return size of instr. and assembly code (None if error)
    #
Project: bot2017Fin    Author: AllanYiin    | project source | file source
def get_antonyms(self,wordA:str, topk:int=10,ispositive:bool=True):
        seed=[['??','??'],['??','??'],['??','??'],['??','??'],['??','??']]  # seed pairs of Chinese antonyms (characters garbled in the source)
        proposal={}
        for pair in seed:
            if ispositive:
                result=self.analogy(pair[0],pair[1],wordA,topk)
                print(w2v.find_nearest_word((self[pair[0]] + self[pair[1]]) / 2, 3))
            else:
                result = self.analogy(pair[1], pair[0], wordA, topk)
                print(w2v.find_nearest_word((self[pair[0]] + self[pair[1]]) / 2, 3))

            for item in result:
                term_products = np.argwhere(self[wordA] * self[item[0]] < 0)
                #print(item[0] + ':' +wordA + str(term_products))
                #print(item[0] + ':' +wordA+'('+str(pair)+')  '+ str(len(term_products)))
                if len(term_products)>=self.dims/4:
                    if item[0] not in proposal:
                        proposal[item[0]] = item[1]
                    elif item[1]> proposal[item[0]]:
                        proposal[item[0]] +=item[1]
        for k,v in  proposal.items():
            proposal[k]=v/len(seed)
        sortitems=sorted(proposal.items(), key=lambda d: d[1],reverse=True)
        return  [sortitems[i] for i in range(min(topk,len(sortitems)))]
Project: chainer-fcis    Author: knorth55    | project source | file source
def prepare_data(seg_img, ins_img):
    labels = []
    bboxes = []
    masks = []
    instances = np.unique(ins_img)
    for inst in instances[instances != -1]:
        mask_inst = ins_img == inst
        count = collections.Counter(seg_img[mask_inst].tolist())
        instance_class = max(count.items(), key=lambda x: x[1])[0]

        assert inst not in [-1]
        assert instance_class not in [-1, 0]

        where = np.argwhere(mask_inst)
        (y1, x1), (y2, x2) = where.min(0), where.max(0) + 1

        labels.append(instance_class)
        bboxes.append((y1, x1, y2, x2))
        masks.append(mask_inst)
    labels = np.array(labels)
    bboxes = np.array(bboxes)
    masks = np.array(masks)
    return bboxes, masks, labels
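
The two lines around np.argwhere(mask_inst) are the standard tight-bounding-box-from-mask idiom: per-axis minima give the top-left corner, maxima plus one give the exclusive bottom-right. A self-contained sketch on a toy mask (not from the project):

import numpy as np

mask = np.zeros((6, 6), dtype=bool)
mask[2:4, 1:5] = True                       # a toy instance mask
where = np.argwhere(mask)                   # (N, 2) array of (y, x) coordinates
(y1, x1), (y2, x2) = where.min(0), where.max(0) + 1
print(y1, x1, y2, x2)                       # -> 2 1 4 5, a half-open box
assert mask[y1:y2, x1:x2].all()             # the box tightly covers the mask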
Project: universe    Author: openai    | project source | file source
def _step(self, action_n):
        observation_n, reward_n, done_n, info = self.env.step(action_n)
        # Pass along ID of potentially-done episode
        for i, info_i in enumerate(info['n']):
            info_i['vectorized.episode_id'] = self.episode_ids[i]

        done_i = np.argwhere(done_n).reshape(-1)
        if len(done_i):
            for i in done_i:
                self.extra_done.add(self.episode_ids[i])
                # Episode completed, so we bump its value
                self.episode_ids[i] += self.n
                if self.episode_limit is not None and self.episode_ids[i] > self.episode_limit:
                    logger.debug('Masking: index=%s episode_id=%s', i, self.episode_ids[i])
                    self.env.mask(i)
            self._set_done_to()

        # Pass along the number of contiguous episodes that are now done
        info['vectorized.done_to'] = self.done_to
        return observation_n, reward_n, done_n, info
Project: MDT    Author: cbclab    | project source | file source
def roi_index_to_volume_index(roi_indices, brain_mask):
    """Get the 3d index of a voxel given the linear index in a ROI created with the given brain mask.

    This is the inverse function of :func:`volume_index_to_roi_index`.

    This function is useful if you, for example, have sampling results of a specific voxel
    and you want to locate that voxel in the brain maps.

    Please note that this function can be memory intensive for a large list of roi_indices

    Args:
        roi_indices (int or ndarray): the index in the ROI created by that brain mask
        brain_mask (str or 3d array): the brain mask you would like to use

    Returns:
        ndarray: the 3d voxel location(s) of the indicated voxel(s)
    """
    mask = autodetect_brain_mask_loader(brain_mask).get_data()
    return np.argwhere(mask)[roi_indices, :]
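
Since np.argwhere(mask) lists the 3d coordinates of all True voxels in a fixed scan order, indexing its rows with a linear ROI index recovers the voxel position. A sketch of that step alone, with a toy mask standing in for the loaded brain mask:

import numpy as np

mask = np.zeros((2, 3, 3), dtype=bool)
mask[0, 1, 2] = mask[1, 0, 0] = True
coords = np.argwhere(mask)     # one (z, y, x) row per voxel inside the mask
print(coords[1, :])            # -> [1 0 0], the location of ROI voxel 1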
Project: ml_defense    Author: arjunbhagoji    | project source | file source
def _get_Smatrices(self, X, y):

        Sb = np.zeros((X.shape[1], X.shape[1]))

        S = np.inner(X.T, X.T)
        N = len(X)
        mu = np.mean(X, axis=0)
        classLabels = np.unique(y)
        for label in classLabels:
            classIdx = np.argwhere(y == label).T[0]
            Nl = len(classIdx)
            xL = X[classIdx]
            muL = np.mean(xL, axis=0)
            muLbar = muL - mu
            Sb = Sb + Nl * np.outer(muLbar, muLbar)

        Sbar = S - N * np.outer(mu, mu)
        Sw = Sbar - Sb
        self.mean_ = mu

        return (Sw, Sb)
Project: icing    Author: slipguru    | project source | file source
def _set_sparse_diagonal(rows, cols, data, preferences):
    idx = np.where(rows == cols)
    data[idx] = preferences[rows[idx]]
    mask = np.ones(preferences.shape, dtype=bool)
    mask[rows[idx]] = False
    diag_other = np.argwhere(mask).T[0]
    rows = np.concatenate((rows, diag_other))
    cols = np.concatenate((cols, diag_other))
    data = np.concatenate((data, preferences[mask]))

    # return data sorted by row
    idx_sorted_left_ori = np.lexsort((cols, rows))
    rows = rows[idx_sorted_left_ori]
    cols = cols[idx_sorted_left_ori]
    data = data[idx_sorted_left_ori]
    return rows, cols, data
Project: MicrolensingLCOGT    Author: dg7541    | project source | file source
def median_buffer_range(mag, magerr):
    """This function returns the ratio of points that are between plus or minus 10% of the
    amplitude value over the mean

    :param mag: the time-varying intensity of the lightcurve. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: float
    """

    mag, magerr = remove_bad(mag, magerr)
    n = float(len(mag))
    amp = amplitude(mag, magerr) 
    #mean = meanMag(mag, magerr)
    mean = np.median(mag)
    a = mean - amp/10. 
    b = mean + amp/10. 

    median_buffer_range = len(np.argwhere((mag > a) & (mag < b))) / n

    return median_buffer_range
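
Note that len(np.argwhere(cond)) simply counts the True entries of a boolean array, so the same ratio can be computed with np.count_nonzero without materializing the index array; a small equivalence check:

import numpy as np

mag = np.array([1.0, 2.0, 3.0, 4.0])
a, b = 1.5, 3.5
assert len(np.argwhere((mag > a) & (mag < b))) \
    == np.count_nonzero((mag > a) & (mag < b)) == 2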
Project: MicrolensingLCOGT    Author: dg7541    | project source | file source
def median_buffer_range2(mag, magerr):
    """This function returns the ratio of points that are more than 20% of the amplitude
    value over the mean

    :param mag: the time-varying intensity of the lightcurve. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: float
    """

    mag, magerr = remove_bad(mag, magerr)
    n = float(len(mag))
    amp = amplitude(mag, magerr) 
    #mean = meanMag(mag, magerr)
    mean = np.median(mag)
    a = mean - amp/5. 


    median_buffer_range = len(np.argwhere((mag < a))) / n

    return median_buffer_range
Project: MicrolensingLCOGT    Author: dg7541    | project source | file source
def above1(mag, magerr):
    """This function measures the ratio of data points that are above 1 standard deviation 
    from the mean magnitude.

    :param mag: the time-varying intensity of the lightcurve. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: int
    """

    mag, magerr = remove_bad(mag, magerr)
    a = meanMag(mag, magerr) - deviation(mag, magerr)

    above1 = len(np.argwhere(mag < a) )  

    return above1
Project: MicrolensingLCOGT    Author: dg7541    | project source | file source
def above3(mag, magerr):
    """This function measures the ratio of data points that are above 3 standard deviations 
    from the mean magnitude.

    :param mag: the time-varying intensity of the lightcurve. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: int
    """

    mag, magerr = remove_bad(mag, magerr)
    a = meanMag(mag, magerr) - 3*deviation(mag, magerr)

    above3 = len(np.argwhere(mag < a) )  

    return above3
Project: MicrolensingLCOGT    Author: dg7541    | project source | file source
def below1(mag, magerr):
    """This function measures the ratio of data points that are below 1 standard deviations 
    from the mean magnitude.

    :param mag: the time-varying intensity of the lightcurve. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: int
    """

    mag, magerr = remove_bad(mag, magerr)
    a = meanMag(mag, magerr) + deviation(mag, magerr)

    below1 = len(np.argwhere(mag > a)) 

    return below1
Project: MicrolensingLCOGT    Author: dg7541    | project source | file source
def below3(mag, magerr):
    """This function measures the ratio of data points that are below 3 standard deviations
    from the mean magnitude.

    :param mag: the time-varying intensity of the lightcurve. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: int
    """

    mag, magerr = remove_bad(mag, magerr)
    a = meanMag(mag, magerr) + 3*deviation(mag, magerr)

    below3 = len(np.argwhere(mag > a))

    return below3
Project: MicrolensingLCOGT    Author: dg7541    | project source | file source
def below5(mag, magerr):
    """This function measures the ratio of data points that are below 5 standard deviations
    from the mean magnitude.

    :param mag: the time-varying intensity of the lightcurve. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: int
    """

    mag, magerr = remove_bad(mag, magerr)
    a = meanMag(mag, magerr) + 5*deviation(mag, magerr)

    below5 = len(np.argwhere(mag > a))  

    return below5
Project: maf    Author: gpapamak    | project source | file source
def gen(self, n_samples=1, u=None):
        """
        Generate samples from MADE. Requires as many evaluations as the number of inputs.
        :param n_samples: number of samples
        :param u: random numbers to use in generating samples; if None, new random numbers are drawn
        :return: samples
        """

        x = np.zeros([n_samples, self.n_inputs], dtype=dtype)
        u = rng.randn(n_samples, self.n_inputs).astype(dtype) if u is None else u

        for i in xrange(1, self.n_inputs + 1):
            m, logp = self.eval_comps(x)
            idx = np.argwhere(self.input_order == i)[0, 0]
            x[:, idx] = m[:, idx] + np.exp(np.minimum(-0.5 * logp[:, idx], 10.0)) * u[:, idx]

        return x
Project: maf    Author: gpapamak    | project source | file source
def gen(self, n_samples=1, u=None):
        """
        Generate samples from MADE. Requires as many evaluations as the number of inputs.
        :param n_samples: number of samples
        :param u: random numbers to use in generating samples; if None, new random numbers are drawn
        :return: samples
        """

        x = np.zeros([n_samples, self.n_inputs], dtype=dtype)
        u = rng.randn(n_samples, self.n_inputs).astype(dtype) if u is None else u

        for i in xrange(1, self.n_inputs + 1):
            m, logp, loga = self.eval_comps(x)
            idx = np.argwhere(self.input_order == i)[0, 0]
            for n in xrange(n_samples):
                z = util.discrete_sample(np.exp(loga[n, idx]))[0]
                x[n, idx] = m[n, idx, z] + np.exp(np.minimum(-0.5 * logp[n, idx, z], 10.0)) * u[n, idx]

        return x
Project: maf    Author: gpapamak    | project source | file source
def gen(self, x, n_samples=1, u=None):
        """
        Generate samples from MADE conditioned on x. Requires as many evaluations as the number of outputs.
        :param x: input vector
        :param n_samples: number of samples
        :param u: random numbers to use in generating samples; if None, new random numbers are drawn
        :return: samples
        """

        y = np.zeros([n_samples, self.n_outputs], dtype=dtype)
        u = rng.randn(n_samples, self.n_outputs).astype(dtype) if u is None else u

        xy = (np.tile(x, [n_samples, 1]), y)

        for i in xrange(1, self.n_outputs + 1):
            m, logp = self.eval_comps(xy)
            idx = np.argwhere(self.output_order == i)[0, 0]
            y[:, idx] = m[:, idx] + np.exp(np.minimum(-0.5 * logp[:, idx], 10.0)) * u[:, idx]

        return y
Project: maf    Author: gpapamak    | project source | file source
def gen(self, x, n_samples=1, u=None):
        """
        Generate samples from MADE conditioned on x. Requires as many evaluations as the number of outputs.
        :param x: input vector
        :param n_samples: number of samples
        :param u: random numbers to use in generating samples; if None, new random numbers are drawn
        :return: samples
        """

        y = np.zeros([n_samples, self.n_outputs], dtype=dtype)
        u = rng.randn(n_samples, self.n_outputs).astype(dtype) if u is None else u

        xy = (np.tile(x, [n_samples, 1]), y)

        for i in xrange(1, self.n_outputs + 1):
            m, logp, loga = self.eval_comps(xy)
            idx = np.argwhere(self.output_order == i)[0, 0]
            for n in xrange(n_samples):
                z = util.discrete_sample(np.exp(loga[n, idx]))[0]
                y[n, idx] = m[n, idx, z] + np.exp(np.minimum(-0.5 * logp[n, idx, z], 10.0)) * u[n, idx]

        return y
Project: fathom    Author: rdadolf    | project source | file source
def select_action(self, st, runstep=None):
    with self.G.as_default():
      if np.random.rand() > self.params['eps']:
        #greedy with random tie-breaking
        if not self.forward_only:
          Q_pred = self.sess.run(self.qnet.y, feed_dict = {self.qnet.x: np.reshape(st, (1,84,84,4))})[0]
        else:
          Q_pred = runstep(self.sess, self.qnet.y, feed_dict = {self.qnet.x: np.reshape(st, (1,84,84,4))})[0]

        a_winner = np.argwhere(Q_pred == np.amax(Q_pred))
        if len(a_winner) > 1:
          act_idx = a_winner[np.random.randint(0, len(a_winner))][0]
          return act_idx,self.engine.legal_actions[act_idx], np.amax(Q_pred)
        else:
          act_idx = a_winner[0][0]
          return act_idx,self.engine.legal_actions[act_idx], np.amax(Q_pred)
      else:
        #random
        act_idx = np.random.randint(0,len(self.engine.legal_actions))
        if not self.forward_only:
          Q_pred = self.sess.run(self.qnet.y, feed_dict = {self.qnet.x: np.reshape(st, (1,84,84,4))})[0]
        else:
          Q_pred = runstep(self.sess, self.qnet.y, feed_dict = {self.qnet.x: np.reshape(st, (1,84,84,4))})[0]
        return act_idx,self.engine.legal_actions[act_idx], Q_pred[act_idx]
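
The argwhere-based tie-breaking above, choosing uniformly among all actions whose Q-value equals the maximum, is a common trick; a stripped-down sketch with made-up Q-values:

import numpy as np

Q_pred = np.array([0.1, 0.7, 0.7, 0.3])            # hypothetical Q-values
a_winner = np.argwhere(Q_pred == np.amax(Q_pred))  # indices of all maxima
act_idx = a_winner[np.random.randint(0, len(a_winner))][0]
print(act_idx)                                     # -> 1 or 2, picked at random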
Project: mushroom    Author: carloderamo    | project source | file source
def get_idxs(self, idxs):
        """
        Returns the states at the provided indexes.

        Args:
            idxs (list): the indexes of the states to return.

        Returns:
            The states at the provided indexes.

        """
        if not self._full and np.any(idxs < self._history_length):
            idxs[np.argwhere(
                idxs < self._history_length).ravel()] += self._history_length

        s = self._get_state(idxs - 1)
        ss = self._get_state(idxs)

        return s, self._actions[idxs - 1, ...], self._rewards[idxs - 1, ...],\
            ss, self._absorbing[idxs - 1, ...], self._last[idxs - 1, ...]
Project: mushroom    Author: carloderamo    | project source | file source
def select_episodes(dataset, n_episodes, parse=False):
    """
    Return the first `n_episodes` episodes in the provided dataset.

    Args:
        dataset (list): the dataset to consider;
        n_episodes (int): the number of episodes to pick from the dataset;
        parse (bool, False): whether to parse the dataset to return.

    Returns:
        A subset of the dataset containing the first `n_episodes` episodes.

    """
    assert n_episodes >= 0, 'Number of episodes must be greater than or ' \
                            'equal to zero.'
    if n_episodes == 0:
        return np.array([[]])

    dataset = np.array(dataset)
    last_idxs = np.argwhere(dataset[:, -1] == 1).ravel()
    sub_dataset = dataset[:last_idxs[n_episodes - 1] + 1, :]

    return sub_dataset if not parse else parse_dataset(sub_dataset)
Project: mushroom    Author: carloderamo    | project source | file source
def fit(self, state, action, q, **fit_params):
        """
        Fit the model.

        Args:
            state (np.ndarray): states;
            action (np.ndarray): actions;
            q (np.ndarray): target q-values;
            **fit_params (dict): other parameters used by the fit method
                of each regressor.

        """
        state, q = self._preprocess(state, q)

        for i in xrange(len(self.model)):
            idxs = np.argwhere((action == i)[:, 0]).ravel()

            if idxs.size:
                self.model[i].fit(state[idxs, :], q[idxs], **fit_params)
Project: mushroom    Author: carloderamo    | project source | file source
def _update(self, state, action, reward, next_state, absorbing):
        approximator_idx = 0 if np.random.uniform() < .5 else 1

        q_current = self.Q[approximator_idx][state, action]

        if not absorbing:
            q_ss = self.Q[approximator_idx][next_state, :]
            max_q = np.max(q_ss)
            a_n = np.array(
                [np.random.choice(np.argwhere(q_ss == max_q).ravel())])
            q_next = self.Q[1 - approximator_idx][next_state, a_n]
        else:
            q_next = 0.

        q = q_current + self.alpha[approximator_idx](state, action) * (
            reward + self.mdp_info.gamma * q_next - q_current)

        self.Q[approximator_idx][state, action] = q
Project: mushroom    Author: carloderamo    | project source | file source
def step(self, action):
        self._grid = self.convert_to_grid(self._state, *self._grid.shape)

        state = np.argwhere(self._grid == self._symbols['S']).ravel()

        new_state, reward, absorbing, info = self._step(state, action)

        if info['success']:
            self._grid[tuple(state)] = self._symbols['.']
            self._grid[tuple(new_state)] = self._symbols['S']

        self._state = self.convert_to_pixel(self._grid,
                                            self.window_size[1],
                                            self.window_size[0])

        return self._state, reward, absorbing, info
Project: histonets-cv    Author: sul-cidr    | project source | file source
def grid_to_adjacency_matrix(grid, neighborhood=8):
    """Convert a boolean grid where 0's express holes and 1's connected pixel
    into a sparse adjacency matrix representing the grid-graph.
    Neighborhood for each pixel is calculated from its 4 or 8 more immediate
    surrounding neighbors (defaults to 8)."""
    coords = np.argwhere(grid)
    coords_x = coords[:, 0]
    coords_y = coords[:, 1]
    # lil is the most performant format for building a sparse matrix iteratively
    matrix = sparse.lil_matrix((0, coords.shape[0]), dtype=np.uint8)
    if neighborhood == 4:
        for px, py in coords:
            row = (((px == coords_x) & (np.abs(py - coords_y) == 1)) |
                   ((np.abs(px - coords_x) == 1) & (py == coords_y)))
            matrix = sparse.vstack([matrix, row])
    else:
        for px, py in coords:
            row = (np.abs(px - coords_x) <= 1) & (np.abs(py - coords_y) <= 1)
            matrix = sparse.vstack([matrix, row])
    matrix.setdiag(1)
    # Once built, we convert it to compressed sparse columns or rows
    return matrix.tocsc()  # or .tocsr()
Project: coordinates    Author: markovmodel    | project source | file source
def test_contacts_count_contacts(self):
        sel = np.array([1, 2, 5, 20], dtype=int)
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_contacts(pairs, threshold=0.5, periodic=False, count_contacts=True)  # unperiodic distances such that we can compare
        # The dimensionality of the feature is now one
        assert(self.feat.dimension() == 1)
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        C = np.zeros(D.shape)
        I = np.argwhere(D <= 0.5)
        C[I[:, 0], I[:, 1]] = 1.0
        # Count the contacts
        C = C.sum(1, keepdims=True)
        assert(np.allclose(C, self.feat.transform(self.traj)))
Project: coordinates    Author: markovmodel    | project source | file source
def test_Group_Mindist_All_Three_Groups_threshold(self):
        threshold = .7
        group0 = [0, 20, 30, 0]
        group1 = [1, 21, 31, 1]
        group2 = [2, 22, 32, 2]
        self.feat.add_group_mindist(group_definitions=[group0, group1, group2], threshold=threshold)
        D = self.feat.transform(self.traj)

        # Now the references, computed separately for each combination of groups
        dist_list_01 = np.array(list(product(np.unique(group0), np.unique(group1))))
        dist_list_02 = np.array(list(product(np.unique(group0), np.unique(group2))))
        dist_list_12 = np.array(list(product(np.unique(group1), np.unique(group2))))
        Dref_01 = mdtraj.compute_distances(self.traj, dist_list_01).min(1)
        Dref_02 = mdtraj.compute_distances(self.traj, dist_list_02).min(1)
        Dref_12 = mdtraj.compute_distances(self.traj, dist_list_12).min(1)
        Dref = np.vstack((Dref_01, Dref_02, Dref_12)).T

        Dbinary = np.zeros_like(Dref)
        I = np.argwhere(Dref <= threshold)
        Dbinary[I[:, 0], I[:, 1]] = 1

        assert np.allclose(D, Dbinary)
        assert len(self.feat.describe())==self.feat.dimension()
Project: coordinates    Author: markovmodel    | project source | file source
def transform(self, traj):
        # All needed distances
        Dall = mdtraj.compute_distances(traj, self.distance_indexes, periodic=self.periodic)
        # Just the minimas
        Dmin = np.zeros((traj.n_frames,self.dimension))
        res = np.zeros_like(Dmin)
        # Compute the min groupwise
        for ii, (gi, gf) in enumerate(self.group_identifiers):
            Dmin[:, ii] = Dall[:,gi:gf].min(1)
        # Do we want binary?
        if self.threshold is not None:
            I = np.argwhere(Dmin <= self.threshold)
            res[I[:, 0], I[:, 1]] = 1.0
        else:
            res = Dmin

        return res
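
The I = np.argwhere(cond); res[I[:, 0], I[:, 1]] = 1.0 pattern used here (and in the tests above) scatters ones at every position satisfying the threshold; a minimal sketch:

import numpy as np

D = np.array([[0.2, 0.9],
              [0.6, 0.4]])
res = np.zeros_like(D)
I = np.argwhere(D <= 0.5)
res[I[:, 0], I[:, 1]] = 1.0
print(res)                        # -> [[1. 0.], [0. 1.]]
# equivalent, without the index round trip: (D <= 0.5).astype(D.dtype)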
Project: PyCS    Author: COSMOGRAIL    | project source | file source
def maskinfo(self):
        """
        Returns a description of the masked points and their available properties.
        Note that the output format can be directly used as a skiplist.
        """

        cps = self.commonproperties()
        lines = []
        maskindices = np.argwhere(self.mask == False)
        for maskindex in maskindices:
            comment = ", ".join(["%s : %s" % (cp, self.properties[maskindex][cp]) for cp in cps])
            txt = "%.1f    %s" % (self.jds[maskindex], comment)
            lines.append(txt)

        txt = "\n".join(lines)
        txt = "# %i Masked points of %s :\n" % (np.sum(self.mask == False), str(self)) + txt
        return txt
Project: impyute    Author: eltonlaw    | project source | file source
def find_null(data):
    """ Finds the indices of all missing values.

    Parameters
    ----------
    data: numpy.ndarray
        Data to impute.

    Returns
    -------
    List of tuples
        Indices of all missing values in tuple format; (i, j)

    """
    null_xy = np.argwhere(np.isnan(data))
    return null_xy
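
A quick usage sketch of find_null, pairing argwhere with np.isnan:

import numpy as np

data = np.array([[1.0, np.nan],
                 [np.nan, 4.0]])
print(np.argwhere(np.isnan(data)))   # -> [[0 1], [1 0]], the (i, j) of each NaN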
Project: poseval    Author: leonid-pishchulin    | project source | file source
def VOCap(rec,prec):

    mpre = np.zeros([1,2+len(prec)])
    mpre[0,1:len(prec)+1] = prec
    mrec = np.zeros([1,2+len(rec)])
    mrec[0,1:len(rec)+1] = rec
    mrec[0,len(rec)+1] = 1.0

    for i in range(mpre.size-2,-1,-1):
        mpre[0,i] = max(mpre[0,i],mpre[0,i+1])

    i = np.argwhere( ~np.equal( mrec[0,1:], mrec[0,:mrec.shape[1]-1]) )+1
    i = i.flatten()

    # compute area under the curve
    ap = np.sum( np.multiply( np.subtract( mrec[0,i], mrec[0,i-1]), mpre[0,i] ) )

    return ap
Project: plda    Author: RaviSoji    | project source | file source
def get_principal_components(flattened_images, n_components='default',
                             default_pct_variance_explained=.96):
    """ Standardizes the data and gets the principal components.
    """
    for img in flattened_images:
        assert isinstance(img, np.ndarray)
        assert img.shape == flattened_images[-1].shape
        assert len(img.shape) == 1
    X = np.asarray(flattened_images)
    X -= X.mean(axis=0)  # Center all of the data around the origin.
    X /= np.std(X, axis=0)

    pca = PCA()
    pca.fit(X)

    if n_components == 'default':
        sorted_eig_vals = pca.explained_variance_
        cum_pct_variance = (sorted_eig_vals / sorted_eig_vals.sum()).cumsum()
        idxs = np.argwhere(cum_pct_variance >= default_pct_variance_explained)
        n_components = np.squeeze(idxs)[0]

    V = pca.components_[:n_components + 1, :].T
    principal_components = np.matmul(X, V)

    return principal_components
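
The cumulative-variance cut above is a standard use of argwhere for picking a component count; a toy sketch of just that step (eigenvalues and the 90% threshold are made up):

import numpy as np

eig_vals = np.array([4.0, 2.0, 1.0, 0.5, 0.5])    # hypothetical sorted eigenvalues
cum_pct = (eig_vals / eig_vals.sum()).cumsum()    # [0.5, 0.75, 0.875, 0.9375, 1.0]
idxs = np.argwhere(cum_pct >= 0.9)
n_components = np.squeeze(idxs)[0]                # first index reaching 90% -> 3
# caveat: with a single match np.squeeze gives a 0-d array and [0] would fail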
Project: TreasureBot    Author: SamuelePolimi    | project source | file source
def fit(self,s,a,r,s_next):

        if not self.first_time:
            q_next = self.Q[0].predict(s_next)
            q_next = q_next.reshape((q_next.shape[0],1))
            for a_i in range(1,self.n_action):
                q_next = np.concatenate((q_next,self.Q[a_i].predict(s_next).reshape((q_next.shape[0],1))), axis=1)
            q_max = np.max(q_next, axis=1)
            for a_i in range(self.n_action):
                indx = np.argwhere(a==a_i)

                y = r[indx].ravel() + 1 * q_max[indx].ravel()  # + self.gamma * q_max[indx].ravel()
                self.Q[a_i].fit(s[indx.ravel(),:],y)
        else:
            for a_i in range(self.n_action):
                indx = np.argwhere(a == a_i)
                y = r[indx]
                self.Q[a_i].fit(s[indx.ravel(), :], y.ravel())
            self.first_time = False
Project: human-rl    Author: gsastry    | project source | file source
def ship_location(image):
    is_ship = np.sum(np.abs(image[185, :, :] - SHIP_COLOR), axis=1) == 0
    w = np.argwhere(is_ship)
    return w[0][0] if len(w) == 1 else None
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def adjustXPositions(self, pts, data):
        """Return a list of Point() where the x position is set to the nearest x value in *data* for each point in *pts*."""
        points = []
        timeIndices = []
        for p in pts:
            x = np.argwhere(abs(data - p.x()) == abs(data - p.x()).min())
            points.append(Point(data[x], p.y()))
            timeIndices.append(x)

        return points, timeIndices
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def read_spiketrain(self, cluster_id, model,
                        lazy=False,
                        cascade=True,
                        get_waveforms=True,
                        ):
        """
        Reads sorted spiketrains

        Parameters:
        get_waveforms: bool, default = True
            Whether or not to get the waveforms
        cluster_id: int,
            Which cluster to load, according to cluster id from klusta
        model: klusta.kwik.KwikModel
            A KwikModel object obtained by klusta.kwik.KwikModel(fname)
        """
        if cluster_id not in model.cluster_ids:
            print("Exception: cluster_id (%d) not found !! " % cluster_id)
            return
        clusters = model.spike_clusters
        idx = np.argwhere(clusters == cluster_id)
        if get_waveforms:
            w = model.all_waveforms[idx]
            # klusta: num_spikes, samples_per_spike, num_chans = w.shape
            w = w.swapaxes(1, 2)
        else:
            w = None
        sptr = SpikeTrain(times=model.spike_times[idx],
                          t_stop=model.duration, waveforms=w, units='s',
                          sampling_rate=model.sample_rate*pq.Hz,
                          file_origin=self.filename,
                          **{'cluster_id': cluster_id})
        return sptr
Project: BlueWhale    Author: caffe2    | project source | file source
def reset(self):
        self._state = self._index(np.argwhere(self.grid == S)[0])
        return self._state
Project: CRN_ProbabilisticInversion    Author: elaloy    | project source | file source
def CalcDelta(nCR,delta_tot,delta_normX,CR):
    # Calculate total normalized Euclidean distance for each crossover value

    # Derive sum_p2 for each different CR value 
    for zz in range(0,nCR):

        # Find which chains are updated with zz/MCMCPar.nCR
        idx = np.argwhere(CR == (1.0 + zz) / nCR)
        idx = idx[:, 0]

        # Add the normalized squared distance to the current delta_tot
        delta_tot[0,zz] = delta_tot[0,zz] + np.sum(delta_normX[idx])

    return delta_tot
Project: geopyspark    Author: locationtech-labs    | project source | file source
def assertTilesEqual(self, a, b):
        """compare two numpy arrays that are tiles"""
        self.assertEqual(a.shape, b.shape)
        cmp = (a == b)  # result must be array of matching cells
        diff = np.argwhere(cmp == False)
        if np.size(diff) > 0:
            raise Exception("Tiles differ at: ", np.size(diff), diff)
        return True
Project: ProbablisticRobotics2016    Author: RyuYamamoto    | project source | file source
def learn(self):
        y, x = self.state
        current_acton_list = copy.deepcopy(self.action_list[y, x])
        if np.random.rand() > self.epsilon:
            max_q = self.q[current_acton_list, y, x].max()
            action_list_index = list(np.argwhere(self.q[current_acton_list, y, x] == max_q))
            random.shuffle(action_list_index)
            action = current_acton_list[action_list_index[0]]
        else:
            random.shuffle(current_acton_list)
            action = current_acton_list[0]
        move = self.move_list.get(action)
        self.update_q(action, move)
        self.q_value_list.append(self.q_max_value(move))
        self.state += move
Project: tensorflow_ocr    Author: BowieHsu    | project source | file source
def pixel_detect(score_map, geo_map, score_map_thresh=0.8, link_thresh=0.8):
    '''
    restore text boxes from score map and geo map
    :param score_map:
    :param geo_map:
    :param score_map_thresh: threshold for score map
    :param link_thresh: threshold for link scores
    :return:
    '''
    if len(score_map.shape) == 4:
        score_map = score_map[0, :, :, 0]
        geo_map = geo_map[0, :, :, ]

    # filter the score map
    res_map = np.zeros((score_map.shape[0], score_map.shape[1]))
    xy_text = np.argwhere(score_map > score_map_thresh)

    for p in xy_text:
        res_map[p[0], p[1]] = 1

    res = res_map

    for i in range(8):
        geo_map_split = geo_map[:,:,i * 2 + 1]
        link_text = np.argwhere(geo_map_split < link_thresh)
        res[link_text[:, 0], link_text[:, 1]] = 0  # zero out every below-threshold link pixel

    return np.array(res_map, dtype=np.uint8)