Python numpy module: where() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.where().
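As a quick orientation, here is a minimal, self-contained sketch (toy array, illustrative values only) of the two call forms of numpy.where() that recur throughout the examples below:

import numpy as np

a = np.arange(6)                        # [0 1 2 3 4 5]

# Three-argument form: element-wise selection between two sources.
clipped = np.where(a > 3, 3, a)         # [0 1 2 3 3 3]

# One-argument form: a tuple of index arrays where the condition holds.
even_idx = np.where(a % 2 == 0)[0]      # [0 2 4]

print(clipped, even_idx)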

Project: Homology_BG    Author: jyotikab    | Project source | File source
def spec_entropy(Rates, time_range=[], bin_w=5., freq_range=[]):
    '''Calculate the spectral entropy of the given rates.'''
    power, freq, dfreq, _, _ = mypsd(Rates, time_range, bin_w=bin_w)
    if freq_range != []:
        power = power[(freq >= freq_range[0]) & (freq <= freq_range[1])]
        freq = freq[(freq >= freq_range[0]) & (freq <= freq_range[1])]
    maxFreq = freq[np.where(power == np.max(power))] * 1000 * 100
    perMax = (np.max(power) / np.sum(power)) * 100
    k = len(freq)
    power = power / np.sum(power)
    sum_power = 0
    for ii in range(k):
        sum_power += power[ii] * np.log(power[ii])
    spec_ent = -(sum_power / np.log(k))
    return spec_ent, dfreq, maxFreq, perMax
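
A side note on the freq[np.where(power == np.max(power))] idiom above: it returns every frequency that ties for the maximum power, whereas np.argmax returns only the first. A minimal sketch with toy data (not the output of the project's mypsd):

import numpy as np

power = np.array([0.1, 0.7, 0.7, 0.2])
freq = np.array([10., 20., 30., 40.])

print(freq[np.where(power == np.max(power))])  # [20. 30.]: all ties
print(freq[np.argmax(power)])                  # 20.0: first maximum only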
Project: cellranger    Author: 10XGenomics    | Project source | File source
def _classify_gems(counts0, counts1):
        """ Infer number of distinct transcriptomes present in each GEM (1 or 2) and
            report cr_constants.GEM_CLASS_GENOME0 for a single cell w/ transcriptome 0,
            report cr_constants.GEM_CLASS_GENOME1 for a single cell w/ transcriptome 1,
            report cr_constants.GEM_CLASS_MULTIPLET for multiple transcriptomes """
        # Assumes that most of the GEMs are single-cell; model counts independently
        thresh0, thresh1 = [cr_constants.DEFAULT_MULTIPLET_THRESHOLD] * 2
        if sum(counts0 > counts1) >= 1 and sum(counts1 > counts0) >= 1:
            thresh0 = np.percentile(counts0[counts0 > counts1], cr_constants.MULTIPLET_PROB_THRESHOLD)
            thresh1 = np.percentile(counts1[counts1 > counts0], cr_constants.MULTIPLET_PROB_THRESHOLD)

        doublet = np.logical_and(counts0 >= thresh0, counts1 >= thresh1)
        dtype = np.dtype('|S%d' % max(len(cls) for cls in cr_constants.GEM_CLASSES))
        result = np.where(doublet, cr_constants.GEM_CLASS_MULTIPLET, cr_constants.GEM_CLASS_GENOME0).astype(dtype)
        result[np.logical_and(np.logical_not(result == cr_constants.GEM_CLASS_MULTIPLET), counts1 > counts0)] = cr_constants.GEM_CLASS_GENOME1

        return result
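
The snippet above uses the three-argument form to assign string labels in one vectorized step, then widens the dtype so longer labels are not truncated. A reduced sketch of that pattern, with made-up labels and thresholds in place of the cr_constants values:

import numpy as np

counts0 = np.array([90, 5, 60])
counts1 = np.array([4, 80, 55])
thresh0, thresh1 = 50, 50

doublet = (counts0 >= thresh0) & (counts1 >= thresh1)
result = np.where(doublet, 'multiplet', 'genome0').astype('<U9')
result[~doublet & (counts1 > counts0)] = 'genome1'
print(result)  # ['genome0' 'genome1' 'multiplet']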
Project: cellranger    Author: 10XGenomics    | Project source | File source
def get_concat_reference_sequence(self):
        """Return a concatenated reference sequence.

        Return value:
        - (None,None) if this contig isn't annotated with a V and a J segment.
        Otherwise a tuple (seqs, annos) where annos is a list of Annotation objects
        in the order that they should appear in a VDJ sequence and seqs is a list
        of corresponding sequences from the input fasta.
        """
        v_region = self.get_region_hits(VDJ_V_FEATURE_TYPES)
        j_region = self.get_region_hits(VDJ_J_FEATURE_TYPES)

        if not v_region or not j_region:
            return (None, None)

        seqs = []
        ordered_annos = []
        for region_defs in VDJ_ORDERED_REGIONS:
            regions = self.get_region_hits(region_defs)
            if regions:
                seqs.append(regions[0].feature.sequence)
                ordered_annos.append(regions[0])

        return (seqs, ordered_annos)
Project: genomedisco    Author: kundajelab    | Project source | File source
def load_data(infile, chroms, resolutions):
    starts = infile['starts'][...]
    chromosomes = infile['chromosomes'][...]
    data = {}
    for res in resolutions:
        data[res] = {}
        for i, chrom in enumerate(chromosomes):
            if chrom not in chroms:
                continue
            start = (starts[i] // res) * res
            dist = infile['dist.%s.%i' % (chrom, res)][...]
            valid_rows = infile['valid.%s.%i' % (chrom, res)][...]
            corr = infile['corr.%s.%i' % (chrom, res)][...]
            N, M = corr.shape
            valid = numpy.zeros((N, M), dtype=numpy.int32)
            for j in range(min(N - 1, M)):
                P = N - j - 1
                valid[:P, j] = valid_rows[(j + 1):] * valid_rows[:P]
            temp = corr * dist
            valid[numpy.where(numpy.abs(temp) == numpy.inf)] = 0
            data[res][chrom] = [start, temp, valid]
    return data
Project: genomedisco    Author: kundajelab    | Project source | File source
def shift_dataset(m, boundarynoise):
    if boundarynoise == 0:
        return m
    nonzero_rows = np.where(m.any(axis=1))[0]
    small_m = copy.deepcopy(m)
    small_m = small_m[nonzero_rows, :]
    small_m = small_m[:, nonzero_rows]
    print(small_m)
    print('roll')
    small_m = np.roll(small_m, boundarynoise, axis=0)
    print(small_m)
    print('roll2')
    small_m = np.roll(small_m, boundarynoise, axis=1)
    print(small_m)
    outm = np.zeros(m.shape)
    for i_idx in range(len(nonzero_rows)):
        i = nonzero_rows[i_idx]
        for j_idx in range(i_idx, len(nonzero_rows)):
            j = nonzero_rows[j_idx]
            outm[i, j] = small_m[i_idx, j_idx]
            outm[j, i] = outm[i, j]
    return outm
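
The np.where(m.any(axis=1))[0] idiom above collects the indices of rows with at least one non-zero entry; the two-step row/column slicing that follows can also be written with np.ix_. A small sketch:

import numpy as np

m = np.zeros((4, 4))
m[1, 3] = m[3, 1] = 5.

nonzero_rows = np.where(m.any(axis=1))[0]        # [1 3]
small_m = m[np.ix_(nonzero_rows, nonzero_rows)]  # dense 2x2 submatrix
print(nonzero_rows)
print(small_m)                                   # [[0. 5.] [5. 0.]]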
Project: HandDetection    Author: YunqiuXu    | Project source | File source
def _get_bbox_regression_labels(bbox_target_data, num_classes):
  """Bounding-box regression targets (bbox_target_data) are stored in a
  compact form N x (class, tx, ty, tw, th)

  This function expands those targets into the 4-of-4*K representation used
  by the network (i.e. only one class has non-zero targets).

  Returns:
      bbox_target (ndarray): N x 4K blob of regression targets
      bbox_inside_weights (ndarray): N x 4K blob of loss weights
  """

  clss = bbox_target_data[:, 0]
  bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
  bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
  inds = np.where(clss > 0)[0]
  for ind in inds:
    cls = clss[ind]
    start = int(4 * cls)
    end = start + 4
    bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
    bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
  return bbox_targets, bbox_inside_weights
Project: HandDetection    Author: YunqiuXu    | Project source | File source
def remove_snapshot(self, np_paths, ss_paths):
    to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
    for c in range(to_remove):
      nfile = np_paths[0]
      os.remove(str(nfile))
      np_paths.remove(nfile)

    to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
    for c in range(to_remove):
      sfile = ss_paths[0]
      # To make the code compatible to earlier versions of Tensorflow,
      # where the naming tradition for checkpoints are different
      if os.path.exists(str(sfile)):
        os.remove(str(sfile))
      else:
        os.remove(str(sfile + '.data-00000-of-00001'))
        os.remove(str(sfile + '.index'))
      sfile_meta = sfile + '.meta'
      os.remove(str(sfile_meta))
      ss_paths.remove(sfile)
Project: HandDetection    Author: YunqiuXu    | Project source | File source
def filter_roidb(roidb):
  """Remove roidb entries that have no usable RoIs."""

  def is_valid(entry):
    # Valid images have:
    #   (1) At least one foreground RoI OR
    #   (2) At least one background RoI
    overlaps = entry['max_overlaps']
    # find boxes with sufficient overlap
    fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # image is only valid if such boxes exist
    valid = len(fg_inds) > 0 or len(bg_inds) > 0
    return valid

  num = len(roidb)
  filtered_roidb = [entry for entry in roidb if is_valid(entry)]
  num_after = len(filtered_roidb)
  print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
                                                     num, num_after))
  return filtered_roidb
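
The background selection above combines two comparisons with &; the parentheses are required because & binds more tightly than < and >= in Python. The pattern in isolation, with toy thresholds in place of the cfg values:

import numpy as np

overlaps = np.array([0.05, 0.2, 0.8, 0.4])
FG_THRESH, BG_THRESH_HI, BG_THRESH_LO = 0.5, 0.5, 0.1

fg_inds = np.where(overlaps >= FG_THRESH)[0]        # [2]
bg_inds = np.where((overlaps < BG_THRESH_HI) &
                   (overlaps >= BG_THRESH_LO))[0]   # [1 3]
print(fg_inds, bg_inds)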
Project: pybot    Author: spillai    | Project source | File source
def recall_from_IoU(IoU, samples=500): 
    """
    plot recall_vs_IoU_threshold
    """

    if not (isinstance(IoU, list) or IoU.ndim == 1):
        raise ValueError('IoU needs to be a list or 1-D')
    iou = np.float32(IoU)

    # Plot intersection over union
    IoU_thresholds = np.linspace(0.0, 1.0, samples)
    recall = np.zeros_like(IoU_thresholds)
    for idx, IoU_th in enumerate(IoU_thresholds):
        tp, relevant = 0, 0
        inds, = np.where(iou >= IoU_th)
        recall[idx] = len(inds) * 1.0 / len(IoU)

    return recall, IoU_thresholds 
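
The loop above calls np.where once per threshold only to count matches (len(inds) equals np.count_nonzero(iou >= IoU_th)), so the whole curve can also be computed by broadcasting. A sketch under the same assumptions (1-D iou values in [0, 1]):

import numpy as np

iou = np.float32([0.3, 0.6, 0.9])
thresholds = np.linspace(0.0, 1.0, 5)

# Rows: thresholds; columns: samples. Row means give recall per threshold.
recall = (iou[None, :] >= thresholds[:, None]).mean(axis=1)
print(recall)  # approx [1. 1. 0.667 0.333 0.]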

# =====================================================================
# Generic utility functions for object recognition
# ---------------------------------------------------------------------
Project: pybot    Author: spillai    | Project source | File source
def mine(self, im, gt_bboxes): 
        """
        Propose bounding boxes using proposer, and
        augment non-overlapping boxes with IoU < 0.1
        to the ground truth set.
        (up to a maximum of num_proposals)
        """
        bboxes = self.proposer_.process(im)

        if len(gt_bboxes): 
            # Determine bboxes that have low IoU with ground truth
            # iou = [N x GT]
            iou = brute_force_match(bboxes, gt_bboxes, 
                                    match_func=lambda x,y: intersection_over_union(x,y))
            # print('Detected {}, {}, {}'.format(iou.shape, len(gt_bboxes), len(bboxes))) # , np.max(iou, axis=1)
            overlap_inds, = np.where(np.max(iou, axis=1) < 0.1)
            bboxes = bboxes[overlap_inds]
            # print('Remaining non-overlapping {}'.format(len(bboxes)))

        bboxes = bboxes[:self.num_proposals_]
        targets = self.generate_targets(len(bboxes))
        return bboxes, targets
Project: sea-lion-counter    Author: rdinse    | Project source | File source
def compHistDistance(h1, h2):
    def normalize(h):
        if np.sum(h) == 0:
            return h
        else:
            return h / np.sum(h)

    def smoothstep(x, x_min=0., x_max=1., k=2.):
        m = 1. / (x_max - x_min)
        b = -m * x_min
        x = m * x + b
        return betainc(k, k, np.clip(x, 0., 1.))

    def fn(X, Y, k):
        return 4. * (1. - smoothstep(Y, 0, (1 - Y) * X + Y + .1)) \
            * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) \
            + 2. * smoothstep(Y, 0, (1 - Y) * X + Y + .1) \
            * (1. - 2. * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) - 0.5)

    h1 = normalize(h1)
    h2 = normalize(h2)

    return max(0, np.sum(fn(h2, h1, len(h1))))
    # return np.sum(np.where(h2 != 0, h2 * np.log10(h2 / (h1 + 1e-10)), 0))  # KL divergence
Project: tissue_analysis    Author: VirtualPlants    | Project source | File source
def cells_walls_coords(self):
        """Return coordinates of the voxels defining a cell wall.

        This function thus returns any voxel in contact with one of different label.

        Args:
          image (SpatialImage) - Segmented image (tissu)

        Returns:
          x,y,z (list) - coordinates of the voxels defining the cell boundaries (walls).
        """
        if self.is3D():
            image = hollow_out_cells(self.image, self.background, verbose=True)
        else:
            image = copy.copy(self.image)
            image[np.where(image==self.background)] = 0

        if self.is3D():
            x,y,z = np.where(image!=0)
            return list(x), list(y), list(z)
        else:
            x,y = np.where(image!=0)
            return list(x), list(y)
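
On an N-dimensional array, the one-argument form returns one index array per axis, which is what lets the method above unpack x, y, z directly. A minimal sketch:

import numpy as np

image = np.zeros((2, 2, 2), dtype=int)
image[0, 1, 1] = image[1, 0, 1] = 7

x, y, z = np.where(image != 0)   # one coordinate array per axis
print(x, y, z)                   # [0 1] [1 0] [1 1]
print(np.argwhere(image != 0))   # same coordinates, one row per voxel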
Project: tissue_analysis    Author: VirtualPlants    | Project source | File source
def fuse_labels_in_image(self, labels, verbose=True):
        """Modify the image so the given labels are fused (to their min value)."""
        assert isinstance(labels, list) and len(labels) >= 2
        assert self.background() not in labels

        min_lab = min(labels)
        labels.remove(min_lab)
        N = len(labels); percent = 0
        if verbose: print("Fusing the following {} labels: {} to value '{}'.".format(N, labels, min_lab))
        for n, label in enumerate(labels):
            if verbose and n * 100 / float(N) >= percent: print("{}%...".format(percent), end=' '); percent += 5
            if verbose and n + 1 == N: print("100%")
            try:
                bbox = self.boundingbox(label)
                xyz = np.where(self.image[bbox] == label)
                self.image[tuple((xyz[0] + bbox[0].start, xyz[1] + bbox[1].start, xyz[2] + bbox[2].start))] = min_lab
            except Exception:
                print("No boundingbox found for cell id #{}, skipping...".format(label))
                continue
        print("Done!")
        return None
Project: segmentation_DLMI    Author: imatge-upc    | Project source | File source
def load_ROI_mask(self):

        proxy = nib.load(self.FLAIR_FILE)
        image_array = np.asarray(proxy.dataobj)

        mask = np.ones_like(image_array)
        mask[np.where(image_array < 90)] = 0

        # img = nib.Nifti1Image(mask, proxy.affine)
        # nib.save(img, join(modalities_path,'mask.nii.gz'))

        struct_element_size = (20, 20, 20)
        mask_augmented = np.pad(mask, [(21, 21), (21, 21), (21, 21)], 'constant', constant_values=(0, 0))
        mask_augmented = binary_closing(mask_augmented,
                                        structure=np.ones(struct_element_size, dtype=bool)).astype(int)

        return mask_augmented[21:-21, 21:-21, 21:-21].astype('bool')
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __detect_spike_peak(self,ang_data,Thr,peak_before,peak_after):
        if Thr < 0:
            dd_0 = np.where(ang_data<Thr)[0]
        elif Thr >=0:
            dd_0 = np.where(ang_data>=Thr)[0]
        dd_1 = np.diff(dd_0,n=1)
        dd_2 = np.where(dd_1 > 1)[0]+1
        dd_3 = np.split(dd_0,dd_2)
        spike_peak = []
        if Thr < 0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmin()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        elif Thr >=0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmax()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        return np.array(spike_peak)
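
The diff/split sequence above groups threshold-crossing samples into contiguous runs, each of which yields one candidate spike peak. The pattern in isolation, on a toy trace with a negative threshold:

import numpy as np

x = np.array([0., -5., -7., -2., 0., 0., -6., -1.])
Thr = -3.

dd_0 = np.where(x < Thr)[0]         # [1 2 6]: samples below threshold
dd_1 = np.diff(dd_0, n=1)           # [1 4]: gaps between those samples
dd_2 = np.where(dd_1 > 1)[0] + 1    # [2]: split position where a gap opens
runs = np.split(dd_0, dd_2)         # [array([1, 2]), array([6])]

peaks = [run[x[run].argmin()] for run in runs if run.size]
print(peaks)                        # peak sample of each run: indices 2 and 6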
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def test_values(self):
        """
        Tests if the function returns the correct values.
        """

        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-2gex-3Vm-1261-0.dat',
                directory=self.local_test_dir, clean=False)

        id_to_test = 1
        r = NestIO(filenames=filename)
        seg = r.read_segment(gid_list=[id_to_test],
                             t_stop=1000. * pq.ms,
                             sampling_period=pq.ms, lazy=False,
                             id_column_dat=0, time_column_dat=1,
                             value_columns_dat=2, value_types='V_m')

        dat = np.loadtxt(filename)
        target_data = dat[:, 2][np.where(dat[:, 0] == id_to_test)]
        target_data = target_data[:, None]
        st = seg.analogsignals[0]
        np.testing.assert_array_equal(st.magnitude, target_data)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def test_values(self):
        """
        Tests if the routine loads the correct numbers from the file.
        """
        id_to_test = 1
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        seg = r.read_segment(gid_list=[id_to_test],
                             t_start=400. * pq.ms,
                             t_stop=500. * pq.ms, lazy=False,
                             id_column_gdf=0, time_column_gdf=1)

        dat = np.loadtxt(filename)
        target_data = dat[:, 1][np.where(dat[:, 0] == id_to_test)]

        st = seg.spiketrains[0]
        np.testing.assert_array_equal(st.magnitude, target_data)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def test_correct_condition_selection(self):
        """
        Test if combination of condition function and condition_column works
        properly.
        """
        condition_column = 0
        condition_function = lambda x: x > 10
        result = self.testIO.get_columns(condition=condition_function,
                                         condition_column=0)
        selected_ids = np.where(condition_function(self.testIO.data[:,
                                                   condition_column]))[0]
        expected = self.testIO.data[selected_ids, :]

        np.testing.assert_array_equal(result, expected)

        assert all(condition_function(result[:, condition_column]))
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def get_event(self, ep, ch, marked_ks):
        """
        Return a :class:`ElphyEvent` which is a
        descriptor of the specified event channel.
        """
        assert ep in range(1, self.n_episodes + 1)
        assert ch in range(1, self.n_channels + 1)

        # find the event channel number
        evt_channel = np.where(marked_ks == -1)[0][0]
        assert evt_channel in range(1, self.n_events(ep) + 1)

        block = self.episode_block(ep)
        ep_blocks = self.get_blocks_stored_in_episode(ep)
        evt_blocks = [k for k in ep_blocks if k.identifier == 'REVT']
        n_events = np.sum([k.n_events[evt_channel - 1] for k in evt_blocks], dtype=int)
        x_unit = block.ep_block.x_unit

        return ElphyEvent(self, ep, evt_channel, x_unit, n_events, ch_number=ch)
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def random_hue(img, label, max_delta=10):
    """
    Rotates the hue channel
    Args:
        img: input image in float32
        max_delta: Max number of degrees to rotate the hue channel
    """
    # Rotates the hue channel by delta degrees
    delta = -max_delta + 2.0 * max_delta * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hchannel = hsv[:, :, 0]
    hchannel = delta + hchannel

    # hue should always be within [0,360]
    idx = np.where(hchannel > 360)
    hchannel[idx] = hchannel[idx] - 360
    idx = np.where(hchannel < 0)
    hchannel[idx] = hchannel[idx] + 360

    hsv[:, :, 0] = hchannel
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
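
The two np.where lookups above wrap the hue channel back into [0, 360]; for values at most one period out of range, np.mod gives the same wrap in a single call (they differ only at exactly 360, which np.mod maps to 0). An equivalent-behavior sketch on a toy array, not the project's code:

import numpy as np

hchannel = np.array([-20., 10., 350., 380.])

wrapped = hchannel.copy()
idx = np.where(wrapped > 360)
wrapped[idx] = wrapped[idx] - 360
idx = np.where(wrapped < 0)
wrapped[idx] = wrapped[idx] + 360

print(wrapped)                # [340.  10. 350.  20.]
print(np.mod(hchannel, 360))  # same result in one call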
Project: AutoSleepScorerDev    Author: skjerns    | Project source | File source
def reset(self):
        """ Resets the state of the generator"""
        self.step = 0
        Y = np.argmax(self.Y,1)
        labels = np.unique(Y)
        idx = []
        smallest = len(Y)
        for i,label in enumerate(labels):
            where = np.where(Y==label)[0]
            if smallest > len(where): 
                self.slabel = i
                smallest = len(where)
            idx.append(where)
        self.idx = idx
        self.labels = labels
        self.n_per_class = int(self.batch_size // len(labels))
        self.n_batches = int(np.ceil((smallest//self.n_per_class)))+1
        self.update_probabilities()
Project: hSBM_Topicmodel    Author: martingerlach    | Project source | File source
def __init__(self,args):
        '''
        Initialize hsbm-instance
        - create a folder where to save results: self.args.output
        - make a bipartite word-doc graph from the corpus. save as self.graph
        - do the hsbm inference. save the state as self.state
        '''
        self.args = args
        self.out_path = self.args.output

        if not os.path.exists(self.out_path):
            os.makedirs(self.out_path)

        ## get the graph-object
        self.graph = self.make_graph()
        ## do the hsbm-inference
        self.state = self.inference(self.graph)
Project: corporadb    Author: nlesc-sherlock    | Project source | File source
def check_dataset(self):
    '''
    check if dataset is already in database
    if found set self.dataset_id to the entry in the database
    return boolean
    '''
    self.cursor.execute('select id as rowid, name from dataset')
    citems = self.cursor.fetchall()
    names = [citem['name'] for citem in citems]  # get all names
    if names:
      try:
        idx = numpy.where(numpy.array(names)==self.datasetname)[0][0]
        self.dataset_id = citems[idx]['rowid']
        return True
      except IndexError:
        return False
    else:
      return False
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def get_language_index(lang_code, feature_database):
    return np.where(feature_database["langs"] == lang_code)[0][0]
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def get_source_index(source_name, feature_database):
    return np.where(feature_database["sources"] == source_name)[0]
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def get_feature_index(feature_name, feature_database):
    return np.where(feature_database["feats"] == feature_name)[0][0]
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def get(languages, feature_set_str, header=False, random=False, minimal=False):

    lang_codes = languages.split()
    feature_names, feature_values = get_concatenated_sets(lang_codes, feature_set_str)
    feature_names = np.array([ f.replace(" ","_") for f in feature_names ])
    feats = {}

    if minimal:
        mask = np.all(feature_values == 0.0, axis=0)
        mask |= np.all(feature_values == 1.0, axis=0)
        mask |= np.all(feature_values == -1.0, axis=0)
        unmasked_indices = np.where(np.logical_not(mask))
    else:
        unmasked_indices = np.where(np.ones(feature_values.shape[1]))

    if random:
        feature_values = np.random.random(feature_values.shape) >= 0.5

    if header:
        print("\t".join(['CODE']+list(feature_names[unmasked_indices])))
    feat_names = feature_names[unmasked_indices]

    for i, lang_code in enumerate(lang_codes):
        values = feature_values[i,unmasked_indices].ravel()
        #values = [ '--' if f == -1 else ("%0.4f"%f).rstrip("0").rstrip(".") for f in values ]
        feats[lang_code] = values
        #print("\t".join([lang_code]+values))
    return feats, feat_names

#if __name__ == '__main__':
#    argparser = argparse.ArgumentParser()
#    argparser.add_argument("languages", default='', help="The languages of interest, in ISO 639-3 codes, separated by spaces (e.g., \"deu eng fra swe\")")
#    argparser.add_argument("feature_set", default='', help="The feature set or sets of interest (e.g., \"syntax_knn\" or \"fam\"), joined by concatenation (+) or element-wise union (|).")
#    argparser.add_argument("-f", "--fields", default=False, action="store_true", help="Print feature names as the first row of data.")
#    argparser.add_argument("-r", "--random", default=False, action="store_true", help="Randomize all feature values (e.g., to make a control group).")
#    argparser.add_argument("-m", "--minimal", default=False, action="store_true", help="Suppress columns that are all 0, all 1, or all nulls.")
#    args = argparser.parse_args()
#    get(args.languages, args.feature_set, args.fields, args.random, args.minimal)
Project: composability_bench    Author: IntelPython    | Project source | File source
def run_numpy():
    x = topk.dot(users)
    x = np.where(users>0, 0, x)
    return x.argmax(axis=0)
Project: composability_bench    Author: IntelPython    | Project source | File source
def run_dask():
    x = t.dot(u)
    x = da.where(u>0, 0, x)
    r = x.argmax(axis=0)
    return r.compute()
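
Both versions above use the three-argument form to zero out entries selected by a condition before taking argmax. The same masking pattern on toy data (array names here are placeholders, not the benchmark's):

import numpy as np

x = np.array([[0.9, 0.2],
              [0.4, 0.8]])
seen = np.array([True, False])   # mask for the first column

masked = np.where(seen, 0, x)    # broadcasts across rows: column 0 zeroed
print(masked)                    # [[0.  0.2] [0.  0.8]]
print(masked.argmax(axis=0))     # [0 1]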
Project: uwb_tracker_ros    Author: eth-ait    | Project source | File source
def _solve_equation_least_squares(self, A, B):
        """Solve system of linear equations A X = B.
        Currently using Pseudo-inverse because it also allows for singular matrices.

        Args:
             A (numpy.ndarray): Left-hand side of equation.
             B (numpy.ndarray): Right-hand side of equation.

        Returns:
             X (numpy.ndarray): Solution of equation.
        """
        # Pseudo-inverse
        X = np.dot(np.linalg.pinv(A), B)
        # LU decomposition
        # lu, piv = scipy.linalg.lu_factor(A)
        # X = scipy.linalg.lu_solve((lu, piv), B)
        # Vanilla least-squares from numpy
        # X, _, _, _ = np.linalg.lstsq(A, B)
        # QR decomposition
        # Q, R, P = scipy.linalg.qr(A, mode='economic', pivoting=True)
        # # Find first zero element in R
        # out = np.where(np.diag(R) == 0)[0]
        # if out.size == 0:
        #     i = R.shape[0]
        # else:
        #     i = out[0]
        # B_prime = np.dot(Q.T, B)
        # X = np.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)
        # X[P[:i], :] = scipy.linalg.solve_triangular(R[:i, :i], B_prime[:i, :])
        return X
Project: pyfds    Author: emtpb    | Project source | File source
def get_index(self, value):
        """Returns the index of a given value.

        Args:
            value: Value to look up the index of.

        Returns:
            Index.
        """

        index, = np.where(np.abs(self.vector - value) <= self.snap_radius)
        assert len(index) < 2, "Multiple points found within snap radius of given value."
        assert len(index) > 0, "No point found within snap radius of given value."

        return int(index[0])
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle= True):
    tokens = np.asarray(tokens)[np.where(labels!=0.5)[0]]
    if type(features) is np.ndarray:
      features = np.asarray(features)[np.where(labels!=0.5)[0]]
    else:
      features = np.asarray(features.todense())[np.where(labels!=0.5)[0]]
    labels = np.asarray(labels)[np.where(labels!=0.5)[0]]
#     print tokens.shape
#     print tokens[0]
    for token, feature, label in iterate_minibatches(tokens, features, labels, mini_batch_size, shuffle = shuffle):
#         print 'token', type(token)
#         print token
        token = [_ for _ in pad_batch(token)]
#         print len(token), token[0].size(), token[1].size()
        yield token, Variable(torch.from_numpy(feature)) , Variable(torch.FloatTensor(label), requires_grad= False)
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle= True):
    tokens = np.asarray(tokens)[np.where(labels!=0.5)[0]]
    features = np.asarray(features.todense())[np.where(labels!=0.5)[0]]
    labels = np.asarray(labels)[np.where(labels!=0.5)[0]]
#     print tokens.shape
#     print tokens[0]
    for token, feature, label in iterate_minibatches(tokens, features, labels, mini_batch_size, shuffle = shuffle):
#         print 'token', type(token)
#         print token
        token = [_ for _ in pad_batch(token)]
#         print len(token), token[0].size(), token[1].size()
        yield token, Variable(torch.from_numpy(feature)) , Variable(torch.FloatTensor(label), requires_grad= False)
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def view_waveforms_clusters(data, halo, threshold, templates, amps_lim, n_curves=200, save=False):

    nb_templates = templates.shape[1]
    n_panels     = int(numpy.ceil(numpy.sqrt(nb_templates)))
    mask         = numpy.where(halo > -1)[0]
    clust_idx    = numpy.unique(halo[mask])
    fig          = pylab.figure()    
    square       = True
    center       = (len(data[0]) - 1)//2
    for count, i in enumerate(range(nb_templates)):
        if square:
            pylab.subplot(n_panels, n_panels, count + 1)
            if (numpy.mod(count, n_panels) != 0):
                pylab.setp(pylab.gca(), yticks=[])
            if (count < n_panels*(n_panels - 1)):
                pylab.setp(pylab.gca(), xticks=[])

        subcurves = numpy.where(halo == clust_idx[count])[0]
        for k in numpy.random.permutation(subcurves)[:n_curves]:
            pylab.plot(data[k], '0.5')

        pylab.plot(templates[:, count], 'r')        
        pylab.plot(amps_lim[count][0]*templates[:, count], 'b', alpha=0.5)
        pylab.plot(amps_lim[count][1]*templates[:, count], 'b', alpha=0.5)

        xmin, xmax = pylab.xlim()
        pylab.plot([xmin, xmax], [-threshold, -threshold], 'k--')
        pylab.plot([xmin, xmax], [threshold, threshold], 'k--')
        #pylab.ylim(-1.5*threshold, 1.5*threshold)
        ymin, ymax = pylab.ylim()
        pylab.plot([center, center], [ymin, ymax], 'k--')
        pylab.title('Cluster %d' %i)

    if nb_templates > 0:
        pylab.tight_layout()
    if save:
        pylab.savefig(os.path.join(save[0], 'waveforms_%s' %save[1]))
        pylab.close()
    else:
        pylab.show()
    del fig
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def view_performance(file_name, triggers, lims=(150,150)):

    params          = CircusParser(file_name)
    N_e             = params.getint('data', 'N_e')
    N_total         = params.getint('data', 'N_total')
    sampling_rate   = params.getint('data', 'sampling_rate')
    do_temporal_whitening = params.getboolean('whitening', 'temporal')
    do_spatial_whitening  = params.getboolean('whitening', 'spatial')
    spike_thresh     = params.getfloat('detection', 'spike_thresh')
    file_out_suff    = params.get('data', 'file_out_suff')
    N_t              = params.getint('detection', 'N_t')
    nodes, edges     = get_nodes_and_edges(params)
    chunk_size       = N_t

    if do_spatial_whitening:
        spatial_whitening  = load_data(params, 'spatial_whitening')
    if do_temporal_whitening:
        temporal_whitening = load_data(params, 'temporal_whitening')

    thresholds       = load_data(params, 'thresholds')    

    try:
        result    = load_data(params, 'results')
    except Exception:
        result    = {'spiketimes' : {}, 'amplitudes' : {}}

    curve     = numpy.zeros((len(triggers), len(result['spiketimes'].keys()), lims[1]+lims[0]), dtype=numpy.int32)
    count     = 0

    for count, t_spike in enumerate(triggers):
        for key in result['spiketimes'].keys():
            elec  = int(key.split('_')[1])
            idx   = numpy.where((result['spiketimes'][key] > t_spike - lims[0]) & (result['spiketimes'][key] <  t_spike + lims[0]))
            curve[count, elec, t_spike - result['spiketimes'][key][idx]] += 1
    pylab.subplot(111)
    pylab.imshow(numpy.mean(curve, 0), aspect='auto') 
    return curve
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def slice_result(result, times):

    sub_results = []

    nb_temp = len(result['spiketimes'])
    for t in times:
        sub_result = {'spiketimes' : {}, 'amplitudes' : {}}
        for key in result['spiketimes'].keys():
            idx = numpy.where((result['spiketimes'][key] >= t[0]) & (result['spiketimes'][key] <= t[1]))[0]
            sub_result['spiketimes'][key] = result['spiketimes'][key][idx] - t[0]
            sub_result['amplitudes'][key] = result['amplitudes'][key][idx]
        sub_results += [sub_result]

    return sub_results
Project: pytorch-semseg    Author: meetshah1995    | Project source | File source
def encode_segmap(self, mask):
        mask = mask.astype(int)
        label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
        for i, label in enumerate(self.get_pascal_labels()):
            label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = i
        label_mask = label_mask.astype(int)
        return label_mask
Project: squeezeDet-hand    Author: fyhtea    | Project source | File source
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
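
The np.where(mrec[1:] != mrec[:-1])[0] step above finds the positions where recall changes, so the area is summed only over genuine steps of the precision-recall curve. Isolated, with a toy recall vector:

import numpy as np

mrec = np.array([0., 0.5, 0.5, 1.0])

i = np.where(mrec[1:] != mrec[:-1])[0]  # [0 2]: a step begins after each
print(mrec[i + 1] - mrec[i])            # [0.5 0.5]: the delta-recall weights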
Project: sgcrfpy    Author: dswah    | Project source | File source
def chol_inv(B, lower=True):
    """
    Returns the inverse of matrix A, where A = B * B.T,
    i.e. B is the Cholesky factor of A.

    Solves A X = I,
    given B, the Cholesky factorization of A.
    """
    return cho_solve((B, lower), np.eye(B.shape[0]))
Project: sgcrfpy    Author: dswah    | Project source | File source
def active_set_Lam(self, fixed, vary):
        grad = self.grad_wrt_Lam(fixed, vary)
        assert np.allclose(grad, grad.T, 1e-3)
        return np.where((np.abs(np.triu(grad)) > self.lamL) | (self.Lam != 0))
        # return np.where((np.abs(grad) > self.lamL) | (~np.isclose(self.Lam, 0)))
Project: sgcrfpy    Author: dswah    | Project source | File source
def active_set_Theta(self, fixed, vary):
        grad = self.grad_wrt_Theta(fixed, vary)
        return np.where((np.abs(grad) > self.lamT) | (self.Theta != 0))
        # return np.where((np.abs(grad) > self.lamT) | (~np.isclose(self.Theta, 0)))
Project: human-rl    Author: gsastry    | Project source | File source
def paddle_top(observation, paddle="right"):
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    r = np.argmax(found)
    if not found[r]:
        return None
    else:
        return r


# def ball_center(observation):
#     w = np.where(np.abs(observation[:,6:36] - 0.30457518) > TOLERANCE)[:2]
#     if len(w[0]) == 0 or len(w[0]) > 4:
#         return None
#     w = np.mean(w, axis=1)
#     return w[0], w[1] + 6
#
# def ball_on_left(observation):
#     w = np.where(np.abs(observation[:,6:21] - 0.30457518) > TOLERANCE)[:2]
#     return(len(w[0]) > 0)