Python numpy module: any() code examples

We extracted the following code examples from open-source Python projects to illustrate how to use numpy.any().
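
Before diving into the project examples, here is a minimal standalone sketch of what numpy.any() computes (values are illustrative):

import numpy as np

a = np.array([[0, 0, 1],
              [0, 0, 0]])

print(np.any(a))            # True: at least one element is truthy
print(np.any(a, axis=0))    # [False False  True]: reduce down the rows
print(np.any(a, axis=1))    # [ True False]: reduce across the columns
print(np.any(np.zeros(3)))  # False: no truthy elements at all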

Project: latplan    Author: guicho271828    | Project source | File source
def random_walk_rec(current, trace, length, successor_fn):
    import numpy as np  # np.any / np.all / np.equal below need this import
    import numpy.random as random
    if length == 0:
        return current
    else:
        sucs = successor_fn(current)
        first = random.randint(len(sucs))
        now = first

        while True:
            suc = sucs[now]
            try:
                assert not np.any([np.all(np.equal(suc, t)) for t in trace])
                result = random_walk_rec(suc, [*trace, suc], length-1, successor_fn)
                assert result is not None
                return result
            except AssertionError:
                now = (now+1)%len(sucs)
                if now == first:
                    print("B",end="")
                    return None
                else:
                    continue
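
The assert in the loop above uses a common idiom: np.all(np.equal(a, b)) tests element-wise equality of two arrays, and wrapping the per-state results in np.any() checks whether the candidate successor already occurs in the trace. A standalone sketch with made-up states:

import numpy as np

trace = [np.array([0, 1]), np.array([1, 1])]
suc = np.array([1, 1])

# True if suc is element-wise equal to any state already in the trace
visited = np.any([np.all(np.equal(suc, t)) for t in trace])
print(visited)  # True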
Project: npstreams    Author: LaurentRDC    | Project source | File source
def test_against_numpy(self):
        """ Test iany against numpy.any """
        stream = [np.zeros((8, 16, 2)) for _ in range(11)]
        stream[3][3,0,1] = 1    # so that np.any(axis = None) evaluates to True
        stack = np.stack(stream, axis = -1)

        with self.subTest('axis = None'):
            from_numpy = np.any(stack, axis = None)
            from_stream = last(iany(stream, axis = None))
            self.assertEqual(from_numpy, from_stream)

        for axis in range(stack.ndim):
            with self.subTest('axis = {}'.format(axis)):
                from_numpy = np.any(stack, axis = axis)
                from_stream = last(iany(stream, axis = axis))
                self.assertTrue(np.allclose(from_numpy, from_stream))
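
For context, this is roughly how the iany/last pair from npstreams is used outside the test suite (a sketch inferred from the calls above; see the npstreams documentation for the exact API):

import numpy as np
from npstreams import iany, last

stream = [np.zeros((4, 4)) for _ in range(5)]
stream[2][1, 3] = 1  # a single truthy element somewhere in the stream

# iany yields a running reduction; last() drains it to the final value
print(last(iany(stream, axis=None)))  # True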
Project: cellranger    Author: 10XGenomics    | Project source | File source
def _set_barcode_reads_metrics(self, read_type, read_type_set, bc):
        for genome in self.genomes:
            is_read_type = (genome, cr_constants.TRANSCRIPTOME_REGION) in read_type_set
            if is_read_type:
                barcode_reads = self._get_metric_attr(
                    'barcode_reads', genome, cr_constants.TRANSCRIPTOME_REGION, read_type)
                barcode_reads.add(bc)

        # Only report the multi prefix for the barcode_reads metrics when there are multiple genomes
        if self.has_multiple_genomes:
            is_read_type = any([(genome, cr_constants.TRANSCRIPTOME_REGION) in read_type_set for genome in self.genomes])
            if is_read_type:
                multi_barcode_reads = self._get_metric_attr(
                    'barcode_reads', cr_constants.MULTI_REFS_PREFIX,
                    cr_constants.TRANSCRIPTOME_REGION, read_type)
                multi_barcode_reads.add(bc)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def updateSpots(self, dataSet=None):
        if dataSet is None:
            dataSet = self.data

        invalidate = False
        if self.opts['pxMode']:
            mask = np.equal(dataSet['sourceRect'], None)
            if np.any(mask):
                invalidate = True
                opts = self.getSpotOpts(dataSet[mask])
                sourceRect = self.fragmentAtlas.getSymbolCoords(opts)
                dataSet['sourceRect'][mask] = sourceRect

            self.fragmentAtlas.getAtlas() # generate atlas so source widths are available.

            dataSet['width'] = np.array(list(imap(QtCore.QRectF.width, dataSet['sourceRect'])))/2
            dataSet['targetRect'] = None
            self._maxSpotPxWidth = self.fragmentAtlas.max_width
        else:
            self._maxSpotWidth = 0
            self._maxSpotPxWidth = 0
            self.measureSpotSizes(dataSet)

        if invalidate:
            self.invalidate()
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project source | File source
def test_mask_value(self):
        result = self.model.predict(self.data)
        np.testing.assert_array_almost_equal(
            result[:, 1:, :],
            np.zeros((
                self.data_size,
                self.max_length - 1,
                self.encoding_size
            ))
        )
        np.testing.assert_equal(
            np.any(
                np.not_equal(
                    result[:, 0:1, self.cell_units:],
                    np.zeros((self.data_size, 1, self.cell_units))
                )
            ),
            True
        )
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project source | File source
def test_image_data_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertTrue(
            np.all(
                mask[:, self.x_start:self.x_end]
            )
        )
        self.assertFalse(
            np.any(
                mask[:, :self.x_start]
            )
        )
        self.assertFalse(
            np.any(
                mask[:, self.x_end:]
            )
        )
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project source | File source
def test_seq_data_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.seq_data}
        )
        self.assertTrue(
            np.all(
                mask[:, :self.seq_data_max_length]
            )
        )
        self.assertFalse(
            np.any(
                mask[:, self.seq_data_max_length:]
            )
        )
Project: AutoSleepScorerDev    Author: skjerns    | Project source | File source
def check_for_normalization(self, data_header):

        channels = [c.upper() for c in data_header.ch_names]
        if not data_header.info['sfreq'] == 100 and not self.resample:
            print('WARNING: Data is not sampled at 100 Hz. Use resample=True for resampling')


#        if not data_header.info['lowpass'] == 50:
#            print('WARNING: lowpass not at 50')
        if self.channels['EEG'] not in channels and not np.any([ch in channels for ch in self.channels['EEG']]):
            print('WARNING: EEG channel missing')
        if not self.channels['EMG'] in channels:
            print('WARNING: EMG channel missing')
        if not self.channels['EOG'] in channels:
            print('WARNING: EOG channel missing')


        if self.references['RefEEG'] and not self.references['RefEEG'] in channels:
            print('WARNING: RefEEG channel missing')
        if self.references['RefEMG'] and not self.references['RefEMG'] in channels:
            print('WARNING: RefEMG channel missing')
        if self.references['RefEOG'] and not self.references['RefEOG'] in channels:
            print('WARNING: RefEOG channel missing')
Project: autolab_core    Author: BerkeleyAutomation    | Project source | File source
def __init__(self, min_pt, max_pt, frame='unspecified'):
        """Initialize a box.

        Parameters
        ----------
        min_pt : :obj:`numpy.ndarray` of float
            The minimum x, y, and (optionally) z points.

        max_pt : :obj:`numpy.ndarray` of float
            The maximum x, y, and (optionally) z points.

        frame : :obj:`str`
            The frame in which this box is placed.

        Raises
        ------
        ValueError
            If max_pt is smaller than min_pt along any dimension.
        """
        if np.any((max_pt - min_pt) < 0):
            raise ValueError('Min point must be smaller than max point')
        self._min_pt = min_pt
        self._max_pt = max_pt
        self._frame = frame
Project: autolab_core    Author: BerkeleyAutomation    | Project source | File source
def _check_valid_data(self, data):
        """Checks that the incoming data is a 3 x #elements ndarray of normal
        vectors.

        Parameters
        ----------
        data : :obj:`numpy.ndarray`
            The data to verify.

        Raises
        ------
        ValueError
            If the data is not of the correct shape or type, or if the vectors
            therein are not normalized.
        """
        if data.dtype.type != np.float32 and data.dtype.type != np.float64:
            raise ValueError('Must initialize normals clouds with a numpy float ndarray')
        if data.shape[0] != 3:
            raise ValueError('Illegal data array passed to normal cloud. Must have 3 coordinates')
        if len(data.shape) > 2:
            raise ValueError('Illegal data array passed to normal cloud. Must have 1 or 2 dimensions')
        if np.any((np.abs(np.linalg.norm(data, axis=0) - 1) > 1e-4) & (np.linalg.norm(data, axis=0) != 0)):
            raise ValueError('Illegal data array passed to normal cloud. Must have norm=1.0 or norm=0.0')
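
The final norm check combines two boolean arrays before the np.any() reduction. A self-contained version with hypothetical data:

import numpy as np

# 3 x N array of normals; the zero vector is explicitly allowed
normals = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0]]).T

norms = np.linalg.norm(normals, axis=0)
bad = np.any((np.abs(norms - 1) > 1e-4) & (norms != 0))
print(bad)  # False: every column has norm 1.0 or norm 0.0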
Project: supremm    Author: ubccr    | Project source | File source
def process(self, nodemeta, timestamp, data, description):

        if self._job.getdata('perf')['active'] != True:
            self._error = ProcessingError.RAW_COUNTER_UNAVAILABLE
            return False

        if len(data[0]) == 0:
            # Ignore datapoints where no data stored
            return True

        if nodemeta.nodename not in self._data:
            self._data[nodemeta.nodename] = {"x": [], "t": []}

        info = self._data[nodemeta.nodename]
        info['x'].append(1.0 * numpy.sum(data[0]))
        info['t'].append(timestamp)

        if len(info['x']) > 1:
            if numpy.any(info['x'][-1] - info['x'][-2] < 0.0):
                self._error = ProcessingError.PMDA_RESTARTED_DURING_JOB
                return False

        return True
Project: supremm    Author: ubccr    | Project source | File source
def process(self, nodemeta, timestamp, data, description):

        if self._job.getdata('perf')['active'] != True:
            self._error = ProcessingError.RAW_COUNTER_UNAVAILABLE
            return False

        ndata = numpy.array(data)

        if nodemeta.nodename not in self._first:
            self._first[nodemeta.nodename] = ndata
            return True

        if ndata.shape == self._first[nodemeta.nodename].shape:
            self._data[nodemeta.nodename] = numpy.sum(ndata - self._first[nodemeta.nodename])
            if numpy.any(numpy.fabs(self._data[nodemeta.nodename]) != self._data[nodemeta.nodename]):
                self._error = ProcessingError.PMDA_RESTARTED_DURING_JOB
                return False
        else:
            # Perf counters changed during the job
            self._error = ProcessingError.RAW_COUNTER_UNAVAILABLE
            return False

        return True
Project: sound_field_analysis-py    Author: QULab    | Project source | File source
def dspneumann(n, kr):
    """Derivative spherical Neumann (Bessel second kind) of order n at kr

    Parameters
    ----------
    n : array_like
       Order
    kr: array_like
       Argument

    Returns
    -------
    Yv' : complex float
       Derivative of spherical Neumann (Bessel second kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    if _np.any(n < 0) | _np.any(_np.mod(n, 1) != 0) | _np.any(_np.mod(kr, 1) != 0):
        return spneumann(n, kr) * n / kr - spneumann(n + 1, kr)
    else:
        return scy.spherical_yn(n.astype(int), kr.astype(complex), derivative=True)
Project: circletracking    Author: caspervdw    | Project source | File source
def crop_pad(image, corner, shape):
    ndim = len(corner)
    corner = [int(round(c)) for c in corner]
    shape = [int(round(s)) for s in shape]
    original = image.shape[-ndim:]
    zipped = list(zip(corner, shape, original))

    # Use the builtin any() here: np.any() cannot reduce a generator
    # expression, and the zipped triples are reused for the padding below.
    if any(c < 0 or c + s > o for (c, s, o) in zipped):
        no_padding = [(0, 0)] * (image.ndim - ndim)
        padding = [(max(-c, 0), max(c + s - o, 0)) for (c, s, o) in zipped]
        corner = [c + max(-c, 0) for c in corner]
        image_temp = np.pad(image, no_padding + padding, mode=str('constant'))
    else:
        image_temp = image

    no_crop = [slice(o+1) for o in image.shape[:-ndim]]
    crop = [slice(c, c+s) for (c, s) in zip(corner, shape)]
    return image_temp[tuple(no_crop + crop)]  # NumPy requires a tuple index
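
A quick usage sketch of crop_pad as defined above (illustrative values): requesting a crop whose corner lies outside the image exercises the zero-padding branch.

import numpy as np

image = np.arange(16).reshape(4, 4)
print(crop_pad(image, corner=(-1, -1), shape=(3, 3)))
# [[0 0 0]
#  [0 0 1]
#  [0 4 5]]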
Project: circletracking    Author: caspervdw    | Project source | File source
def slice_image(pos, image, radius):
    """ Slice a box around a group of features from an image.

    The box is the smallest box that contains all coordinates up to `radius`
    from any coordinate.

    Parameters
    ----------
    image : ndarray
        The image that will be sliced
    pos : iterable
        An iterable (e.g. list or ndarray) that contains the feature positions
    radius : number or tuple of numbers
        Defines the size of the slice. Every pixel that has a distance lower or
        equal to `radius` to a feature position is included.

    Returns
    -------
    tuple of:
    - the sliced image
    - the coordinate of the slice origin (top-left pixel)
    """
    slices, origin = get_slice(pos, image.shape,  radius)
    return image[slices], origin
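
slice_image delegates to get_slice (not shown here); the box computation the docstring describes can be sketched directly, assuming integer (row, col) positions:

import numpy as np

pos = np.array([[5, 5], [8, 10]])  # feature positions
radius = 2
image = np.zeros((20, 20))

# Smallest box containing every position +/- radius, clipped to the image
lo = np.maximum(pos.min(axis=0) - radius, 0)
hi = np.minimum(pos.max(axis=0) + radius + 1, image.shape)
slices = tuple(slice(int(l), int(h)) for l, h in zip(lo, hi))
print(slices)  # (slice(3, 11, None), slice(3, 13, None)): origin is (3, 3)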
Project: mixedvines    Author: asnelt    | Project source | File source
def _logcdf(self, samples):
        lower = np.full(2, -np.inf)
        upper = norm.ppf(samples)
        limit_flags = np.zeros(2)
        if upper.shape[0] > 0:

            def func1d(upper1d):
                '''
                Calculates the multivariate normal cumulative distribution
                function of a single sample.
                '''
                return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]

            vals = np.apply_along_axis(func1d, -1, upper)
        else:
            vals = np.empty((0, ))
        old_settings = np.seterr(divide='ignore')
        vals = np.log(vals)
        np.seterr(**old_settings)
        vals[np.any(samples == 0.0, axis=1)] = -np.inf
        vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
        vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
        return vals
Project: Parallel.GAMIT    Author: demiangomez    | Project source | File source
def eval(self, t):
        # given a time vector t, return the design matrix column vector(s)

        if self.type is None:
            return np.array([])

        hl = np.zeros((t.shape[0],))
        ht = np.zeros((t.shape[0],))

        if self.type in (0,2):
            hl[t >= self.year] = np.log10(1 + (t[t >= self.year] - self.year) / self.T)

        if self.type in (1,2):
            ht[t >= self.year] = 1

        return np.append(ht,hl) if np.any(hl) else ht
Project: latplan    Author: guicho271828    | Project source | File source
def validate_transitions_cpu_old(transitions, **kwargs):
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])
    base = setting['base']
    width  = pre.shape[1] // base
    height = pre.shape[1] // base
    load(width,height)

    pre_validation = validate_states(pre, **kwargs)
    suc_validation = validate_states(suc, **kwargs)

    results = []
    for pre, suc, pre_validation, suc_validation in zip(pre, suc, pre_validation, suc_validation):

        if pre_validation and suc_validation:
            c = to_configs(np.array([pre, suc]), verbose=False)
            succs = successors(c[0], width, height)
            results.append(np.any(np.all(np.equal(succs, c[1]), axis=1)))
        else:
            results.append(False)

    return results
Project: latplan    Author: guicho271828    | Project source | File source
def validate_transitions(transitions, check_states=True, **kwargs):
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])

    if check_states:
        pre_validation = validate_states(pre, verbose=False, **kwargs)
        suc_validation = validate_states(suc, verbose=False, **kwargs)

    pre_configs = to_configs(pre, verbose=False, **kwargs)
    suc_configs = to_configs(suc, verbose=False, **kwargs)

    results = []
    if check_states:
        for pre_c, suc_c, pre_validation, suc_validation in zip(pre_configs, suc_configs, pre_validation, suc_validation):

            if pre_validation and suc_validation:
                succs = successors(pre_c)
                results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
            else:
                results.append(False)
    else:
        for pre_c, suc_c in zip(pre_configs, suc_configs):
            succs = successors(pre_c)
            results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
    return results
Project: triage    Author: dssg    | Project source | File source
def transform(self, X):
        feature_range = self.feature_range

        X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)

        if X.ndim == 1:
            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)

        if np.any(X > feature_range[1]) or np.any(X < feature_range[0]):
            warnings.warn(
                "You got data that are out of the range: {}"
                .format(feature_range)
            )

        X[X > feature_range[1]] = feature_range[1]
        X[X < feature_range[0]] = feature_range[0]

        return X
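
A minimal illustration of the range check and clipping above (hypothetical feature_range):

import numpy as np

feature_range = (0.0, 1.0)
X = np.array([-0.5, 0.3, 1.7])

if np.any(X > feature_range[1]) or np.any(X < feature_range[0]):
    print("You got data that are out of the range: {}".format(feature_range))

X[X > feature_range[1]] = feature_range[1]  # clip from above
X[X < feature_range[0]] = feature_range[0]  # clip from below
print(X)  # [0.  0.3 1. ]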
Project: Graphene    Author: ashivni    | Project source | File source
def vorEdges(vor, far):
    """
    Given a voronoi tesselation, retuns the set of voronoi edges.
    far is the length of the "infinity" edges
    """
    edges = []
    for simplex in vor.ridge_vertices:
        simplex = numpy.asarray(simplex)
        if numpy.all(simplex >= 0):
            edge = {}
            edge['p1'] = numpy.array([vor.vertices[simplex, 0][0], vor.vertices[simplex, 1][0]])
            edge['p2'] = numpy.array([vor.vertices[simplex, 0][1], vor.vertices[simplex, 1][1]])
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)

    ptp_bound = vor.points.ptp(axis=0)
    center = vor.points.mean(axis=0)
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = numpy.asarray(simplex)
        if numpy.any(simplex < 0):
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex

            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= numpy.linalg.norm(t)
            n = numpy.array([-t[1], t[0]])  # normal

            midpoint = vor.points[pointidx].mean(axis=0)
            direction = numpy.sign(numpy.dot(midpoint - center, n)) * n

            far_point = vor.vertices[i] + direction * ptp_bound.max() * far
            edge = {}
            edge['p1'], edge['p2'] = vor.vertices[i, :], far_point
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)
    return edges
Project: radar    Author: amoose136    | Project source | File source
def sometrue(a, axis=None, out=None, keepdims=False):
    """
    Check whether some values are true.

    Refer to `any` for full documentation.

    See Also
    --------
    any : equivalent function

    """
    arr = asanyarray(a)

    try:
        return arr.any(axis=axis, out=out, keepdims=keepdims)
    except TypeError:
        return arr.any(axis=axis, out=out)
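
As the docstring says, sometrue defers to ndarray.any(); the reduction it performs is simply:

import numpy as np

a = np.array([[0, 0], [0, 2]])
print(a.any(axis=0))      # [False  True]
print(np.any(a, axis=0))  # [False  True]: the equivalent function form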
Project: radar    Author: amoose136    | Project source | File source
def test_ddof_too_big(self):
        nanfuncs = [np.nanvar, np.nanstd]
        stdfuncs = [np.var, np.std]
        dsize = [len(d) for d in _rdat]
        for nf, rf in zip(nanfuncs, stdfuncs):
            for ddof in range(5):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    tgt = [ddof >= d for d in dsize]
                    res = nf(_ndat, axis=1, ddof=ddof)
                    assert_equal(np.isnan(res), tgt)
                    if any(tgt):
                        assert_(len(w) == 1)
                        assert_(issubclass(w[0].category, RuntimeWarning))
                    else:
                        assert_(len(w) == 0)
Project: radar    Author: amoose136    | Project source | File source
def __iadd__(self, other):
        """
        Add other to self in-place.

        """
        m = getmask(other)
        if self._mask is nomask:
            if m is not nomask and m.any():
                self._mask = make_mask_none(self.shape, self.dtype)
                self._mask += m
        else:
            if m is not nomask:
                self._mask += m
        self._data.__iadd__(np.where(self._mask, self.dtype.type(0),
                                     getdata(other)))
        return self
Project: radar    Author: amoose136    | Project source | File source
def __idiv__(self, other):
        """
        Divide self by other in-place.

        """
        other_data = getdata(other)
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.divide]
            other_data = np.where(dom_mask, fval, other_data)
        self._mask |= new_mask
        self._data.__idiv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
        return self
Project: radar    Author: amoose136    | Project source | File source
def __ifloordiv__(self, other):
        """
        Floor divide self by other in-place.

        """
        other_data = getdata(other)
        dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
        other_mask = getmask(other)
        new_mask = mask_or(other_mask, dom_mask)
        # The following 3 lines control the domain filling
        if dom_mask.any():
            (_, fval) = ufunc_fills[np.floor_divide]
            other_data = np.where(dom_mask, fval, other_data)
        self._mask |= new_mask
        self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
                                          other_data))
        return self
Project: radar    Author: amoose136    | Project source | File source
def __ipow__(self, other):
        """
        Raise self to the power other, in place.

        """
        other_data = getdata(other)
        other_mask = getmask(other)
        with np.errstate(divide='ignore', invalid='ignore'):
            self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                         other_data))
        invalid = np.logical_not(np.isfinite(self._data))
        if invalid.any():
            if self._mask is not nomask:
                self._mask |= invalid
            else:
                self._mask = invalid
            np.copyto(self._data, self.fill_value, where=invalid)
        new_mask = mask_or(other_mask, invalid)
        self._mask = mask_or(self._mask, new_mask)
        return self
Project: radar    Author: amoose136    | Project source | File source
def test_comparisons(self):
        A = np.arange(100).reshape(10, 10)
        mA = matrix(A)
        mB = matrix(A) + 0.1
        assert_(np.all(mB == A+0.1))
        assert_(np.all(mB == matrix(A+0.1)))
        assert_(not np.any(mB == matrix(A-0.1)))
        assert_(np.all(mA < mB))
        assert_(np.all(mA <= mB))
        assert_(np.all(mA <= mA))
        assert_(not np.any(mA < mA))

        assert_(not np.any(mB < mA))
        assert_(np.all(mB >= mA))
        assert_(np.all(mB >= mB))
        assert_(not np.any(mB > mB))

        assert_(np.all(mA == mA))
        assert_(not np.any(mA == mB))
        assert_(np.all(mB != mA))

        assert_(not np.all(abs(mA) > 0))
        assert_(np.all(abs(mB) > 0))
Project: AutoML5    Author: djajetic    | Project source | File source
def loadData (self, filename, verbose=True, replace_missing=True):
        ''' Get the data from a text file in one of 3 formats: matrix, sparse, binary_sparse'''
        if verbose:  print("========= Reading " + filename)
        start = time.time()
        if self.use_pickle and os.path.exists (os.path.join (self.tmp_dir, os.path.basename(filename) + ".pickle")):
            with open (os.path.join (self.tmp_dir, os.path.basename(filename) + ".pickle"), "rb") as pickle_file:
                vprint (verbose, "Loading pickle file : " + os.path.join(self.tmp_dir, os.path.basename(filename) + ".pickle"))
                return pickle.load(pickle_file)
        if 'format' not in self.info.keys():
            self.getFormatData(filename)
        if 'feat_num' not in self.info.keys():
            self.getNbrFeatures(filename)

        data_func = {'dense':data_io.data, 'sparse':data_io.data_sparse, 'sparse_binary':data_io.data_binary_sparse}

        data = data_func[self.info['format']](filename, self.info['feat_num'])

        # IMPORTANT: when we replace missing values we double the number of variables

        if self.info['format']=='dense' and replace_missing and np.any(np.isnan(data)):
            vprint (verbose, "Replace missing values by 0 (slow, sorry)")
            data = data_converter.replace_missing(data)
        if self.use_pickle:
            with open (os.path.join (self.tmp_dir, os.path.basename(filename) + ".pickle"), "wb") as pickle_file:
                vprint (verbose, "Saving pickle file : " + os.path.join (self.tmp_dir, os.path.basename(filename) + ".pickle"))
                p = pickle.Pickler(pickle_file) 
                p.fast = True 
                p.dump(data)
        end = time.time()
        if verbose:  print( "[+] Success in %5.2f sec" % (end - start))
        return data
Project: AutoML5    Author: djajetic    | Project source | File source
def getTypeProblem (self, solution_filename):
        ''' Get the type of problem directly from the solution file (in case we do not have an info file)'''
        if 'task' not in self.info.keys():
            solution = np.array(data_converter.file_to_array(solution_filename))
            target_num = solution.shape[1]
            self.info['target_num']=target_num
            if target_num == 1: # if we have only one column
                solution = np.ravel(solution) # flatten
                nbr_unique_values = len(np.unique(solution))
                if nbr_unique_values < len(solution)/8:
                    # Classification
                    self.info['label_num'] = nbr_unique_values
                    if nbr_unique_values == 2:
                        self.info['task'] = 'binary.classification'
                        self.info['target_type'] = 'Binary'
                    else:
                        self.info['task'] = 'multiclass.classification'
                        self.info['target_type'] = 'Categorical'
                else:
                    # Regression
                    self.info['label_num'] = 0
                    self.info['task'] = 'regression'
                    self.info['target_type'] = 'Numerical'     
            else:
                # Multilabel or multiclass       
                self.info['label_num'] = target_num
                self.info['target_type'] = 'Binary' 
                if any(item > 1 for item in map(np.sum,solution.astype(int))):
                    self.info['task'] = 'multilabel.classification'     
                else:
                    self.info['task'] = 'multiclass.classification'        
        return self.info['task']
Project: IntroToDeepLearning    Author: robb-brown    | Project source | File source
def conv2d(x,W,strides=[1,1,1,1],name=None):
    # return an op that convolves x with W
    strides = np.array(strides)
    if strides.size == 1:
        strides = np.array([1,strides,strides,1])
    elif strides.size == 2:
        strides = np.array([1,strides[0],strides[1],1])
    if np.any(strides < 1):
        strides = np.around(1./strides).astype(np.uint8)
        return tf.nn.conv2d_transpose(x,W,strides=strides.tolist(),padding='SAME',name=name)
    else:
        return tf.nn.conv2d(x,W,strides=strides.tolist(),padding='SAME',name=name)
Project: IntroToDeepLearning    Author: robb-brown    | Project source | File source
def conv3d(x,W,strides=1,name=None):
    # return an op that convolves x with W
    strides = np.array(strides)
    if strides.size == 1:
        strides = np.array([1,strides,strides,strides,1])
    elif strides.size == 3:
        strides = np.array([1,strides[0],strides[1],strides[2],1])
    if np.any(strides < 1):
        strides = np.around(1./strides).astype(np.uint8)
        return tf.nn.conv3d_transpose(x,W,strides=strides.tolist(),padding='SAME',name=name)
    else:
        return tf.nn.conv3d(x,W,strides=strides.tolist(),padding='SAME',name=name)
Project: uwb_tracker_ros    Author: eth-ait    | Project source | File source
def initial_guess(self, ranges):
        """Computes an initial position guess based on range measurements.

        The initial position is computed using Gauss-Newton method.
        The behavior can be modified with some parameters: `self.initial_guess_...`.

        Args:
             ranges (list of floats): Range measurements.

        Returns:
            initial_state (numpy.ndarray): Initial state vector (velocity components are zero).
        """
        num_of_units = len(ranges)
        position = self.initial_guess_position
        H = np.zeros((num_of_units, position.size))
        z = np.zeros((num_of_units, 1))
        h = np.zeros((num_of_units, 1))
        residuals = np.zeros((num_of_units, 1))
        for i in xrange(self.initial_guess_iterations):
            self._compute_measurements_and_jacobians(ranges, position, h, H, z)
            new_residuals = z - h
            position = position + np.dot(self._solve_equation_least_squares(np.dot(H.T, H), H.T), new_residuals)
            if np.sum((new_residuals - residuals) ** 2) < self.initial_guess_tolerance:
                break
            residuals = new_residuals
        rospy.loginfo('initial guess residuals: {}'.format(residuals))
        if np.any(np.abs(residuals) > self.initial_guess_residuals_threshold):
            # This initial guess is not good enough
            return None
        initial_state = np.zeros((6, 1))
        initial_state[0:3] = position
        return initial_state
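
For intuition, here is a stripped-down version of the Gauss-Newton range-only solve the docstring describes (hypothetical anchor positions and noiseless ranges; the real method also applies residual thresholds and iteration limits):

import numpy as np

anchors = np.array([[0.0, 0.0], [4.0, 0.0], [0.0, 3.0]])  # known positions
target = np.array([1.0, 1.0])
ranges = np.linalg.norm(anchors - target, axis=1)  # simulated measurements

position = np.array([2.0, 2.0])  # initial guess
for _ in range(10):
    diffs = position - anchors
    dists = np.linalg.norm(diffs, axis=1)
    H = diffs / dists[:, None]         # Jacobian of the range model
    residuals = ranges - dists
    step, *_ = np.linalg.lstsq(H, residuals, rcond=None)
    position = position + step
print(np.round(position, 6))  # [1. 1.]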
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def overwrite_text(cursor, text):
    text_length = len(text)
    cursor.clearSelection()
    # Select the text after the current position (if any)
    current_position = cursor.position()
    cursor.movePosition(QTextCursor.Right,
                        mode=QTextCursor.MoveAnchor,
                        n=text_length)
    cursor.movePosition(QTextCursor.Left,
                        mode=QTextCursor.KeepAnchor,
                        n=cursor.position()-current_position)
    # Insert the text (will overwrite the selected text)
    cursor.insertText(text)
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def store_tasks(self):
        self.stored_tasks = [cb.isChecked() for cb in self.task_comboboxes]
        if not numpy.any(self.stored_tasks):
            self.ui.btn_run.setEnabled(False)
        elif str(self.ui.edit_file.text()) != '':
            self.ui.btn_run.setEnabled(True)
            self.ui.btn_plots.setEnabled(True)
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def _scale_data_to_float32(self, data):
        '''
            This function will convert data from local data dtype into float32, the default format of the algorithm
        '''
        if self.data_dtype != numpy.float32:
            data  = data.astype(numpy.float32)

        if self.dtype_offset != 0:
            data  -= self.dtype_offset

        if numpy.any(self.gain != 1):
            data *= self.gain

        return numpy.ascontiguousarray(data)
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def _unscale_data_from_float32(self, data):
        '''
            This function will convert data from float32 back to the original format of the file
        '''

        if numpy.any(self.gain != 1):
            data /= self.gain

        if self.dtype_offset != 0:
            data  += self.dtype_offset

        if (data.dtype != self.data_dtype) and (self.data_dtype != numpy.float32):
            data = data.astype(self.data_dtype)

        return data
Project: npstreams    Author: LaurentRDC    | Project source | File source
def test_ignore_nan(self):
        """ Test that ignore_nan is working """
        for axis in (0, 1, 2, 3, None):
            with self.subTest('axis = {}'.format(axis)):
                out = last(ireduce_ufunc(self.source, np.add, axis = axis, ignore_nan = True))
                self.assertFalse(np.any(np.isnan(out)))

# Dynamic generation of tests on binary ufuncs
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def ffill_buffer_from_prior_values(freq,
                                   field,
                                   buffer_frame,
                                   digest_frame,
                                   pv_frame,
                                   raw=False):
    """
    Forward-fill a buffer frame, falling back to the end-of-period values of a
    digest frame if the buffer frame has leading NaNs.
    """
    # convert to ndarray if necessary
    digest_values = digest_frame
    if raw and isinstance(digest_frame, pd.DataFrame):
        digest_values = digest_frame.values

    buffer_values = buffer_frame
    if raw and isinstance(buffer_frame, pd.DataFrame):
        buffer_values = buffer_frame.values

    nan_sids = pd.isnull(buffer_values[0])
    if np.any(nan_sids) and len(digest_values):
        # If we have any leading nans in the buffer and we have a non-empty
        # digest frame, use the oldest digest values as the initial buffer
        # values.
        buffer_values[0, nan_sids] = digest_values[-1, nan_sids]

    nan_sids = pd.isnull(buffer_values[0])
    if np.any(nan_sids):
        # If we still have leading nans, fall back to the last known values
        # from before the digest.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        filler = pv_frame.values[key_loc, nan_sids]
        buffer_values[0, nan_sids] = filler

    if raw:
        filled = ffill(buffer_values)
        return filled

    return buffer_frame.ffill()
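
The leading-NaN seeding logic above, in miniature (hypothetical buffer and digest values; np.isnan stands in for pd.isnull):

import numpy as np

buffer_values = np.array([[np.nan, 2.0],
                          [5.0, np.nan]])
digest_values = np.array([[9.0, 8.0]])

nan_sids = np.isnan(buffer_values[0])
if np.any(nan_sids) and len(digest_values):
    # Seed leading NaNs from the oldest digest row
    buffer_values[0, nan_sids] = digest_values[-1, nan_sids]
print(buffer_values)  # [[9. 2.], [5. nan]]: only the leading NaN was filled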
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def ffill_digest_frame_from_prior_values(freq,
                                         field,
                                         digest_frame,
                                         pv_frame,
                                         raw=False):
    """
    Forward-fill a digest frame, falling back to the last known prior values if
    necessary.
    """
    # convert to ndarray if necessary
    values = digest_frame
    if raw and isinstance(digest_frame, pd.DataFrame):
        values = digest_frame.values

    nan_sids = pd.isnull(values[0])
    if np.any(nan_sids):
        # If we have any leading nans in the frame, use values from pv_frame to
        # seed values for those sids.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        filler = pv_frame.values[key_loc, nan_sids]
        values[0, nan_sids] = filler

    if raw:
        filled = ffill(values)
        return filled

    return digest_frame.ffill()
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def __new__(cls, field=None, frequency_delta=None, length_delta=None):
        """
        field is a new field that was added.
        frequency is a FrequencyDelta representing a new frequency was added.
        length is a bar LengthDelta which is a frequency and a bar_count.
        If any field is None, then no change occurred of that type.
        """
        return super(HistoryContainerDelta, cls).__new__(
            cls, field, frequency_delta, length_delta,
        )
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def create_digest_panels(self,
                             initial_sids,
                             initial_dt):
        """
        Initialize a RollingPanel for each unique panel frequency being stored
        by this container.  Each RollingPanel pre-allocates enough storage
        space to service the highest bar-count of any history call that it
        serves.
        """
        # Map from frequency -> first/last minute of the next digest to be
        # rolled for that frequency.
        first_window_starts = {}
        first_window_closes = {}

        # Map from frequency -> digest_panels.
        panels = {}
        for freq, largest_spec in iteritems(self.largest_specs):
            if largest_spec.bar_count == 1:
                # No need to allocate a digest panel; this frequency will only
                # ever use data drawn from self.buffer_panel.
                first_window_starts[freq] = freq.normalize(initial_dt)
                first_window_closes[freq] = freq.window_close(
                    first_window_starts[freq]
                )

                continue

            dt = initial_dt

            rp = self._create_digest_panel(
                dt,
                spec=largest_spec,
                window_starts=first_window_starts,
                window_closes=first_window_closes,
            )

            panels[freq] = rp

        return panels, first_window_starts, first_window_closes
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def update(self, data, algo_dt):
        """
        Takes the bar at @algo_dt's @data, checks to see if we need to roll any
        new digests, then adds new data to the buffer panel.
        """
        frame = self.frame_from_bardata(data, algo_dt)

        self.update_last_known_values()
        self.update_digest_panels(algo_dt, self.buffer_panel)
        self.buffer_panel.add_frame(algo_dt, frame)
Project: cellranger    Author: 10XGenomics    | Project source | File source
def _set_mapping_metrics(self, read_type, read_type_set):
        for genome, region in itertools.product(self.genomes, self.regions):
            is_read_type = (genome, region) in read_type_set
            reads_frac = self._get_metric_attr('reads_frac', genome, region, read_type)
            reads_frac.add(1, filter=is_read_type)

        for region in self.regions:
            is_read_type = any([(genome, region) in read_type_set for genome in self.genomes])
            multi_reads_frac = self._get_metric_attr('reads_frac', cr_constants.MULTI_REFS_PREFIX, region,
                                                     read_type)
            multi_reads_frac.add(1, filter=is_read_type)
Project: cellranger    Author: 10XGenomics    | Project source | File source
def mark_dupes_group_cb(self, gene_id, umis, dupe_type):
        total_counts = sum(umis.values())
        total_umis = len(umis)
        if any([count > 1 for count in umis.itervalues()]):
            umi_hamming_distance = 0
        else:
            umi_hamming_distance = cr_utils.get_kmers_hamming_distance(umis.keys())

        for reference in [cr_utils.get_genome_from_str(gene_id, self.genomes), cr_constants.MULTI_REFS_PREFIX]:
            if total_counts > 0:
                reads_per_dupe_group_histogram = self._get_metric_attr(
                    'reads_per_dupe_group_histogram', reference, dupe_type)
                reads_per_dupe_group_histogram.add(total_counts)

            if total_umis > 0:
                umis_per_dupe_group_histogram = self._get_metric_attr(
                    'umis_per_dupe_group_histogram', reference, dupe_type)
                umis_per_dupe_group_histogram.add(total_umis)

            reads_per_molecule_histogram = self._get_metric_attr(
                'reads_per_molecule_histogram', reference, dupe_type)
            for count in umis.itervalues():
                reads_per_molecule_histogram.add(count)

            if umi_hamming_distance is not None:
                umi_hamming_distance_per_dupe_group_histogram = self._get_metric_attr(
                    'umi_hamming_distance_per_dupe_group_histogram', reference, dupe_type)
                umi_hamming_distance_per_dupe_group_histogram.add(umi_hamming_distance)
Project: cellranger    Author: 10XGenomics    | Project source | File source
def merge_h5(in_filenames, out_filename):
    """ Merge a list of h5 files """
    out_h5 = h5.File(out_filename, 'a')
    for filename in in_filenames:
        if filename is None:
            continue
        in_h5 = h5.File(filename, 'r')
        for name in in_h5.keys():
            # If the dataset already exists,
            # They must be equal or one must be all-zero.
            if name in out_h5.keys():
                src_data, dest_data = in_h5[name][()], out_h5[name][()]
                if src_data.dtype.kind != 'S' and dest_data.dtype.kind != 'S':
                    # Both numeric
                    if not np.any(src_data):
                        # Source is all zero. Do nothing.
                        continue
                    elif not np.any(dest_data):
                        # Dest is all zero. Overwrite.
                        del out_h5[name]
                        h5.h5o.copy(in_h5.id, name, out_h5.id, name)
                    else:
                        # Both non-zero. Assert equality and do nothing.
                        assert np.array_equal(src_data, dest_data)
                else:
                    # Either are non-numeric. Assert equality and do nothing.
                    assert np.array_equal(src_data, dest_data)
            else:
                # Only exists in src. Copy to dest.
                h5.h5o.copy(in_h5.id, name, out_h5.id, name)

    out_h5.flush()
    out_h5.close()
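
The overwrite rule in the numeric branch above, shown in isolation (hypothetical arrays standing in for the two datasets):

import numpy as np

src_data = np.array([1.0, 2.0, 3.0])
dest_data = np.zeros(3)

if not np.any(src_data):
    action = 'keep dest'       # source is all zero: do nothing
elif not np.any(dest_data):
    action = 'overwrite dest'  # dest is all zero: take the source
else:
    assert np.array_equal(src_data, dest_data)
    action = 'keep dest'       # both non-zero: they must agree
print(action)  # overwrite dest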
Project: galario    Author: mtazzari    | Project source | File source
def assert_allclose(x, y, rtol=1e-10, atol=1e-8):
    """Drop in replacement for `numpy.testing.assert_allclose` that shows the nonmatching elements"""
    if np.isscalar(x) and np.isscalar(y):
        return np.testing.assert_allclose(x, y, rtol=rtol, atol=atol)

    if x.shape != y.shape:
        raise AssertionError("Shape mismatch: %s vs %s" % (str(x.shape), str(y.shape)))

    d = ~np.isclose(x, y, rtol, atol)
    if np.any(d):
        miss = np.where(d)[0]
        raise AssertionError("""Mismatch of %d elements (%g %%) at the level of rtol=%g, atol=%g
    %s
    %s
    %s""" % (len(miss), 100. * len(miss) / x.size, rtol, atol, repr(miss), str(x[d]), str(y[d])))
Project: pybot    Author: spillai    | Project source | File source
def _nn_pose_fill(valid): 
        """
        Looks up closest True for each False and returns
        indices for fill-in-lookup
        In: [True, False, True, ... , False, True]
        Out: [0, 0, 2, ..., 212, 212]
        """

        valid_inds,  = np.where(valid)
        invalid_inds,  = np.where(~valid)

        all_inds = np.arange(len(valid))
        all_inds[invalid_inds] = -1

        for j in range(10): 
            fwd_inds = valid_inds + j
            bwd_inds = valid_inds - j

            # Forward fill
            invalid_inds, = np.where(all_inds < 0)
            fwd_fill_inds = np.intersect1d(fwd_inds, invalid_inds)
            all_inds[fwd_fill_inds] = all_inds[fwd_fill_inds-j]

            # Backward fill
            invalid_inds, = np.where(all_inds < 0)
            if not len(invalid_inds): break
            bwd_fill_inds = np.intersect1d(bwd_inds, invalid_inds)
            all_inds[bwd_fill_inds] = all_inds[bwd_fill_inds+j]

            # Check if any missing 
            invalid_inds, = np.where(all_inds < 0)
            if not len(invalid_inds): break

        # np.set_printoptions(threshold=np.nan)

        # print valid.astype(np.int)
        # print np.array_str(all_inds)
        # print np.where(all_inds < 0)

        return all_inds
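
A quick check of the nearest-True fill described in the docstring:

import numpy as np

valid = np.array([True, False, True, False, False, True])
print(_nn_pose_fill(valid))  # [0 0 2 2 5 5]: each False maps to the nearest True index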
Project: pybot    Author: spillai    | Project source | File source
def roidb(self, target_hash, targets=[], every_k_frames=1, verbose=True, skip_empty=True): 
        """
        @param target_hash: target hash map (name -> unique id)
        @param targets: return only provided target names 

        Returns (img, bbox, targets [hashed with target_hash (int32)])
        """

        self.check_ground_truth_availability()

        if every_k_frames > 1 and skip_empty: 
            raise RuntimeError('roidb not meant for skipping frames, '
                               'and skipping empty simultaneously')

        # Iterate through all images
        for idx, (t,ch,data) in enumerate(self.iterframes()): 

            # Skip every k frames, if requested
            if idx % every_k_frames != 0: 
                continue

            # Annotations may be empty, if 
            # unlabeled, however we can request
            # to yield if its empty or not
            bboxes = data.annotation.bboxes
            if not len(bboxes) and skip_empty: 
                continue
            target_names = data.annotation.pretty_names

            if len(targets): 
                inds, = np.where([np.any([t in name for t in targets]) for name in target_names])

                target_names = [target_names[ind] for ind in inds]
                bboxes = bboxes[inds]

            yield (data.img, bboxes, np.int32(map(lambda key: target_hash.get(key, -1), target_names)))