Python numpy module: asfarray() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.asfarray().
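Before the project snippets, a minimal sketch of what numpy.asfarray itself does (it returns the input converted to a float array):

import numpy as np

a = np.asfarray([1, 2, 3])              # ints -> float64 array
print(a, a.dtype)                       # [1. 2. 3.] float64

b = np.asfarray(['1.5', '2.5'])         # numeric strings are parsed as well
print(b, b.dtype)                       # [1.5 2.5] float64

# Assumption to verify for your environment: on NumPy >= 2.0 asfarray has been
# removed from the main namespace, and np.asarray(x, dtype=float) is the usual
# replacement.
c = np.asarray([1, 2, 3], dtype=float)
print(c.dtype)                          # float64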

Project: CSB    Author: csb-toolbox    | project source | file source
def rotation_matrix(axis, angle):
    """
    Calculate a three-dimensional rotation matrix for a rotation by the
    given angle around the given axis.

    @param axis: rotation axis
    @type axis: (3,) numpy array
    @param angle: angle in radians
    @type angle: float

    @rtype: (3,3) numpy.array
    """
    axis = numpy.asfarray(axis) / norm(axis)
    assert axis.shape == (3,)

    c = math.cos(angle)
    s = math.sin(angle)

    r = (1.0 - c) * numpy.outer(axis, axis)
    r.flat[[0,4,8]] += c
    r.flat[[5,6,1]] += s * axis
    r.flat[[7,2,3]] -= s * axis

    return r
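A quick usage sketch for the snippet above (assuming rotation_matrix and its imports — numpy, math, and a Euclidean norm such as numpy.linalg.norm — are in scope); it checks the generic rotation-matrix properties rather than any particular handedness convention:

import math
import numpy

R = rotation_matrix([0.0, 0.0, 1.0], math.pi / 2)   # 90 degrees about the z-axis
print(R.shape)                                       # (3, 3)
print(numpy.allclose(R @ R.T, numpy.eye(3)))         # True: orthogonal
print(numpy.isclose(numpy.linalg.det(R), 1.0))       # True: proper rotation, det = +1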
Project: imgProcessor    Author: radjkarl    | project source | file source
def medianThreshold(img, threshold=0.1, size=3, condition='>', copy=True):
    '''
    set every pixel value of the given [img] to the median-filtered one
    of a given kernel [size]
    in case the relative [threshold] is exceeded
    condition = '>' OR '<'
    '''
    from scipy.ndimage import median_filter

    indices = None
    if threshold > 0:
        blur = np.asfarray(median_filter(img, size=size))
        with np.errstate(divide='ignore', invalid='ignore', over='ignore'):

            if condition == '>':
                indices = abs((img - blur) / blur) > threshold
            else:
                indices = abs((img - blur) / blur) < threshold

        if copy:
            img = img.copy()

        img[indices] = blur[indices]
    return img, indices
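A small synthetic check of the routine above (a sketch; assumes numpy as np, scipy installed, and medianThreshold from the snippet in scope): a single hot pixel deviates from its local median by far more than 10% and is therefore replaced.

import numpy as np

img = np.full((5, 5), 100.0)
img[2, 2] = 1000.0                           # one outlier pixel
cleaned, mask = medianThreshold(img, threshold=0.1, size=3)
print(mask[2, 2], cleaned[2, 2])             # True 100.0: outlier replaced by the local median
print(img[2, 2])                             # 1000.0: original untouched because copy=True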
Project: PiLL    Author: lofar-astron    | project source | file source
def __init__(self, x, y):
        if len(x) != len(y):
            raise IndexError('x and y must be equally sized.')
        self.x = np.asfarray(x)
        self.y = np.asfarray(y)

        # Close the polygon if it is open
        x1, y1 = x[0], y[0]
        xn, yn = x[-1], y[-1]
        if x1 != xn or y1 != yn:
            self.x = np.concatenate((self.x, [x1]))
            self.y = np.concatenate((self.y, [y1]))

        # Anti-clockwise coordinates
        if _det(self.x, self.y) < 0:
            self.x = self.x[::-1]
            self.y = self.y[::-1]
Project: causal_bandits    Author: finnhacks42    | project source | file source
def V_short(self,eta):
        sum0 = np.zeros(7,dtype=float)
        sum1 = np.zeros(7,dtype=float)
        for n1,n2 in product(range(self.N1+1),range(self.N2+1)):
             wdo = comb(self.N1,n1,exact=True)*comb(self.N2,n2,exact=True)
             wdox10 = comb(self.N1-1,n1,exact=True)*comb(self.N2,n2,exact=True)
             wdox11 = comb(self.N1-1,n1-1,exact=True)*comb(self.N2,n2,exact=True)
             wdox20 = comb(self.N1,n1,exact=True)*comb(self.N2-1,n2,exact=True)
             wdox21 = comb(self.N1,n1,exact=True)*comb(self.N2-1,n2-1,exact=True)
             w = np.asarray([wdox10,wdox20,wdox11,wdox21,wdo,wdo,wdo])

             pz0,pz1 = self.p_n_given_z(n1,n2)

             counts = [self.N1-n1,self.N2-n2,n1,n2,1,1,1]
             Q = (eta*pz0*counts*(1-self.pZgivenA)+eta*pz1*counts*self.pZgivenA).sum()

             ratio = np.nan_to_num(np.true_divide(pz0*(1-self.pZgivenA)+pz1*self.pZgivenA,Q))

             sum0 += np.asfarray(w*pz0*ratio)
             sum1 += np.asfarray(w*pz1*ratio)
        result = self.pZgivenA*sum1+(1-self.pZgivenA)*sum0
        return result
Project: optimize-stencil    Author: Ablinne    | project source | file source
def _optimize_single(self, x0):
        x0 = list(x0)

        if x0[0] is None:
            x0[0] = 0
            dt_ok = np.asscalar(self.dispersion.dt_ok(x0))
            if dt_ok < 0:
                # Initial conditions violate constraints, reject
                return x0, None, float('inf')

            x0[0] = dt_ok
            x0[0] = min(x0[0], self.dtmax)
            x0[0] = max(x0[0], self.dtmin)

        x0 = np.asfarray(x0)

        stencil_ok = self.dispersion.stencil_ok(x0)
        if stencil_ok < 0:
            # Initial conditions violate constraints, reject
            return x0, None, float('inf')

        res = scop.minimize(self.dispersion.norm, x0, method='SLSQP', constraints = self.constraints, options = dict(disp=False, iprint = 2))
        norm = self.dispersion_high.norm(res.x)

        return x0, res, norm
Project: CNN_Own_Dataset    Author: YeongHyeon    | project source | file source
def next_batch(self, batch_size=10):

        datas = np.empty((0, self._height, self._width, self._dimension), int)
        labels = np.empty((0, self._class_len), int)


        for _ in range(batch_size):
            idx = random.randint(0, len(self._datas)-1)  # pick a random sample index
            tmp_img = scipy.misc.imread(self._datas[idx])
            tmp_img = scipy.misc.imresize(tmp_img, (self._height, self._width))
            tmp_img = tmp_img.reshape(1, self._height, self._width, self._dimension)

            datas = np.append(datas, tmp_img, axis=0)
            labels = np.append(labels, np.eye(self._class_len)[int(np.asfarray(self._labels[idx]))].reshape(1, self._class_len), axis=0)


        return datas, labels
Project: QScode    Author: PierreHao    | project source | file source
def main():
    f = open('label.txt','w')
    #target_names = np.array(args.names)
    X, target_names, y = getXY(args.image_dir)
    X = np.asfarray(X,dtype='float')
    colors = cm.gnuplot2(np.linspace(0, 1, len(target_names)))

    #X_pca = PCA(n_components=128).fit_transform(X)
    X_pca = X
    tsne = TSNE(n_components=2, init='random', random_state=0)
    X_r = tsne.fit_transform(X_pca)

    for c, i, target_name in zip(colors,
                             list(range(0, len(target_names))),
                             target_names):
        plt.scatter(X_r[y[i], 0], X_r[y[i], 1],
                c=c, label=str(i+1))
        f.write(target_name+'\n')
    plt.legend()
    plt.savefig("{}/10crop1.png".format('./'))
    f.close()
Project: irgan    Author: geek-ai    | project source | file source
def dcg_at_k(r, k, method=1):
    """Score is discounted cumulative gain (dcg)
    Relevance is positive real values.  Can use binary
    as the previous methods.
    Returns:
        Discounted cumulative gain
    """
    r = np.asfarray(r)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.
Project: j3dview    Author: blank63    | project source | file source
def gl_convert(self):
        array = numpy.asfarray(self,numpy.float32)

        if self.component_type != gx.F32 and self.scale_exponent != 0:
            array *= 2**(-self.scale_exponent)

        array = array.view(GLArray)
        array.attribute = self.attribute
        array.component_type = GL_FLOAT
        array.component_count = self.shape[1]
        array.normalize = False
        return array
Project: code    Author: ActiveState    | project source | file source
def _det(xvert, yvert):
    '''Compute twice the area of the triangle defined by the points using the
    determinant formula.

    Input parameters:

    xvert -- A vector of nodal x-coords (array-like).
    yvert -- A vector of nodal y-coords (array-like).

    Output parameters:

    Twice the area of the triangle defined by the points.

    Notes:

    _det is positive if points define polygon in anticlockwise order.
    _det is negative if points define polygon in clockwise order.
    _det is zero if at least two of the points are coincident or if
        all points are collinear.

    '''
    xvert = np.asfarray(xvert)
    yvert = np.asfarray(yvert)
    x_prev = np.concatenate(([xvert[-1]], xvert[:-1]))
    y_prev = np.concatenate(([yvert[-1]], yvert[:-1]))
    return np.sum(yvert * x_prev - xvert * y_prev, axis=0)
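A tiny check of _det (sketch; assumes numpy as np and the function above in scope) on a unit square: listed anticlockwise it returns plus twice the area, listed clockwise the sign flips, which is exactly the property the Polygon constructor below uses to normalise orientation.

import numpy as np

print(_det([0, 1, 1, 0], [0, 0, 1, 1]))   # 2.0  (anticlockwise unit square, 2 * area)
print(_det([0, 0, 1, 1], [0, 1, 1, 0]))   # -2.0 (same square listed clockwise)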
Project: code    Author: ActiveState    | project source | file source
def __init__(self, x, y):
        if len(x) != len(y):
            raise IndexError('x and y must be equally sized.')
        self.x = np.asfarray(x)
        self.y = np.asfarray(y)
        # Close the polygon if it is open
        x1, y1 = x[0], y[0]
        xn, yn = x[-1], y[-1]
        if x1 != xn or y1 != yn:
            self.x = np.concatenate((self.x, [x1]))
            self.y = np.concatenate((self.y, [y1]))
        # Anti-clockwise coordinates
        if _det(self.x, self.y) < 0:
            self.x = self.x[::-1]
            self.y = self.y[::-1]
Project: radar    Author: amoose136    | project source | file source
def test_asfarray_none(self, level=rlevel):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))
Project: antgo    Author: jianzfb    | project source | file source
def dcg_k(r,k,method = 0):
    '''
    Score is discounted cumulative gain (dcg)
    Relevance is positive real values.  Can use binary
    as the previous methods.
    Example from
    http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
    Parameters
    ----------
    r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
    k: Number of results to consider
    method: 0 or 1

    Returns
    -------
    Discounted cumulative gain
    '''
    r = np.asfarray(r)[:k]
    if r.size:
        if method == 0:
            #standard definition
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            #used in Kaggle
            return np.sum((np.power(2,r) - 1.0) / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.
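A short comparison of the two weightings above (sketch; assumes numpy as np and dcg_k from the snippet in scope): method 0 is the standard DCG, method 1 the exponential-gain variant used on Kaggle.

r = [3, 2, 3, 0, 1]                # relevance scores in rank order
print(dcg_k(r, 3, method=0))       # 3 + 2/log2(2) + 3/log2(3) ~= 6.893
print(dcg_k(r, 3, method=1))       # (2**3-1)/1 + (2**2-1)/log2(3) + (2**3-1)/2 ~= 12.393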
Project: vec4ir    Author: lgalke    | project source | file source
def dcg_at_k(r, k, method=0):
    """Score is discounted cumulative gain (dcg)

    Relevance is positive real values.  Can use binary
    as the previous methods.

    Example from
    http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
    >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
    >>> dcg_at_k(r, 1)
    3.0
    >>> dcg_at_k(r, 1, method=1)
    3.0
    >>> dcg_at_k(r, 2)
    5.0
    >>> dcg_at_k(r, 2, method=1)
    4.2618595071429155
    >>> dcg_at_k(r, 10)
    9.6051177391888114
    >>> dcg_at_k(r, 11)
    9.6051177391888114

    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Number of results to consider
        method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
                If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]

    Returns:
        Discounted cumulative gain
    """
    r = np.asfarray(r)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def test_asfarray_none(self, level=rlevel):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))
Project: imgProcessor    Author: radjkarl    | project source | file source
def SNR_IEC(i1, i2, ibg=0, allow_color_images=False):
    '''
    Calculate the averaged signal-to-noise ratio SNR50
    as defined by IEC NP 60904-13

    needs 2 reference EL images and one background image
    '''
    # ensure images are type float64 (double precision):
    i1 = np.asfarray(i1)
    i2 = np.asfarray(i2)
    if np.ndim(ibg) != 0 or ibg != 0:  # a background image (or non-zero scalar) was given
        ibg = np.asfarray(ibg)
        assert i1.shape == ibg.shape, 'all input images need to have the same resolution'

    assert i1.shape == i2.shape, 'all input images need to have the same resolution'
    if not allow_color_images:
        assert i1.ndim == 2, 'Images need to be in grayscale according to the IEC standard'

    # SNR calculation as defined in 'IEC TS 60904-13':
    signal = 0.5 * (i1 + i2) - ibg
    noise = 0.5**0.5 * np.abs(i1 - i2) * ((2 / np.pi)**-0.5)
    if signal.ndim == 3:  # color
        signal = np.average(signal, axis=2, weights=(0.114, 0.587, 0.299))
        noise = np.average(noise, axis=2, weights=(0.114, 0.587, 0.299))
    signal = signal.sum()
    noise = noise.sum()
    return signal / noise
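A synthetic smoke test for the function above (sketch; assumes numpy as np and SNR_IEC in scope): two noisy captures of a flat 100-count signal with sigma = 5 Gaussian noise should give an SNR near 100/5 = 20.

import numpy as np

rng = np.random.default_rng(0)
i1 = 100.0 + rng.normal(0.0, 5.0, (64, 64))
i2 = 100.0 + rng.normal(0.0, 5.0, (64, 64))
print(SNR_IEC(i1, i2))             # roughly 20 for this noise level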
Project: imgProcessor    Author: radjkarl    | project source | file source
def scaleSignal(img, fitParams=None,
                backgroundToZero=False, reference=None):
    '''
    scale the image between...

    backgroundToZero=True -> 0 (average background) and 1 (maximum signal)
    backgroundToZero=False -> signal+-3std

    reference -> reference image -- scale image to fit this one

    returns:
    scaled image
    '''
    img = imread(img)
    if reference is not None:
        #         def fn(ii, m,n):
        #             return ii*m+n
        #         curve_fit(fn, img[::10,::10], ref[::10,::10])

        low, high = signalRange(img, fitParams)
        low2, high2 = signalRange(reference)
        img = np.asfarray(img)
        ampl = (high2 - low2) / (high - low)
        img -= low
        img *= ampl
        img += low2
        return img
    else:
        offs, div = scaleParams(img, fitParams, backgroundToZero)
        img = np.asfarray(img) - offs
        img /= div
        print('offset: %s, divisor: %s' % (offs, div))
        return img
Project: imgProcessor    Author: radjkarl    | project source | file source
def __init__(self, imgs):  # , z=None):
        #         if z is None:
        #             self.z = np.arange(len(imgs))
        #         else:
        #             self.z = np.asfarray(z)
        self.imgs = np.asfarray(imgs)
Project: imgProcessor    Author: radjkarl    | project source | file source
def cdf(arr, pos=None):
    '''
    Return the cumulative distribution function of a given array, or
    the intensity at which it reaches a given position (0-1)
    '''

    r = (arr.min(), arr.max())
    hist, bin_edges = np.histogram(arr, bins=2 * int(r[1] - r[0]), range=r)
    hist = np.asfarray(hist) / hist.sum()
    cdf = np.cumsum(hist)
    if pos is None:
        return cdf
    i = np.argmax(cdf > pos)
    return bin_edges[i]
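A quick usage sketch (assumes numpy as np and cdf from the snippet in scope): for a uniform integer ramp the empirical CDF rises to 1.0, and querying pos=0.5 returns an intensity near the middle of the range.

import numpy as np

arr = np.arange(100)               # integer-valued "image"
curve = cdf(arr)
print(curve[0], curve[-1])         # ~0.01 and ~1.0
print(cdf(arr, pos=0.5))           # ~50.0, the intensity below which half the pixels lie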
Project: imgProcessor    Author: radjkarl    | project source | file source
def correct(self, img):
        '''
        ...from perspective distortion:
         --> perspective transformation
         --> apply tilt factor (view factor) correction 
        '''
        print("CORRECT PERSPECTIVE ...")
        self.img = imread(img)

        if not self._homography_is_fixed:
            self._homography = None
        h = self.homography

        if self.opts['do_correctIntensity']:
            tf = self.tiltFactor()
            self.img = np.asfarray(self.img)
            if self.img.ndim == 3:
                for col in range(self.img.shape[2]):
                    self.img[..., col] /= tf
            else:
                self.img = self.img / tf
        warped = cv2.warpPerspective(self.img,
                                     h,
                                     self._newBorders[::-1],
                                     flags=cv2.INTER_LANCZOS4,
                                     **self.opts['cv2_opts'])
        return warped
Project: imgProcessor    Author: radjkarl    | project source | file source
def averageSameExpTimes(imgs_path):
    '''
    average background images with same exposure time
    '''
    firsts = imgs_path[:2]
    imgs = imgs_path[2:]
    for n, i in enumerate(firsts):
        firsts[n] = np.asfarray(imread(i))
    d = DarkCurrentMap(firsts)
    for i in imgs:
        i = imread(i)
        d.addImg(i)
    return d.map()
Project: dataArtist    Author: radjkarl    | project source | file source
def __init__(self, pos=[20, 20], size=[20, 20], grid=[4, 5],
                 shape='Rect', gap=[0, 0], subgrid=([], []),
                 subgrid_width=0.05, pen='w', **kwargs):
        '''
        shape = ['Rect', 'Square', 'Circular', 'Pseudosquare']
        '''
        self.opts = {'shape': shape,
                     'grid': np.asarray(grid),
                     'gap': np.asfarray(gap),
                     'subgrid': subgrid,
                     'subgrid_width': subgrid_width
                     }
        # TODO: limit max cell size while rescale
        self.maxCellSize = size / self.opts['grid']

        self.cells = []
        self._createCells()
        self._createSubgrid()

        # cannot set brush at the moment, so:
        if 'brush' in kwargs:
            kwargs.pop('brush')

        pg.ROI.__init__(self, pos, size, pen=pen, **kwargs)

        self.translatable = False
        self.mouseHovering = False

        self._setCellSize(self.state['size'])
        self._setCellPos(pos)

        self.layout_rescaling = False

        self.addScaleHandle([1, 1], [0, 0])
        self.addScaleHandle([0, 0], [1, 1])
        self.addScaleHandle([1, 0], [0, 1])
        self.addScaleHandle([0, 1], [1, 0])

        self.addRotateHandle([0.5, 1], [0.5, 0.5])
Project: tartarus    Author: sergiooramas    | project source | file source
def dcg_at_k(r, k):
    """
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Number of results to consider
    Returns:
        Discounted cumulative gain
    """
    r = np.asfarray(r)[:k]
    if r.size:
        return np.sum(r / np.log2(np.arange(2, r.size + 2)))
    return 0.
Project: aws-lambda-numpy    Author: vitolimandibhrata    | project source | file source
def test_asfarray_none(self, level=rlevel):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))
Project: sdaopt    Author: sgubianpm    | project source | file source
def success(self, x, tol=1.e-5):
        """
        Tests if a candidate solution is at the global minimum.
        The default test compares the function value at ``x`` with the known
        global minimum ``self.fglob`` to within ``tol``.

        Parameters
        ----------
        x : sequence
            The candidate vector for testing if the global minimum has been
            reached. Must have ``len(x) == self.N``
        tol : float
            The evaluated function and known global minimum must differ by less
            than this amount to be at a global minimum.

        Returns
        -------
        bool : is the candidate vector at the global minimum?
        """
        val = self.fun(asarray(x))
        if abs(val - self.fglob) < tol:
            return True

        # the solution should still be in bounds, otherwise immediate fail.
        if np.any(x > np.asfarray(self.bounds)[:, 1]):
            return False
        if np.any(x < np.asfarray(self.bounds)[:, 0]):
            return False

        # you found a lower global minimum.  This shouldn't happen.
        if val < self.fglob:
            raise ValueError("Found a lower global minimum",
                             x,
                             val,
                             self.fglob)

        return False
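The bounds test above relies on asfarray turning a list of (low, high) pairs into an (N, 2) float array; a minimal stand-alone sketch of that pattern (assumes numpy as np; the names are illustrative only):

import numpy as np

bounds = [(-5, 5), (0, 10), (-1, 1)]                # one (low, high) pair per dimension
b = np.asfarray(bounds)                             # shape (3, 2), dtype float64
x = np.asfarray([1.0, 2.0, 0.5])
print(np.any(x > b[:, 1]) or np.any(x < b[:, 0]))   # False: x lies inside the bounds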
Project: optimize-stencil    Author: Ablinne    | project source | file source
def __call__(self, args):
        """Convert a list or an array into  a :class:`numpy.recarray`

        :param args: List or Array to be converted
        :type args: iterable"""

        #print(self._fields, args)
        return np.asfarray(args).view(dtype = self.dtype).view(np.recarray)
Project: lambda-numba    Author: rlhotovy    | project source | file source
def test_asfarray_none(self, level=rlevel):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))
Project: thinstack-rl    Author: hans    | project source | file source
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      feed_dict=None,
                      prep_fn=None,
                      limit=0):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
                   dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    if t == dtypes.float16:
      dtype = np.float16
    elif t == dtypes.float32:
      dtype = np.float32
    else:
      dtype = np.float64
    x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)

  print("\ttheoretical jacobian..")
  jacob_t = _compute_theoretical_jacobian(x, x_shape, x_data, dy, y_shape, dx, feed_dict, prep_fn=prep_fn)

  print("\tnumeric jacobian..")
  jacob_n = _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta, feed_dict, prep_fn=prep_fn, limit=limit)
  return jacob_t, jacob_n
Project: PassportEye    Author: konstantint    | project source | file source
def __init__(self, center, width, height, angle, points=None):
        """Creates a new RotatedBox.

        :param points: This parameter may be used to indicate the set of points used to create the box.
        """
        self.center = np.asfarray(center)
        self.width = width
        self.height = height
        self.angle = angle
        self.points = points
Project: PassportEye    Author: konstantint    | project source | file source
def rotated(self, rotation_center, angle):
        """Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.

        >>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
        """
        rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
        t = np.asfarray(rotation_center)
        new_c = np.dot(rot.T, (self.center - t)) + t
        return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
Project: 007    Author: wabyking    | project source | file source
def dcg_at_k(r, k):
    r = np.asfarray(r)[:k]
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))
Project: deliver    Author: orchestor    | project source | file source
def test_asfarray_none(self, level=rlevel):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))
Project: R-CNN_LIGHT    Author: YeongHyeon    | project source | file source
def next_batch(self, batch_size=10, start=-1, end=-1, nth=-1):
        data = np.empty((0, self._data_len), float)
        label = np.empty((0, self._class_len), int)

        with open(PACK_PATH+"/dataset/"+str(self._who_am_i)+".csv") as f:
            lines = f.readlines()

        if(nth == -1):
            if((start == -1) and (end == -1)):
                datas = random.sample(lines, batch_size)
            else:
                datas = lines[start:end]
        else:
            datas = []
            datas.append(lines[nth])

        for d in datas:
            sv_data = d.split(',')
            tmp_label = sv_data[0]
            tmp_data = sv_data[1:len(sv_data)-1]

            tmp_data = np.asarray(tmp_data).reshape((1, len(tmp_data)))

            label = np.append(label, np.eye(self._class_len)[int(np.asfarray(tmp_label))].reshape(1, self._class_len), axis=0)
            data = np.append(data, tmp_data, axis=0)

        return data, label
Project: QScode    Author: PierreHao    | project source | file source
def main1():
    #target_names = np.array(args.names)
    X, Y = getFeatureAndPath(args.image_dir)
    X = np.asfarray(X,dtype='float')
    #X_pca = PCA(n_components=128).fit_transform(X)
    X_pca = X
    tsne = TSNE(n_components=2, init='random', random_state=0)
    X_r = tsne.fit_transform(X_pca)
    imgPlot(X_r,Y)
Project: irgan    Author: geek-ai    | project source | file source
def dcg_at_k(r, k):
    r = np.asfarray(r)[:k]
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))
Project: irgan    Author: geek-ai    | project source | file source
def dcg_at_k(r, k):
    r = np.asfarray(r)[:k]
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))
Project: irgan    Author: geek-ai    | project source | file source
def recall_at_k(r, k, all_pos_num):
    r = np.asfarray(r)[:k]
    return np.sum(r) / all_pos_num
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def __init__(self, ci, cn, transf):

        Layer.__init__(self, ci, cn, cn, {'w': (cn, ci), 'b': cn})

        self.transf = transf
        if not hasattr(transf, 'out_minmax'):
            test = np.asfarray([-1e100, -100, -10, -1, 0, 1, 10, 100, 1e100])
            val = self.transf(test)
            self.out_minmax = np.array([val.min(), val.max()] * self.co)
        else:
            self.out_minmax = np.asfarray([transf.out_minmax] * self.co)
        # default init function
        self.initf = init.initwb_reg
        #self.initf = init.initwb_nw
        self.s = np.zeros(self.cn)
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def __init__(self, ci, cn, transf, max_iter, delta):
        Layer.__init__(self, ci, cn, cn, {'w': (cn, ci), 'b': cn})
        self.max_iter = max_iter
        self.delta = delta
        self.transf = transf
        self.outs = []
        if not hasattr(transf, 'out_minmax'):
            test = np.asfarray([-1e100, -100, -10, -1, 0, 1, 10, 100, 1e100])
            val = self.transf(test)
            self.out_minmax = np.array([val.min(), val.max()] * self.co)
        else:
            self.out_minmax = np.asfarray([transf.out_minmax] * self.co)
        self.initf = None
        self.s = np.zeros(self.cn)
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def newlvq(minmax, cn0, pc):
    """
    Create a learning vector quantization (LVQ) network

    :Parameters:
        minmax: list of list, the outer list is the number of input neurons, 
            inner lists must contain 2 elements: min and max
            Range of input value
        cn0: int
            Number of neurons in input layer
        pc: list
            List of percent, sum(pc) == 1
    :Returns:
        net: Net
    :Example:
        >>> # create network with 2 inputs,
        >>> # 10 neurons in the input layer and 2 neurons in the output layer
        >>> net = newlvq([[-1, 1], [-1, 1]], 10, [0.6, 0.4])

    """
    pc = np.asfarray(pc)
    assert sum(pc) == 1
    ci = len(minmax)
    cn1 = len(pc)
    assert cn0 > cn1

    layer_inp = layer.Competitive(ci, cn0)
    layer_out = layer.Perceptron(cn0, cn1, trans.PureLin())
    layer_out.initf = None
    layer_out.np['b'].fill(0.0)
    layer_out.np['w'].fill(0.0)
    inx = np.floor(cn0 * pc.cumsum()).astype(int)  # integer indices for slicing
    for n, i in enumerate(inx):
        st = 0 if n == 0 else inx[n - 1]
        layer_out.np['w'][n][st:i].fill(1.0)
    net = Net(minmax, cn1, [layer_inp, layer_out],
                            [[-1], [0], [1]], train.train_lvq, error.MSE())

    return net
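The weight layout above spreads the cn0 competitive neurons over the output classes according to pc; a numpy-only sketch of just that index computation (illustrative, independent of the library classes):

import numpy as np

cn0, pc = 10, np.asfarray([0.6, 0.4])
inx = np.floor(cn0 * pc.cumsum()).astype(int)        # [6, 10]
for n, i in enumerate(inx):
    st = 0 if n == 0 else inx[n - 1]
    print('output neuron %d <- competitive neurons %d..%d' % (n, st, i - 1))
# output neuron 0 <- competitive neurons 0..5
# output neuron 1 <- competitive neurons 6..9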
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def __init__(self, x):

        x = np.asfarray(x)
        if x.ndim != 2:
            raise ValueError('x must have 2 dimensions')
        min = np.min(x, axis=0)
        dist = np.max(x, axis=0) - min

        min.shape = 1, min.size
        dist.shape = 1, dist.size

        self.min = min
        self.dist = dist
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def __call__(self, x):
        x = np.asfarray(x)
        res = (x - self.min) / self.dist

        return res
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def renorm(self, x):
        x = np.asfarray(x)

        res = x * self.dist + self.min
        return res

#------------------------------------------------------------
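The three Norm methods above implement a plain min-max rescaling; a compact numpy-only round trip of the same idea (sketch; assumes numpy as np):

import numpy as np

x = np.asfarray([[1.0, 10.0], [3.0, 30.0], [2.0, 20.0]])   # 3 samples, 2 features
mn = x.min(axis=0)
dist = x.max(axis=0) - mn
scaled = (x - mn) / dist            # each column rescaled to [0, 1]
restored = scaled * dist + mn       # renorm: back to the original values
print(scaled.min(axis=0), scaled.max(axis=0))   # [0. 0.] [1. 1.]
print(np.allclose(restored, x))                 # True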
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def step(self, inp):
        """
        Simulated step

        :Parameters:
            inp: array like
                Input vector
        :Returns:
            out: array
                Output vector

        """
        #TODO: self.inp=np.asfarray(inp)?

        self.inp = inp
        for nl, nums in enumerate(self.connect):
            if len(nums) > 1:
                signal = []
                for ns in nums:
                    s = self.layers[ns].out if ns != -1 else inp
                    signal.append(s)
                signal = np.concatenate(signal)
            else:
                ns = nums[0]
                signal = self.layers[ns].out if ns != -1 else inp
            if nl != len(self.layers):
                self.layers[nl].step(signal)
        self.out = signal
        return self.out
Project: Alfred    Author: jkachhadia    | project source | file source
def test_asfarray_none(self, level=rlevel):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))
Project: imgProcessor    Author: radjkarl    | project source | file source
def SNR(img1, img2=None, bg=None,
        noise_level_function=None,
        constant_noise_level=False,
        imgs_to_be_averaged=False):
    '''
    Returns a signal-to-noise-map
    uses the algorithm described in BEDRICH 2016 JPV (not yet published)

    :param constant_noise_level: True, to assume noise to be constant
    :param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
    '''
    # dark current subtraction:
    img1 = np.asfarray(img1)
    if bg is not None:
        img1 = img1 - bg

    # SIGNAL:
    if img2 is not None:
        img2_exists = True
        img2 = np.asfarray(img2)
        if bg is not None:
            img2 = img2 - bg
        # signal as average on both images
        signal = 0.5 * (img1 + img2)
    else:
        img2_exists = False
        signal = img1
    # denoise:
    signal = median_filter(signal, 3)

    # NOISE
    if constant_noise_level:
        # CONSTANT NOISE
        if img2_exists:
            d = img1 - img2
            # 0.5**0.5 because of sum of variances
            noise = 0.5**0.5 * np.mean(np.abs((d))) * F_RMS2AAD
        else:
            d = (img1 - signal) * F_NOISE_WITH_MEDIAN
            noise = np.mean(np.abs(d)) * F_RMS2AAD
    else:
        # NOISE LEVEL FUNCTION
        if noise_level_function is None:
            noise_level_function, _ = oneImageNLF(img1, img2, signal)
        noise = noise_level_function(signal)
        noise[noise < 1] = 1  # otherwise SNR could be higher than image value

    if imgs_to_be_averaged:
        # SNR will be higher if both given images are supposed to be averaged:
        # factor of noise reduction if SNR if for average(img1, img2):
        noise *= 0.5**0.5

    # BACKGROUND estimation and removal if background not given:
    if bg is None:
        bg = getBackgroundLevel(img1)
        signal -= bg
    snr = signal / noise

    # limit to 1, saying at these points signal=noise:
    snr[snr < 1] = 1
    return snr
Project: imgProcessor    Author: radjkarl    | project source | file source
def SNRaverage(snr, method='average', excludeBackground=True,
               checkBackground=True,
               backgroundLevel=None):
    '''
    average a signal-to-noise map
    :param method:  ['average','X75', 'RMS', 'median'] - X75: this SNR will be exceeded by 75% of the signal
    :type method: str
    :param checkBackground:  check whether there is actually a background level to exclude
    :type  checkBackground: bool
    :returns: averaged SNR as float
    '''
    if excludeBackground:
        # get background level
        if backgroundLevel is None:
            try:
                f = FitHistogramPeaks(snr).fitParams
                if checkBackground:
                    if not hasBackground(f):
                        excludeBackground = False
                if excludeBackground:
                    backgroundLevel = getSignalMinimum(f)
            except (ValueError, AssertionError):
                backgroundLevel = snr.min()
        if excludeBackground:
            snr = snr[snr >= backgroundLevel]

    if method == 'RMS':
        avg = (snr**2).mean()**0.5

    elif method == 'average':
        avg = snr.mean()
#         if np.isnan(avg):
#             avg = np.nanmean(snr)
    elif method == 'median':
        avg = np.median(snr)
#         if np.isnan(avg):
#             avg = np.nanmedian(snr) 

    elif method == 'X75':
        r = (snr.min(), snr.max())
        hist, bin_edges = np.histogram(snr, bins=2 * int(r[1] - r[0]), range=r)
        hist = np.asfarray(hist) / hist.sum()
        cdf = np.cumsum(hist)
        i = np.argmax(cdf > 0.25)
        avg = bin_edges[i]
    else:
        raise NotImplementedError("given SNR average doesn't exist")

    return avg
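A quick look at how the averaging choices above behave on a skewed SNR map (sketch; assumes numpy as np and SNRaverage in scope; background exclusion is switched off so no histogram fitting is needed):

import numpy as np

snr = np.concatenate([np.full(90, 20.0), np.full(10, 200.0)])        # a few very bright regions
print(SNRaverage(snr, method='average', excludeBackground=False))    # 38.0  (pulled up by outliers)
print(SNRaverage(snr, method='median', excludeBackground=False))     # 20.0  (robust to them)
print(SNRaverage(snr, method='RMS', excludeBackground=False))        # ~66.0 (emphasises them most)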
Project: R-CNN_LIGHT    Author: YeongHyeon    | project source | file source
def imagelist_to_dataset(image_dir, image_lists, imsize=28):
    master_key, sub_key = key_from_dictionary(image_lists)

    print("\n***** Make image list *****")
    result_dir = "dataset/"
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    else:
        shutil.rmtree(result_dir)
        os.makedirs(result_dir)

    x_train = []
    t_train = np.empty((0), int)
    x_test = []
    t_test = np.empty((0), int)
    x_valid = []
    t_valid = np.empty((0), int)
    for key_i in [0, 1, 3]:
        if key_i == 0:
            result_name = "train"
        elif key_i == 1:
            result_name = "test"
        else:
            result_name = "valid"
        sys.stdout.write(" Make \'"+result_name+" list\'...")
        # m: class
        for m in master_key:

                for i in range(len(image_lists[m][sub_key[key_i]])):
                    # m: category
                    # image_lists[m][sub_key[key_i]][i]: image name
                    image_path = "./"+image_dir+"/"+m+"/"+image_lists[m][sub_key[key_i]][i]
                    # Read jpg images and resizing it.
                    origin_image = cv2.imread(image_path)
                    resized_image = cv2.resize(origin_image, (imsize, imsize))
                    grayscale_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)

                    image_save(result_dir+"origin/"+result_name+"/", image_lists[m][sub_key[key_i]][i], origin_image)
                    image_save(result_dir+"resize/"+result_name+"/", image_lists[m][sub_key[key_i]][i], resized_image)
                    image_save(result_dir+"gray/"+result_name+"/", image_lists[m][sub_key[key_i]][i], grayscale_image)

                    if key_i == 0:
                        x_train.append(resized_image)
                        t_train = np.append(t_train, np.array([int(np.asfarray(m))]), axis=0)
                    elif key_i == 1:
                        x_test.append(resized_image)
                        t_test = np.append(t_test, np.array([int(np.asfarray(m))]), axis=0)
                    else:
                        x_valid.append(resized_image)
                        t_valid = np.append(t_valid, np.array([int(np.asfarray(m))]), axis=0)

        print(" complete.")
    #print(" x_train shape: " + str(np.array(x_train).shape))
    #print(" t_train shape: " + str(np.array(t_train).shape))
    #print(" x_test shape: " + str(np.array(x_test).shape))
    #print(" t_test shape: " + str(np.array(t_test).shape))
    x_train = np.asarray(x_train)
    t_train = np.asarray(t_train)
    x_test = np.asarray(x_test)
    t_test = np.asarray(t_test)
    return (x_train, t_train), (x_test, t_test), len(master_key)
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def newhop(target, transf=None, max_init=10, delta=0):
    """
    Create a Hopfield recurrent network

    :Parameters:
        target: array like (l x net.co)
            train target patterns
        transf: func (default HardLims)
            Activation function
        max_init: int (default 10)
            Maximum number of recurrent iterations
        delta: float (default 0)
            Minimum difference between 2 outputs to stop the recurrent cycle
    :Returns:
        net: Net
    :Example:
        >>> net = newhop([[-1, -1, -1], [1, -1, 1]])
        >>> output = net.sim([[-1, 1, -1], [1, -1, 1]])

    """

    target = np.asfarray(target)
    assert target.ndim == 2

    ci = len(target[0])
    if transf is None:
        transf = trans.HardLims()
    l = layer.Reccurent(ci, ci, transf, max_init, delta)
    w = l.np['w']
    b = l.np['b']

    # init weight
    for i in range(ci):
        for j in range(ci):
            if i == j:
                w[i, j] = 0.0
            else:
                w[i, j] = np.sum(target[:, i] * target[:, j]) / ci
        b[i] = 0.0
    l.initf = None

    minmax = transf.out_minmax if hasattr(transf, 'out_minmax') else [-1, 1]

    net = Net([minmax] * ci, ci, [l], [[-1], [0]], None, None)
    return net
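The weight initialisation above is the classic Hopfield Hebbian rule with a zero diagonal; a numpy-only sketch of just that matrix for two stored patterns (illustrative, independent of the Net/layer classes):

import numpy as np

target = np.asfarray([[-1, -1, -1], [1, -1, 1]])
ci = target.shape[1]
w = np.zeros((ci, ci))
for i in range(ci):
    for j in range(ci):
        if i != j:
            w[i, j] = np.sum(target[:, i] * target[:, j]) / ci
print(w)   # zero diagonal; w[0, 2] = w[2, 0] = 2/3, every other coupling is 0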
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def newhem(target, transf=None, max_iter=10, delta=0):
    """
    Create a Hamming recurrent network with 2 layers

    :Parameters:
        target: array like (l x net.co)
            train target patterns
        transf: func (default SatLinPrm(0.1, 0, 10))
            Activation function of input layer
        max_iter: int (default 10)
            Maximum number of recurrent iterations
        delta: float (default 0)
            Minimum difference between 2 outputs to stop the recurrent cycle
    :Returns:
        net: Net
    :Example:
        >>> net = newhem([[-1, -1, -1], [1, -1, 1]])
        >>> output = net.sim([[-1, 1, -1], [1, -1, 1]])

    """

    target = np.asfarray(target)
    assert target.ndim == 2

    cn = target.shape[0]
    ci = target.shape[1]

    if transf is None:
        transf = trans.SatLinPrm(0.1, 0, 10)
    layer_inp = layer.Perceptron(ci, cn, transf)

    # init input layer
    layer_inp.initf = None
    layer_inp.np['b'][:] = float(ci) / 2
    for i, tar in enumerate(target):
        layer_inp.np['w'][i][:] = tar / 2

    layer_out = layer.Reccurent(cn, cn, trans.SatLinPrm(1, 0, 1e6), max_iter, delta)
    # init output layer
    layer_out.initf = None
    layer_out.np['b'][:] = 0
    eps = - 1.0 / cn
    for i in range(cn):
        layer_out.np['w'][i][:] = [eps] * cn
        layer_out.np['w'][i][i] = 1
    # create network
    minmax = [[-1, 1]] * ci
    layers = [layer_inp, layer_out]
    connect = [[-1], [0], [1]]
    net = Net(minmax, cn, layers, connect, None, None)
    return net