Python numpy module: nan_to_num() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.nan_to_num().
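Before the project examples, a minimal sketch of the default behaviour (the array values are chosen only for illustration): NaN is replaced by 0.0, while positive and negative infinity are replaced by the largest and smallest representable finite floats.

import numpy as np

a = np.array([1.0, np.nan, np.inf, -np.inf])
print(np.nan_to_num(a))  # NaN -> 0.0, +inf -> ~1.80e+308, -inf -> ~-1.80e+308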

Project: uchroma    Author: cyanogen    | Project source | File source
def blend(img_in: np.ndarray, img_layer: np.ndarray, blend_op=None, opacity: float=1.0):
    # sanity check of inputs
    assert img_in.dtype == np.float64, 'Input variable img_in should be of numpy.float64 type.'
    assert img_layer.dtype == np.float64, 'Input variable img_layer should be of numpy.float64 type.'
    assert img_in.shape[2] == 4, 'Input variable img_in should be of shape [:, :,4].'
    assert img_layer.shape[2] == 4, 'Input variable img_layer should be of shape [:, :,4].'
    assert 0.0 <= opacity <= 1.0, 'Opacity needs to be between 0.0 and 1.0.'

    ratio = _compose_alpha(img_in, img_layer, opacity)

    if blend_op is None:
        blend_op = BlendOp.screen
    elif isinstance(blend_op, str):
        if hasattr(BlendOp, blend_op):
            blend_op = getattr(BlendOp, blend_op)
        else:
            raise ValueError('Invalid blend mode: %s' % blend_op)

    comp = blend_op(img_in, img_layer)

    ratio_rs = np.reshape(np.repeat(ratio, 3), [comp.shape[0], comp.shape[1], comp.shape[2]])
    img_out = comp*ratio_rs + img_in[:, :, :3] * (1.0-ratio_rs)
    img_out = np.nan_to_num(np.dstack((img_out, img_in[:, :, 3])))  # add alpha channel and replace nans
    return img_out
Project: neat_py    Author: NathanZabriskie    | Project source | File source
def load_arff(arff_file, one_hot=True, normalize=True):
    with open(arff_file, 'r') as f:
        obj = arff.load(f, encode_nominal=True)

    data = obj[DATA]

    labels = [row[-1] for row in data]
    data = np.array(data)
    data = data[:,:-1]
    if normalize:
        data = (data - data.min(axis=0)) / data.ptp(axis=0)
        data = np.nan_to_num(data)
    if one_hot:
        label_binarizer = sklearn.preprocessing.LabelBinarizer()
        label_binarizer.fit(range(max(labels) + 1))
        labels = label_binarizer.transform(labels)

    labels = np.array(labels, dtype=np.float32)

    return data, labels
Project: brainiak    Author: brainiak    | Project source | File source
def _normalize_for_correlation(data, axis):
    """normalize the data before computing correlation

    The data will be z-scored and divided by sqrt(n)
    along the assigned axis

    Parameters
    ----------
    data: 2D array

    axis: int
        specify which dimension of the data should be normalized

    Returns
    -------
    data: 2D array
        the normalized data
    """
    shape = data.shape
    data = zscore(data, axis=axis, ddof=0)
    # if zscore fails (standard deviation is zero),
    # set all values to be zero
    data = np.nan_to_num(data)
    data = data / math.sqrt(shape[axis])
    return data
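A short usage sketch, assuming the _normalize_for_correlation helper above is in scope (the random data is made up): after this normalization, the correlation matrix between two data sets reduces to a plain matrix product.

import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(10, 4)  # 10 samples x 4 features
y = rng.rand(10, 4)

x_n = _normalize_for_correlation(x, axis=0)
y_n = _normalize_for_correlation(y, axis=0)
corr = x_n.T.dot(y_n)  # 4 x 4 matrix of Pearson correlations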
Project: py-factorgraph    Author: mbforbes    | Project source | File source
def divide_safezero(a, b):
    '''
    Divies a by b, then turns nans and infs into 0, so all division by 0
    becomes 0.

    Args:
        a (np.ndarray)
        b (np.ndarray|int|float)

    Returns:
        np.ndarray
    '''
    # deal with divide-by-zero: turn x/0 (inf) into 0, and turn 0/0 (nan) into
    # 0.
    c = a / b
    c[c == np.inf] = 0.0
    c = np.nan_to_num(c)
    return c
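A quick usage sketch, assuming divide_safezero as defined above is in scope (the inputs are made up). NumPy still emits divide-by-zero warnings while computing a / b, but the returned array has zeros where the division was undefined. Note that a negative value divided by zero yields -inf, which nan_to_num maps to a very large negative float rather than zero.

import numpy as np

a = np.array([1.0, 2.0, 0.0])
b = np.array([0.0, 4.0, 0.0])
print(divide_safezero(a, b))  # [0.  0.5 0. ]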


# Classes
# -----------------------------------------------------------------------------
Project: scipyplot    Author: robertocalandra    | Project source | File source
def set_data(self, y, variance, x=None):
        """
        update a gauss_1D with new data
        :param y: 
        :param variance: 
        :param x: 
        :return: 
        """
        n_points = len(y)
        if x is None:
            x = np.arange(n_points)
        self._handle.set_data(x, y)  # Update mean
        new_percentiles = []
        out = self.distribution.split("+")
        n_percentiles = len(out)
        sub_alpha = str(self.alpha / n_percentiles)  # Normalize w.r.t. the number of percentiles
        for i, percentile in enumerate(self._percentiles):
            percentile.remove()
            percentile = float(out[i])
            assert 0 <= percentile <= 100, 'Percentile must be between 0 and 100. Instead is %f' % percentile
            interval = scipy.stats.norm.interval(percentile/100, loc=y, scale=np.sqrt(variance))
            interval = np.nan_to_num(interval)  # Fix stupid case of norm.interval(0) returning nan
            new_percentiles.append(plt.fill_between(x, interval[0], interval[1], color=self._handle.get_color(), alpha=sub_alpha))
        #       TODO: not implemented yet
        pass
Project: hydrus    Author: mark-r-g    | Project source | File source
def __init__(self, z, w, name='', quadrature=None, cfg=None):
        self.t0 = datetime.datetime.now()

        # If given a namespace of configuration settings, use it.
        # Otherwise fall back to whatever is in `constants.py`.
        self.cfg = cfg or constants

        # If w or z are DataFrames, convert them to ndarrays.
        self.w = w.values if hasattr(w, 'values') else w
        self.z = z.values if hasattr(z, 'values') else z

        self.w2 = np.nan_to_num(self.w)
        self.num2 = np.nan_to_num(self.z)

        self.name, self.n = name, w.shape[0]
        self.ests_init = np.array(pack(self.cfg.INITIAL_LVM_PARAMS, w.shape[1]))

        if quadrature or (cfg is not None and cfg.QUADRATURE):
            self.ests_ll = self.ests_ll_quad
            self.ests_bounds = pack(self.cfg.QUAD_BOUNDS, w.shape[1])
        else:
            self.ests_ll = self.ests_ll_exact
            self.ests_bounds = pack(self.cfg.EXACT_BOUNDS, w.shape[1])
Project: hydrus    Author: mark-r-g    | Project source | File source
def ests_ll_quad(self, params):
        """
        Calculate the loglikelihood given model parameters `params`.

        This method uses Gaussian quadrature, and thus returns an *approximate*
        integral.
        """
        mu0, gamma0, err0 = np.split(params, 3)
        x = np.tile(self.z, (self.cfg.QCOUNT, 1, 1))  # (QCOUNTXnhospXnmeas)
        loc = mu0 + np.outer(QC1, gamma0)
        loc = np.tile(loc, (self.n, 1, 1))
        loc = np.transpose(loc, (1, 0, 2))
        scale = np.tile(err0, (self.cfg.QCOUNT, self.n, 1))
        zs = lpdf_3d(x=x, loc=loc, scale=scale)

        w2 = np.tile(self.w, (self.cfg.QCOUNT, 1, 1))
        wted = np.nansum(w2 * zs, axis=2).T  # (nhosp X QCOUNT)
        qh = np.tile(QC1, (self.n, 1))  # (nhosp X QCOUNT)
        combined = wted + norm.logpdf(qh)  # (nhosp X QCOUNT)

        return logsumexp(np.nan_to_num(combined), b=QC2, axis=1)  # (nhosp)
Project: chainladder-python    Author: jbogaardt    | Project source | File source
def __model_form(self, tri_array):
        w = np.nan_to_num(self.weights/tri_array[:,:,:-1]**(2-self.alpha))
        x = np.nan_to_num(tri_array[:,:,:-1]*(tri_array[:,:,1:]*0+1))
        y = np.nan_to_num(tri_array[:,:,1:])
        LDF = np.sum(w*x*y,axis=1)/np.sum(w*x*x,axis=1)
        #Chainladder (alpha=1/delta=1)
        #LDF = np.sum(np.nan_to_num(tri_array[:,:,1:]),axis=1) / np.sum(np.nan_to_num((tri_array[:,:,1:]*0+1)*tri_array[:,:,:-1]),axis=1)
        #print(LDF.shape)
        # assumes no tail
        CDF = np.append(np.cumprod(LDF[:,::-1],axis=1)[:,::-1],np.array([1]*tri_array.shape[0]).reshape(tri_array.shape[0],1),axis=1)    
        latest = np.flip(tri_array,axis=1).diagonal(axis1=1,axis2=2)   
        ults = latest*CDF
        lu = list(ults)
        lc = list(CDF)
        exp_cum_triangle = np.array([np.flipud(lu[num].reshape(tri_array.shape[2],1).dot(1/lc[num].reshape(1,tri_array.shape[2]))) for num in range(tri_array.shape[0])])
        exp_incr_triangle = np.append(exp_cum_triangle[:,:,0,np.newaxis],np.diff(exp_cum_triangle),axis=2)
        return LDF, CDF, ults, exp_incr_triangle
Project: sl-quant    Author: danielzak    | Project source | File source
def init_state(indata, test=False):
    close = indata['close'].values
    diff = np.diff(close)
    diff = np.insert(diff, 0, 0)
    sma15 = SMA(indata, timeperiod=15)
    sma60 = SMA(indata, timeperiod=60)
    rsi = RSI(indata, timeperiod=14)
    atr = ATR(indata, timeperiod=14)

    #--- Preprocess data
    xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))

    xdata = np.nan_to_num(xdata)
    if test == False:
        scaler = preprocessing.StandardScaler()
        xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
        joblib.dump(scaler, 'data/scaler.pkl')
    elif test == True:
        scaler = joblib.load('data/scaler.pkl')
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
    state = xdata[0:1, 0:1, :]

    return state, xdata, close

#Take Action
Project: sl-quant    Author: danielzak    | Project source | File source
def init_state(data):

    close = data
    diff = np.diff(data)
    diff = np.insert(diff, 0, 0)

    #--- Preprocess data
    xdata = np.column_stack((close, diff))
    xdata = np.nan_to_num(xdata)
    scaler = preprocessing.StandardScaler()
    xdata = scaler.fit_transform(xdata)

    state = xdata[0:1, :]
    return state, xdata

#Take Action
Project: sl-quant    Author: danielzak    | Project source | File source
def init_state(data):

    close = data
    diff = np.diff(data)
    diff = np.insert(diff, 0, 0)

    #--- Preprocess data
    xdata = np.column_stack((close, diff))
    xdata = np.nan_to_num(xdata)
    scaler = preprocessing.StandardScaler()
    xdata = scaler.fit_transform(xdata)

    state = xdata[0:1, :]
    return state, xdata

#Take Action
Project: groundfailure    Author: usgs    | Project source | File source
def test_validateTerms():
    data = {'b1': "np.nan_to_num(self.layerdict['friction'].getSlice("
                  "rowstart, rowend, colstart, colend, name='friction'))",
            'b2': "self.layerdict['slope'].getSlice(rowstart, rowend, "
                  "colstart, colend, name='slope')/100.",
            'b3': "np.log(self.layerdict['vs30'].getSlice(rowstart, rowend, "
                  "colstart, colend, name='vs30'))",
            'b4': "self.layerdict['cti1'].getSlice(rowstart, rowend, "
                  "colstart, colend, name='cti1')",
            'b5': "self.layerdict['precip'].getSlice(rowstart, rowend, "
                  "colstart, colend, name='precip')"}
    timeField = 'MONTH'
    coeff = LM.validateCoefficients(cmodel)
    layers = LM.validateLayers(cmodel)
    terms, time = LM.validateTerms(cmodel, coeff, layers)
    assert time == timeField
    assert data == terms
Project: causal_bandits    Author: finnhacks42    | Project source | File source
def V_short(self,eta):
        sum0 = np.zeros(7,dtype=float)
        sum1 = np.zeros(7,dtype=float)
        for n1,n2 in product(range(self.N1+1),range(self.N2+1)):
             wdo = comb(self.N1,n1,exact=True)*comb(self.N2,n2,exact=True)
             wdox10 = comb(self.N1-1,n1,exact=True)*comb(self.N2,n2,exact=True)
             wdox11 = comb(self.N1-1,n1-1,exact=True)*comb(self.N2,n2,exact=True)
             wdox20 = comb(self.N1,n1,exact=True)*comb(self.N2-1,n2,exact=True)
             wdox21 = comb(self.N1,n1,exact=True)*comb(self.N2-1,n2-1,exact=True)
             w = np.asarray([wdox10,wdox20,wdox11,wdox21,wdo,wdo,wdo])

             pz0,pz1 = self.p_n_given_z(n1,n2)

             counts = [self.N1-n1,self.N2-n2,n1,n2,1,1,1]
             Q = (eta*pz0*counts*(1-self.pZgivenA)+eta*pz1*counts*self.pZgivenA).sum()

             ratio = np.nan_to_num(np.true_divide(pz0*(1-self.pZgivenA)+pz1*self.pZgivenA,Q))

             sum0 += np.asfarray(w*pz0*ratio)
             sum1 += np.asfarray(w*pz1*ratio)
        result = self.pZgivenA*sum1+(1-self.pZgivenA)*sum0
        return result
Project: MicrolensingLCOGT    Author: dg7541    | Project source | File source
def stetsonJ(mag, magerr):
    """The variability index K was first suggested by Peter B. Stetson and serves as a 
    measure of the kurtosis of the magnitude distribution. 
    See: (P. B. Stetson, Publications of the Astronomical Society of the Pacific 108, 851 (1996)).

    :param mag: the time-varying intensity of the object. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: float
    """

    mag, magerr = remove_bad(mag, magerr)
    n = float(len(mag))
    #mean = meanMag(mag, magerr)
    mean = np.median(mag)
    delta_list = []

    for i in range(0, len(mag)):
        delta = np.sqrt(n/(n-1.))*((mag[i] - mean)/magerr[i])
        delta_list.append(delta)

    val = np.nan_to_num([x*y for x,y in zip(delta_list[0:int(n)-1], delta_list[1:int(n)])])    
    sign = np.sign(val)
    stetj = sum(sign*np.sqrt(np.abs(val)))
    return stetj
Project: MicrolensingLCOGT    Author: dg7541    | Project source | File source
def stetsonK(mag, magerr):
    """The variability index K was first suggested by Peter B. Stetson and serves as a 
    measure of the kurtosis of the magnitude distribution. 
    See: (P. B. Stetson, Publications of the Astronomical Society of the Pacific 108, 851 (1996)).

    :param mag: the time-varying intensity of the object. Must be an array.
    :param magerr: photometric error for the intensity. Must be an array.

    :rtype: float
    """

    mag, magerr = remove_bad(mag, magerr)            
    n = float(len(mag))
    #mean = meanMag(mag, magerr)
    mean = np.median(mag)

    delta = np.sqrt((n/(n-1.)))*((mag - mean)/magerr)

    stetsonK = ((1./n)*sum(abs(delta)))/(np.sqrt((1./n)*sum(delta**2)))
    return np.nan_to_num(stetsonK)
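A rough sanity check with synthetic data (not taken from the project): for pure Gaussian noise the Stetson K index is expected to be close to sqrt(2/pi) ≈ 0.798. The sketch below reproduces the core formula directly, so it does not depend on the remove_bad helper.

import numpy as np

rng = np.random.RandomState(42)
mag = 15.0 + 0.05 * rng.randn(500)  # synthetic light curve
magerr = np.full(500, 0.05)

n = float(len(mag))
delta = np.sqrt(n / (n - 1.0)) * ((mag - np.median(mag)) / magerr)
k = ((1.0 / n) * np.sum(np.abs(delta))) / np.sqrt((1.0 / n) * np.sum(delta ** 2))
print(k)  # roughly 0.8 for Gaussian noise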
Project: Link-Prediction    Author: CodeZWT    | Project source | File source
def Salton(MatrixAdjacency_Train):
    similarity_StartTime = time.perf_counter()

    similarity = np.dot(MatrixAdjacency_Train,MatrixAdjacency_Train)

    deg_row = sum(MatrixAdjacency_Train)
    deg_row.shape = (deg_row.shape[0],1)
    deg_row_T = deg_row.T
    tempdeg = np.dot(deg_row,deg_row_T)
    temp = np.sqrt(tempdeg)

    np.seterr(divide='ignore', invalid='ignore')
    Matrix_similarity = np.nan_to_num(similarity / temp)

#     print np.isnan(Matrix_similarity)
#     Matrix_similarity = np.nan_to_num(Matrix_similarity)
#     print np.isnan(Matrix_similarity)
    similarity_EndTime = time.perf_counter()
    print("    SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
    return Matrix_similarity
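A toy sketch of the same Salton index on a small undirected graph (the adjacency matrix is made up): each entry is the number of common neighbours divided by sqrt(k_i * k_j), and nan_to_num() zeroes the entries involving the isolated node, where the division is 0/0.

import numpy as np

A = np.array([[0, 1, 1, 0],
              [1, 0, 1, 0],
              [1, 1, 0, 0],
              [0, 0, 0, 0]], dtype=float)  # node 3 is isolated

common = A.dot(A)  # common-neighbour counts
deg = A.sum(axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
    salton = np.nan_to_num(common / np.sqrt(np.outer(deg, deg)))
print(salton)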
Project: Link-Prediction    Author: CodeZWT    | Project source | File source
def ACT(MatrixAdjacency_Train):
    similarity_StartTime = time.perf_counter()

    Matrix_D = np.diag(sum(MatrixAdjacency_Train))
    Matrix_Laplacian = Matrix_D - MatrixAdjacency_Train
    INV_Matrix_Laplacian  = np.linalg.pinv(Matrix_Laplacian)

    Array_Diag = np.diag(INV_Matrix_Laplacian)
    Matrix_ONE = np.ones([MatrixAdjacency_Train.shape[0],MatrixAdjacency_Train.shape[0]])
    Matrix_Diag = Array_Diag * Matrix_ONE

    Matrix_similarity = Matrix_Diag + Matrix_Diag.T - (2 * Matrix_Laplacian)
    print(Matrix_similarity)
    Matrix_similarity = Matrix_ONE / Matrix_similarity
    Matrix_similarity = np.nan_to_num(Matrix_similarity)

    similarity_EndTime = time.perf_counter()
    print("    SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
    return Matrix_similarity
Project: vsmlib    Author: undertherain    | Project source | File source
def load_from_hdf5(self, path):
        """load model in compressed sparse row format from hdf5 file

        hdf5 file should contain row_ptr, col_ind and data array

        Args:
            path: path to the embeddings folder
        """
        self.load_metadata(path)
        f = tables.open_file(os.path.join(path, 'cooccurrence_csr.h5p'), 'r')
        row_ptr = np.nan_to_num(f.root.row_ptr.read())
        col_ind = np.nan_to_num(f.root.col_ind.read())
        data = np.nan_to_num(f.root.data.read())
        dim = row_ptr.shape[0] - 1
        self.matrix = scipy.sparse.csr_matrix(
            (data, col_ind, row_ptr), shape=(dim, dim), dtype=np.float32)
        f.close()
        self.vocabulary = Vocabulary_cooccurrence()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path))
Project: vsmlib    Author: undertherain    | Project source | File source
def load_with_alpha(self, path, power=0.6):
        # self.load_provenance(path)
        f = tables.open_file(os.path.join(path, 'vectors.h5p'), 'r')
#        left = np.nan_to_num(f.root.vectors.read())
        left = f.root.vectors.read()
        sigma = f.root.sigma.read()
        logger.info("loaded left singular vectors and sigma")
        sigma = np.power(sigma, power)
        self.matrix = np.dot(left, np.diag(sigma))
        logger.info("computed the product")
        self.metadata["pow_sigma"] = power
        self.metadata["size_dimensions"] = int(self.matrix.shape[1])
        f.close()
        self.vocabulary = Vocabulary_simple()
        self.vocabulary.load(path)
        self.name += os.path.basename(os.path.normpath(path)) + "_a" + str(power)
Project: TemporalConvolutionalNetworks    Author: colincsl    | Project source | File source
def macro_accuracy(P, Y, n_classes, bg_class=None, return_all=False, **kwargs):
    def macro_(P, Y, n_classes=None, bg_class=None, return_all=False):
        conf_matrix = sm.confusion_matrix(Y, P, labels=np.arange(n_classes))
        conf_matrix = conf_matrix/(conf_matrix.sum(0)[:,None]+1e-5)
        conf_matrix = np.nan_to_num(conf_matrix)
        diag = conf_matrix.diagonal()*100.

        # Remove background score
        if bg_class is not None:
            diag = np.array([diag[i] for i in range(n_classes) if i!=bg_class])

        macro = diag.mean()
        if return_all:
            return macro, diag
        else:
            return macro

    if type(P) == list:
        out = [macro_(P[i], Y[i], n_classes=n_classes, bg_class=bg_class, return_all=return_all) for i in range(len(P))]
        if return_all:
            return (np.mean([o[0] for o in out]), np.mean([o[1] for o in out],0))
        else:
            return np.mean(out)
    else:
        return macro_(P,Y, n_classes=n_classes, bg_class=bg_class, return_all=return_all)
Project: AutoTrading    Author: curme    | Project source | File source
def calQnt(self, price, volume, method, fixedFraction):

            Equity = self.queryCapital()
            proportion = 0.15
            maxDrawDown = 3800

            if method == 'FixedFraction':
                # TradeRisk = maxDrawDown(data)
                TradeRisk = maxDrawDown
                N = fixedFraction * Equity / abs(TradeRisk)
                if N >= volume * proportion : return math.trunc(volume * proportion)
                else                        : return int(np.nan_to_num(N))
                # return int(N)

            if method == 'MaxDrawDown':
                margin = 0.65
                # allocation = maxDrawDown(data) * 1.5 + margin * price
                allocation = maxDrawDown * 1.5 + margin * price
                N = Equity / allocation
                if N >= volume * proportion : return math.trunc(volume * proportion)
                else                        : return int(np.nan_to_num(N))
                # return int(N)

        # query capital of self strategy
Project: Collaborative-Filtering-recommendation    Author: LunaBlack    | Project source | File source
def userSim(data_matrix):
    n = np.shape(data_matrix)[0]  # userNum: number of users
    userSimArr = np.zeros(shape=(n, n))  # user-user similarity matrix, initialized to zeros

    for i in range(n):
        for j in range(i+1, n):
            overLap = np.nonzero(np.logical_and(data_matrix[i, :]>0, data_matrix[j, :]>0))[0]
            if len(overLap) > 1:
                sim = computeSim(data_matrix[i, overLap], data_matrix[j, overLap])
            else:
                sim = 0
            userSimArr[i][j] = sim
            userSimArr[j][i] = sim

    userSimArr = np.nan_to_num(userSimArr)
    return userSimArr


# compute the similarity between items
Project: Collaborative-Filtering-recommendation    Author: LunaBlack    | Project source | File source
def itemSim(data_matrix):
    n = np.shape(data_matrix)[1]  # itemNum: number of items
    itemSimArr = np.zeros(shape=(n, n))  # item-item similarity matrix, initialized to zeros

    for i in range(n):
        for j in range(i+1, n):
            overLap = np.nonzero(np.logical_and(data_matrix[:, i]>0, data_matrix[:, j]>0))[0]
            if len(overLap) > 1:
                sim = computeSim(data_matrix[overLap, i], data_matrix[overLap, j])
            else:
                sim = 0
            itemSimArr[i][j] = sim
            itemSimArr[j][i] = sim

    itemSimArr = np.nan_to_num(itemSimArr)
    return itemSimArr


# compute the improved user similarity
Project: Collaborative-Filtering-recommendation    Author: LunaBlack    | Project source | File source
def newUserSim(data_matrix):
    n = np.shape(data_matrix)[0]  # userNum: number of users
    userSimArr = np.zeros(shape=(n, n))  # user-user similarity matrix, initialized to zeros
    userCommon = np.zeros(shape=(n, n))  # number of items rated in common by each pair of users

    for i in range(n):
        for j in range(i+1, n):
            overLap = np.nonzero(np.logical_and(data_matrix[i, :]>0, data_matrix[j, :]>0))[0]
            if len(overLap) > 1:
                sim = computeSim(data_matrix[i, overLap], data_matrix[j, overLap])
            else:
                sim = 0
            userSimArr[i][j] = sim
            userSimArr[j][i] = sim
            userCommon[i][j] = len(overLap)
            userCommon[j][i] = len(overLap)

    coef = np.exp((userCommon * 1.0 / userCommon.max(axis=0)) - 1)
    newUserSim = coef * userSimArr  # weight the similarity by the number of co-rated items
    newUserSim = np.nan_to_num(newUserSim)  # replace NaN entries with zero
    return newUserSim, userCommon


# compute the improved item similarity
Project: Collaborative-Filtering-recommendation    Author: LunaBlack    | Project source | File source
def newItemSim(data_matrix):
    n = np.shape(data_matrix)[1]  # itemNum: number of items
    itemSimArr = np.zeros(shape=(n, n))  # item-item similarity matrix, initialized to zeros
    itemCommon = np.zeros(shape=(n, n))  # number of users who rated both items

    for i in range(n):
        for j in range(i+1, n):
            overLap = np.nonzero(np.logical_and(data_matrix[:, i]>0, data_matrix[:, j]>0))[0]
            if len(overLap) > 1:
                sim = computeSim(data_matrix[overLap, i], data_matrix[overLap, j])
            else:
                sim = 0
            itemSimArr[i][j] = sim
            itemSimArr[j][i] = sim
            itemCommon[i][j] = len(overLap)
            itemCommon[j][i] = len(overLap)

    coef = np.exp((itemCommon * 1.0 / itemCommon.max(axis=0)) - 1)
    newItemSim = coef * itemSimArr  # weight the similarity by the number of common raters
    newItemSim = np.nan_to_num(newItemSim)  # replace NaN entries with zero
    return newItemSim, itemCommon


# compute user similarity (Pearson correlation)
Project: Collaborative-Filtering-recommendation    Author: LunaBlack    | Project source | File source
def newUserSim(dataSet):
    n = np.shape(dataSet)[0]  # userNum: number of users
    userSimArr = np.zeros(shape=(n,n))  # user-user similarity matrix, initialized to zeros
    userCommon = np.zeros(shape=(n,n))  # number of items rated in common by each pair of users
    newUserSim = np.zeros(shape=(n,n))  # weighted similarity matrix
    for i in range(n):
        for j in range(i+1, n):
            overLap = np.nonzero(np.logical_and(dataSet[i,:]>0, dataSet[j,:]>0))[0]
            if len(overLap) > 1:
                sim = pearsSim(dataSet[i,overLap], dataSet[j,overLap])
            else:
                sim = 0
            userSimArr[i][j] = sim
            userSimArr[j][i] = sim
            userCommon[i][j] = len(overLap)
            userCommon[j][i] = len(overLap)

    coef = np.exp((userCommon*1.0/userCommon.max(axis=0))-1)
    newUserSim = coef * userSimArr  # weight the similarity by the number of co-rated items
    newUserSim = np.nan_to_num(newUserSim)
    return newUserSim, userCommon


# compute user similarity
Project: Collaborative-Filtering-recommendation    Author: LunaBlack    | Project source | File source
def newUserSim(dataSet):
    n = np.shape(dataSet)[0]  # userNum: number of users
    userSimArr = np.zeros(shape=(n,n))  # user-user similarity matrix, initialized to zeros
    userCommon = np.zeros(shape=(n,n))  # number of items rated in common by each pair of users
    newUserSim = np.zeros(shape=(n,n))  # weighted similarity matrix
    for i in range(n):
        for j in range(i+1, n):
            sim = 0
            overLap = np.nonzero(np.logical_and(dataSet[i,:]>0, dataSet[j,:]>0))[0]
            if len(overLap) > 0:
                sim = pearsSim(dataSet[i,overLap], dataSet[j,overLap])
            userSimArr[i][j] = sim
            userSimArr[j][i] = sim
            userCommon[i][j] = len(overLap)
            userCommon[j][i] = len(overLap)

    coef = np.exp((userCommon*1.0/userCommon.max(axis=0))-1)
    newUserSim = coef * userSimArr  # weight the similarity by the number of co-rated items
    newUserSim = np.nan_to_num(newUserSim)
    return newUserSim


# compute the item similarity
Project: Collaborative-Filtering-recommendation    Author: LunaBlack    | Project source | File source
def newItemSim(dataSet):
    n = np.shape(dataSet)[1]  # itemNum: number of items
    itemSimArr = np.zeros(shape=(n,n))  # item-item similarity matrix, initialized to zeros
    itemCommon = np.zeros(shape=(n,n))  # number of users who rated both items
    newItemSim = np.zeros(shape=(n,n))  # weighted similarity matrix
    for i in range(n):
        for j in range(i+1, n):
            sim = 0
            overLap = np.nonzero(np.logical_and(dataSet[:,i]>0, dataSet[:,j]>0))[0]
            if len(overLap) > 0:
                sim = pearsSim(dataSet[overLap,i], dataSet[overLap,j])
            itemSimArr[i][j] = sim
            itemSimArr[j][i] = sim
            itemCommon[i][j] = len(overLap)
            itemCommon[j][i] = len(overLap)

    coef = np.exp((itemCommon*1.0/itemCommon.max(axis=0))-1)
    newItemSim = coef * itemSimArr  # weight the similarity by the number of common raters
    newItemSim = np.nan_to_num(newItemSim)
    return newItemSim


Project: graphAttack    Author: jgolebiowski    | Project source | File source
def perform(self, a, y):
        """Perform costOperation

        Parameters
        ----------
        a : np.array
            Predictions
        y : np.array
            Data labels


        Returns
        -------
        np.array
            Output data
        """
        predLog = np.nan_to_num(-np.log(a))
        cEntropyMat = np.multiply(y, predLog)
        return (1.0 / self.nExamples) * np.sum(cEntropyMat)
Project: rca-evaluation    Author: sieve-microservices    | Project source | File source
def zscore(a, axis=0, ddof=0):
    a = np.asanyarray(a)
    mns = a.mean(axis=axis)
    sstd = a.std(axis=axis, ddof=ddof)
    if axis and mns.ndim < a.ndim:
        res = (((a - np.expand_dims(mns, axis=axis)) /
                np.expand_dims(sstd,axis=axis)))
    else:
        res = (a - mns) / sstd
    return np.nan_to_num(res)
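A short sketch with made-up data, assuming the zscore function above is in scope: the second column is constant, so its standard deviation is zero and a plain z-score would be NaN; the nan_to_num call maps those entries to 0 instead.

import numpy as np

a = np.array([[1.0, 5.0],
              [2.0, 5.0],
              [3.0, 5.0]])
print(zscore(a))  # first column is z-scored, second column becomes all zeros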
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def test_vwap(context, data):
        """
        Tests the vwap transform by manually keeping track of the prices
        and volumes in a naive way and asserting that our hand-rolled vwap is
        the same
        """
        mins = sum(context.mins_for_days[-context.days:])
        for sid in data:
            prices = context.price_bars[sid][-mins:]
            vols = context.vol_bars[sid][-mins:]
            manual_vwap = sum(
                map(operator.mul, np.nan_to_num(np.array(prices)), vols),
            ) / sum(vols)

            assert_allclose(
                data[sid].vwap(context.days),
                manual_vwap,
            )
Project: bnn-analysis    Author: myshkov    | Project source | File source
def kl_divergence(p_samples, q_samples):
    # estimate densities
    # p_samples = np.nan_to_num(p_samples)
    # q_samples = np.nan_to_num(q_samples)

    if isinstance(p_samples, tuple):
        idx, p_samples = p_samples

        if idx not in _cached_p_pdf:
            _cached_p_pdf[idx] = sc.gaussian_kde(p_samples)

        p_pdf = _cached_p_pdf[idx]
    else:
        p_pdf = sc.gaussian_kde(p_samples)

    q_pdf = sc.gaussian_kde(q_samples)

    # joint support
    left = min(min(p_samples), min(q_samples))
    right = max(max(p_samples), max(q_samples))

    p_samples_num = p_samples.shape[0]
    q_samples_num = q_samples.shape[0]

    # quantise
    lin = np.linspace(left, right, min(max(p_samples_num, q_samples_num), MAX_GRID_POINTS))
    p = p_pdf.pdf(lin)
    q = q_pdf.pdf(lin)

    # KL
    kl = min(sc.entropy(p, q), MAX_KL)

    return kl
Project: GraphTime    Author: GlooperLabs    | Project source | File source
def f_score(Theta_true, Theta_est, beta=1, eps=1e-6, per_ts=False):
    """Compute f1 score in the same manner as `precision` and `recall`.
    Therefore see those two functions for the respective weighting and per_ts
    explanation.

    Parameters
    ----------
    Theta_true : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    Theta_est : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    beta : float (default 1)
        beta value of the F score to be computed
    eps : float
    per_ts : bool
        whether to compute the average or the per-timestep f score

    Returns
    -------
    ndarray or float
        per-timestep f scores or a single averaged f score
    """
    prec = precision(Theta_true, Theta_est, eps, per_ts=True)
    rec = recall(Theta_true, Theta_est, eps, per_ts=True)
    with np.errstate(divide='ignore', invalid='ignore'):
        nom = (1 + beta**2) * prec * rec
        print(beta**2 * prec)
        den = beta**2 * prec + rec
        f = np.nan_to_num(np.true_divide(nom, den))
    return f if per_ts else np.sum(f) / len(Theta_true)
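The nan_to_num call matters at timesteps where both precision and recall are zero, so the F score is reported as 0 instead of NaN. A standalone sketch of that corner case (the precision/recall values are made up):

import numpy as np

prec = np.array([0.5, 0.0, 1.0])
rec = np.array([0.5, 0.0, 0.5])
beta = 1
with np.errstate(divide='ignore', invalid='ignore'):
    f = np.nan_to_num(np.true_divide((1 + beta**2) * prec * rec,
                                     beta**2 * prec + rec))
print(f)  # approximately [0.5, 0.0, 0.667]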
Project: GraphTime    Author: GlooperLabs    | Project source | File source
def global_f_score(Theta_true, Theta_est, beta=1, eps=1e-6):
    """In line with `global_precision` and `global_recall`, compute the
    global f score given true and estimated graphical structures. The
    f score has the only parameter beta.

    Parameters
    ----------
    Theta_true : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    Theta_est : 3D ndarray, shape (timesteps, n_vertices, n_vertices)
    beta : float (default 1)
        beta value of the F score to be computed
    eps : float

    Returns
    -------
    float f-beta score
    """
    assert Theta_est.shape == Theta_true.shape
    d = Theta_true.shape[1]
    n = len(Theta_est)
    tps = fps = fns = tns = 0
    for i in range(n):
        est_edges = set(get_edges(Theta_est[i], eps))
        gt_edges = set(get_edges(Theta_true[i], eps))
        n_joint = len(est_edges.intersection(gt_edges))
        tps += n_joint
        fps += len(est_edges) - n_joint
        fns += len(gt_edges) - n_joint
        tns += d**2 - d - tps - fps - fns
    nom = (1 + beta**2) * tps
    denom = nom + beta**2 * fns + fps
    with np.errstate(divide='ignore', invalid='ignore'):
        f = np.nan_to_num(np.true_divide(nom, denom))
        return f
Project: pynufft    Author: jyhmiinlin    | Project source | File source
def nufft_alpha_kb_fit(N, J, K):
    '''
    find out parameters alpha and beta
    of scaling factor st['sn']
    Note, when J = 1 , alpha is hardwired as [1,0,0...]
    (uniform scaling factor)
    '''
    beta = 1
    Nmid = (N - 1.0) / 2.0
    if N > 40:
        L = 13
    else:
        L = int(numpy.ceil(N / 3))

    nlist = numpy.arange(0, N) * 1.0 - Nmid
    (kb_a, kb_m) = kaiser_bessel('string', J, 'best', 0, K / N)
    if J > 1:
        sn_kaiser = 1 / kaiser_bessel_ft(nlist / K, J, kb_a, kb_m, 1.0)
    elif J == 1:  # The case when samples are on regular grids
        sn_kaiser = numpy.ones((1, N), dtype=dtype)
    gam = 2 * numpy.pi / K
    X_ant = beta * gam * nlist.reshape((N, 1), order='F')
    X_post = numpy.arange(0, L + 1)
    X_post = X_post.reshape((1, L + 1), order='F')
    X = numpy.dot(X_ant, X_post)  # [N,L]
    X = numpy.cos(X)
    sn_kaiser = sn_kaiser.reshape((N, 1), order='F').conj()
    X = numpy.array(X, dtype=dtype)
    sn_kaiser = numpy.array(sn_kaiser, dtype=dtype)
    coef = numpy.linalg.lstsq(numpy.nan_to_num(X), numpy.nan_to_num(sn_kaiser))[0]  # (X \ sn_kaiser.H);
    alphas = coef
    if J > 1:
        alphas[0] = alphas[0]
        alphas[1:] = alphas[1:] / 2.0
    elif J == 1:  # cases on grids
        alphas[0] = 1.0
        alphas[1:] = 0.0
    alphas = numpy.real(alphas)
    return (alphas, beta)
Project: AutoSleepScorerDev    Author: skjerns    | Project source | File source
def feat_eeg(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming signal is recorded with 100hz
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],9),dtype='float32')
    # 5 FEATURES for freq bands
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    spindle = np.sum(np.abs(w[:,np.arange(12*nsamp/sfreq,14*nsamp/sfreq, dtype=int)]),axis=1)
    sum_abs_pow = delta + theta + alpha + beta + gamma + spindle
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = spindle /sum_abs_pow
    feats[:,6] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))        # kurtosis
    feats[:,7] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,8] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    if np.any(np.isnan(feats)): print('NaN detected')
    return np.nan_to_num(feats)
Project: AutoSleepScorerDev    Author: skjerns    | Project source | File source
def feat_wavelet(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming signal is recorded with 100hz
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],8),dtype='float32')
    # 5 FEATURES for freq bands
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))        # kurtosis
    feats[:,6] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,7] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    if np.any(np.isnan(feats)): print('NaN detected')

    return np.nan_to_num(feats)
Project: AutoSleepScorerDev    Author: skjerns    | Project source | File source
def feat_eog(signals):
    """
    calculate the EOG features
    :param signals: 1D or 2D signals
    """

    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real   
    feats = np.zeros((signals.shape[0],15),dtype='float32')
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    feats[:,6] = np.sqrt(np.max(signals, axis=1))    #PAV
    feats[:,7] = np.sqrt(np.abs(np.min(signals, axis=1)))   #VAV   
    feats[:,8] = np.argmax(signals, axis=1)/nsamp #PAP
    feats[:,9] = np.argmin(signals, axis=1)/nsamp #VAP
    feats[:,10] = np.sqrt(np.sum(np.abs(signals), axis=1)/ np.mean(np.sum(np.abs(signals), axis=1))) # AUC
    feats[:,11] = np.sum(((np.roll(np.sign(signals), 1,axis=1) - np.sign(signals)) != 0).astype(int),axis=1)/nsamp #TVC
    feats[:,12] = np.log10(np.std(signals, axis=1)) #STD/VAR
    feats[:,13] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))       # kurtosis
    feats[:,14] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    if np.any(np.isnan(feats)): print('NaN detected')
    return np.nan_to_num(feats)
Project: AutoSleepScorerDev    Author: skjerns    | Project source | File source
def feat_emg(signals):
    """
    calculate the EMG median as defined by Leangkvist (2012),
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real   
    feats = np.zeros((signals.shape[0],13),dtype='float32')
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    emg = np.sum(np.abs(w[:,np.arange(12.5*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)
    feats[:,6] = emg / np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)  # ratio of high freq to total motor
    feats[:,7] = np.median(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)    # median freq
    feats[:,8] = np.mean(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)   #  mean freq
    feats[:,9] = np.std(signals, axis=1)    #  std 
    feats[:,10] = np.mean(signals,axis=1)
    feats[:,11] = np.log10(stats.kurtosis(signals,fisher=False,axis=1) )
    feats[:,12] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    if np.any(np.isnan(feats)): print('NaN detected')

    return np.nan_to_num(feats)
Project: SentEval    Author: facebookresearch    | Project source | File source
def do_prepare(self, params, prepare):
        if 'similarity' in params:
            self.similarity = params.similarity
        else:  # Default similarity is cosine
            self.similarity = lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))
        return prepare(params, self.samples)
Project: solar-correlation-map    Author: Zapf-Consulting    | Project source | File source
def transform_to_correlation_dist(data):
    y_corr = np.corrcoef(data.T)
    # we just need the magnitude of the correlation and don't care whether it's positive or not
    abs_corr = np.abs(y_corr)
    return np.nan_to_num(abs_corr)
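A short sketch with made-up data, assuming the function above is in scope: the third column is constant, so its correlations come out as NaN (with a runtime warning), and nan_to_num turns them into zeros.

import numpy as np

data = np.array([[1.0, 2.0, 7.0],
                 [2.0, 4.0, 7.0],
                 [3.0, 6.0, 7.0]])
print(transform_to_correlation_dist(data))
# [[1. 1. 0.]
#  [1. 1. 0.]
#  [0. 0. 0.]]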
Project: MOSFiT    Author: guillochon    | Project source | File source
def process(self, **kwargs):
        """Process module."""
        raise RuntimeError('`MultiBlackbody` is not yet functional.')
        kwargs = self.prepare_input(self.key('luminosities'), **kwargs)
        self._luminosities = kwargs[self.key('luminosities')]
        self._bands = kwargs['all_bands']
        self._band_indices = kwargs['all_band_indices']
        self._areas = kwargs[self.key('areas')]
        self._temperature_phots = kwargs[self.key('temperaturephots')]
        xc = self.X_CONST  # noqa: F841
        fc = self.FLUX_CONST  # noqa: F841
        temperature_phot = self._temperature_phot
        zp1 = 1.0 + kwargs[self.key('redshift')]
        seds = []
        for li, lum in enumerate(self._luminosities):
            cur_band = self._bands[li]  # noqa: F841
            bi = self._band_indices[li]
            rest_freqs = [x * zp1  # noqa: F841
                          for x in self._sample_frequencies[bi]]
            wav_arr = np.array(self._sample_wavelengths[bi])  # noqa: F841
            radius_phot = self._radius_phot[li]  # noqa: F841
            temperature_phot = self._temperature_phot[li]  # noqa: F841

            if li == 0:
                sed = ne.evaluate(
                    'fc * radius_phot**2 * rest_freqs**3 / '
                    '(exp(xc * rest_freqs / temperature_phot) - 1.0)')
            else:
                sed = ne.re_evaluate()

            sed = np.nan_to_num(sed)

            seds.append(list(sed))

        seds = self.add_to_existing_seds(seds, **kwargs)

        return {'sample_wavelengths': self._sample_wavelengths,
                self.key('seds'): seds}
Project: Modeling-Cloth    Author: the3dadvantage    | Project source | File source
def rotate_around_axis(coords, Q, origin='empty'):
    '''Uses standard quaternion to rotate a vector. Q requires
    a 4-dimensional vector. coords is the 3d location of the point.
    coords can also be an N x 3 array of vectors. Happens to work
    with Q as a tuple or a np array shape 4'''
    if origin == 'empty':    
        vcV = np.cross(Q[1:], coords)
        RV = np.nan_to_num(coords + vcV * (2*Q[0]) + np.cross(Q[1:],vcV)*2)
    else:
        coords -= origin
        vcV = np.cross(Q[1:],coords)
        RV = (np.nan_to_num(coords + vcV * (2*Q[0]) + np.cross(Q[1:],vcV)*2)) + origin       
        coords += origin #undo in-place offset
    return RV
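A small check, assuming the rotate_around_axis function above is in scope (values chosen only for illustration): a unit quaternion for a 90-degree rotation about the z axis should carry the point (1, 0, 0) to (0, 1, 0).

import numpy as np

theta = np.pi / 2  # 90-degree rotation about the z axis
Q = np.array([np.cos(theta / 2), 0.0, 0.0, np.sin(theta / 2)])
p = np.array([[1.0, 0.0, 0.0]])
print(rotate_around_axis(p, Q))  # approximately [[0., 1., 0.]]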
Project: Modeling-Cloth    Author: the3dadvantage    | Project source | File source
def barycentric_generate(hits, tris):
    '''Create scalars to be used by points and triangles'''
    # where the hit lands on the two tri vecs
    tv = tris[:, 1] - tris[:, 0]
    hv = hits - tris[:, 0]
    d1a = np.einsum('ij, ij->i', hv, tv)
    d1b = np.einsum('ij, ij->i', tv, tv)
    scalar1 = np.nan_to_num(d1a / d1b)

    t2v = tris[:, 2] - tris[:, 0]
    d2a = np.einsum('ij, ij->i', hv, t2v)
    d2b = np.einsum('ij, ij->i', t2v, t2v)
    scalar2 = np.nan_to_num(d2a / d2b)

    # closest point on edge segment between the two points created above
    cp1 = tv * np.expand_dims(scalar1, axis=1)
    cp2 = t2v * np.expand_dims(scalar2, axis=1)
    cpvec = cp2 - cp1
    cp1_at = tris[:,0] + cp1
    hcp = hits - cp1_at # this is cp3 above. Not sure what's it's for yet
    dhcp = np.einsum('ij, ij->i', hcp, cpvec)
    d3b = np.einsum('ij, ij->i', cpvec, cpvec)
    hcp_scalar = np.nan_to_num(dhcp / d3b)
    hcp_vec = cpvec * np.expand_dims(hcp_scalar, axis=1)    

    # base of tri on edge between first two points
    d3 = np.einsum('ij, ij->i', -cp1, cpvec)
    scalar3 = np.nan_to_num(d3 / d3b)
    base_cp_vec = cpvec * np.expand_dims(scalar3, axis=1)
    base_on_span = cp1_at + base_cp_vec

    # Where the point occurs on the edge between the base of the triangle
    #   and the cpoe of the base of the triangle on the cpvec    
    base_vec = base_on_span - tris[:,0]
    dba = np.einsum('ij, ij->i', hv, base_vec)
    dbb = np.einsum('ij, ij->i', base_vec, base_vec)
    scalar_final = np.nan_to_num(dba / dbb)
    p_on_bv = base_vec * np.expand_dims(scalar_final, axis=1)
    perp = (p_on_bv) - (cp1 + base_cp_vec)
    return scalar1, scalar2, hcp_scalar, scalar3, scalar_final
Project: Modeling-Cloth    Author: the3dadvantage    | Project source | File source
def project_points(points, tri_coords):
    '''Using this to get the points off the surface
    Takes the average length of two vecs off triangles
    and applies it to the length of the normals.
    This way the normal scales with the mesh and with
    changes to the individual triangle vectors'''
    t0 = tri_coords[:, 0]
    t1 = tri_coords[:, 1]
    t2 = tri_coords[:, 2]
    tv1 = t1 - t0
    tv2 = t2 - t0
    cross = np.cross(tv1, tv2)

    # get the average length of the two vectors and apply it to the cross product
    sq = np.sqrt(np.einsum('ij,ij->i', cross, cross))
    x1 = np.einsum('ij,ij->i', tv1, tv1)
    x2 = np.einsum('ij,ij->i', tv2, tv2)
    av_root = np.sqrt((x1 + x2) / 2)
    cr_root = (cross / np.expand_dims(sq, axis=1)) * np.expand_dims(av_root, axis=1)    

    v1 = points - t0
    v1_dots = np.einsum('ij,ij->i', cr_root, v1)
    n_dots = np.einsum('ij,ij->i', cr_root, cr_root)
    scale = np.nan_to_num(v1_dots / n_dots)
    offset = cr_root * np.expand_dims(scale, axis=1)
    drop = points - offset # The drop is used by the barycentric generator as points in the triangles
    return drop, scale
Project: Large-scale-bundle-adjustment-in-scipy    Author: bachmmmar    | Project source | File source
def rotate(points, rot_vecs):
    """Rotate points by given rotation vectors.

    Rodrigues' rotation formula is used.
    """
    theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]
    with np.errstate(invalid='ignore'):
        v = rot_vecs / theta
        v = np.nan_to_num(v)
    dot = np.sum(points * v, axis=1)[:, np.newaxis]
    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)

    return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v
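A small check, assuming the rotate function above is in scope (values chosen only for illustration): a rotation vector of length pi/2 along the z axis should carry (1, 0, 0) to (0, 1, 0), and the nan_to_num guard handles zero-length rotation vectors, for which the direction v is undefined.

import numpy as np

points = np.array([[1.0, 0.0, 0.0]])
rot_vecs = np.array([[0.0, 0.0, np.pi / 2]])  # 90 degrees about the z axis
print(rotate(points, rot_vecs))               # approximately [[0., 1., 0.]]
print(rotate(points, np.zeros((1, 3))))       # zero rotation leaves the point unchanged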
Project: decision_tree    Author: leeamen    | Project source | File source
def Entropy(vector_class):
  m = np.sum(vector_class)
  if m <= 0.1:return 0.0

  vec = 1.0/m*np.array(vector_class)
#  print vec
  try:
    e = -1.0 * np.dot(vec, np.nan_to_num(np.log2(vec)))
  except RuntimeWarning:
    pass
  return e

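A quick sketch, assuming the Entropy function above is in scope (the class counts are made up): an even two-class split gives 1 bit, a skewed split gives less, and the nan_to_num call keeps the 0 * log2(0) term from poisoning the result when a class count is zero.

print(Entropy([5, 5]))   # 1.0
print(Entropy([8, 2]))   # about 0.72
print(Entropy([10, 0]))  # -0.0 (the empty class contributes nothing)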
Project: introspective    Author: numeristical    | Project source | File source
def inverse_binary_entropy_nats(entropy_val, num_iter=3):
    guess = (np.arcsin((entropy_val/np.log(2))**(1/.645)))/np.pi
    for i in range(num_iter):
        guess = guess + np.nan_to_num((entropy_val-binary_entropy_nats(guess))/binary_entropy_nats_prime(guess))
    return guess
Project: DeepTrade_keras    Author: happynoom    | Project source | File source
def moving_extract(self, window=30, open_prices=None, close_prices=None, high_prices=None, low_prices=None,
                       volumes=None, with_label=True, flatten=True):
        self.extract(open_prices=open_prices, close_prices=close_prices, high_prices=high_prices, low_prices=low_prices,
                     volumes=volumes)
        feature_arr = numpy.asarray(self.feature)
        p = 0
        rows = feature_arr.shape[0]
        print("feature dimension: %s" % rows)
        if with_label:
            moving_features = []
            moving_labels = []
            while p + window < feature_arr.shape[1]:
                x = feature_arr[:, p:p + window]
                # y = cmp(close_prices[p + window], close_prices[p + window - 1]) + 1
                p_change = (close_prices[p + window] - close_prices[p + window - 1]) / close_prices[p + window - 1]
                # use percent of change as label
                y = p_change
                if flatten:
                    x = x.flatten("F")
                moving_features.append(numpy.nan_to_num(x))
                moving_labels.append(y)
                p += 1

            return numpy.asarray(moving_features), numpy.asarray(moving_labels)
        else:
            moving_features = []
            while p + window <= feature_arr.shape[1]:
                x = feature_arr[:, p:p + window]
                if flatten:
                    x = x.flatten("F")
                moving_features.append(numpy.nan_to_num(x))
                p += 1
            return moving_features
Project: DeepTrade_keras    Author: happynoom    | Project source | File source
def feature_distribution(self):
        k = 0
        for feature_column in self.feature:
            fc = numpy.nan_to_num(feature_column)
            mean = numpy.mean(fc)
            var = numpy.var(fc)
            max_value = numpy.max(fc)
            min_value = numpy.min(fc)
            print("[%s_th feature] mean: %s, var: %s, max: %s, min: %s" % (k, mean, var, max_value, min_value))
            k = k + 1