Python scipy.linalg module: solve_triangular() example source code

We extracted the following 17 code examples from open-source Python projects to illustrate how to use scipy.linalg.solve_triangular().
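
Before the project snippets, here is a minimal self-contained sketch of what the function does: it solves A x = b for triangular A by forward or back substitution in O(n^2) time, instead of a general O(n^3) solve. The matrix and right-hand side below are illustrative.

import numpy as np
from scipy.linalg import solve_triangular

# Lower-triangular system L x = b, solved by forward substitution;
# lower=True tells the solver to read only the lower triangle of L.
L = np.array([[2., 0., 0.],
              [1., 3., 0.],
              [4., 5., 6.]])
b = np.array([2., 4., 15.])
x = solve_triangular(L, b, lower=True)

assert np.allclose(x, [1., 1., 1.])
assert np.allclose(L.dot(x), b)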

Project: opt-mmd    Author: dougalsutherland    | project source | file source
def linear_hotelling_test(X, Y, reg=0):
    n, p = X.shape
    Z = X - Y
    Z_bar = Z.mean(axis=0)

    Z -= Z_bar
    S = Z.T.dot(Z)
    S /= (n - 1)
    if reg:
        S.flat[::p + 1] += reg   # add reg to the diagonal of the p x p matrix S
    # z' inv(S) z = z' inv(L L') z = z' inv(L)' inv(L) z = ||inv(L) z||^2
    L = linalg.cholesky(S, lower=True, overwrite_a=True)
    Linv_Z_bar = linalg.solve_triangular(L, Z_bar, lower=True, overwrite_b=True)
    stat = n * Linv_Z_bar.dot(Linv_Z_bar)

    p_val = stats.chi2.sf(stat, p)
    return p_val, stat
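
A quick numerical check of the identity used in the comment above, z' inv(S) z = ||inv(L) z||^2 when S = L L'; the data is illustrative, not from the project:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
S = A.dot(A.T) + 5 * np.eye(5)   # a symmetric positive-definite matrix
z = rng.randn(5)

L = linalg.cholesky(S, lower=True)
Linv_z = linalg.solve_triangular(L, z, lower=True)
# The squared norm of inv(L) z equals the quadratic form z' inv(S) z.
assert np.allclose(Linv_z.dot(Linv_z), z.dot(np.linalg.solve(S, z)))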
Project: smt    Author: SMTorg    | project source | file source
def _predict_variances(self, x):

        # Initialization
        n_eval, n_features_x = x.shape
        x = (x - self.X_mean) / self.X_std
        # Get pairwise componentwise L1-distances to the input training set
        dx = manhattan_distances(x, Y=self.X_norma.copy(),
                                 sum_over_features=False)
        d = self._componentwise_distance(dx)

        # Compute the correlation function
        r = self.options['corr'](self.optimal_theta, d).reshape(n_eval, self.nt)

        C = self.optimal_par['C']
        rt = linalg.solve_triangular(C, r.T, lower=True)

        u = linalg.solve_triangular(self.optimal_par['G'].T,
                                    np.dot(self.optimal_par['Ft'].T, rt) -
                                    self.options['poly'](x).T)

        MSE = self.optimal_par['sigma2'] * (1. - (rt ** 2.).sum(axis=0) +
                                            (u ** 2.).sum(axis=0))
        # Mean Squared Error might be slightly negative depending on
        # machine precision: force to zero!
        MSE[MSE < 0.] = 0.
        return MSE
Project: pyins    Author: nmayorov    | project source | file source
def _kalman_correct(x, P, z, H, R, gain_factor, gain_curve):
    PHT = np.dot(P, H.T)

    S = np.dot(H, PHT) + R
    e = z - H.dot(x)
    L = cholesky(S, lower=True)
    inn = solve_triangular(L, e, lower=True)

    if gain_curve is not None:
        q = (np.dot(inn, inn) / inn.shape[0]) ** 0.5
        f = gain_curve(q)
        if f == 0:
            return inn
        L *= (q / f) ** 0.5

    K = cho_solve((L, True), PHT.T, overwrite_b=True).T
    if gain_factor is not None:
        K *= gain_factor[:, None]

    U = -K.dot(H)
    U[np.diag_indices_from(U)] += 1
    x += K.dot(z - H.dot(x))
    P[:] = U.dot(P).dot(U.T) + K.dot(R).dot(K.T)

    return inn
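
A toy invocation of the routine above, assuming the snippet's own imports (numpy as np, plus cholesky, solve_triangular and cho_solve from scipy.linalg); the scalar measurement model is made up for illustration:

import numpy as np

x = np.zeros(2)                 # state estimate, updated in place
P = np.eye(2)                   # state covariance, updated in place
z = np.array([1.0])             # measurement
H = np.array([[1.0, 0.0]])      # measurement matrix
R = np.array([[0.1]])           # measurement noise covariance

inn = _kalman_correct(x, P, z, H, R, gain_factor=None, gain_curve=None)
# inn is the innovation whitened by the Cholesky factor of S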
Project: Thor    Author: JamesBrofos    | project source | file source
def fit(self, X, y):
        """Fit the Bayesian linear regression model leveraging the available
        data.

        Parameters:
            X (numpy array): A two-dimensional numpy array representing the
                matrix of covariates. Note that if a bias term is expressly
                desired, it must be included in the design matrix.
            y (numpy array): A matrix of target variables to predict.
        """
        P = X.T.dot(X) + self.prior_prec
        L = spla.cholesky(P, lower=True)
        self.mu = spla.cho_solve((L, True), X.T.dot(y))
        self.sigma_sq = np.mean((X.dot(self.mu) - y) ** 2)
        L_inv = spla.solve_triangular(L.T, np.eye(L.shape[0]))
        self.cov = self.sigma_sq * L_inv.dot(L_inv.T)
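
The same posterior, reproduced outside the class on synthetic data; a sketch in which prior_prec stands in for the model's prior precision attribute:

import numpy as np
import scipy.linalg as spla

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X.dot([1.0, -2.0, 0.5]) + 0.1 * rng.randn(100)
prior_prec = np.eye(3)

P = X.T.dot(X) + prior_prec
L = spla.cholesky(P, lower=True)
mu = spla.cho_solve((L, True), X.T.dot(y))
# inv(P) = inv(L)' inv(L): a triangular solve instead of a full inverse.
L_inv = spla.solve_triangular(L.T, np.eye(L.shape[0]))
assert np.allclose(L_inv.dot(L_inv.T), np.linalg.inv(P))
assert np.allclose(P.dot(mu), X.T.dot(y))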
Project: Parallel-SGD    Author: angadgill    | project source | file source
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob
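
A sanity check of the routine against scipy.stats.multivariate_normal, assuming the snippet's imports (numpy as np, linalg from scipy) are in scope; the data is illustrative:

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
X = rng.randn(10, 3)
mu = np.zeros(3)
cv = 2.0 * np.eye(3)

out = _log_multivariate_normal_density_full(X, [mu], [cv])[:, 0]
ref = stats.multivariate_normal(mean=mu, cov=cv).logpdf(X)
assert np.allclose(out, ref)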
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def log_prob(self, x):
    def _compute(df, loc, scale_tril, x):
      k = scale_tril.shape[-1]
      ildj = np.sum(np.log(np.abs(np.diag(scale_tril))), axis=-1)
      logz = ildj + k * (0.5 * np.log(df) +
                         0.5 * np.log(np.pi) +
                         special.gammaln(0.5 * df) -
                         special.gammaln(0.5 * (df + 1.)))
      y = linalg.solve_triangular(scale_tril, np.matrix(x - loc).T,
                                  lower=True, overwrite_b=True)
      logs = -0.5 * (df + 1.) * np.sum(np.log1p(y**2. / df), axis=-2)
      return logs - logz
    if not self._df.shape:
      return _compute(self._df, self._loc, self._scale_tril, x)
    return np.concatenate([
        [_compute(self._df[i], self._loc[i], self._scale_tril[i], x[:, i, :])]
        for i in range(len(self._df))]).T
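
Note that the snippet sums per-coordinate t kernels with exponent (df + 1)/2, which matches a vector of independent Student-t variables pushed through the affine map (in the style of TensorFlow's VectorStudentT), not the classical multivariate t. For the classical density, the same solve_triangular whitening looks like the sketch below, cross-checked against scipy.stats.multivariate_t (SciPy >= 1.6) on illustrative data:

import numpy as np
from scipy import linalg, special, stats

rng = np.random.RandomState(0)
df, k = 5.0, 3
loc = np.zeros(k)
A = rng.randn(k, k)
shape = A.dot(A.T) + k * np.eye(k)        # scale matrix
scale_tril = np.linalg.cholesky(shape)
x = rng.randn(4, k)

# Whiten: y = inv(L) (x - loc), so the Mahalanobis term is ||y||^2.
y = linalg.solve_triangular(scale_tril, (x - loc).T, lower=True)
maha = np.sum(y ** 2, axis=0)
logz = (np.sum(np.log(np.diag(scale_tril)))
        + 0.5 * k * (np.log(df) + np.log(np.pi))
        + special.gammaln(0.5 * df)
        - special.gammaln(0.5 * (df + k)))
logp = -0.5 * (df + k) * np.log1p(maha / df) - logz
assert np.allclose(logp, stats.multivariate_t(loc, shape, df=df).logpdf(x))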
Project: MKLMM    Author: omerwe    | project source | file source
def solveChol(self, L, B, overwrite_b=True):
        # Solve (L'L) X = B by two triangular solves; since no lower= flag is
        # passed, L is assumed to be the upper-triangular factor that
        # scipy.linalg.cholesky returns by default.
        cholSolve1 = la.solve_triangular(L, B, trans=1, check_finite=False, overwrite_b=overwrite_b)
        cholSolve2 = la.solve_triangular(L, cholSolve1, check_finite=False, overwrite_b=True)
        return cholSolve2
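
A standalone check of the same call pattern, confirming that the two solves apply inv(L'L) when L is the upper factor; illustrative matrices:

import numpy as np
import scipy.linalg as la

rng = np.random.RandomState(0)
M = rng.randn(4, 4)
K = M.dot(M.T) + 4 * np.eye(4)
B = rng.randn(4, 2)

U = la.cholesky(K)                                            # upper: K = U'U
X1 = la.solve_triangular(U, B, trans=1, check_finite=False)   # solve U' X1 = B
X2 = la.solve_triangular(U, X1, check_finite=False)           # solve U  X2 = X1
assert np.allclose(K.dot(X2), B)                              # X2 == inv(K) B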
Project: MKLMM    Author: omerwe    | project source | file source
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
        L = post['L']
        if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
        Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
        ns = diagKTestTest.shape[0]
        nperbatch = 5000
        nact = 0

        #allocate mem
        fmu = np.zeros(ns)  #column vector (of length ns) of predictive latent means
        fs2 = np.zeros(ns)  #column vector (of length ns) of predictive latent variances
        while (nact<(ns-1)):
            id = np.arange(nact, np.minimum(nact+nperbatch, ns))
            kss = diagKTestTest[id]     
            Ks = KtrainTest[:, id]
            if (len(post['alpha'].shape) == 1):
                try: Fmu = intercept[id] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu
            else:
                try: Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu.mean(axis=1)
            if Lchol:
                V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
                fs2[id] = kss - np.sum(V**2, axis=0)                       #predictive variances                        
            else:
                fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)           #predictive variances
            fs2[id] = np.maximum(fs2[id],0)  #remove numerical noise i.e. negative variances        
            nact = id[-1]    #set counter to index of last processed data point

        return fmu, fs2
Project: pmml-scoring-engine    Author: maxkferg    | project source | file source
def __init__(self, gamma, beta, nugget, kernelName, k_lambda, xTrain, yTrain):
        """
        Create a new GaussianProcess Object
        gamma: Hyperparameter
        beta: Hyperparameter
        k_lambda: Hyperparameter
        nugget: The noise hyperparameter
        kernelName: The name of the covariance kernel
        xTrain: Numpy array containing x training values
        yTrain: Numpy array containing y training values

        """
        self.xTrain = xTrain
        self.yTrain = yTrain
        self.k_lambda = k_lambda
        self.beta = beta
        self.gamma = gamma
        self.nugget = nugget
        self.kernelName = kernelName

        # Setup the regressor as if gp.fit had been called
        # See https://github.com/scikit-learn/scikit-learn/master/sklearn/gaussian_process/gpr.py
        kernel = self._getKernel()
        gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0)
        gp.K = kernel(xTrain)
        gp.X_train_ = xTrain
        gp.y_train_ = yTrain
        gp.L_ = cholesky(gp.K, lower=True)
        gp.alpha_ = cho_solve((gp.L_, True), yTrain)
        gp.fit(xTrain, yTrain)
        gp.kernel_ = kernel
        self.gp = gp
        self.kernel = kernel

        # Calculate the matrix inverses once to save time later.
        # This is only used by our own implementation of the scoring engine.
        self.L_inv = solve_triangular(gp.L_.T, np.eye(gp.L_.shape[0]))
        self.K_inv = self.L_inv.dot(self.L_inv.T)
Project: OpenMDAO    Author: OpenMDAO    | project source | file source
def solve_triangular(x, y, lower=True):
        """Solve triangular."""
        return linalg.solve(x, y)
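
This shadow definition reads like a compatibility fallback for environments whose SciPy lacks solve_triangular; a plausible surrounding pattern (an assumption, not verbatim OpenMDAO code) is a guarded import. Note the fallback accepts but ignores lower, since a general solve does not exploit triangularity:

from numpy import linalg

try:
    from scipy.linalg import solve_triangular
except ImportError:
    def solve_triangular(x, y, lower=True):
        """Solve triangular (fallback; the lower flag is ignored)."""
        return linalg.solve(x, y)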
Project: product-taz    Author: TheAnomalieZ    | project source | file source
def sample_fun(self, model, **sampler_options):
        params_array = hyperparameter_utils.params_to_array(self.params)

        if model.has_data:
            K_XX      = model.noiseless_kernel.cov(model.inputs)
            current_L = spla.cholesky(K_XX, lower=True)
            nu        = spla.solve_triangular(current_L, model.latent_values.value-model.mean.value, lower=True)
        else:
            nu = None # if no data

        new_params, current_ll = slice_sample(params_array, self.logprob, model, nu, **sampler_options)

        new_latent_values = self._compute_implied_y(model, nu)

        return new_params, new_latent_values, current_ll
Project: product-taz    Author: TheAnomalieZ    | project source | file source
def chol_add(L, A):
    # Given the lower Cholesky factor L of the leading N x N block of A,
    # extend it to a factor of the full matrix (a block Cholesky update).
    N = L.shape[0]
    S12 = spla.solve_triangular(L, A[:N, N:], lower=True)
    S22 = spla.cholesky(A[N:, N:] - S12.T.dot(S12)).T
    L_update = np.zeros(A.shape)
    L_update[:N, :N] = L
    L_update[N:, :N] = S12.T
    L_update[N:, N:] = S22
    return L_update
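
A quick check of the update, calling chol_add as defined above; the matrices are illustrative:

import numpy as np
import scipy.linalg as spla

rng = np.random.RandomState(0)
M = rng.randn(6, 6)
A = M.dot(M.T) + 6 * np.eye(6)               # the full, extended SPD matrix
N = 4
L = spla.cholesky(A[:N, :N], lower=True)     # factor of the leading block

L_new = chol_add(L, A)
assert np.allclose(L_new.dot(L_new.T), A)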
Project: Thor    Author: JamesBrofos    | project source | file source
def fit(self, X, y):
        """Fit the parameters of the probabilistic process based on the
        available training data.
        """
        # Store the training data (both the inputs and the targets).
        self.X, self.y = X, y
        # Compute the covariance matrix of the observed inputs.
        K = self.kernel.cov(self.X)
        # For a numerically stable algorithm, we use Cholesky decomposition.
        self.L = spla.cholesky(K, lower=True)
        self.alpha = spla.cho_solve((self.L, True), self.y).ravel()
        L_inv = spla.solve_triangular(self.L.T, np.eye(self.L.shape[0]))
        self.K_inv = L_inv.dot(L_inv.T)
        self.beta = self.y.dot(self.alpha)
Project: Thor    Author: JamesBrofos    | project source | file source
def predict(self, X_pred, covariance=False):
        """Leverage Bayesian posterior inference to compute the predicted mean
        and variance of a given set of inputs given the available training data.
        Notice that it is necessary to first fit the Gaussian process model
        before posterior inference can be performed.
        """
        # Compute the cross covariance between training and the requested
        # inference locations. Also compute the covariance matrix of the
        # observed inputs and the covariance at the inference locations.
        if type(self.kernel) == SumKernel:
            K_pred = self.kernel.cov(X_pred, include_noise=False)
        else:
            K_pred = self.kernel.cov(X_pred)
        K_cross = self.kernel.cov(X_pred, self.X)
        v = spla.solve_triangular(self.L, K_cross.T, lower=True)
        # Posterior inference. Notice that we add a small amount of noise to
        # the diagonal for regularization purposes.
        mean = K_cross.dot(self.alpha)
        cov = self.predict_prefactor * (
            K_pred - v.T.dot(v) + 1e-8 * np.eye(K_pred.shape[0])
        )
        # Compute the diagonal of the covariance matrix if we wish to disregard
        # all of the covariance information and only focus on the variances at
        # the given inputs.
        if covariance:
            return mean, cov
        else:
            return mean, np.sqrt(np.diag(cov))
Project: l1l2py    Author: slipguru    | project source | file source
def enet_admm(X, y, z=None, rho=1.0, alpha=1.0, max_iter=1000, abs_tol=1e-6,
              rel_tol=1e-4, tau=0.5, mu=0.5):
    n, d = X.shape

    XTy = np.dot(X.T, y)

    x = np.zeros(d)
    z = np.zeros(d)
    u = np.zeros(d)

    L, U = factor(X, rho, mu)

    for k in range(max_iter):
        # x-update
        q = 2. / n * XTy + rho * (z - u)    # temporary value

        if n >= d:      # if skinny
            x = la.solve_triangular(U, la.solve_triangular(L, q, lower=True),
                                    lower=False)
        else:            # if fat
            tmp = la.solve_triangular(U, la.solve_triangular(L, np.dot(X, q),
                                      lower=True), lower=False)
            x = q / rho - np.dot(X.T, tmp) * (2. / (n * rho * rho))

        # z-update with relaxation
        zold = z
        x_hat = alpha * x + (1 - alpha) * zold
        z = shrinkage(x_hat + u, tau / rho)

        # u-update
        u += (x_hat - z)

        # Stopping
        r_norm = la.norm(x - z)
        s_norm = la.norm(-rho * (z - zold))

        eps_pri = np.sqrt(d) * abs_tol + rel_tol * max(la.norm(x), la.norm(-z))
        eps_dual = np.sqrt(d) * abs_tol + rel_tol * la.norm(rho * u)

        if (r_norm < eps_pri) and (s_norm < eps_dual):
            break

    return z, s_norm, eps_dual, k + 1
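
The snippet leans on two helpers not shown here. shrinkage is standard soft-thresholding; factor pre-computes a Cholesky factorization for the x-update. The versions below are a sketch in the spirit of Boyd et al.'s ADMM reference code, adapted to this snippet's 2/n scaling and l2 weight mu; they are an assumption about l1l2py's actual definitions, not a copy of them:

import numpy as np
import scipy.linalg as la

def shrinkage(a, kappa):
    # Soft-thresholding: the proximal operator of kappa * ||.||_1.
    return np.sign(a) * np.maximum(np.abs(a) - kappa, 0.0)

def factor(X, rho, mu):
    # Cholesky factors (L, U) of the x-update system matrix:
    #   skinny (n >= d):  (2/n) X'X + (2*mu + rho) I
    #   fat    (n <  d):  I + 2/(n*rho) X X'   (Woodbury companion matrix)
    n, d = X.shape
    if n >= d:
        L = la.cholesky((2. / n) * X.T.dot(X) + (2. * mu + rho) * np.eye(d),
                        lower=True)
    else:
        L = la.cholesky(np.eye(n) + (2. / (n * rho)) * X.dot(X.T),
                        lower=True)
    return L, L.T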
Project: CADO    Author: BGCECSE2015    | project source | file source
def projection_onto_quad(self, _point):
        from scipy.linalg import solve_triangular
        import numpy as np

        # first assume that _point is below diagonal BD
        vertexA = self.vertices_plane[0,:]
        vector_vertexA_point = _point - vertexA
        # we want to transform _point to the BASIS=[normal, AB, AD] and use
        # the QR decomposition of BASIS = Q*R:
        # BASIS * coords = _point  ->  R * coords = Q' * _point
        R_BAD = np.dot(self.ortho_basis_AB.transpose(), self.basis_BAD)
        b = np.dot(self.ortho_basis_AB.transpose(), vector_vertexA_point)
        x = solve_triangular(R_BAD, b)
        distance = x[0]
        projected_point = _point - distance * self.normal
        u = x[1]
        v = x[2]

        # if not, _point is above diagonal BD
        if u+v > 1:
            vertexC = self.vertices_plane[2,:]
            vector_vertexC_point = _point - vertexC
            R_BCD = np.dot(self.ortho_basis_CB.transpose(), self.basis_BCD)
            b = np.dot(self.ortho_basis_CB.transpose(), vector_vertexC_point)
            x = solve_triangular(R_BCD, b)
            distance = x[0]
            projected_point = _point - distance * self.normal
            u = 1-x[1]
            v = 1-x[2]

        distance = abs(distance)

        u_crop = u
        v_crop = v

        if not (0<=u<=1 and 0<=v<=1):
            if u < 0:
                u_crop = 0
            elif u > 1:
                u_crop = 1

            if v < 0:
                v_crop = 0
            elif v > 1:
                v_crop = 1

            projected_point = self.point_on_quad(u_crop,v_crop)
            distance = np.linalg.norm(_point-projected_point)

        return projected_point, distance, u, v
Project: product-taz    Author: TheAnomalieZ    | project source | file source
def predict(self, pred, full_cov=False, compute_grad=False):
        inputs = self.inputs
        values = self.values

        # Special case if there is no data yet (everything from the prior)
        if inputs is None:
            return self.predict_from_prior(pred, full_cov, compute_grad)

        if pred.shape[1] != self.num_dims:
            raise Exception("Dimensionality of inputs must match dimensionality given at init time.")

        # The primary covariances for prediction.
        cand_cross = self.noiseless_kernel.cross_cov(inputs, pred)

        chol, alpha = self._pull_from_cache_or_compute()

        # Solve the linear systems.
        # Note: if X = LL^T, cho_solve performs X\b whereas solve_triangular performs L\b
        beta = spla.solve_triangular(chol, cand_cross, lower=True)

        # Predict the marginal means at candidates.
        func_m = np.dot(cand_cross.T, alpha) + self.mean.value

        if full_cov:
            # Return the covariance matrix of the pred inputs, 
            # rather than just the individual variances at each input
            cand_cov = self.noiseless_kernel.cov(pred)
            func_v = cand_cov - np.dot(beta.T, beta)
        else:
            cand_cov = self.noiseless_kernel.diag_cov(pred)
            func_v = cand_cov - np.sum(beta**2, axis=0)

        if not compute_grad:
            return func_m, func_v

        grad_cross = self.noiseless_kernel.cross_cov_grad_data(inputs, pred)
        grad_xp_m  = np.tensordot(np.transpose(grad_cross, (1,2,0)), alpha, 1)

        # this should be faster than (and equivalent to) spla.cho_solve((chol, True), cand_cross)
        gamma = spla.solve_triangular(chol.T, beta, lower=False)

        # Use elementwise multiplication and a sum instead of a full matrix
        # product: we only want the diagonal of the covariance gradient, not the whole matrix
        grad_xp_v = -2.0*np.sum(gamma[:,:,np.newaxis] * grad_cross, axis=0)

        # Not very important -- just to make sure grad_xp_v.shape = grad_xp_m.shape
        if values.ndim > 1:
            grad_xp_v = grad_xp_v[:,:,np.newaxis]

        # In case this is a function over a 1D input,
        # return a numpy array rather than a float
        if np.ndim(grad_xp_m) == 0:
            grad_xp_m = np.array([grad_xp_m])
            grad_xp_v = np.array([grad_xp_v])

        return func_m, func_v, grad_xp_m, grad_xp_v
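
The gamma step relies on a general identity: for K = L L', cho_solve((L, True), B) equals two successive triangular solves. A standalone check on illustrative matrices:

import numpy as np
import scipy.linalg as spla

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
K = A.dot(A.T) + 5 * np.eye(5)
B = rng.randn(5, 3)

chol = spla.cholesky(K, lower=True)
beta = spla.solve_triangular(chol, B, lower=True)           # inv(L) B
gamma = spla.solve_triangular(chol.T, beta, lower=False)    # inv(L') inv(L) B
assert np.allclose(gamma, spla.cho_solve((chol, True), B))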