The following 22 code examples, extracted from open-source Python projects, illustrate how to use scipy.sparse.dia_matrix().
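Before the project examples, here is a minimal orientation sketch (not from any of the projects below) showing the two construction patterns that recur throughout: the (data, offsets) tuple and conversion from a dense array. All values are arbitrary illustrations.

import numpy as np
import scipy.sparse as sp

# Pattern 1: (data, offsets). Row k of `data` fills diagonal offsets[k];
# data[k, j] lands at position (j - offsets[k], j).
data = np.array([[1, 2, 3, 4],
                 [5, 6, 7, 8]])
offsets = np.array([0, 1])  # main diagonal and first superdiagonal
A = sp.dia_matrix((data, offsets), shape=(4, 4))

# Pattern 2: convert a dense (or other sparse) matrix; scipy extracts
# the nonzero diagonals automatically.
B = sp.dia_matrix(np.eye(4))

print(A.toarray())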
def d_x2(self, factors=None):
    """Creates a sparse matrix for computing the second derivative with
    respect to x, multiplied by factors given for every point. Uses the
    central difference quotient.

    Args:
        factors: Factor for each point to be applied after derivation.

    Returns:
        Sparse matrix to calculate second derivatives of the field components.
    """
    # use ones as factors if none are specified
    if factors is None:
        factors = np.array(1).repeat(self.num_points)

    return sp.dia_matrix((np.array([factors, -2 * factors, factors]), [-1, 0, 1]),
                         shape=(self.num_points, self.num_points))
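As a standalone illustration of the stencil built above (not part of the original project): with unit factors, d_x2 produces the classic [1, -2, 1] tridiagonal second-difference matrix.

import numpy as np
import scipy.sparse as sp

num_points = 5
factors = np.ones(num_points)
D2 = sp.dia_matrix((np.array([factors, -2 * factors, factors]), [-1, 0, 1]),
                   shape=(num_points, num_points))
print(D2.toarray()[:3])
# [[-2.  1.  0.  0.  0.]
#  [ 1. -2.  1.  0.  0.]
#  [ 0.  1. -2.  1.  0.]]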
def create_matrix_sparse_from_conf(conf):
    restypes = ['tdnn', 'lpfb']
    # tdnn res
    weights = []
    if 'restype' not in conf or conf['restype'] not in restypes:
        return None
    else:
        if conf['restype'] == 'tdnn':
            w_ = spa.dia_matrix(np.diag(np.ones((conf['N']-1,)), k=-1))
            return w_
        elif conf['restype'] == 'lpfb':
            # w_ = spa.dia_matrix(np.diag(1 - (np.logspace(1e-3, 1e-1, conf['N']) - 1), k=0))
            w_ = spa.dia_matrix(np.diag(1 - np.exp(np.linspace(-6, -0.69, conf['N'])), k=0))
            return w_
    return None

################################################################################
# Standalone class for learning rules
#  - Recursive Least Squares (RLS, depends on rlspy.py): the vanilla online
#    supervised reservoir training method
#  - First-order reduced and controlled error or FORCE learning
#    (Sussillo & Abbott, 2012)
#  - FORCEmdn: Mixture density output layer using the FORCE rule (Berthold, 2017)
#  - Exploratory Hebbian learning (Legenstein & others, 2010)
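A hypothetical usage sketch (assuming the factory above is in scope): a 'tdnn' reservoir of size 4 is a pure delay line, i.e. a single subdiagonal of ones.

w = create_matrix_sparse_from_conf({'restype': 'tdnn', 'N': 4})
print(w.toarray())
# [[0. 0. 0. 0.]
#  [1. 0. 0. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 1. 0.]]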
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
                         fit_intercept=True, tol=1e-3):
    X = X[safe_mask(X, mask)]
    y = y[mask]
    if issparse(X):
        size = len(weights)
        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
        X = X * weight_dia
    else:
        X *= (1 - weights)

    C = np.atleast_1d(np.asarray(C, dtype=np.float64))
    scores = np.zeros((X.shape[1], len(C)), dtype=bool)

    for this_C, this_scores in zip(C, scores.T):
        # XXX : would be great to do it with a warm_start ...
        clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
                                 fit_intercept=fit_intercept)
        clf.fit(X, y)
        this_scores[:] = np.any(
            np.abs(clf.coef_) > 10 * np.finfo(float).eps, axis=0)
    return scores
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix and the row and column scaling factors.
    """
    X = make_nonnegative(X)
    row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
    col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
    row_diag = np.where(np.isnan(row_diag), 0, row_diag)
    col_diag = np.where(np.isnan(col_diag), 0, col_diag)
    if issparse(X):
        n_rows, n_cols = X.shape
        r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        an = r * X * c
    else:
        an = row_diag[:, np.newaxis] * X * col_diag
    return an, row_diag, col_diag
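The sparse branch above relies on a common idiom: wrapping per-row and per-column factors in a dia_matrix turns the scaling into two sparse matrix products. A minimal standalone check (example values, not from the project):

import numpy as np
from scipy.sparse import csr_matrix, dia_matrix

X = csr_matrix(np.array([[4.0, 0.0],
                         [0.0, 9.0]]))
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
r = dia_matrix((row_diag, [0]), shape=(2, 2))
c = dia_matrix((col_diag, [0]), shape=(2, 2))
print((r * X * c).toarray())  # the identity: both scalings applied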
def d_x(self, factors=None, variant='forward'):
    """Creates a sparse matrix for computing the first derivative with
    respect to x, multiplied by factors given for every point. Uses the
    forward difference quotient by default.

    Args:
        factors: Factor for each point to be applied after derivation.
        variant: Variant for the difference quotient ('forward', 'central',
            or 'backward').

    Returns:
        Sparse matrix to calculate derivatives of the field components.
    """
    # use ones as factors if none are specified
    if factors is None:
        factors = np.array(1).repeat(self.num_points)

    if variant == 'forward':
        return sp.dia_matrix((np.array([-factors, factors]), [0, 1]),
                             shape=(self.num_points, self.num_points))
    elif variant == 'central':
        return sp.dia_matrix((np.array([-factors / 2, factors / 2]), [-1, 1]),
                             shape=(self.num_points, self.num_points))
    elif variant == 'backward':
        return sp.dia_matrix((np.array([-factors, factors]), [-1, 0]),
                             shape=(self.num_points, self.num_points))
    else:
        raise ValueError('Unknown difference quotient variant {}.'.format(variant))
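For a standalone look at the variants above (unit factors, illustrative values only), the 'forward' branch yields a [-1, 1] bidiagonal matrix; note that boundary rows are left untreated by the stencil itself.

import numpy as np
import scipy.sparse as sp

num_points = 4
factors = np.ones(num_points)
D_fwd = sp.dia_matrix((np.array([-factors, factors]), [0, 1]),
                      shape=(num_points, num_points))
print(D_fwd.toarray())
# [[-1.  1.  0.  0.]
#  [ 0. -1.  1.  0.]
#  [ 0.  0. -1.  1.]
#  [ 0.  0.  0. -1.]]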
def d_y(self, factors=None, variant='forward'):
    """Creates a sparse matrix for computing the first derivative with
    respect to y, multiplied by factors given for every point. Uses the
    forward difference quotient by default.

    Args:
        factors: Factor for each point to be applied after derivation.
        variant: Variant for the difference quotient ('forward', 'central',
            or 'backward').

    Returns:
        Sparse matrix to calculate derivatives of the field components.
    """
    # use ones as factors if none are specified
    if factors is None:
        factors = np.array(1).repeat(self.num_points)

    if variant == 'forward':
        return sp.dia_matrix((np.array([-factors, factors]), [0, self.x.samples]),
                             shape=(self.num_points, self.num_points))
    elif variant == 'central':
        return sp.dia_matrix(
            (np.array([-factors / 2, factors / 2]), [-self.x.samples, self.x.samples]),
            shape=(self.num_points, self.num_points))
    elif variant == 'backward':
        return sp.dia_matrix((np.array([-factors, factors]), [-self.x.samples, 0]),
                             shape=(self.num_points, self.num_points))
    else:
        raise ValueError('Unknown difference quotient variant {}.'.format(variant))
def spzeros(n1, n2):
    """a sparse matrix of zeros"""
    return sp.dia_matrix((n1, n2))
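Passing a plain shape tuple, as above, allocates an empty DIA matrix with no stored diagonals. A quick check:

import scipy.sparse as sp

Z = sp.dia_matrix((3, 4))
print(Z.shape, Z.nnz)  # (3, 4) 0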
def create_uncertainty(uncertainty, mask):
    """Creates the observational uncertainty matrix. We assume that
    uncertainty is a single value and we return a diagonal matrix back.
    We present this diagonal **ONLY** for pixels that have observations
    (i.e. not masked)."""
    good_obs = mask.sum()
    R_mat = np.ones(good_obs) * uncertainty * uncertainty
    return sp.dia_matrix((R_mat, 0), shape=(R_mat.shape[0], R_mat.shape[0]))
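A hypothetical call (assuming create_uncertainty above is in scope): three unmasked pixels with an uncertainty of 0.1 give a 3x3 diagonal of variances.

import numpy as np

mask = np.array([True, False, True, True])
R = create_uncertainty(0.1, mask)
print(R.toarray())  # diag(0.01, 0.01, 0.01)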
def create_linear_observation_operator(obs_op, n_params, metadata, mask,
                                       state_mask, x_forecast, band=None):
    """A simple **identity** observation operator. It is expected that you
    subclass and redefine things...."""
    good_obs = mask.sum()  # size of H_matrix
    H_matrix = sp.dia_matrix(np.eye(good_obs))
    return H_matrix
def sparse_dia_matrices():
    # Build a DIA matrix from a dense 3x3 array (example values).
    dia = sparse.dia_matrix([[1, 2, 3],
                             [4, 5, 6],
                             [7, 8, 9]])
    # print("dia matrices =")
    # print(dia)
    return dia
def _add_diag(self, other):
    other_diag = sps.dia_matrix(
        (other, np.array([0])), shape=(other.shape[0], other.shape[0]))
    return self._binopt(other_diag, '_plus_')
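The idiom behind _add_diag, as a standalone sketch with example values: promote the vector to a one-diagonal dia_matrix so that ordinary sparse addition applies it to the main diagonal.

import numpy as np
import scipy.sparse as sps

A = sps.csr_matrix(np.ones((3, 3)))
d = np.array([1.0, 2.0, 3.0])
other_diag = sps.dia_matrix((d, np.array([0])), shape=(3, 3))
print((A + other_diag).toarray())
# [[2. 1. 1.]
#  [1. 3. 1.]
#  [1. 1. 4.]]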
def __init__(self, backend, A, name='mat'):
    A2 = spp.dia_matrix((np.conj(A.data), -A.offsets), shape=A.shape[::-1])
    super().__init__(backend, A2, name=name)
def __init__(self, backend, A, name='mat'):
    """
    Create a matrix from the given `scipy.sparse.spmatrix`.
    """
    assert isinstance(A, spp.dia_matrix)
    A = A.astype(np.complex64)
    self._backend = backend
    self.data = backend.copy_array(A.data.T, name=name + ".data")
    self.offsets = backend.copy_array(A.offsets, name=name + ".offsets")
    self.shape = A.shape
    self.dtype = A.dtype
    self._row_frac = 1
    self._col_frac = 1
def test_dia_matrix(backend, M, K, N, alpha, beta, maxoffsets):
    b = backend()
    if getattr(b.cdiamm, '__isabstractmethod__', False):
        pytest.skip("backend <%s> doesn't implement cdiamm" % backend.__name__)
    c = np.dtype('complex64')
    offsets = np.array(list(set(np.random.randint(-K, M+K, size=maxoffsets))))
    data = np.random.rand(offsets.size, K) + 1j * np.random.rand(offsets.size, K)
    data = data.astype(c)
    A = spp.dia_matrix((data, offsets), shape=(M, K))
    A_d = b.dia_matrix(b, A)

    # forward
    x = (np.random.rand(K, N) + 1j * np.random.rand(K, N))
    y = (np.random.rand(M, N) + 1j * np.random.rand(M, N))
    x = np.require(x, dtype=c, requirements='F')
    y = np.require(y, dtype=c, requirements='F')
    y_exp = beta * y + alpha * (A @ x)
    x_d = b.copy_array(x)
    y_d = b.copy_array(y)
    A_d.forward(y_d, x_d, alpha=alpha, beta=beta)
    y_act = y_d.to_host()
    np.testing.assert_allclose(y_act, y_exp, atol=1e-5)

    # adjoint
    x = (np.random.rand(K, N) + 1j * np.random.rand(K, N))
    y = (np.random.rand(M, N) + 1j * np.random.rand(M, N))
    x = np.require(x, dtype=c, requirements='F')
    y = np.require(y, dtype=c, requirements='F')
    x_d = b.copy_array(x)
    y_d = b.copy_array(y)
    x_exp = beta * x + alpha * (A.getH() @ y)
    A_d.adjoint(x_d, y_d, alpha=alpha, beta=beta)
    x_act = x_d.to_host()
    np.testing.assert_allclose(x_act, x_exp, atol=1e-5)
def cdiamm(self, y, shape, offsets, data, x, alpha=1.0, beta=0.0, adjoint=True):
    A = spp.dia_matrix((data._arr.T, offsets._arr), shape=shape)
    X = x._arr.reshape(x.shape, order='F')
    Y = y._arr.reshape(y.shape, order='F')
    if adjoint:
        Y[:] = alpha * (A.H @ X) + beta * Y
    else:
        Y[:] = alpha * (A @ X) + beta * Y

# -----------------------------------------------------------------------
# Misc Routines
# -----------------------------------------------------------------------
def makeLaplacianMatrixUmbrellaWeights(VPos, ITris, anchorsIdx, anchorWeights=1):
    N = VPos.shape[0]
    M = ITris.shape[0]
    I = np.zeros(M*6)
    J = np.zeros(M*6)
    V = np.ones(M*6)

    # Step 1: Set up umbrella entries
    for shift in range(3):
        # For all 3 shifts of the roles of triangle vertices
        # to compute different cotangent weights
        [i, j, k] = [shift, (shift+1) % 3, (shift+2) % 3]
        I[shift*M*2:shift*M*2+M] = ITris[:, i]
        J[shift*M*2:shift*M*2+M] = ITris[:, j]
        I[shift*M*2+M:shift*M*2+2*M] = ITris[:, j]
        J[shift*M*2+M:shift*M*2+2*M] = ITris[:, i]

    # Step 2: Create laplacian matrix
    L = sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
    L[L > 0] = 1
    # Create the diagonal by summing the rows and subtracting off the
    # nondiagonal entries
    L = sparse.dia_matrix((L.sum(1).flatten(), 0), L.shape) - L

    # Step 3: Add anchors
    L = L.tocoo()
    I = L.row.tolist()
    J = L.col.tolist()
    V = L.data.tolist()
    I = I + list(range(N, N+len(anchorsIdx)))
    J = J + list(anchorsIdx)
    V = V + [anchorWeights]*len(anchorsIdx)
    L = sparse.coo_matrix((V, (I, J)), shape=(N+len(anchorsIdx), N)).tocsr()
    return L
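Step 2 above uses a compact degree-matrix idiom; here is a minimal standalone version of it on a toy 3-vertex adjacency matrix (example values):

import numpy as np
from scipy import sparse

W = sparse.csr_matrix(np.array([[0., 1., 1.],
                                [1., 0., 0.],
                                [1., 0., 0.]]))
# dia_matrix of the row sums is the degree matrix; subtracting W gives
# the graph Laplacian.
Lap = sparse.dia_matrix((W.sum(1).flatten(), 0), W.shape) - W
print(Lap.toarray())
# [[ 2. -1. -1.]
#  [-1.  1.  0.]
#  [-1.  0.  1.]]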
def get_subset_cpd(self, sub_idx):
    """
    Get the cpd over a subset of the variables.

    :param np.ndarray[int]|np.ndarray[bool] sub_idx: indices of variables to keep
    :return: a new Gaussian CPD
    :rtype: GaussianCPD
    """
    if len(sub_idx) == 0 or (sub_idx.dtype == bool and not np.sum(sub_idx)):
        raise ValueError("sub_idx must not be empty")
    sub_mean = self.mean[sub_idx]
    sub_dim = len(sub_mean)
    if isinstance(self.precision, sp.dia_matrix):
        sub_precision = sp.dia_matrix((self.precision.diagonal()[sub_idx], np.zeros(1)),
                                      shape=(sub_dim, sub_dim))
    elif np.isscalar(self.precision):
        sub_precision = self.precision
    elif isinstance(self.precision, np.ndarray):
        if np.prod(self.precision.shape) == self.dim:
            sub_precision = self.precision[sub_idx]
        else:
            # We do the indexing this way for performance reasons.
            sub_precision = self.precision[sub_idx, :][:, sub_idx]
    else:
        # We do the indexing this way for performance reasons.
        sub_precision = self.precision.tocsr()[sub_idx, :][:, sub_idx]
    return GaussianCPD(dim=sub_dim, mean=sub_mean, precision=sub_precision,
                       mean_lin_op=get_subset_lin_op(self.mean_lin_op, sub_idx))
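The dia_matrix branch above exploits the fact that restricting a diagonal precision matrix to a subset of variables just subsets its diagonal; a standalone sketch with example values:

import numpy as np
import scipy.sparse as sp

precision = sp.dia_matrix((np.array([1., 2., 3., 4.]), 0), shape=(4, 4))
sub_idx = np.array([0, 2])
sub_precision = sp.dia_matrix((precision.diagonal()[sub_idx], np.zeros(1)),
                              shape=(2, 2))
print(sub_precision.toarray())  # diag(1., 3.)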
def _rescale_data(X, y, sample_weight):
    """Rescale data so as to support sample_weight"""
    n_samples = X.shape[0]
    sample_weight = sample_weight * np.ones(n_samples)
    sample_weight = np.sqrt(sample_weight)
    sw_matrix = sparse.dia_matrix((sample_weight, 0),
                                  shape=(n_samples, n_samples))
    X = safe_sparse_dot(sw_matrix, X)
    y = safe_sparse_dot(sw_matrix, y)
    return X, y
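A hypothetical call of _rescale_data above (assuming it and its sklearn helpers are importable): each row is multiplied by the square root of its weight, so ordinary least squares on the result solves the weighted problem.

import numpy as np

X = np.array([[1.0, 2.0],
              [3.0, 4.0]])
y = np.array([1.0, 2.0])
Xw, yw = _rescale_data(X, y, sample_weight=np.array([4.0, 9.0]))
print(Xw)  # rows scaled by 2 and 3: [[2. 4.] [9. 12.]]
print(yw)  # [2. 6.]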
def propagate_information_filter_SLOW(x_analysis, P_analysis,
                                      P_analysis_inverse,
                                      M_matrix, Q_matrix):
    """Information filter state propagation using the INVERSE state
    covariance matrix and a linear state transition model. This function
    returns `None` for the forecast covariance matrix (as this takes
    forever). This method is based on calculating the actual matrix from
    the inverse of the inverse covariance, so it is **SLOW**. Mostly here
    for testing purposes.

    Parameters
    ----------
    x_analysis : array
        The analysis state vector. This comes either from the assimilation
        or directly from a previously propagated state.
    P_analysis : 2D sparse array
        The analysis covariance matrix (typically will be a sparse matrix).
        As this is an information filter update, you will typically pass
        `None` to it, as it is unused.
    P_analysis_inverse : 2D sparse array
        The INVERSE analysis covariance matrix (typically a sparse matrix).
    M_matrix : 2D array
        The linear state propagation model.
    Q_matrix : 2D array (sparse)
        The state uncertainty inflation matrix that is added to the
        covariance matrix.

    Returns
    -------
    x_forecast (forecast state vector), `None` and P_forecast_inverse
    (forecast inverse covariance matrix)"""
    x_forecast = M_matrix.dot(x_analysis)
    # This is an approximation to the information filter equations
    # (see e.g. Terejanu's notes)
    M = P_analysis_inverse  # for convenience and to stay with
                            # Terejanu's notation
    # Main assumption here is that the "inflation" factor is
    # calculated using the main diagonal of M
    D = 1. / (1. + M.diagonal() * Q_matrix.diagonal())
    M = sp.dia_matrix((M.diagonal(), 0), shape=M.shape)
    P_forecast_inverse = M.dot(sp.dia_matrix((D, 0), shape=M.shape))
    return x_forecast, None, P_forecast_inverse
def fit(self, Xt, yt, Xh, yh, callback=None):
    lbin = LabelBinarizer()
    lbin.fit(yt)
    Yt_multi = lbin.transform(yt)
    Yh_multi = lbin.transform(yh)
    sample_weight_train = np.ones(Xt.shape[0])
    sample_weight_test = np.ones(Xh.shape[0])

    if Yt_multi.shape[1] == 1:
        Yt_multi = np.hstack([1 - Yt_multi, Yt_multi])
        Yh_multi = np.hstack([1 - Yh_multi, Yh_multi])
        print('warning: only two classes detected')

    n_classes = Yt_multi.shape[1]
    n_features = Xt.shape[1]
    # if not np.all(np.unique(yt) == np.array([-1, 1])):
    #     raise ValueError
    x0 = np.zeros(n_features * n_classes)

    # assert x0.size == self.alpha0.size

    def h_func_grad(x, alpha):
        # x = x.reshape((-1, Yt_multi.shape[1]))
        return _multinomial_loss_grad(
            x, Xt, Yt_multi, np.exp(alpha), sample_weight_train)[:2]

    def h_hessian(x, alpha):
        # x = x.reshape((-1, Yt_multi.shape[1]))
        return _multinomial_grad_hess(
            x, Xt, Yt_multi, np.exp(alpha), sample_weight_train)[1]

    def g_func_grad(x, alpha):
        # x = x.reshape((-1, Yt_multi.shape[1]))
        return _multinomial_loss_grad(
            x, Xh, Yh_multi, np.zeros(alpha.size), sample_weight_test)[:2]

    def h_crossed(x, alpha):
        # return x.reshape((n_classes, -1)) * alpha
        # x = x.reshape((-1, Yt_multi.shape[1]))
        tmp = np.exp(alpha) * x
        return sparse.dia_matrix(
            (tmp, 0),
            shape=(n_features * n_classes, n_features * n_classes))

    opt = hoag_lbfgs(
        h_func_grad, h_hessian, h_crossed, g_func_grad, x0,
        callback=callback,
        tolerance_decrease=self.tolerance_decrease,
        lambda0=self.alpha0, maxiter=self.max_iter,
        verbose=self.verbose)

    self.coef_ = opt[0]
    self.alpha_ = opt[1]
    return self
def _prepare_rerp_preds(n_samples, sfreq, events, event_id=None, tmin=-.1,
                        tmax=1, covariates=None):
    """Build predictor matrix as well as metadata (e.g. condition time
    windows), primarily for `linear_regression_raw`. See there for an
    explanation of parameters and output."""
    conds = list(event_id)
    if covariates is not None:
        conds += list(covariates)

    # time windows (per event type) are converted to sample points from times
    if isinstance(tmin, (float, int)):
        tmin_s = dict((cond, int(tmin * sfreq)) for cond in conds)
    else:
        tmin_s = dict((cond, int(tmin.get(cond, -.1) * sfreq))
                      for cond in conds)
    if isinstance(tmax, (float, int)):
        tmax_s = dict(
            (cond, int((tmax * sfreq) + 1.)) for cond in conds)
    else:
        tmax_s = dict((cond, int((tmax.get(cond, 1.) * sfreq) + 1))
                      for cond in conds)

    # Construct predictor matrix
    # We do this by creating one array per event type, shape (lags, samples)
    # (where lags depends on tmin/tmax and can be different for different
    # event types). Columns correspond to predictors, predictors correspond
    # to time lags. Thus, each array is mostly sparse, with one diagonal of
    # 1s per event (for binary predictors).
    cond_length = dict()
    xs = []
    for cond in conds:
        tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
        n_lags = int(tmax_ - tmin_)  # width of matrix
        if cond in event_id:  # for binary predictors
            ids = ([event_id[cond]]
                   if isinstance(event_id[cond], int)
                   else event_id[cond])
            onsets = -(events[in1d(events[:, 2], ids), 0] + tmin_)
            values = np.ones((len(onsets), n_lags))

        else:  # for predictors from covariates, e.g. continuous ones
            covs = covariates[cond]
            if len(covs) != len(events):
                error = ("Condition {0} from ```covariates``` is "
                         "not the same length as ```events```").format(cond)
                raise ValueError(error)
            onsets = -(events[np.where(covs != 0), 0] + tmin_)[0]
            v = np.asarray(covs)[np.nonzero(covs)].astype(float)
            values = np.ones((len(onsets), n_lags)) * v[:, np.newaxis]

        cond_length[cond] = len(onsets)
        xs.append(sparse.dia_matrix((values, onsets),
                                    shape=(n_samples, n_lags)))

    return sparse.hstack(xs), conds, cond_length, tmin_s, tmax_s
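The per-event diagonal trick at the end of the loop above can be seen in isolation: each (negated) onset becomes a dia_matrix offset, placing a diagonal of ones that starts at that event's sample. A toy example (values chosen for illustration):

import numpy as np
from scipy import sparse

n_samples, n_lags = 8, 3
onsets = np.array([-1, -4])  # events at samples 1 and 4
values = np.ones((len(onsets), n_lags))
X = sparse.dia_matrix((values, onsets), shape=(n_samples, n_lags))
print(X.toarray())
# column j holds lag j; a run of ones starts at each event's sample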