We have extracted the following 22 code examples from open-source Python projects to illustrate how numpy.tri() is used.
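As a quick reference before the project examples: np.tri(N, M=N, k=0) returns an (N, M) array with ones on and below the k-th diagonal and zeros elsewhere. A minimal sketch (my own illustration, not taken from any of the projects below):

import numpy as np

# 3x3 lower-triangular matrix of ones (main diagonal included)
print(np.tri(3))
# [[1. 0. 0.]
#  [1. 1. 0.]
#  [1. 1. 1.]]

# 3x5 boolean mask with ones up to one diagonal above the main one
print(np.tri(3, 5, k=1, dtype=bool))
# [[ True  True False False False]
#  [ True  True  True False False]
#  [ True  True  True  True False]]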
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements.  `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    array, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : Same thing, only for the upper triangle.
    """
    return m * tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)
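A NumPy-only sanity check of the masking idea used above: multiplying by np.tri with the same k reproduces np.tril (an added sketch, not part of the original project):

import numpy as np

m = np.arange(12, dtype=float).reshape(3, 4)
mask = np.tri(m.shape[0], m.shape[1], k=0, dtype=m.dtype)
assert np.array_equal(m * mask, np.tril(m, k=0))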
def __call__(self, h, train=True):
    """
    in_type:
        h: float32
    in_shape:
        h: (batch_size, hidden_num)
    out_type: float32
    out_shape: (batch_size, rating_num, predicted_item_num)
    """
    xp = cuda.get_array_module(h.data)
    h = self.p(h)
    if hasattr(self, 'q'):
        h = self.q(h)
    h = F.reshape(h, (-1, self.rating_num, self.item_num, 1))
    w = chainer.Variable(
        xp.asarray(np.tri(self.rating_num, dtype=np.float32).reshape(
            self.rating_num, self.rating_num, 1, 1)),
        volatile=h.volatile)
    h = F.convolution_2d(h, w)
    return F.reshape(h, (-1, self.rating_num, self.item_num))
def ordinal_loss(y, mask):
    xp = cuda.get_array_module(y.data)
    volatile = y.volatile
    b, c, n = y.data.shape
    max_y = F.broadcast_to(F.max(y, axis=1, keepdims=True), y.data.shape)
    y = y - max_y
    sum_y = F.broadcast_to(F.expand_dims(F.sum(y, axis=1), 1), y.data.shape)
    down_tri = np.tri(c, dtype=np.float32)
    up_tri = down_tri.T
    w1 = Variable(xp.asarray(down_tri.reshape(c, c, 1, 1)), volatile=volatile)
    w2 = Variable(xp.asarray(up_tri.reshape(c, c, 1, 1)), volatile=volatile)
    h = F.exp(F.expand_dims(y, -1))
    h1 = F.convolution_2d(h, w1)
    h1 = F.convolution_2d(F.log(h1), w1)
    h2 = F.convolution_2d(h, w2)
    h2 = F.convolution_2d(F.log(h2), w2)
    h = F.reshape(h1 + h2, (b, c, n))
    return F.sum((h - sum_y - y) * mask) / b
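In both Chainer snippets above, the 1x1 convolution with a np.tri kernel is just a cumulative sum over the rating axis. A small NumPy-only sketch of that equivalence (added for illustration, not from the project):

import numpy as np

c = 4
v = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)

down_tri = np.tri(c, dtype=np.float32)   # ones where j <= i
up_tri = down_tri.T                      # ones where j >= i

# down_tri @ v accumulates from the lowest rating upwards,
# up_tri @ v accumulates from the highest rating downwards.
assert np.allclose(down_tri @ v, np.cumsum(v))
assert np.allclose(up_tri @ v, np.cumsum(v[::-1])[::-1])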
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
    """Matrix band part of ones."""
    if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
        # Needed info is constant, so we construct in numpy
        if num_lower < 0:
            num_lower = rows - 1
        if num_upper < 0:
            num_upper = cols - 1
        # note the (cols, rows) order: the transpose then has shape (rows, cols)
        lower_mask = np.tri(cols, rows, num_lower).T
        upper_mask = np.tri(rows, cols, num_upper)
        band = np.ones((rows, cols)) * lower_mask * upper_mask
        if out_shape:
            band = band.reshape(out_shape)
        band = tf.constant(band, tf.float32)
    else:
        band = tf.matrix_band_part(tf.ones([rows, cols]),
                                   tf.cast(num_lower, tf.int64),
                                   tf.cast(num_upper, tf.int64))
        if out_shape:
            band = tf.reshape(band, out_shape)
    return band
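The two np.tri masks above encode the band condition i - j <= num_lower and j - i <= num_upper. A short NumPy check of that construction (values and names are illustrative only):

import numpy as np

rows, cols, num_lower, num_upper = 4, 5, 1, 2
lower_mask = np.tri(cols, rows, num_lower).T   # ones where i - j <= num_lower
upper_mask = np.tri(rows, cols, num_upper)     # ones where j - i <= num_upper
band = np.ones((rows, cols)) * lower_mask * upper_mask

i, j = np.indices((rows, cols))
expected = ((i - j <= num_lower) & (j - i <= num_upper)).astype(float)
assert np.array_equal(band, expected)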
def test_euclidean_pdist(self):
    a = np.arange(12, dtype=float).reshape(4, 3)
    out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
    umt.euclidean_pdist(a, out)
    b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
    b = b[~np.tri(a.shape[0], dtype=bool)]
    assert_almost_equal(out, b)
    # An output array is required to determine p with signature (n,d)->(p)
    assert_raises(ValueError, umt.euclidean_pdist, a)
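For reference, ~np.tri(n, dtype=bool) in the test above selects the strictly upper-triangular entries, i.e. all index pairs (i, j) with i < j in row-major order, which matches the layout the pairwise-distance function produces. A tiny added sketch:

import numpy as np

n = 4
mask = ~np.tri(n, dtype=bool)    # True strictly above the diagonal
pairs = np.argwhere(mask)        # row-major list of (i, j) with i < j
assert np.array_equal(pairs, [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]])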
def X_to_vec(X):
    # read the lower triangle of X column by column into a 1-D vector
    n = X.shape[0]
    return X.T[np.tri(n, dtype=bool).T]
def vec_to_X(v_X):
    n = int(math.sqrt(2 * len(v_X)))
    if len(v_X) != n * (n + 1) / 2:
        raise ValueError(
            "v_X is not the right shape for a vectorized lower triangular "
            "matrix. Tried to turn vector of size {} into matrix with "
            "width {}".format(len(v_X), n))
    Y = np.zeros((n, n))
    Y[np.tri(n, dtype=bool).T] = v_X
    return Y + np.triu(Y, 1).T
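A round-trip check of the two helpers above (assuming X_to_vec and vec_to_X are defined as shown, with numpy as np and math imported; since only one triangle is vectorized, exact round-tripping needs a symmetric input):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
X = A + A.T                      # an arbitrary symmetric matrix

v = X_to_vec(X)                  # one triangle flattened to a vector
assert v.shape == (4 * 5 // 2,)  # n * (n + 1) / 2 entries
assert np.allclose(vec_to_X(v), X)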
def J(v_X):
    X = vec_to_X(v_X)
    n = X.shape[0]
    # perform scaling
    _i = tuple(range(n))
    X[_i, _i] *= _sqrt2
    Lam, U = np.linalg.eigh(X)
    idx = np.argsort(Lam)
    Lam = Lam[idx]
    U = U[:, idx]
    L = np.diag(Lam)
    L_max = np.maximum(L, 0.0)
    dU_dX, dL_dX = dU_dL_dX(Lam, U)
    dL_max_dX = dL_dX.copy()
    for i, l in enumerate(Lam):
        if l < 0:
            dL_max_dX[i, :, :] = 0
    t1 = dot(U.dot(L_max), np.rollaxis(dU_dX, 1, 0))
    t2 = np.rollaxis(t1, 1, 0)
    t3 = np.rollaxis(dot(multiply_diag(U, dL_max_dX), U.T, (1, 0)), 3, 1)
    idx = np.nonzero(np.tri(n, dtype=bool).T)
    W = t1 + t2 + t3
    # rescale jacobian
    W[:, :, _i, _i] *= _sqrt2
    W[_i, _i, :, :] /= _sqrt2
    return W[idx[0], idx[1]][:, idx[0], idx[1]]
def test_tri(self):
    def check(dtype, N, M_=None, k=0):
        # Theano does not accept None as a tensor.
        # So we must use a real value.
        M = M_
        # Currently DebugMode does not support None as inputs even if this
        # is allowed.
        if M is None and theano.config.mode in ['DebugMode', 'DEBUG_MODE']:
            M = N
        N_symb = tensor.iscalar()
        M_symb = tensor.iscalar()
        k_symb = tensor.iscalar()
        f = function([N_symb, M_symb, k_symb],
                     tri(N_symb, M_symb, k_symb, dtype=dtype))
        result = f(N, M, k)
        self.assertTrue(
            numpy.allclose(result, numpy.tri(N, M_, k, dtype=dtype)))
        self.assertTrue(result.dtype == numpy.dtype(dtype))

    for dtype in ALL_DTYPES:
        yield check, dtype, 3
        # M != N, k = 0
        yield check, dtype, 3, 5
        yield check, dtype, 5, 3
        # N == M, k != 0
        yield check, dtype, 3, 3, 1
        yield check, dtype, 3, 3, -1
        # N < M, k != 0
        yield check, dtype, 3, 5, 1
        yield check, dtype, 3, 5, -1
        # N > M, k != 0
        yield check, dtype, 5, 3, 1
        yield check, dtype, 5, 3, -1
def perform(self, node, inp, out_):
    N, M, k = inp
    out, = out_
    out[0] = numpy.tri(N, M, k, dtype=self.dtype)
def tri(N, M=None, k=0, dtype=None):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array.
        By default, `M` is taken equal to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        `k` = 0 is the main diagonal, while `k` < 0 is below it,
        and `k` > 0 is above. The default is 0.
    dtype : dtype, optional
        Data type of the returned array. The default is float.

    Returns
    -------
    Array of shape (N, M)
        Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i, j] == 1`` for ``j <= i + k``, 0 otherwise.
    """
    if dtype is None:
        dtype = config.floatX
    if M is None:
        M = N
    op = Tri(dtype)
    return op(N, M, k)
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th diagonal
    zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : Lower triangle of an array.
    """
    return m * (1 - tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype))
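The complementary check for triu: subtracting the shifted np.tri mask from one keeps exactly the entries np.triu keeps (an added sketch, not from the project):

import numpy as np

m = np.arange(12, dtype=float).reshape(3, 4)
k = 1
mask = 1 - np.tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype)
assert np.array_equal(m * mask, np.triu(m, k=k))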
def make_rating_matrix(x, r, item_num, rating_num):
    y = np.zeros((x.shape[0], item_num, rating_num), dtype=np.float32)
    for i in six.moves.range(x.shape[0]):
        index = x[i] >= 0
        y[i, x[i, index], r[i, index]] = 1
    r_to_v = np.tri(rating_num, dtype=np.float32)
    y = y.dot(r_to_v)
    return y.reshape((x.shape[0], -1))
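The final dot with np.tri(rating_num) turns each one-hot rating into a cumulative ("thermometer") code: a rating r becomes ones at levels 0..r. A tiny illustration of that step alone (values are made up):

import numpy as np

rating_num = 5
one_hot = np.zeros(rating_num, dtype=np.float32)
one_hot[3] = 1                                  # a single rating of 3

r_to_v = np.tri(rating_num, dtype=np.float32)
assert np.array_equal(one_hot.dot(r_to_v), [1, 1, 1, 1, 0])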
def sample_wishart(sigma, nu):
    n = sigma.shape[0]
    chol = np.linalg.cholesky(sigma)

    # use matlab's heuristic for choosing between the two different sampling schemes
    if (nu <= 81 + n) and (nu == round(nu)):
        # direct
        X = np.dot(chol, np.random.normal(size=(n, int(nu))))
    else:
        A = np.diag(np.sqrt(np.random.chisquare(nu - np.arange(n))))
        A[np.tri(n, k=-1, dtype=bool)] = np.random.normal(size=n * (n - 1) // 2)
        X = np.dot(chol, A)

    return np.dot(X, X.T)
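A quick way to exercise sample_wishart as defined above (my own usage sketch): draw one sample from an identity scale matrix and confirm it is a symmetric positive semi-definite matrix of the expected shape.

import numpy as np

np.random.seed(0)
sigma = np.eye(3)
W = sample_wishart(sigma, nu=10)

assert W.shape == (3, 3)
assert np.allclose(W, W.T)                       # symmetric
assert np.all(np.linalg.eigvalsh(W) >= -1e-10)   # positive semi-definite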
def testCorrectlyMakesNoBatchLowerTril(self):
    with self.test_session():
        x = ops.convert_to_tensor(self._rng.randn(10))
        expected = self._fill_lower_triangular(tensor_util.constant_value(x))
        actual = distribution_util.fill_lower_triangular(x, validate_args=True)
        self.assertAllEqual(expected.shape, actual.get_shape())
        self.assertAllEqual(expected, actual.eval())
        g = gradients_impl.gradients(
            distribution_util.fill_lower_triangular(x), x)
        self.assertAllEqual(np.tri(4).reshape(-1), g[0].values.eval())