The following 11 code examples, extracted from open-source Python projects, illustrate how to use torch.addmv().
def forward(self, add_vector, matrix, vector):
    """Fused matrix-vector product with an added vector, via torch.addmv.

    Legacy torch.autograd.Function style: state (alpha, beta) lives on
    ``self`` and tensors needed by backward are saved on ``self`` too.
    """
    self.save_for_backward(matrix, vector)
    # Obtain the output buffer (freshly allocated or reused) from the
    # legacy helper before running the fused BLAS call.
    out = self._get_output(add_vector)
    # NOTE(review): argument order follows a deprecated addmv overload
    # (out, scalar, tensor, scalar, mat, vec) — preserved verbatim; the
    # exact alpha/beta semantics of this overload are not visible here.
    return torch.addmv(out, self.alpha, add_vector, self.beta, matrix, vector)
def forward(ctx, add_vector, matrix, vector, alpha=1, beta=1, inplace=False):
    """Fused matrix-vector product with an added vector, via torch.addmv.

    ctx-based autograd Function style: scalars are stashed on ``ctx`` and
    the tensors needed by backward are registered with save_for_backward.
    """
    ctx.alpha = alpha
    ctx.beta = beta
    ctx.save_for_backward(matrix, vector)
    # Resolve the destination buffer; ``inplace`` reuses add_vector's storage.
    out_buf = _get_output(ctx, add_vector, inplace=inplace)
    # NOTE(review): this is the deprecated scalar-prefix addmv overload
    # (alpha, tensor, beta, mat, vec) — kept byte-for-byte as written.
    return torch.addmv(alpha, add_vector, beta, matrix, vector, out=out_buf)
def test_functional_blas(self):
    """Check that torch BLAS wrappers agree on Variable and plain-tensor inputs."""

    def compare(fn, *args):
        # Unwrap Variables to raw tensors; the wrapped and unwrapped calls
        # must produce the same data.
        wrapped = fn(*args).data
        raw = tuple(a.data if isinstance(a, Variable) else a for a in args)
        self.assertEqual(wrapped, fn(*raw))

    def test_blas_add(fn, x, y, z):
        # Checks all signatures: plain, alpha-prefixed, alpha+beta-prefixed.
        compare(fn, x, y, z)
        compare(fn, 0.5, x, y, z)
        compare(fn, 0.5, x, 0.25, y, z)

    def test_blas(fn, x, y):
        compare(fn, x, y)

    # Data-driven table of (checker, op, operand shapes); order matches the
    # original call sequence so the RNG stream is consumed identically.
    cases = [
        (test_blas, torch.mm, [(2, 10), (10, 4)]),
        (test_blas_add, torch.addmm, [(2, 4), (2, 10), (10, 4)]),
        (test_blas, torch.bmm, [(4, 2, 10), (4, 10, 4)]),
        (test_blas_add, torch.addbmm, [(2, 4), (4, 2, 10), (4, 10, 4)]),
        (test_blas_add, torch.baddbmm, [(4, 2, 4), (4, 2, 10), (4, 10, 4)]),
        (test_blas, torch.mv, [(2, 10), (10,)]),
        (test_blas_add, torch.addmv, [(2,), (2, 10), (10,)]),
        (test_blas, torch.ger, [(5,), (6,)]),
        (test_blas_add, torch.addr, [(5, 6), (5,), (6,)]),
    ]
    for checker, fn, shapes in cases:
        checker(fn, *(Variable(torch.randn(*s)) for s in shapes))
def forward(ctx, add_vector, matrix, vector, alpha=1, beta=1, inplace=False):
    """Fused matrix-vector product with an added vector, via torch.addmv.

    Like the plain ctx-based variant, but also records add_vector's size on
    ``ctx`` (presumably so backward can reduce the gradient back to it).
    """
    ctx.alpha = alpha
    ctx.beta = beta
    # Remember the original shape of the additive term for backward.
    ctx.add_vector_size = add_vector.size()
    ctx.save_for_backward(matrix, vector)
    # Resolve the destination buffer; ``inplace`` reuses add_vector's storage.
    out_buf = _get_output(ctx, add_vector, inplace=inplace)
    # NOTE(review): deprecated scalar-prefix addmv overload — kept verbatim.
    return torch.addmv(alpha, add_vector, beta, matrix, vector, out=out_buf)
def exact_posterior_mean(self, test_mean, alpha):
    """Return the exact posterior mean: ``self.var @ alpha + test_mean``.

    Dispatches on the representation of ``self.var``: a LazyVariable only
    supports matmul, while a dense tensor can use the fused torch.addmv.
    """
    covar = self.var
    if isinstance(covar, LazyVariable):
        # Lazy path: materialize-free matvec, then add the test mean.
        return covar.matmul(alpha) + test_mean
    # Dense path: single fused BLAS call.
    return torch.addmv(test_mean, covar, alpha)
def test_addmv(self): types = { 'torch.DoubleTensor': 1e-8, 'torch.FloatTensor': 1e-4, } for tname, _prec in types.items(): t = torch.randn(10).type(tname) m = torch.randn(10, 100).type(tname) v = torch.randn(100).type(tname) res1 = torch.addmv(t, m, v) res2 = torch.zeros(10).type(tname) res2 += t for i in range(10): for j in range(100): res2[i] += m[i, j] * v[j] self.assertEqual(res1, res2) # Test 0-strided for tname, _prec in types.items(): t = torch.randn(1).type(tname).expand(10) m = torch.randn(10, 1).type(tname).expand(10, 100) v = torch.randn(100).type(tname) res1 = torch.addmv(t, m, v) res2 = torch.zeros(10).type(tname) res2 += t for i in range(10): for j in range(100): res2[i] += m[i, j] * v[j] self.assertEqual(res1, res2)
def _test_broadcast_fused_matmul(self, cast):
    """Check that fused matmul ops broadcast their additive first operand.

    For each fused op, a broadcastable (smaller) first operand must give
    the same result as the fully expanded one.
    """
    for fn in ["baddbmm", "addbmm", "addmm", "addmv", "addr"]:
        batch_dim = random.randint(1, 8)
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)
        # Full (non-broadcast) shapes (t0, t1, t2) so that fn(t0, t1, t2)
        # is well defined for each fused op.
        full_shapes = {
            "baddbmm": ([batch_dim, n_dim, p_dim],
                        [batch_dim, n_dim, m_dim],
                        [batch_dim, m_dim, p_dim]),
            "addbmm": ([n_dim, p_dim],
                       [batch_dim, n_dim, m_dim],
                       [batch_dim, m_dim, p_dim]),
            "addmm": ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim]),
            "addmv": ([n_dim], [n_dim, m_dim], [m_dim]),
            "addr": ([n_dim, m_dim], [n_dim], [m_dim]),
        }
        t0_dims_full, t1_dims, t2_dims = full_shapes[fn]
        # Shrink t0 to a shape that still broadcasts up to t0_dims_full.
        t0_dims_small, _, _ = self._select_broadcastable_dims(t0_dims_full)
        t0_small = cast(torch.randn(*t0_dims_small).float())
        t1 = cast(torch.randn(*t1_dims).float())
        t2 = cast(torch.randn(*t2_dims).float())
        t0_full = cast(t0_small.expand(*t0_dims_full))
        op = getattr(torch, fn)
        # Broadcasting t0 must match materializing it with expand().
        self.assertEqual(op(t0_small, t1, t2), op(t0_full, t1, t2))
def test_functional_blas(self):
    """Check torch BLAS wrappers (incl. matmul) on Variable vs plain tensors."""

    def compare(fn, *args):
        # Unwrap Variables and run the op both ways.
        raw_args = tuple(a.data if isinstance(a, Variable) else a for a in args)
        expected = fn(*raw_args)
        actual = fn(*args).data
        # A non-Variable call may return a Python scalar; the Variable call
        # then yields a 1-element tensor, so unwrap it before comparing.
        if not torch.is_tensor(expected):
            assert actual.dim() == 1
            assert actual.nelement() == 1
            actual = actual[0]
        self.assertEqual(actual, expected)

    def test_blas_add(fn, x, y, z):
        # Checks all signatures: plain, alpha-prefixed, alpha+beta-prefixed.
        compare(fn, x, y, z)
        compare(fn, 0.5, x, y, z)
        compare(fn, 0.5, x, 0.25, y, z)

    def test_blas(fn, x, y):
        compare(fn, x, y)

    # Data-driven table of (checker, op, operand shapes); order matches the
    # original call sequence so the RNG stream is consumed identically.
    cases = [
        (test_blas, torch.mm, [(2, 10), (10, 4)]),
        (test_blas_add, torch.addmm, [(2, 4), (2, 10), (10, 4)]),
        (test_blas, torch.bmm, [(4, 2, 10), (4, 10, 4)]),
        (test_blas_add, torch.addbmm, [(2, 4), (4, 2, 10), (4, 10, 4)]),
        (test_blas_add, torch.baddbmm, [(4, 2, 4), (4, 2, 10), (4, 10, 4)]),
        (test_blas, torch.mv, [(2, 10), (10,)]),
        (test_blas_add, torch.addmv, [(2,), (2, 10), (10,)]),
        (test_blas, torch.ger, [(5,), (6,)]),
        (test_blas_add, torch.addr, [(5, 6), (5,), (6,)]),
        # matmul across every supported rank combination.
        (test_blas, torch.matmul, [(6,), (6,)]),
        (test_blas, torch.matmul, [(10, 4), (4,)]),
        (test_blas, torch.matmul, [(5,), (5, 6)]),
        (test_blas, torch.matmul, [(2, 10), (10, 4)]),
        (test_blas, torch.matmul, [(5, 2, 10), (5, 10, 4)]),
        (test_blas, torch.matmul, [(3, 5, 2, 10), (3, 5, 10, 4)]),
        (test_blas, torch.matmul, [(3, 5, 2, 10), (10,)]),
        (test_blas, torch.matmul, [(10,), (3, 5, 10, 4)]),
    ]
    for checker, fn, shapes in cases:
        checker(fn, *(Variable(torch.randn(*s)) for s in shapes))