The following code examples, extracted from open-source Python projects, illustrate how to use torch.equal().
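Before the project examples, a minimal sketch of what torch.equal() checks (the tensor values here are illustrative, not taken from any of the projects below): it returns True only when both tensors have the same size and exactly the same elements, so it is an exact comparison rather than a tolerance-based one.

import torch

a = torch.Tensor([1, 2, 3])
b = torch.Tensor([1, 2, 3])
c = torch.Tensor([1, 2, 4])

print(torch.equal(a, b))                     # True: same shape, same elements
print(torch.equal(a, c))                     # False: one element differs
print(torch.equal(a, torch.Tensor([1, 2])))  # False: shapes differ

The tolerance-based assertions in several examples below (comparing a maximum absolute difference against a precision threshold) exist precisely because torch.equal() offers no approximate mode for floating-point results.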
def assert_tensors_equal(a, b, prec=1e-5, msg=''):
    assert a.size() == b.size(), msg
    if prec == 0:
        assert (a == b).all(), msg
    elif a.numel() > 0:
        b = b.type_as(a)
        b = b.cuda(device=a.get_device()) if a.is_cuda else b.cpu()
        # check that NaNs are in the same locations
        nan_mask = a != a
        assert torch.equal(nan_mask, b != b), msg
        diff = a - b
        diff[nan_mask] = 0
        if diff.is_signed():
            diff = diff.abs()
        max_err = diff.max()
        assert max_err < prec, msg
def test_kronecker_product():
    matrix_list = []
    matrix1 = torch.Tensor([
        [1, 2, 3],
        [4, 5, 6],
    ])
    matrix2 = torch.Tensor([
        [1, 2],
        [4, 3],
    ])
    matrix_list.append(matrix1)
    matrix_list.append(matrix2)
    res = kronecker_product(matrix_list)
    actual = torch.Tensor([
        [1, 2, 2, 4, 3, 6],
        [4, 3, 8, 6, 12, 9],
        [4, 8, 5, 10, 6, 12],
        [16, 12, 20, 15, 24, 18],
    ])
    assert torch.equal(res, actual)
def test_repeat(self):
    initial_shape = (8, 4)
    tensor = torch.rand(*initial_shape)
    size = (3, 1, 1)
    torchSize = torch.Size(size)
    target = [3, 8, 4]
    self.assertEqual(tensor.repeat(*size).size(), target, 'Error in repeat')
    self.assertEqual(tensor.repeat(torchSize).size(), target,
                     'Error in repeat using LongStorage')
    result = tensor.repeat(*size)
    self.assertEqual(result.size(), target, 'Error in repeat using result')
    result = tensor.repeat(torchSize)
    self.assertEqual(result.size(), target,
                     'Error in repeat using result and LongStorage')
    self.assertEqual(result.mean(0).view(8, 4), tensor,
                     'Error in repeat (not equal)')
def test_repeat(self):
    result = torch.Tensor()
    tensor = torch.rand(8, 4)
    size = (3, 1, 1)
    torchSize = torch.Size(size)
    target = [3, 8, 4]
    self.assertEqual(tensor.repeat(*size).size(), target, 'Error in repeat')
    self.assertEqual(tensor.repeat(torchSize).size(), target,
                     'Error in repeat using LongStorage')
    result = tensor.repeat(*size)
    self.assertEqual(result.size(), target, 'Error in repeat using result')
    result = tensor.repeat(torchSize)
    self.assertEqual(result.size(), target,
                     'Error in repeat using result and LongStorage')
    self.assertEqual((result.mean(0).view(8, 4) - tensor).abs().max(), 0,
                     'Error in repeat (not equal)')
def test_equal(self):
    # Contiguous, 1D
    t1 = torch.Tensor((3, 4, 9, 10))
    t2 = t1.contiguous()
    t3 = torch.Tensor((1, 9, 3, 10))
    t4 = torch.Tensor((3, 4, 9))
    t5 = torch.Tensor()
    self.assertTrue(t1.equal(t2))
    self.assertFalse(t1.equal(t3))
    self.assertFalse(t1.equal(t4))
    self.assertFalse(t1.equal(t5))
    self.assertTrue(torch.equal(t1, t2))
    self.assertFalse(torch.equal(t1, t3))
    self.assertFalse(torch.equal(t1, t4))
    self.assertFalse(torch.equal(t1, t5))

    # Non contiguous, 2D
    s = torch.Tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
    s1 = s[:, 1:3]
    s2 = s1.clone()
    s3 = torch.Tensor(((2, 3), (6, 7)))
    s4 = torch.Tensor(((0, 0), (0, 0)))
    self.assertFalse(s1.is_contiguous())
    self.assertTrue(s1.equal(s2))
    self.assertTrue(s1.equal(s3))
    self.assertFalse(s1.equal(s4))
    self.assertTrue(torch.equal(s1, s2))
    self.assertTrue(torch.equal(s1, s3))
    self.assertFalse(torch.equal(s1, s4))
def test_archiving(self):
    # copy params, since they'll get consumed during training
    params_copy = copy.deepcopy(self.params.as_dict())

    # `train_model` should create an archive
    model = train_model(self.params, serialization_dir=self.TEST_DIR)

    archive_path = os.path.join(self.TEST_DIR, "model.tar.gz")

    # load from the archive
    archive = load_archive(archive_path)
    model2 = archive.model

    # check that model weights are the same
    keys = set(model.state_dict().keys())
    keys2 = set(model2.state_dict().keys())
    assert keys == keys2

    for key in keys:
        assert torch.equal(model.state_dict()[key], model2.state_dict()[key])

    # check that vocabularies are the same
    vocab = model.vocab
    vocab2 = model2.vocab
    assert vocab._token_to_index == vocab2._token_to_index  # pylint: disable=protected-access
    assert vocab._index_to_token == vocab2._index_to_token  # pylint: disable=protected-access

    # check that params are the same
    params2 = archive.config
    assert params2.as_dict() == params_copy
def test_regex_matches_are_initialized_correctly(self):
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.linear_1_with_funky_name = torch.nn.Linear(5, 10)
            self.linear_2 = torch.nn.Linear(10, 5)
            self.conv = torch.nn.Conv1d(5, 5, 5)

        def forward(self, inputs):  # pylint: disable=arguments-differ
            pass

    # pyhocon does funny things if there's a . in a key. This test makes sure
    # that we handle these kinds of regexes correctly.
    json_params = """{"initializer": [
        ["conv", {"type": "constant", "val": 5}],
        ["funky_na.*bi", {"type": "constant", "val": 7}]
    ]}
    """
    params = Params(pyhocon.ConfigFactory.parse_string(json_params))
    initializers = InitializerApplicator.from_params(params['initializer'])
    model = Net()
    initializers(model)

    for parameter in model.conv.parameters():
        assert torch.equal(parameter.data, torch.ones(parameter.size()) * 5)

    parameter = model.linear_1_with_funky_name.bias
    assert torch.equal(parameter.data, torch.ones(parameter.size()) * 7)
def assert_not_equal(x, y, prec=1e-5, msg=''):
    try:
        assert_equal(x, y, prec)
    except AssertionError:
        # the values differ, which is what we wanted
        return
    raise AssertionError("{} \nValues are equal: x={}, y={}, prec={}".format(msg, x, y, prec))
def test_random_module(nn_module):
    pyro.clear_param_store()
    nn_module = nn_module()
    p = Variable(torch.ones(2, 2))
    prior = dist.Bernoulli(p)
    lifted_mod = pyro.random_module("module", nn_module, prior)
    nn_module = lifted_mod()
    for name, parameter in nn_module.named_parameters():
        assert torch.equal(torch.ones(2, 2), parameter.data)
def assertNotEqual(self, x, y, prec=None, message=''):
    if prec is None:
        prec = self.precision
    if isinstance(x, Variable) and isinstance(y, Variable):
        x = x.data
        y = y.data

    if torch.is_tensor(x) and torch.is_tensor(y):
        if x.size() != y.size():
            super(TestCase, self).assertNotEqual(x.size(), y.size())
        self.assertGreater(x.numel(), 0)
        y = y.type_as(x)
        y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()
        nan_mask = x != x
        if torch.equal(nan_mask, y != y):
            diff = x - y
            if diff.is_signed():
                diff = diff.abs()
            diff[nan_mask] = 0
            max_err = diff.max()
            self.assertGreaterEqual(max_err, prec, message)
    elif type(x) == str and type(y) == str:
        super(TestCase, self).assertNotEqual(x, y)
    elif is_iterable(x) and is_iterable(y):
        super(TestCase, self).assertNotEqual(x, y)
    else:
        try:
            self.assertGreaterEqual(abs(x - y), prec, message)
            return
        except (TypeError, AssertionError):
            pass
        super(TestCase, self).assertNotEqual(x, y, message)
def test_python_ir(self):
    x = Variable(torch.Tensor([0.4]), requires_grad=True)
    y = Variable(torch.Tensor([0.7]), requires_grad=True)

    def doit(x, y):
        return torch.sigmoid(torch.tanh(x * (x + y)))

    traced, _ = torch.jit.trace(doit, (x, y))
    g = torch._C._jit_get_graph(traced)
    g2 = torch._C.Graph()
    g_to_g2 = {}
    for node in g.inputs():
        g_to_g2[node] = g2.addInput()
    for node in g.nodes():
        if node.kind() == "PythonOp":
            n_ = g2.create(node.pyname(),
                           [g_to_g2[i] for i in node.inputs()]) \
                   .setType(node.typeOption()) \
                   .s_("note", "from_pyop") \
                   .i_("some_value", len(node.scalar_args()))
            assert n_.i("some_value") == len(node.scalar_args())
        else:
            n_ = g2.createClone(node, lambda x: g_to_g2[x])
            assert n_.kindOf("Offset") == "i"
        g_to_g2[node] = g2.appendNode(n_)
    for node in g.outputs():
        g2.registerOutput(g_to_g2[node])

    t_node = g2.create("TensorTest").t_("a", torch.ones([2, 2]))
    assert t_node.attributeNames() == ["a"]
    g2.appendNode(t_node)
    assert torch.equal(torch.ones([2, 2]), t_node.t("a"))
    self.assertExpected(str(g2))
def test_vector_to_parameters(self):
    conv1 = nn.Conv2d(3, 10, 5)
    fc1 = nn.Linear(10, 20)
    model = nn.Sequential(conv1, fc1)

    vec = Variable(torch.arange(0, 980))
    vector_to_parameters(vec, model.parameters())

    sample = next(model.parameters())[0, 0, 0]
    self.assertTrue(torch.equal(sample.data, vec.data[:5]))
def _test_InstanceNorm(self, cls, input):
    b, c = input.size(0), input.size(1)
    input_var = Variable(input)

    IN = cls(c, eps=0)

    output = IN(input_var)
    out_reshaped = output.transpose(1, 0).contiguous().view(c, -1)

    mean = out_reshaped.mean(1)
    var = out_reshaped.var(1, unbiased=False)

    self.assertAlmostEqual(torch.abs(mean.data).mean(), 0, delta=1e-5)
    self.assertAlmostEqual(torch.abs(var.data).mean(), 1, delta=1e-5)

    # If momentum==1 running_mean/var should be
    # equal to mean/var of the input
    IN = cls(c, momentum=1, eps=0)

    output = IN(input_var)

    input_reshaped = input_var.transpose(1, 0).contiguous().view(c, -1)
    mean = input_reshaped.mean(1)

    input_reshaped = input_var.transpose(1, 0).contiguous().view(c, b, -1)
    var = input_reshaped.var(2, unbiased=True)[:, :]

    self.assertAlmostEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, delta=1e-5)
    self.assertAlmostEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, delta=1e-5)
def test_add_diag():
    lazy_var = make_mul_lazy_var()[0]
    assert torch.equal(lazy_var.evaluate().data,
                       (t1_t2_t3_eval + added_diag.diag()))
def test_evaluate():
    diag_lv = DiagLazyVariable(Variable(diag))
    assert torch.equal(diag_lv.evaluate().data, diag.diag())
def test_get_item():
    diag_lv = DiagLazyVariable(Variable(diag))
    diag_ev = diag_lv.evaluate()
    assert torch.equal(diag_lv[0:2].evaluate().data, diag_ev[0:2].data)
def test_add_diag():
    diag = Variable(torch.Tensor([4]))
    lazy_var = make_sum_lazy_var().add_diag(diag)
    assert torch.equal(lazy_var.evaluate().data,
                       (t1_eval + t2_eval + torch.eye(4) * 4))
def test_sparse_eye():
    res = sparse_eye(5)
    actual = torch.eye(5)
    assert torch.equal(res.to_dense(), actual)
def test_sparse_getitem_one_dim_slice():
    actual = dense[2:4]
    res = sparse_getitem(sparse, slice(2, 4))
    assert torch.equal(actual, res.to_dense())
def test_sparse_getitem_two_dim_int_slice():
    actual = dense[:, 1]
    res = sparse_getitem(sparse, (slice(None, None, None), 1))
    assert torch.equal(actual, res.to_dense())

    actual = dense[1, :]
    res = sparse_getitem(sparse, (1, slice(None, None, None)))
    assert torch.equal(actual, res.to_dense())
def test_sparse_getitem_two_dim_slice():
    actual = dense[2:4, 1:3]
    res = sparse_getitem(sparse, (slice(2, 4), slice(1, 3)))
    assert torch.equal(actual, res.to_dense())
def test_sparse_repeat_1d():
    sparse_1d = sparse_getitem(sparse, 1)

    actual = sparse_1d.to_dense().repeat(3, 1)
    res = sparse_repeat(sparse_1d, 3, 1)
    assert torch.equal(actual, res.to_dense())

    actual = sparse_1d.to_dense().repeat(2, 3)
    res = sparse_repeat(sparse_1d, 2, 3)
    assert torch.equal(actual, res.to_dense())
def test_sparse_repeat_2d():
    actual = sparse.to_dense().repeat(3, 2)
    res = sparse_repeat(sparse, 3, 2)
    assert torch.equal(actual, res.to_dense())

    actual = sparse.to_dense().repeat(1, 2)
    res = sparse_repeat(sparse, 1, 2)
    assert torch.equal(actual, res.to_dense())

    actual = sparse.to_dense().repeat(3, 1)
    res = sparse_repeat(sparse, 3, 1)
    assert torch.equal(actual, res.to_dense())
def test_trace_components_normal_matrices():
    a_mat = torch.randn(3, 4)
    b_mat = torch.randn(3, 4)

    a_res, b_res = trace_components(a_mat, b_mat)
    assert torch.equal(a_res, a_mat)
    assert torch.equal(b_res, b_mat)
def test_sym_toeplitz_constructs_tensor_from_vector():
    c = torch.Tensor([1, 6, 4, 5])

    res = utils.toeplitz.sym_toeplitz(c)
    actual = torch.Tensor([
        [1, 6, 4, 5],
        [6, 1, 6, 4],
        [4, 6, 1, 6],
        [5, 4, 6, 1],
    ])

    assert torch.equal(res, actual)
def test_reverse():
    input = torch.Tensor([
        [1, 2, 3],
        [4, 5, 6],
    ])
    res = torch.Tensor([
        [3, 2, 1],
        [6, 5, 4],
    ])
    assert torch.equal(utils.reverse(input, dim=1), res)
def test_rcumsum():
    input = torch.Tensor([
        [1, 2, 3],
        [4, 5, 6],
    ])
    res = torch.Tensor([
        [6, 5, 3],
        [15, 11, 6],
    ])
    assert torch.equal(utils.rcumsum(input, dim=1), res)
def test_input_dropout_WITH_PROB_ZERO(self):
    rnn = EncoderRNN(self.vocab_size, 50, 16, input_dropout_p=0)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    output1, _ = rnn(self.input_var, self.lengths)
    output2, _ = rnn(self.input_var, self.lengths)

    self.assertTrue(torch.equal(output1.data, output2.data))
def test_input_dropout_WITH_NON_ZERO_PROB(self):
    rnn = EncoderRNN(self.vocab_size, 50, 16, input_dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _ = rnn(self.input_var, self.lengths)
        output2, _ = rnn(self.input_var, self.lengths)
        if not torch.equal(output1.data, output2.data):
            equal = False
            break

    self.assertFalse(equal)
def test_dropout_WITH_PROB_ZERO(self):
    rnn = EncoderRNN(self.vocab_size, 50, 16, dropout_p=0)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    output1, _ = rnn(self.input_var, self.lengths)
    output2, _ = rnn(self.input_var, self.lengths)

    self.assertTrue(torch.equal(output1.data, output2.data))
def test_dropout_WITH_NON_ZERO_PROB(self):
    # It's critical to set n_layers=2 here since dropout won't work
    # when the RNN only has one layer, according to pytorch's doc
    rnn = EncoderRNN(self.vocab_size, 50, 16, n_layers=2, dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _ = rnn(self.input_var, self.lengths)
        output2, _ = rnn(self.input_var, self.lengths)
        if not torch.equal(output1.data, output2.data):
            equal = False
            break

    self.assertFalse(equal)
def test_k_1(self):
    """
    When k=1, the output of topk decoder should be the same as a normal decoder.
    """
    batch_size = 1
    eos = 1

    for _ in range(10):
        # Repeat the randomized test multiple times
        decoder = DecoderRNN(self.vocab_size, 50, 16, 0, eos)
        for param in decoder.parameters():
            param.data.uniform_(-1, 1)
        topk_decoder = TopKDecoder(decoder, 1)

        output, _, other = decoder()
        output_topk, _, other_topk = topk_decoder()

        self.assertEqual(len(output), len(output_topk))

        finished = [False] * batch_size
        seq_scores = [0] * batch_size

        for t_step, t_output in enumerate(output):
            score, _ = t_output.topk(1)
            symbols = other['sequence'][t_step]
            for b in range(batch_size):
                seq_scores[b] += score[b].data[0]
                symbol = symbols[b].data[0]
                if not finished[b] and symbol == eos:
                    finished[b] = True
                    self.assertEqual(other_topk['length'][b], t_step + 1)
                    self.assertTrue(np.isclose(seq_scores[b], other_topk['score'][b][0]))
                if not finished[b]:
                    symbol_topk = other_topk['topk_sequence'][t_step][b].data[0][0]
                    self.assertEqual(symbol, symbol_topk)
                    self.assertTrue(torch.equal(t_output.data, output_topk[t_step].data))
            if sum(finished) == batch_size:
                break
def test_input_dropout_WITH_NON_ZERO_PROB(self):
    rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, input_dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _, _ = rnn()
        output2, _, _ = rnn()
        if not torch.equal(output1[0].data, output2[0].data):
            equal = False
            break

    self.assertFalse(equal)
def test_dropout_WITH_PROB_ZERO(self):
    rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, dropout_p=0)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    output1, _, _ = rnn()
    output2, _, _ = rnn()

    for prob1, prob2 in zip(output1, output2):
        self.assertTrue(torch.equal(prob1.data, prob2.data))
def test_dropout_WITH_NON_ZERO_PROB(self):
    rnn = DecoderRNN(self.vocab_size, 50, 16, 0, 1, n_layers=2, dropout_p=0.5)
    for param in rnn.parameters():
        param.data.uniform_(-1, 1)

    equal = True
    for _ in range(50):
        output1, _, _ = rnn()
        output2, _, _ = rnn()
        if not torch.equal(output1[0].data, output2[0].data):
            equal = False
            break

    self.assertFalse(equal)
def word_dropout_mask(X, dropout_rate, reserved_codes=()):
    """
    Computes a binary mask across batch examples based on a
    bernoulli distribution with mean equal to dropout_rate.
    """
    probs = torch.zeros_like(X).float() + dropout_rate
    # zero reserved_codes (avoid dropping reserved symbols)
    if len(reserved_codes) > 0:
        probs[sum((X == x) for x in reserved_codes)] = 0
    # return binary mask
    return torch.bernoulli(probs).byte()
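A brief usage sketch for the helper above. The token ids and reserved codes here are made-up values for illustration, assuming X is a LongTensor batch of token ids: positions matching a reserved code are never dropped, and every other position is dropped independently with probability dropout_rate.

# Hypothetical example: a 2x4 batch of token ids where 0 (pad) and 1 (eos)
# are reserved and must never be dropped.
X = torch.LongTensor([[0, 5, 7, 1],
                      [0, 2, 9, 1]])
mask = word_dropout_mask(X, dropout_rate=0.2, reserved_codes=(0, 1))
# mask has X's shape; entries are 1 where the token should be dropped,
# and are guaranteed to be 0 at the reserved positions.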
def test_python_ir(self):
    x = Variable(torch.Tensor([0.4]), requires_grad=True)
    y = Variable(torch.Tensor([0.7]), requires_grad=True)

    def doit(x, y):
        return torch.sigmoid(torch.tanh(x * (x + y)))

    traced, _ = torch.jit.trace(doit, (x, y))
    g = torch._C._jit_get_graph(traced)
    g2 = torch._C.Graph()
    g_to_g2 = {}
    for node in g.inputs():
        g_to_g2[node] = g2.addInput()
    for node in g.nodes():
        n_ = g2.createClone(node, lambda x: g_to_g2[x])
        g2.appendNode(n_)
        for o, no in zip(node.outputs(), n_.outputs()):
            g_to_g2[o] = no

    for node in g.outputs():
        g2.registerOutput(g_to_g2[node])

    t_node = g2.create("TensorTest").t_("a", torch.ones([2, 2]))
    assert t_node.attributeNames() == ["a"]
    g2.appendNode(t_node)
    assert torch.equal(torch.ones([2, 2]), t_node.t("a"))
    self.assertExpected(str(g2))
def test_orthogonal(self):
    for as_variable in [True, False]:
        for use_gain in [True, False]:
            for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
                input_tensor = torch.zeros(tensor_size)
                gain = 1.0
                if as_variable:
                    input_tensor = Variable(input_tensor)

                if use_gain:
                    gain = self._random_float(0.1, 2)
                    init.orthogonal(input_tensor, gain=gain)
                else:
                    init.orthogonal(input_tensor)

                if as_variable:
                    input_tensor = input_tensor.data

                rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
                flattened_tensor = input_tensor.view(rows, cols)
                if rows > cols:
                    self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
                                     torch.eye(cols) * gain ** 2, prec=1e-6)
                else:
                    self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
                                     torch.eye(rows) * gain ** 2, prec=1e-6)

# Generates rand tensor with non-equal values. This ensures that duplicate
# values won't be causing test failure for modules like MaxPooling.
# size should be small, otherwise randperm fails / long overflows.
def assertNotEqual(self, x, y, prec=None, message=''):
    if prec is None:
        prec = self.precision
    x, y = self.unwrapVariables(x, y)

    if torch.is_tensor(x) and torch.is_tensor(y):
        if x.size() != y.size():
            super(TestCase, self).assertNotEqual(x.size(), y.size())
        self.assertGreater(x.numel(), 0)
        y = y.type_as(x)
        y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()
        nan_mask = x != x
        if torch.equal(nan_mask, y != y):
            diff = x - y
            if diff.is_signed():
                diff = diff.abs()
            diff[nan_mask] = 0
            max_err = diff.max()
            self.assertGreaterEqual(max_err, prec, message)
    elif type(x) == str and type(y) == str:
        super(TestCase, self).assertNotEqual(x, y)
    elif is_iterable(x) and is_iterable(y):
        super(TestCase, self).assertNotEqual(x, y)
    else:
        try:
            self.assertGreaterEqual(abs(x - y), prec, message)
            return
        except (TypeError, AssertionError):
            pass
        super(TestCase, self).assertNotEqual(x, y, message)
def test_erfinv(self):
    def checkType(tensor):
        inputValues = torch.randn(4, 4, out=tensor()).clamp(-2., 2.)
        self.assertEqual(tensor(inputValues).erf().erfinv(), tensor(inputValues))
        # test inf
        self.assertTrue(torch.equal(tensor([-1, 1]).erfinv(),
                                    tensor([float('-inf'), float('inf')])))
        # test nan
        self.assertEqual(tensor([-2, 2]).erfinv(), tensor([float('nan'), float('nan')]))

    checkType(torch.FloatTensor)
    checkType(torch.DoubleTensor)