The following 50 code examples, drawn from open-source Python projects, illustrate how to use torch.range().
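A note before the examples: unlike torch.arange, torch.range(start, end, step) includes its end point, and it has long been deprecated in favor of torch.arange. Below is a minimal sketch of the difference, assuming a PyTorch version recent enough to have torch.arange but old enough that torch.range is still callable:

import torch

# torch.range includes the end point and returns a float tensor;
# recent PyTorch versions emit a deprecation warning here.
a = torch.range(0, 4)    # tensor([0., 1., 2., 3., 4.])

# torch.arange excludes the end point, like Python's built-in range();
# with integer arguments it returns an integer tensor.
b = torch.arange(0, 4)   # tensor([0, 1, 2, 3])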
def __init__(self, inputsize, outputsize, bias=True):
    super(PartialLinear, self).__init__()

    # define the layer as a small network:
    pt = ParallelTable()
    pt.add(Identity()).add(LookupTable(outputsize, inputsize))
    self.network = Sequential().add(pt).add(MM(False, True))
    if bias:
        self.bias = torch.zeros(1, outputsize)
        self.gradBias = torch.zeros(1, outputsize)
    else:
        self.bias = self.gradBias = None

    # set partition:
    self.inputsize = inputsize
    self.outputsize = outputsize
    self.allcolumns = torch.range(0, self.outputsize-1).long()
    self.resetPartition()
    self.addBuffer = None
    self.buffer = None
def test_load_parameter_dict(self):
    l = nn.Linear(5, 5)
    block = nn.Container(
        conv=nn.Conv2d(3, 3, 3, bias=False)
    )
    net = nn.Container(
        linear1=l,
        linear2=l,
        block=block,
        empty=None,
    )
    param_dict = {
        'linear1.weight': Variable(torch.ones(5, 5)),
        'block.conv.bias': Variable(torch.range(1, 3)),
    }
    net.load_parameter_dict(param_dict)
    self.assertIs(net.linear1.weight, param_dict['linear1.weight'])
    self.assertIs(net.block.conv.bias, param_dict['block.conv.bias'])
def assertIsOrdered(self, order, x, mxx, ixx, task):
    SIZE = 4
    if order == 'descending':
        check_order = lambda a, b: a >= b
    elif order == 'ascending':
        check_order = lambda a, b: a <= b
    else:
        raise ValueError('unknown order "{}", must be "ascending" or "descending"'.format(order))

    are_ordered = True
    for j, k in product(range(SIZE), range(1, SIZE)):
        self.assertTrue(check_order(mxx[j][k-1], mxx[j][k]),
                        'torch.sort ({}) values unordered for {}'.format(order, task))

    seen = set()
    indicesCorrect = True
    size = x.size(x.dim()-1)
    for k in range(size):
        seen.clear()
        for j in range(size):
            self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
                             'torch.sort ({}) indices wrong for {}'.format(order, task))
            seen.add(ixx[k][j])
        self.assertEqual(len(seen), size)
def test_cat(self):
    SIZE = 10

    # 2-arg cat
    for dim in range(3):
        x = torch.rand(13, SIZE, SIZE).transpose(0, dim)
        y = torch.rand(17, SIZE, SIZE).transpose(0, dim)
        res1 = torch.cat((x, y), dim)
        self.assertEqual(res1.narrow(dim, 0, 13), x, 0)
        self.assertEqual(res1.narrow(dim, 13, 17), y, 0)

    # Check iterables
    for dim in range(3):
        x = torch.rand(13, SIZE, SIZE).transpose(0, dim)
        y = torch.rand(17, SIZE, SIZE).transpose(0, dim)
        z = torch.rand(19, SIZE, SIZE).transpose(0, dim)
        res1 = torch.cat((x, y, z), dim)
        self.assertEqual(res1.narrow(dim, 0, 13), x, 0)
        self.assertEqual(res1.narrow(dim, 13, 17), y, 0)
        self.assertEqual(res1.narrow(dim, 30, 19), z, 0)

    self.assertRaises(ValueError, lambda: torch.cat([]))
def test_index_copy(self):
    num_copy, num_dest = 3, 20
    dest = torch.randn(num_dest, 4, 5)
    src = torch.randn(num_copy, 4, 5)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
    dest2 = dest.clone()
    dest.index_copy_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]].copy_(src[i])
    self.assertEqual(dest, dest2, 0)

    dest = torch.randn(num_dest)
    src = torch.randn(num_copy)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
    dest2 = dest.clone()
    dest.index_copy_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]] = src[i]
    self.assertEqual(dest, dest2, 0)
def test_index_add(self):
    num_copy, num_dest = 3, 3
    dest = torch.randn(num_dest, 4, 5)
    src = torch.randn(num_copy, 4, 5)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
    dest2 = dest.clone()
    dest.index_add_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]].add_(src[i])
    self.assertEqual(dest, dest2)

    dest = torch.randn(num_dest)
    src = torch.randn(num_copy)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
    dest2 = dest.clone()
    dest.index_add_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]] = dest2[idx[i]] + src[i]
    self.assertEqual(dest, dest2)
def test_scatter(self):
    m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
    elems_per_row = random.randint(1, 10)
    dim = random.randrange(3)

    idx_size = [m, n, o]
    idx_size[dim] = elems_per_row
    idx = torch.LongTensor().resize_(*idx_size)
    self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)

    src = torch.Tensor().resize_(*idx_size).normal_()

    actual = torch.zeros(m, n, o).scatter_(dim, idx, src)
    expected = torch.zeros(m, n, o)
    for i in range(idx_size[0]):
        for j in range(idx_size[1]):
            for k in range(idx_size[2]):
                ii = [i, j, k]
                ii[dim] = idx[i, j, k]
                expected[tuple(ii)] = src[i, j, k]
    self.assertEqual(actual, expected, 0)

    idx[0][0][0] = 34
    self.assertRaises(RuntimeError,
                      lambda: torch.zeros(m, n, o).scatter_(dim, idx, src))
def test_masked_copy(self):
    num_copy, num_dest = 3, 10
    dest = torch.randn(num_dest)
    src = torch.randn(num_copy)
    mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
    dest2 = dest.clone()
    dest.masked_copy_(mask, src)
    j = 0
    for i in range(num_dest):
        if mask[i]:
            dest2[i] = src[j]
            j += 1
    self.assertEqual(dest, dest2, 0)

    # make source bigger than number of 1s in mask
    src = torch.randn(num_dest)
    dest.masked_copy_(mask, src)

    # make src smaller. this should fail
    src = torch.randn(num_copy - 1)
    with self.assertRaises(RuntimeError):
        dest.masked_copy_(mask, src)
def test_deepcopy(self):
    from copy import deepcopy
    a = torch.randn(5, 5)
    b = torch.randn(5, 5)
    c = a.view(25)
    q = [a, [a.storage(), b.storage()], b, c]
    w = deepcopy(q)
    self.assertEqual(w[0], q[0], 0)
    self.assertEqual(w[1][0], q[1][0], 0)
    self.assertEqual(w[1][1], q[1][1], 0)
    self.assertEqual(w[1], q[1], 0)
    self.assertEqual(w[2], q[2], 0)

    # Check that deepcopy preserves sharing
    w[0].add_(1)
    for i in range(a.numel()):
        self.assertEqual(w[1][0][i], q[1][0][i] + 1)
    self.assertEqual(w[3], c + 1)
    w[2].sub_(1)
    for i in range(a.numel()):
        self.assertEqual(w[1][1][i], q[1][1][i] - 1)
def testClassErrorMeter(self):
    mtr = meter.ClassErrorMeter(topk=[1])
    output = torch.eye(3)
    if hasattr(torch, "arange"):
        target = torch.arange(0, 3)
    else:
        target = torch.range(0, 2)
    mtr.add(output, target)
    err = mtr.value()

    self.assertEqual(err, [0], "All should be correct")

    target[0] = 1
    target[1] = 0
    target[2] = 0
    mtr.add(output, target)

    err = mtr.value()
    self.assertEqual(err, [50.0], "Half should be correct")
def __getitem__(self, index):
    assert index <= len(self), 'index range error'
    index += 1
    with self.env.begin(write=False) as txn:
        img_key = 'image-%09d' % index
        imgbuf = txn.get(img_key)

        buf = six.BytesIO()
        buf.write(imgbuf)
        buf.seek(0)
        try:
            img = Image.open(buf).convert('L')
        except IOError:
            print('Corrupted image for %d' % index)
            return self[index + 1]

        if self.transform is not None:
            img = self.transform(img)

        label_key = 'label-%09d' % index
        label = str(txn.get(label_key))

        if self.target_transform is not None:
            label = self.target_transform(label)

    return (img, label)
def test_forward_backward(self):
    import torch
    import torch.nn.functional as F
    from torch.autograd import Variable
    from reid.loss import OIMLoss
    criterion = OIMLoss(3, 3, scalar=1.0, size_average=False)
    criterion.lut = torch.eye(3)
    x = Variable(torch.randn(3, 3), requires_grad=True)
    y = Variable(torch.range(0, 2).long())
    loss = criterion(x, y)
    loss.backward()
    probs = F.softmax(x)
    grads = probs.data - torch.eye(3)
    abs_diff = torch.abs(grads - x.grad.data)
    self.assertEquals(torch.log(probs).diag().sum(), -loss)
    self.assertTrue(torch.max(abs_diff) < 1e-6)
def test_cat(self):
    SIZE = 10
    for dim in range(-3, 3):
        pos_dim = dim if dim >= 0 else 3 + dim
        x = torch.rand(13, SIZE, SIZE).transpose(0, pos_dim)
        y = torch.rand(17, SIZE, SIZE).transpose(0, pos_dim)
        z = torch.rand(19, SIZE, SIZE).transpose(0, pos_dim)

        res1 = torch.cat((x, y, z), dim)
        self.assertEqual(res1.narrow(pos_dim, 0, 13), x, 0)
        self.assertEqual(res1.narrow(pos_dim, 13, 17), y, 0)
        self.assertEqual(res1.narrow(pos_dim, 30, 19), z, 0)

    x = torch.randn(20, SIZE, SIZE)
    self.assertEqual(torch.cat(torch.split(x, 7)), x)
    self.assertEqual(torch.cat(torch.chunk(x, 7)), x)

    y = torch.randn(1, SIZE, SIZE)
    z = torch.cat([x, y])
    self.assertEqual(z.size(), (21, SIZE, SIZE))

    self.assertRaises(RuntimeError, lambda: torch.cat([]))
def test_index_add(self):
    num_copy, num_dest = 3, 3
    dest = torch.randn(num_dest, 4, 5)
    src = torch.randn(num_copy, 4, 5)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
    dest2 = dest.clone()
    dest.index_add_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]].add_(src[i])
    self.assertEqual(dest, dest2)

    dest = torch.randn(num_dest)
    src = torch.randn(num_copy)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
    dest2 = dest.clone()
    dest.index_add_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]] = dest2[idx[i]] + src[i]
    self.assertEqual(dest, dest2)
def test_index_copy(self):
    num_copy, num_dest = 3, 20
    dest = torch.randn(num_dest, 4, 5)
    src = torch.randn(num_copy, 4, 5)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
    dest2 = dest.clone()
    dest.index_copy_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]].copy_(src[i])
    self.assertEqual(dest, dest2, 0)

    dest = torch.randn(num_dest)
    src = torch.randn(num_copy)
    idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
    dest2 = dest.clone()
    dest.index_copy_(0, idx, src)
    for i in range(idx.size(0)):
        dest2[idx[i]] = src[i]
    self.assertEqual(dest, dest2, 0)
def test_ger(self):
    types = {
        'torch.DoubleTensor': 1e-8,
        'torch.FloatTensor': 1e-4,
    }
    for tname, _prec in types.items():
        v1 = torch.randn(100).type(tname)
        v2 = torch.randn(100).type(tname)
        res1 = torch.ger(v1, v2)
        res2 = torch.zeros(100, 100).type(tname)
        for i in range(100):
            for j in range(100):
                res2[i, j] = v1[i] * v2[j]
        self.assertEqual(res1, res2)

    # Test 0-strided
    for tname, _prec in types.items():
        v1 = torch.randn(1).type(tname).expand(100)
        v2 = torch.randn(100).type(tname)
        res1 = torch.ger(v1, v2)
        res2 = torch.zeros(100, 100).type(tname)
        for i in range(100):
            for j in range(100):
                res2[i, j] = v1[i] * v2[j]
        self.assertEqual(res1, res2)
def test_masked_scatter(self):
    num_copy, num_dest = 3, 10
    dest = torch.randn(num_dest)
    src = torch.randn(num_copy)
    mask = torch.ByteTensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0))
    dest2 = dest.clone()
    dest.masked_scatter_(mask, src)
    j = 0
    for i in range(num_dest):
        if mask[i]:
            dest2[i] = src[j]
            j += 1
    self.assertEqual(dest, dest2, 0)

    # make source bigger than number of 1s in mask
    src = torch.randn(num_dest)
    dest.masked_scatter_(mask, src)

    # make src smaller. this should fail
    src = torch.randn(num_copy - 1)
    with self.assertRaises(RuntimeError):
        dest.masked_scatter_(mask, src)
def test_serialization_backwards_compat(self):
    a = [torch.arange(1 + i, 26 + i).view(5, 5).float() for i in range(2)]
    b = [a[i % 2] for i in range(4)]
    b += [a[0].storage()]
    b += [a[0].storage()[1:4]]
    path = download_file('https://download.pytorch.org/test_data/legacy_serialized.pt')
    c = torch.load(path)
    self.assertEqual(b, c, 0)
    self.assertTrue(isinstance(c[0], torch.FloatTensor))
    self.assertTrue(isinstance(c[1], torch.FloatTensor))
    self.assertTrue(isinstance(c[2], torch.FloatTensor))
    self.assertTrue(isinstance(c[3], torch.FloatTensor))
    self.assertTrue(isinstance(c[4], torch.FloatStorage))
    c[0].fill_(10)
    self.assertEqual(c[0], c[2], 0)
    self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), 0)
    c[1].fill_(20)
    self.assertEqual(c[1], c[3], 0)
    self.assertEqual(c[4][1:4], c[5], 0)
def eliminate_rows(self, prob_sc, ind, phis):
    """ eliminate rows of phis and prob_matrix scale """
    length = prob_sc.size()[1]
    mask = (prob_sc[:, :, 0] > 0.85).type(dtype)
    rang = (Variable(torch.range(0, length - 1).unsqueeze(0)
                     .expand_as(mask))
            .type(dtype))
    ind_sc = torch.sort(rang * (1 - mask) + length * mask, 1)[1]
    # permute prob_sc
    m = mask.unsqueeze(2).expand_as(prob_sc)
    mm = m.clone()
    mm[:, :, 1:] = 0
    prob_sc = (torch.gather(prob_sc * (1 - m) + mm, 1,
                            ind_sc.unsqueeze(2).expand_as(prob_sc)))
    # compose permutations
    ind = torch.gather(ind, 1, ind_sc)
    active = torch.gather(1 - mask, 1, ind_sc)
    # permute phis
    active1 = active.unsqueeze(2).expand_as(phis)
    ind1 = ind.unsqueeze(2).expand_as(phis)
    active2 = active.unsqueeze(1).expand_as(phis)
    ind2 = ind.unsqueeze(1).expand_as(phis)
    phis_out = torch.gather(phis, 1, ind1) * active1
    phis_out = torch.gather(phis_out, 2, ind2) * active2
    return prob_sc, ind, phis_out, active
def __iter__(self):
    n_batch = len(self) // self.batch_size
    tail = len(self) % self.batch_size
    index = torch.LongTensor(len(self)).fill_(0)
    for i in range(n_batch):
        random_start = random.randint(0, len(self) - self.batch_size)
        batch_index = random_start + torch.range(0, self.batch_size - 1)
        index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
    # deal with tail
    if tail:
        random_start = random.randint(0, len(self) - self.batch_size)
        tail_index = random_start + torch.range(0, tail - 1)
        index[(i + 1) * self.batch_size:] = tail_index

    return iter(index)
def updateGradInput(self, input, gradOutput):
    input, mask = input
    if input.type() == 'torch.cuda.FloatTensor':
        torch.range(self._maskIndexBufferCPU, 0, mask.nelement()-1).resize_(mask.size())
        self._maskIndexBuffer.resize_(self._maskIndexBufferCPU.size()).copy_(self._maskIndexBufferCPU)
    else:
        torch.range(self._maskIndexBuffer, 0, mask.nelement()-1).resize_(mask.size())

    torch.masked_select(self._maskIndices, self._maskIndexBuffer, mask)
    self._gradBuffer.resize_(input.nelement()).zero_()
    self._gradBuffer.scatter_(0, self._maskIndices, gradOutput)
    self._gradBuffer.resize_(input.size())
    self.gradInput = [self._gradBuffer, self._gradMask.resize_(mask.size()).fill_(0)]
    return self.gradInput
def _test_maxpool_indices(self, num_dim):
    def expected_indices(dim):
        if dim == 1:
            return torch.DoubleTensor([1, 3])
        lower_dim = expected_indices(dim-1)
        lower_dim = lower_dim.view(1, *lower_dim.size())
        return torch.cat((lower_dim+4, lower_dim+12), 0)

    def expected_grad(dim):
        if dim == 1:
            return torch.DoubleTensor([0, 1, 0, 1])
        lower_dim_grad = expected_grad(dim-1)
        grad = lower_dim_grad.view(1, *lower_dim_grad.size())
        zero = torch.zeros(grad.size())
        return torch.cat((zero, grad, zero, grad), 0)

    module_cls = getattr(nn, 'MaxPool{}d'.format(num_dim))
    module = module_cls(2, return_indices=True)
    numel = 4 ** num_dim
    input = torch.range(1, numel).view(1, 1, *repeat(4, num_dim))
    input_var = Variable(input, requires_grad=True)

    # Check forward
    output, indices = module(input_var)
    if num_dim != 3:
        expected_indices = expected_indices(num_dim)
        expected_output = expected_indices + 1
        self.assertEqual(indices.data.squeeze(), expected_indices)
        self.assertEqual(output.data.squeeze(), expected_output)
    self.assertTrue(output.requires_grad)
    self.assertFalse(indices.requires_grad)

    # Make sure backward works
    grad_output = torch.DoubleTensor(output.size()).fill_(1)
    output.backward(grad_output, retain_variables=True)
    expected_grad = expected_grad(num_dim)
    self.assertEqual(input_var.grad, expected_grad.view_as(input))

    # Make sure backward after changing indices will result in an error
    indices.add_(1)
    self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
def test_ConvTranspose2d_output_size(self):
    m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
    i = Variable(torch.randn(2, 3, 6, 6))
    for h in range(15, 22):
        for w in range(15, 22):
            if 18 <= h <= 20 and 18 <= w <= 20:
                size = (h, w)
                if h == 19:
                    size = torch.LongStorage(size)
                elif h == 2:
                    size = torch.LongStorage((2, 4) + size)
                m(i, output_size=(h, w))
            else:
                self.assertRaises(ValueError, lambda: m(i, (h, w)))
def test_RNN_cell(self):
    # this is just a smoke test; these modules are implemented through
    # autograd so no Jacobian test is needed
    for module in (nn.RNNCell, nn.GRUCell):
        for bias in (True, False):
            input = Variable(torch.randn(3, 10))
            hx = Variable(torch.randn(3, 20))
            cell = module(10, 20, bias=bias)
            for i in range(6):
                hx = cell(input, hx)
            hx.sum().backward()
def test_LSTM_cell(self):
    # this is just a smoke test; these modules are implemented through
    # autograd so no Jacobian test is needed
    for bias in (True, False):
        input = Variable(torch.randn(3, 10))
        hx = Variable(torch.randn(3, 20))
        cx = Variable(torch.randn(3, 20))
        lstm = nn.LSTMCell(10, 20, bias=bias)
        for i in range(6):
            hx, cx = lstm(input, (hx, cx))
        (hx+cx).sum().backward()
def small_3d_unique(t):
    return t(S, S, S).copy_(torch.range(1, S*S*S))
def small_1d_lapack(t):
    return torch.range(1, 3).view(3)
def small_2d_lapack(t):
    return torch.range(1, 9).view(3, 3)
def small_2d_lapack_fat(t):
    return torch.range(1, 12).view(4, 3)
def test_from_sequence(self):
    seq = [list(range(i*4, i*4+4)) for i in range(5)]
    reference = torch.range(0, 19).resize_(5, 4)
    for t in types:
        cuda_type = get_gpu_type(t)
        self.assertEqual(cuda_type(seq), reference)
def test_mul(self):
    m1 = torch.randn(10, 10)
    res1 = m1.clone()
    res1[:, 3].mul_(2)
    res2 = m1.clone()
    for i in range(res1.size(0)):
        res2[i, 3] = res2[i, 3] * 2
    self.assertEqual(res1, res2)
def test_fmod(self):
    m1 = torch.Tensor(10, 10).uniform_(-10., 10.)
    res1 = m1.clone()
    q = 2.1
    res1[:, 3].fmod_(q)
    res2 = m1.clone()
    for i in range(m1.size(1)):
        res2[i, 3] = math.fmod(res2[i, 3], q)
    self.assertEqual(res1, res2)
def test_remainder(self):
    m1 = torch.Tensor(10, 10).uniform_(-10., 10.)
    res1 = m1.clone()
    q = 2.1
    res1[:, 3].remainder_(q)
    res2 = m1.clone()
    for i in range(m1.size(0)):
        res2[i, 3] = res2[i, 3] % q
    self.assertEqual(res1, res2)
def test_bmm(self):
    num_batches = 10
    M, N, O = 23, 8, 12
    b1 = torch.randn(num_batches, M, N)
    b2 = torch.randn(num_batches, N, O)
    res = torch.bmm(b1, b2)
    for i in range(num_batches):
        r = torch.mm(b1[i], b2[i])
        self.assertEqual(r, res[i])
def test_pow(self):
    # [res] torch.pow([res,] x)

    # base - tensor, exponent - number
    # contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(m1[4], 3)
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(m1[4][i], 3)
    self.assertEqual(res1, res2)

    # non-contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(m1[:, 4], 3)
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(m1[i, 4], 3)
    self.assertEqual(res1, res2)

    # base - number, exponent - tensor
    # contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(3, m1[4])
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(3, m1[4, i])
    self.assertEqual(res1, res2)

    # non-contiguous
    m1 = torch.randn(100, 100)
    res1 = torch.pow(3, m1[:, 4])
    res2 = res1.clone().zero_()
    for i in range(res2.size(0)):
        res2[i] = math.pow(3, m1[i][4])
    self.assertEqual(res1, res2)
def test_range(self):
    res1 = torch.range(0, 1)
    res2 = torch.Tensor()
    torch.range(res2, 0, 1)
    self.assertEqual(res1, res2, 0)

    # Check range for non-contiguous tensors.
    x = torch.zeros(2, 3)
    torch.range(x.narrow(1, 1, 2), 0, 3)
    res2 = torch.Tensor(((0, 0, 1), (0, 2, 3)))
    self.assertEqual(x, res2, 1e-16)

    # Check negative
    res1 = torch.Tensor((1, 0))
    res2 = torch.Tensor()
    torch.range(res2, 1, 0, -1)
    self.assertEqual(res1, res2, 0)

    # Equal bounds
    res1 = torch.ones(1)
    res2 = torch.Tensor()
    torch.range(res2, 1, 1, -1)
    self.assertEqual(res1, res2, 0)
    torch.range(res2, 1, 1, 1)
    self.assertEqual(res1, res2, 0)

    # FloatTensor
    res1 = torch.range(torch.FloatTensor(), 0.6, 0.9, 0.1)
    self.assertEqual(res1.size(0), 4)
    res1 = torch.range(torch.FloatTensor(), 1, 10, 0.3)
    self.assertEqual(res1.size(0), 31)

    # DoubleTensor
    res1 = torch.range(torch.DoubleTensor(), 0.6, 0.9, 0.1)
    self.assertEqual(res1.size(0), 4)
    res1 = torch.range(torch.DoubleTensor(), 1, 10, 0.3)
    self.assertEqual(res1.size(0), 31)
def test_mode(self):
    x = torch.range(1, SIZE * SIZE).clone().resize_(SIZE, SIZE)
    x[:2] = 1
    x[:, :2] = 1
    x0 = x.clone()

    # Pre-calculated results.
    res1val = torch.Tensor(SIZE, 1).fill_(1)
    # The indices are the position of the last appearance of the mode element.
    res1ind = torch.LongTensor(SIZE, 1).fill_(1)
    res1ind[0] = SIZE - 1
    res1ind[1] = SIZE - 1

    res2val, res2ind = torch.mode(x)
    self.assertEqual(res1val, res2val, 0)
    self.assertEqual(res1ind, res2ind, 0)

    # Test use of result tensor
    res2val = torch.Tensor()
    res2ind = torch.LongTensor()
    torch.mode(res2val, res2ind, x)
    self.assertEqual(res1val, res2val, 0)
    self.assertEqual(res1ind, res2ind, 0)

    # Test non-default dim
    res2val, res2ind = torch.mode(x, 0)
    self.assertEqual(res1val.view(1, SIZE), res2val, 0)
    self.assertEqual(res1ind.view(1, SIZE), res2ind, 0)

    # input unchanged
    self.assertEqual(x, x0, 0)
def test_xcorr3_xcorr2_eq(self):
    def reference(x, k, o3, o32):
        for i in range(o3.size(1)):
            for j in range(k.size(1)):
                o32[i].add(torch.xcorr2(x[i+j-1], k[j]))
    self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k), reference)