The following 19 code examples, extracted from open-source Python projects, illustrate how to use torch.ShortTensor().
def _test_neg(self, cast):
    """Check tensor negation — in-place, out-of-place, and ``__neg__`` —
    against the reference value ``0 + (-1) * a`` for several dtypes.

    ``cast`` wraps each tensor (e.g. moves it to another device) before use.
    """
    # NOTE: LongTensor is grouped with the float types only because it is
    # populated via randn() rather than random_() below.
    float_types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor']
    int_types = ['torch.IntTensor', 'torch.ShortTensor']
    for t in float_types + int_types:
        if t in float_types:
            a = cast(torch.randn(100, 90).type(t))
        else:
            a = cast(torch.Tensor(100, 90).type(t).random_())
        zeros = cast(torch.Tensor().type(t)).resize_as_(a).zero_()
        # Reference: zeros + (-1) * a.  The legacy positional-alpha overload
        # torch.add(zeros, -1, a) was removed from PyTorch; the alpha keyword
        # computes the same value.
        res_add = torch.add(zeros, a, alpha=-1)
        res_neg = a.clone()
        res_neg.neg_()
        self.assertEqual(res_neg, res_add)
        # test out of place as well
        res_neg_out_place = a.clone().neg()
        self.assertEqual(res_neg_out_place, res_add)
        # test via __neg__ operator
        res_neg_op = -a.clone()
        self.assertEqual(res_neg_op, res_add)
def _test_neg(self, cast):
    """Check tensor negation — in-place, out-of-place, and ``__neg__`` —
    against the reference value ``0 + (-1) * a``, including the byte and
    char integer types.

    ``cast`` wraps each tensor (e.g. moves it to another device) before use.
    """
    # NOTE: LongTensor is grouped with the float types only because it is
    # populated via randn() rather than random_() below.
    float_types = ['torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor']
    int_types = ['torch.IntTensor', 'torch.ShortTensor', 'torch.ByteTensor',
                 'torch.CharTensor']
    for t in float_types + int_types:
        if t in float_types:
            a = cast(torch.randn(100, 90).type(t))
        else:
            a = cast(torch.Tensor(100, 90).type(t).random_())
        zeros = cast(torch.Tensor().type(t)).resize_as_(a).zero_()
        # Reference: zeros + (-1) * a.  The legacy positional-alpha overload
        # torch.add(zeros, -1, a) was removed from PyTorch; the alpha keyword
        # computes the same value.
        res_add = torch.add(zeros, a, alpha=-1)
        res_neg = a.clone()
        res_neg.neg_()
        self.assertEqual(res_neg, res_add)
        # test out of place as well
        res_neg_out_place = a.clone().neg()
        self.assertEqual(res_neg_out_place, res_add)
        # test via __neg__ operator
        res_neg_op = -a.clone()
        self.assertEqual(res_neg_op, res_add)
def test_element_size(self): byte = torch.ByteStorage().element_size() char = torch.CharStorage().element_size() short = torch.ShortStorage().element_size() int = torch.IntStorage().element_size() long = torch.LongStorage().element_size() float = torch.FloatStorage().element_size() double = torch.DoubleStorage().element_size() self.assertEqual(byte, torch.ByteTensor().element_size()) self.assertEqual(char, torch.CharTensor().element_size()) self.assertEqual(short, torch.ShortTensor().element_size()) self.assertEqual(int, torch.IntTensor().element_size()) self.assertEqual(long, torch.LongTensor().element_size()) self.assertEqual(float, torch.FloatTensor().element_size()) self.assertEqual(double, torch.DoubleTensor().element_size()) self.assertGreater(byte, 0) self.assertGreater(char, 0) self.assertGreater(short, 0) self.assertGreater(int, 0) self.assertGreater(long, 0) self.assertGreater(float, 0) self.assertGreater(double, 0) # These tests are portable, not necessarily strict for your system. self.assertEqual(byte, 1) self.assertEqual(char, 1) self.assertGreaterEqual(short, 2) self.assertGreaterEqual(int, 2) self.assertGreaterEqual(int, short) self.assertGreaterEqual(long, 4) self.assertGreaterEqual(long, int) self.assertGreaterEqual(double, float)
def test_numpy_scalars(self): import numpy as np class ScalarDataset(torch.utils.data.Dataset): def __init__(self, dtype): self.dtype = dtype def __getitem__(self, i): return self.dtype() def __len__(self): return 4 dtypes = { np.float64: torch.DoubleTensor, np.float32: torch.FloatTensor, np.float16: torch.HalfTensor, np.int64: torch.LongTensor, np.int32: torch.IntTensor, np.int16: torch.ShortTensor, np.int8: torch.CharTensor, np.uint8: torch.ByteTensor, } for dt, tt in dtypes.items(): dset = ScalarDataset(dt) loader = DataLoader(dset, batch_size=2) batch = next(iter(loader)) self.assertIsInstance(batch, tt)
def test_nonzero(self): num_src = 12 types = [ 'torch.ByteTensor', 'torch.CharTensor', 'torch.ShortTensor', 'torch.IntTensor', 'torch.FloatTensor', 'torch.DoubleTensor', 'torch.LongTensor', ] shapes = [ torch.Size((12,)), torch.Size((12, 1)), torch.Size((1, 12)), torch.Size((6, 2)), torch.Size((3, 2, 2)), ] for t in types: while True: tensor = torch.rand(num_src).mul(2).floor().type(t) if tensor.sum() > 0: break for shape in shapes: tensor = tensor.clone().resize_(shape) dst1 = torch.nonzero(tensor) dst2 = tensor.nonzero() dst3 = torch.LongTensor() torch.nonzero(tensor, out=dst3) if len(shape) == 1: dst = [] for i in range(num_src): if tensor[i] != 0: dst += [i] self.assertEqual(dst1.select(1, 0), torch.LongTensor(dst), 0) self.assertEqual(dst2.select(1, 0), torch.LongTensor(dst), 0) self.assertEqual(dst3.select(1, 0), torch.LongTensor(dst), 0) elif len(shape) == 2: # This test will allow through some False positives. It only checks # that the elements flagged positive are indeed non-zero. for i in range(dst1.size(0)): self.assertNotEqual(tensor[dst1[i, 0], dst1[i, 1]], 0) elif len(shape) == 3: # This test will allow through some False positives. It only checks # that the elements flagged positive are indeed non-zero. for i in range(dst1.size(0)): self.assertNotEqual(tensor[dst1[i, 0], dst1[i, 1], dst1[i, 2]], 0)
def _graph_constant(g, value, dims, type, *args, **kwargs):
    """Append a Constant node to graph ``g`` holding ``value`` broadcast into
    a tensor of shape ``dims`` and the dtype named by ``type``.

    A ``dims`` of ``None``, ``0``, or all zeros marks the constant as a
    scalar, which is emitted through the ``value_z`` attribute instead of
    ``value_t``.
    """
    assert isinstance(value, numbers.Number)
    assert type is not None
    isscalar = False
    if dims is None or dims == 0 or set(dims) == set([0]):
        dims = [1]
        isscalar = True
    # Dispatch table replaces the original if/elif ladder over type names.
    constructors = {
        "char": torch.CharTensor,
        "short": torch.ShortTensor,
        "int": torch.IntTensor,
        "long": torch.LongTensor,
        "half": torch.HalfTensor,
        "float": torch.FloatTensor,
        "double": torch.DoubleTensor,
    }
    ctor = constructors.get(type.lower())
    if ctor is None:
        raise ValueError("Unknown type, type should be one of the following strings: "
                         "char, short, int, long, half, float, double")
    tensor = ctor(*dims)
    tensor.fill_(value)
    attr = "value_z" if isscalar else "value_t"
    return g.op("Constant", *args, **{attr: tensor}, **kwargs)
def _worker_loop(dataset, index_queue, data_queue, collate_fn):
    """Worker-process loop: pull batches of indices from ``index_queue``,
    collate the corresponding dataset samples, and push results (or a wrapped
    exception) onto ``data_queue``.

    Runs until a ``None`` sentinel arrives on ``index_queue``; the sentinel is
    echoed on ``data_queue`` so the consumer knows this worker has shut down.
    """
    global _use_shared_memory
    # Flag read elsewhere in this module — presumably tells the collate
    # helpers to allocate tensors in shared memory; confirm against the
    # module's collate implementation.
    _use_shared_memory = True

    # torch.set_num_threads(1)
    while True:
        r = index_queue.get()
        if r is None:
            # Shutdown sentinel: forward it and exit.
            data_queue.put(None)
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            # Exceptions cannot cross the process boundary as-is; wrap the
            # exc_info so the parent process can re-raise it with context.
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))

# numpy_type_map = {
#     'float64': torch.DoubleTensor,
#     'float32': torch.FloatTensor,
#     'float16': torch.HalfTensor,
#     'int64': torch.LongTensor,
#     'int32': torch.IntTensor,
#     'int16': torch.ShortTensor,
#     'int8': torch.CharTensor,
#     'uint8': torch.ByteTensor,
# }
def test_nonzero(self): num_src = 12 types = [ 'torch.ByteTensor', 'torch.CharTensor', 'torch.ShortTensor', 'torch.IntTensor', 'torch.FloatTensor', 'torch.DoubleTensor', 'torch.LongTensor', ] shapes = [ torch.Size((12,)), torch.Size((12, 1)), torch.Size((1, 12)), torch.Size((6, 2)), torch.Size((3, 2, 2)), ] for t in types: while True: tensor = torch.rand(num_src).mul(2).floor().type(t) if tensor.sum() > 0: break for shape in shapes: tensor = tensor.clone().resize_(shape) dst1 = torch.nonzero(tensor) dst2 = tensor.nonzero() dst3 = torch.LongTensor() torch.nonzero(dst3, tensor) if len(shape) == 1: dst = [] for i in range(num_src): if tensor[i] != 0: dst += [i] self.assertEqual(dst1.select(1, 0), torch.LongTensor(dst), 0) self.assertEqual(dst2.select(1, 0), torch.LongTensor(dst), 0) self.assertEqual(dst3.select(1, 0), torch.LongTensor(dst), 0) elif len(shape) == 2: # This test will allow through some False positives. It only checks # that the elements flagged positive are indeed non-zero. for i in range(dst1.size(0)): self.assertNotEqual(tensor[dst1[i,0], dst1[i,1]], 0) elif len(shape) == 3: # This test will allow through some False positives. It only checks # that the elements flagged positive are indeed non-zero. for i in range(dst1.size(0)): self.assertNotEqual(tensor[dst1[i,0], dst1[i,1], dst1[i,2]], 0)
def __init__(self, dtype='float'):
    """
    Cast a torch.Tensor to a different type

    Arguments
    ---------
    dtype : string or torch.*Tensor literal or list of such
        data type to which input(s) will be cast.
        If list, it should be the same length as inputs.
    """
    # One lookup table replaces the two duplicated if/elif chains of the
    # original implementation.  Unrecognized strings pass through unchanged,
    # matching the previous behavior.
    name_map = {
        'byte': th.ByteTensor,
        'double': th.DoubleTensor,
        'float': th.FloatTensor,
        'int': th.IntTensor,
        'long': th.LongTensor,
        'short': th.ShortTensor,
    }

    def resolve(dt):
        # Non-string values (already tensor-type literals) are used as-is.
        if isinstance(dt, str):
            return name_map.get(dt, dt)
        return dt

    if isinstance(dtype, (list, tuple)):
        self.dtype = [resolve(dt) for dt in dtype]
    else:
        self.dtype = resolve(dtype)