The following code examples, extracted from open-source Python projects, illustrate how to use torch.manual_seed().
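Before the extracted examples, here is a minimal sketch (not taken from any of the projects below) of the property every snippet relies on: seeding the global generator makes subsequent random draws reproducible.

import torch

# Seeding the global CPU generator fixes the random stream.
torch.manual_seed(42)
a = torch.randn(3)

# Re-seeding with the same value replays the same sequence.
torch.manual_seed(42)
b = torch.randn(3)

assert torch.equal(a, b)  # identical draws after identical seeds
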
def resample(self, seed=None):
    """Resample the dataset.

    Args:
        seed (int, optional): Seed for resampling. By default no seed is
            used.
    """
    if seed is not None:
        gen = torch.manual_seed(seed)
    else:
        gen = torch.default_generator
    if self.replacement:
        self.perm = torch.LongTensor(len(self)).random_(
            len(self.dataset), generator=gen)
    else:
        self.perm = torch.randperm(
            len(self.dataset), generator=gen).narrow(0, 0, len(self))

def test_mlpg_gradcheck():
    # MLPG is performed dimension by dimension, so static_dim 1 is enough,
    # 2 just in case.
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = Variable(torch.rand(T, static_dim * len(windows)),
                         requires_grad=True)
        inputs = (means,)

        # Unit variances case
        variances = torch.ones(static_dim * len(windows)
                               ).expand(T, static_dim * len(windows))
        assert gradcheck(MLPG(variances, windows),
                         inputs, eps=1e-3, atol=1e-3)

        # Rand variances case
        variances = torch.rand(static_dim * len(windows)
                               ).expand(T, static_dim * len(windows))
        assert gradcheck(MLPG(variances, windows),
                         inputs, eps=1e-3, atol=1e-3)

def set_random_seeds(seed, cuda):
    """
    Set seeds for the python random module, numpy.random and torch.

    Parameters
    ----------
    seed: int
        Random seed.
    cuda: bool
        Whether to set cuda seed with torch.
    """
    random.seed(seed)
    th.manual_seed(seed)
    if cuda:
        th.cuda.manual_seed_all(seed)
    np.random.seed(seed)

def __init__(self, model_path, gpu_id=None, random_seed=None):
    self._logger = logging.getLogger('nmmt.NMTDecoder')

    if gpu_id is not None:
        torch.cuda.set_device(gpu_id)

    if random_seed is not None:
        torch.manual_seed(random_seed)
        torch.cuda.manual_seed_all(random_seed)  # seed all GPUs as well

    using_cuda = gpu_id is not None
    self._text_processor = SubwordTextProcessor.load_from_file(
        os.path.join(model_path, 'model.bpe'))

    with log_timed_action(self._logger, 'Loading model from checkpoint'):
        self._engine = NMTEngine.load_from_checkpoint(
            os.path.join(model_path, 'model.pt'), using_cuda=using_cuda)

    # Public-editable options
    self.beam_size = 5
    self.max_sent_length = 160
    self.replace_unk = False
    self.tuning_epochs = 5

def train(rank, args, model):
    torch.manual_seed(args.seed + rank)

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, num_workers=1)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, num_workers=1)

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train_epoch(epoch, args, model, train_loader, optimizer)
        test_epoch(model, test_loader)

def parse_set_seed_once():
    global SEED
    global SEED_SET
    global ACCEPT
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--seed', type=int, default=123)
    parser.add_argument('--accept', action='store_true')
    args, remaining = parser.parse_known_args()
    if SEED_SET == 0:
        torch.manual_seed(args.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(args.seed)
        SEED = args.seed
        SEED_SET = 1
    ACCEPT = args.accept
    remaining = [sys.argv[0]] + remaining
    return remaining

def __init__(self, expt_dir='experiment', loss=NLLLoss(), batch_size=64,
             random_seed=None, checkpoint_every=100, print_every=100):
    self._trainer = "Simple Trainer"
    self.random_seed = random_seed
    if random_seed is not None:
        random.seed(random_seed)
        torch.manual_seed(random_seed)
    self.loss = loss
    self.evaluator = Evaluator(loss=self.loss, batch_size=batch_size)
    self.optimizer = None
    self.checkpoint_every = checkpoint_every
    self.print_every = print_every

    if not os.path.isabs(expt_dir):
        expt_dir = os.path.join(os.getcwd(), expt_dir)
    self.expt_dir = expt_dir
    if not os.path.exists(self.expt_dir):
        os.makedirs(self.expt_dir)
    self.batch_size = batch_size

    self.logger = logging.getLogger(__name__)

def __init__(self, dataset_fname=None, train=False, size=50,
             num_samples=1000000, random_seed=1111):
    super(TSPDataset, self).__init__()
    #start = torch.FloatTensor([[-1], [-1]])
    torch.manual_seed(random_seed)

    self.data_set = []
    if not train:
        with open(dataset_fname, 'r') as dset:
            for l in tqdm(dset):
                inputs, outputs = l.split(' output ')
                sample = torch.zeros(1, )
                x = np.array(inputs.split(), dtype=np.float32).reshape([-1, 2]).T
                #y.append(np.array(outputs.split(), dtype=np.int32)[:-1])  # skip the last one
                self.data_set.append(x)
    else:
        # randomly sample points uniformly from [0, 1]
        for l in tqdm(range(num_samples)):
            x = torch.FloatTensor(2, size).uniform_(0, 1)
            #x = torch.cat([start, x], 1)
            self.data_set.append(x)

    self.size = len(self.data_set)

def test_feature_extraction_d2_2():
    """ 0.5 point(s) """
    global test_sent, gold, word_to_ix, vocab
    torch.manual_seed(1)

    feat_extractor = SimpleFeatureExtractor()
    embedder = VanillaWordEmbeddingLookup(word_to_ix, TEST_EMBEDDING_DIM)
    combiner = DummyCombiner()

    embeds = embedder(test_sent)
    state = ParserState(test_sent, embeds, combiner)
    state.shift()
    state.shift()

    feats = feat_extractor.get_features(state)
    feats_list = make_list(feats)
    true = ([-1.8661, 1.4146, -1.8781, -0.4674],
            [-0.9596, 0.5489, -0.9901, -0.3826],
            [0.5237, 0.0004, -1.2039, 3.5283])
    pairs = zip(feats_list, true)
    check_tensor_correctness(pairs)

def test_combiner_d2_4():
    """ 1 point(s) """
    torch.manual_seed(1)
    combiner = MLPCombinerNetwork(6)

    head_feat = ag.Variable(torch.randn(1, 6))
    modifier_feat = ag.Variable(torch.randn(1, 6))
    combined = combiner(head_feat, modifier_feat)
    combined_list = combined.view(-1).data.tolist()
    true_out = [-0.4897, 0.4484, -0.0591, 0.1778, 0.4223, -0.0940]
    check_tensor_correctness([(combined_list, true_out)])


# ===-------------------------------------------------------------------------------------------===
# Section 3 tests
# ===-------------------------------------------------------------------------------------------===

def test_parse_logic_d3_1():
    """ 0.5 point(s) """
    global test_sent, gold, word_to_ix, vocab
    torch.manual_seed(1)

    feat_extract = SimpleFeatureExtractor()
    word_embed = VanillaWordEmbeddingLookup(word_to_ix, TEST_EMBEDDING_DIM)
    act_chooser = ActionChooserNetwork(TEST_EMBEDDING_DIM * NUM_FEATURES)
    combiner = MLPCombinerNetwork(TEST_EMBEDDING_DIM)

    parser = TransitionParser(feat_extract, word_embed, act_chooser, combiner)
    output, dep_graph, actions_done = parser(test_sent[:-1], gold)

    assert len(output) == 15  # Made the right number of decisions

    # check one of the outputs
    checked_out = output[10].view(-1).data.tolist()
    true_out = [-1.4737, -1.0875, -0.8350]
    check_tensor_correctness([(true_out, checked_out)])

    true_dep_graph = dependency_graph_from_oracle(test_sent, gold)
    assert true_dep_graph == dep_graph
    assert actions_done == [0, 0, 1, 0, 1, 0, 0, 1, 2, 0, 0, 0, 1, 1, 2]

def test_pretrained_embeddings_d4_2():
    """ 0.5 point(s) """
    torch.manual_seed(1)
    word_to_ix = {"interest": 0, "rate": 1, "swap": 2}
    pretrained = {"interest": [-1.4, 2.6, 3.5], "swap": [1.6, 5.7, 3.2]}
    embedder = VanillaWordEmbeddingLookup(word_to_ix, 3)
    initialize_with_pretrained(pretrained, embedder)

    embeddings = embedder.word_embeddings.weight.data

    pairs = []
    true_rate_embed = [-2.2820, 0.5237, 0.0004]
    pairs.append((embeddings[word_to_ix["interest"]].tolist(), pretrained["interest"]))
    pairs.append((embeddings[word_to_ix["rate"]].tolist(), true_rate_embed))
    pairs.append((embeddings[word_to_ix["swap"]].tolist(), pretrained["swap"]))
    check_tensor_correctness(pairs)

def test_lstm_combiner_d4_3():
    """ 1 point(s) """
    torch.manual_seed(1)
    combiner = LSTMCombinerNetwork(TEST_EMBEDDING_DIM, 1, 0.0)

    head_feat = ag.Variable(torch.randn(1, TEST_EMBEDDING_DIM))
    modifier_feat = ag.Variable(torch.randn(1, TEST_EMBEDDING_DIM))

    # Do the combination a few times to make sure they implemented the
    # sequential part right
    combined = combiner(head_feat, modifier_feat)
    combined = combiner(head_feat, modifier_feat)
    combined = combiner(head_feat, modifier_feat)

    combined_list = combined.view(-1).data.tolist()
    true_out = [0.0873, -0.1837, 0.1975, -0.1166]
    check_tensor_correctness([(combined_list, true_out)])

def main():
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    root = osp.expanduser('~/TeamProject/Camelyon17/fcn')
    train_dataset = fcn_datasets.FCNDataset(root, split='train', transform=True)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=1,
                                               shuffle=False)
    for i, (input, target) in enumerate(train_loader):
        print(i)
        print(input)
        print(target)

def set_seed(n: int = DEFAULT_SEED):
    """
    Set the seed of the random number generator.

    Notes:
        Default seed is 137.

    Args:
        n: Random seed.

    Returns:
        None
    """
    # set the seed for the cpu generator
    torch.manual_seed(int(n))
    numpy.random.seed(int(n))

    # set the seed for the gpu generator if needed
    DTYPE.manual_seed(int(n))

def main():
    parser = argparse.ArgumentParser(description='PyTorch YOLO')
    parser.add_argument('--use_cuda', type=bool, default=False,
                        help='use cuda or not')
    parser.add_argument('--epochs', type=int, default=10,
                        help='Epochs')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='Batch size')
    parser.add_argument('--lr', type=float, default=1e-3,
                        help='Learning rate')
    parser.add_argument('--seed', type=int, default=1234,
                        help='Random seed')
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = args.use_cuda

    train.train(args)

def construct_graph(self):
    # Set the random seed
    torch.manual_seed(cfg.RNG_SEED)
    # Build the main computation graph
    self.net.create_architecture(self.imdb.num_classes, tag='default',
                                 anchor_scales=cfg.ANCHOR_SCALES,
                                 anchor_ratios=cfg.ANCHOR_RATIOS)
    # Define the loss
    # loss = layers['total_loss']
    # Set learning rate and momentum
    lr = cfg.TRAIN.LEARNING_RATE
    params = []
    for key, value in dict(self.net.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value],
                            'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                            'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            else:
                params += [{'params': [value],
                            'lr': lr,
                            'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Write the train and validation information to tensorboard
    self.writer = tb.writer.FileWriter(self.tbdir)
    self.valwriter = tb.writer.FileWriter(self.tbvaldir)

    return lr, self.optimizer

def __init__(self, cuda, in_dim, mem_dim, criterion):
    super(ChildSumTreeLSTM, self).__init__()
    self.cudaFlag = cuda
    self.in_dim = in_dim
    self.mem_dim = mem_dim

    # self.emb = nn.Embedding(vocab_size, in_dim,
    #                         padding_idx=Constants.PAD)
    # torch.manual_seed(123)
    self.ix = nn.Linear(self.in_dim, self.mem_dim)
    self.ih = nn.Linear(self.mem_dim, self.mem_dim)
    self.fh = nn.Linear(self.mem_dim, self.mem_dim)
    self.fx = nn.Linear(self.in_dim, self.mem_dim)
    self.ux = nn.Linear(self.in_dim, self.mem_dim)
    self.uh = nn.Linear(self.mem_dim, self.mem_dim)
    self.ox = nn.Linear(self.in_dim, self.mem_dim)
    self.oh = nn.Linear(self.mem_dim, self.mem_dim)
    self.criterion = criterion
    self.output_module = None

def test_rand(self):
    torch.manual_seed(123456)
    res1 = torch.rand(SIZE, SIZE)
    res2 = torch.Tensor()
    torch.manual_seed(123456)
    torch.rand(res2, SIZE, SIZE)
    self.assertEqual(res1, res2)

def test_randn(self):
    torch.manual_seed(123456)
    res1 = torch.randn(SIZE, SIZE)
    res2 = torch.Tensor()
    torch.manual_seed(123456)
    torch.randn(res2, SIZE, SIZE)
    self.assertEqual(res1, res2)

def test_boxMullerState(self):
    torch.manual_seed(123)
    odd_number = 101
    seeded = torch.randn(odd_number)
    state = torch.get_rng_state()
    midstream = torch.randn(odd_number)
    torch.set_rng_state(state)
    repeat_midstream = torch.randn(odd_number)
    torch.manual_seed(123)
    reseeded = torch.randn(odd_number)
    self.assertEqual(midstream, repeat_midstream, 0,
                     'get_rng_state/set_rng_state not generating same sequence of normally distributed numbers')
    self.assertEqual(seeded, reseeded, 0,
                     'repeated calls to manual_seed not generating same sequence of normally distributed numbers')

def test_manual_seed(self):
    rng_state = torch.get_rng_state()
    torch.manual_seed(2)
    x = torch.randn(100)
    self.assertEqual(torch.initial_seed(), 2)
    torch.manual_seed(2)
    y = torch.randn(100)
    self.assertEqual(x, y)
    torch.set_rng_state(rng_state)

def __init__(self, train, valid, test, config):
    # fix seed
    self.seed = config['seed']
    np.random.seed(self.seed)
    torch.manual_seed(self.seed)
    torch.cuda.manual_seed(self.seed)

    self.train = train
    self.valid = valid
    self.test = test

    self.imgdim = len(train['imgfeat'][0])
    self.sentdim = len(train['sentfeat'][0])
    self.projdim = config['projdim']
    self.margin = config['margin']

    self.batch_size = 128
    self.ncontrast = 30
    self.maxepoch = 20
    self.early_stop = True

    config_model = {'imgdim': self.imgdim, 'sentdim': self.sentdim,
                    'projdim': self.projdim}
    self.model = COCOProjNet(config_model).cuda()

    self.loss_fn = PairwiseRankingLoss(margin=self.margin).cuda()

    self.optimizer = optim.Adam(self.model.parameters())

def __init__(self, train, valid, test, devscores, config):
    # fix seed
    np.random.seed(config['seed'])
    torch.manual_seed(config['seed'])
    assert torch.cuda.is_available(), 'torch.cuda required for Relatedness'
    torch.cuda.manual_seed(config['seed'])

    self.train = train
    self.valid = valid
    self.test = test
    self.devscores = devscores

    self.inputdim = train['X'].shape[1]
    self.nclasses = config['nclasses']
    self.seed = config['seed']
    self.l2reg = 0.
    self.batch_size = 64
    self.maxepoch = 1000
    self.early_stop = True

    self.model = nn.Sequential(
        nn.Linear(self.inputdim, self.nclasses),
        nn.Softmax(),
    )
    self.loss_fn = nn.MSELoss()

    if torch.cuda.is_available():
        self.model = self.model.cuda()
        self.loss_fn = self.loss_fn.cuda()

    self.loss_fn.size_average = False
    self.optimizer = optim.Adam(self.model.parameters(),
                                weight_decay=self.l2reg)

def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
             cudaEfficient=False):
    # fix seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    self.inputdim = inputdim
    self.nclasses = nclasses
    self.l2reg = l2reg
    self.batch_size = batch_size
    self.cudaEfficient = cudaEfficient

def prepare_environment(params: Union[Params, Dict[str, Any]]):
    """
    Sets random seeds for reproducible experiments. This may not work as expected
    if you use this from within a python project in which you have already imported
    Pytorch. If you use the scripts/run_model.py entry point to training models with
    this library, your experiments should be reasonably reproducible. If you are using
    this from your own project, you will want to call this function before importing
    Pytorch. Complete determinism is very difficult to achieve with libraries doing
    optimized linear algebra due to massively parallel execution, which is exacerbated
    by using GPUs.

    Parameters
    ----------
    params: Params object or dict, required.
        A ``Params`` object or dict holding the json parameters.
    """
    seed = params.pop("random_seed", 13370)
    numpy_seed = params.pop("numpy_seed", 1337)
    torch_seed = params.pop("pytorch_seed", 133)

    if seed is not None:
        random.seed(seed)
    if numpy_seed is not None:
        numpy.random.seed(numpy_seed)
    if torch_seed is not None:
        torch.manual_seed(torch_seed)
        # Seed all GPUs with the same seed if available.
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(torch_seed)

    log_pytorch_version_info()

def main(config):
    # ensure directories are setup
    prepare_dirs(config)

    if config.num_gpu > 0:
        torch.cuda.manual_seed(config.random_seed)
        kwargs = {'num_workers': 1, 'pin_memory': True}
    else:
        torch.manual_seed(config.random_seed)
        kwargs = {}

    # instantiate data loaders
    if config.is_train:
        data_loader = get_train_valid_loader(config.data_dir,
                                             config.dataset,
                                             config.batch_size,
                                             config.augment,
                                             config.random_seed,
                                             config.valid_size,
                                             config.shuffle,
                                             config.show_sample,
                                             **kwargs)
    else:
        data_loader = get_test_loader(config.data_dir,
                                      config.dataset,
                                      config.batch_size,
                                      config.shuffle,
                                      **kwargs)

    # instantiate trainer
    trainer = Trainer(config, data_loader)

    # either train
    if config.is_train:
        save_config(config)
        trainer.train()
    # or load a pretrained model and test
    else:
        trainer.test()

def test_functional_mlpg():
    static_dim = 2
    T = 5

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = torch.rand(T, static_dim * len(windows))
        variances = torch.ones(static_dim * len(windows))

        y = G.mlpg(means.numpy(), variances.numpy(), windows)
        y = Variable(torch.from_numpy(y), requires_grad=False)

        means = Variable(means, requires_grad=True)

        # mlpg
        y_hat = AF.mlpg(means, variances, windows)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        # Test backward pass
        nn.MSELoss()(y_hat, y).backward()

        # unit_variance_mlpg
        R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
        y_hat = AF.unit_variance_mlpg(R, means)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        nn.MSELoss()(y_hat, y).backward()

        # Test 3D tensor inputs
        y_hat = AF.unit_variance_mlpg(R, means.view(1, -1, means.size(-1)))
        assert np.allclose(
            y.data.numpy(), y_hat.data.view(-1, static_dim).numpy())

        nn.MSELoss()(y_hat.view(-1, static_dim), y).backward()

def test_mlpg_variance_expand():
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = Variable(torch.rand(T, static_dim * len(windows)),
                         requires_grad=True)
        variances = torch.rand(static_dim * len(windows))
        variances_expanded = variances.expand(T, static_dim * len(windows))
        y = AF.mlpg(means, variances, windows)
        y_hat = AF.mlpg(means, variances_expanded, windows)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

def test_modspec_gradcheck():
    static_dim = 12
    T = 16
    torch.manual_seed(1234)
    inputs = (Variable(torch.rand(T, static_dim), requires_grad=True),)
    n = 16
    for norm in [None, "ortho"]:
        assert gradcheck(ModSpec(n=n, norm=norm), inputs,
                         eps=1e-4, atol=1e-4)

def eval_model(model_path, mode='dev'):
    torch.manual_seed(6)

    snli_d, mnli_d, embd = data_loader.load_data_sm(
        config.DATA_ROOT, config.EMBD_FILE, reseversed=False,
        batch_sizes=(32, 32, 32, 32, 32), device=0)

    m_train, m_dev_m, m_dev_um, m_test_m, m_test_um = mnli_d

    m_dev_um.shuffle = False
    m_dev_m.shuffle = False
    m_dev_um.sort = False
    m_dev_m.sort = False

    m_test_um.shuffle = False
    m_test_m.shuffle = False
    m_test_um.sort = False
    m_test_m.sort = False

    model = StackBiLSTMMaxout()
    model.Embd.weight.data = embd

    if torch.cuda.is_available():
        embd.cuda()
        model.cuda()

    criterion = nn.CrossEntropyLoss()

    model.load_state_dict(torch.load(model_path))

    model.max_l = 150
    m_pred = model_eval(model, m_dev_m, criterion)
    um_pred = model_eval(model, m_dev_um, criterion)

    print("dev_mismatched_score (acc, loss):", um_pred)
    print("dev_matched_score (acc, loss):", m_pred)

def run_tests():
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--seed', type=int, default=123)
    args, remaining = parser.parse_known_args()
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    remaining = [sys.argv[0]] + remaining
    unittest.main(argv=remaining)

def _async_set_seed(self, rank, device_id, seed):
    torch.manual_seed(seed)

def set_seed(seed, use_cuda):
    """
    setting the seed for controlling randomness in this example
    :param seed: seed value (int)
    :param use_cuda: set the random seed for torch.cuda or not
    :return: None
    """
    if seed is not None:
        torch.manual_seed(seed)
        np.random.seed(seed)
        if use_cuda:
            torch.cuda.manual_seed(seed)

def set_rng_seed(rng_seed):
    """
    Sets seeds of torch, numpy, and torch.cuda (if available).

    :param int rng_seed: The seed value.
    """
    torch.manual_seed(rng_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(rng_seed)
    np.random.seed(rng_seed)

def manual_seed(seed):
    return torch.manual_seed(seed)

def init_random_seed(manual_seed):
    """Init random seed."""
    seed = None
    if manual_seed is None:
        seed = random.randint(1, 10000)
    else:
        seed = manual_seed
    print("use random seed: {}".format(seed))
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

def train(self, dataset):
    self.model.train()
    self.embedding_model.train()
    self.embedding_model.zero_grad()
    self.optimizer.zero_grad()
    loss, k = 0.0, 0
    # torch.manual_seed(789)
    indices = torch.randperm(len(dataset))
    for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1)):
        tree, sent, label = dataset[indices[idx]]
        input = Var(sent)
        target = Var(torch.LongTensor([int(label)]))
        if self.args.cuda:
            input = input.cuda()
            target = target.cuda()
        emb = F.torch.unsqueeze(self.embedding_model(input), 1)
        output, err, _, _ = self.model.forward(tree, emb, training=True)
        #params = self.model.childsumtreelstm.getParameters()
        #params_norm = params.norm()
        err = err / self.args.batchsize  # + 0.5*self.args.reg*params_norm*params_norm  # custom bias
        loss += err.data[0]
        err.backward()
        k += 1
        if k == self.args.batchsize:
            for f in self.embedding_model.parameters():
                f.data.sub_(f.grad.data * self.args.emblr)
            self.optimizer.step()
            self.embedding_model.zero_grad()
            self.optimizer.zero_grad()
            k = 0
    self.epoch += 1
    return loss / len(dataset)

# helper function for testing

def set_seed(seed):
    """Sets random seed everywhere."""
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

def set_random_seed(seed):
    global random_seed
    random_seed = seed
    np.random.seed(seed)
    torch.manual_seed(random_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(random_seed)

def init_random_seed():
    """Init random seed."""
    seed = None
    if params.manual_seed is None:
        seed = random.randint(1, 10000)
    else:
        seed = params.manual_seed
    print("use random seed: {}".format(seed))
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

def init_random_seed():
    """Init random seed."""
    seed = None
    if manual_seed is None:
        seed = random.randint(1, 10000)
    else:
        seed = manual_seed
    print("use random seed: {}".format(seed))
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

def setUp(self):
    random.seed(123)
    torch.manual_seed(123)

def test_rand(self):
    torch.manual_seed(123456)
    res1 = torch.rand(SIZE, SIZE)
    res2 = torch.Tensor()
    torch.manual_seed(123456)
    torch.rand(SIZE, SIZE, out=res2)
    self.assertEqual(res1, res2)

def test_randn(self):
    torch.manual_seed(123456)
    res1 = torch.randn(SIZE, SIZE)
    res2 = torch.Tensor()
    torch.manual_seed(123456)
    torch.randn(SIZE, SIZE, out=res2)
    self.assertEqual(res1, res2)