The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.FloatTensor().
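Before the project examples, here is a minimal sketch of the constructor's common call signatures (my own illustration, not taken from any of the projects below):

import numpy as np
import torch

# From existing data: values are copied into a new 32-bit float tensor.
t = torch.FloatTensor([[1.0, 2.0], [3.0, 4.0]])      # shape (2, 2)

# From sizes: allocates an *uninitialized* tensor; fill it before reading.
u = torch.FloatTensor(2, 3).fill_(0.0)

# From a numpy array: also copies (torch.from_numpy would share memory instead).
v = torch.FloatTensor(np.ones((2, 2), dtype=np.float32))

print(t.size(), u.size(), v.type())   # torch.Size([2, 2]) torch.Size([2, 3]) torch.FloatTensor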
def __call__(self, x):
    """
    Args:
        x (FloatTensor/LongTensor or ndarray)

    Returns:
        x_mu (LongTensor or ndarray)
    """
    mu = self.qc - 1.
    if isinstance(x, np.ndarray):
        x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
    elif isinstance(x, (torch.Tensor, torch.LongTensor)):
        if isinstance(x, torch.LongTensor):
            x = x.float()
        mu = torch.FloatTensor([mu])
        x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
    return x_mu
def vector2image(x):
    block_size = chris_domain.BLOCK_SIZE * 3
    x_temp = torch.FloatTensor(
        x.size()[0], x.size()[1], 1, block_size,
        params['GRID_SIZE'] * block_size
    ).cuda().fill_(0.0)
    for b in range(x.size()[0]):
        for d in range(x.size()[1]):
            for i in range(x.size()[2]):
                from_ = i * block_size
                to_ = (i + 1) * block_size
                fill_ = float(x[b][d][i])
                x_temp[b, d, 0, :, from_:to_].fill_(fill_)
    return x_temp
def plt_to_vis(fig, win, name):
    canvas = fig.canvas
    import io
    buf = io.BytesIO()
    canvas.print_png(buf)
    data = buf.getvalue()
    buf.close()

    buf = io.BytesIO()
    buf.write(data)
    img = Image.open(buf)
    img = np.asarray(img) / 255.0
    img = img.astype(float)[:, :, 0:3]
    img = torch.FloatTensor(img).permute(2, 0, 1)
    vis.image(img,
              win=str(MULTI_RUN) + '-' + win,
              opts=dict(title=str(MULTI_RUN) + '-' + name))
def bn_hat_z_layers(self, hat_z_layers, z_pre_layers):
    # TODO: Calculate batchnorm using GPU Tensors.
    assert len(hat_z_layers) == len(z_pre_layers)
    hat_z_layers_normalized = []
    for i, (hat_z, z_pre) in enumerate(zip(hat_z_layers, z_pre_layers)):
        if self.use_cuda:
            ones = Variable(torch.ones(z_pre.size()[0], 1).cuda())
        else:
            ones = Variable(torch.ones(z_pre.size()[0], 1))
        mean = torch.mean(z_pre, 0)
        noise_var = np.random.normal(loc=0.0, scale=1 - 1e-10, size=z_pre.size())
        if self.use_cuda:
            var = np.var(z_pre.data.cpu().numpy() + noise_var, axis=0).reshape(1, z_pre.size()[1])
        else:
            var = np.var(z_pre.data.numpy() + noise_var, axis=0).reshape(1, z_pre.size()[1])
        var = Variable(torch.FloatTensor(var))
        if self.use_cuda:
            hat_z = hat_z.cpu()
            ones = ones.cpu()
            mean = mean.cpu()
        hat_z_normalized = torch.div(hat_z - ones.mm(mean), ones.mm(torch.sqrt(var + 1e-10)))
        if self.use_cuda:
            hat_z_normalized = hat_z_normalized.cuda()
        hat_z_layers_normalized.append(hat_z_normalized)
    return hat_z_layers_normalized
def forward_noise(self, tilde_h):
    # z_pre will be used in the decoder cost
    z_pre = self.linear(tilde_h)
    z_pre_norm = self.bn_normalize(z_pre)
    # Add noise
    noise = np.random.normal(loc=0.0, scale=self.noise_level, size=z_pre_norm.size())
    if self.use_cuda:
        noise = Variable(torch.cuda.FloatTensor(noise))
    else:
        noise = Variable(torch.FloatTensor(noise))
    # tilde_z will be used by decoder for reconstruction
    tilde_z = z_pre_norm + noise
    # store tilde_z in buffer
    self.buffer_tilde_z = tilde_z
    z = self.bn_gamma_beta(tilde_z)
    h = self.activation(z)
    return h
def type(self, type=None, tensorCache=None):
    if type is None:
        return self._type
    self._gradBuffer = self._gradBuffer.type(type)
    self.gradInput = self.gradInput.type(type)
    self.output = self.output.type(type)
    # These casts apply when switching between cuda/non-cuda types
    if type != 'torch.cuda.FloatTensor':
        self._maskIndexBuffer = self._maskIndexBuffer.long()
        self._maskIndices = self._maskIndices.long()
        self._gradMask = self._gradMask.byte()
    else:
        self._maskIndexBuffer = self._maskIndexBuffer.cuda()
        self._maskIndices = self._maskIndices.cuda()
        self._gradMask = self._gradMask.cuda()
    self._type = type
    return self
def test_Copy(self):
    input = torch.randn(3, 4).double()
    c = nn.Copy(torch.DoubleTensor, torch.FloatTensor)
    output = c.forward(input)
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    self.assertEqual(output, input.float(), 1e-6)
    gradInput = c.backward(input, output.fill_(1))
    self.assertEqual(torch.typename(gradInput), 'torch.DoubleTensor')
    self.assertEqual(gradInput, output.double(), 1e-6)
    c.dontCast = True
    c.double()
    self.assertEqual(torch.typename(output), 'torch.FloatTensor')
    # Check that these don't raise errors
    c.__repr__()
    str(c)
def test_serialization(self):
    a = [torch.randn(5, 5).float() for i in range(2)]
    b = [a[i % 2] for i in range(4)]
    b += [a[0].storage()]
    b += [a[0].storage()[1:4]]
    for use_name in (False, True):
        with tempfile.NamedTemporaryFile() as f:
            handle = f if not use_name else f.name
            torch.save(b, handle)
            f.seek(0)
            c = torch.load(handle)
            self.assertEqual(b, c, 0)
            self.assertTrue(isinstance(c[0], torch.FloatTensor))
            self.assertTrue(isinstance(c[1], torch.FloatTensor))
            self.assertTrue(isinstance(c[2], torch.FloatTensor))
            self.assertTrue(isinstance(c[3], torch.FloatTensor))
            self.assertTrue(isinstance(c[4], torch.FloatStorage))
            c[0].fill_(10)
            self.assertEqual(c[0], c[2], 0)
            self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), 0)
            c[1].fill_(20)
            self.assertEqual(c[1], c[3], 0)
            # c[5] is the 3-element slice of a[0]'s storage, so compare it
            # against the matching slice of the full storage.
            self.assertEqual(c[4][1:4], c[5], 0)
def test_reduce_scatter(self):
    in_size = 32 * nGPUs
    out_size = 32
    inputs = [torch.FloatTensor(in_size).uniform_() for i in range(nGPUs)]
    expected = torch.FloatTensor(in_size).zero_()
    for t in inputs:
        expected.add_(t)
    expected = expected.view(nGPUs, 32)
    inputs = [inputs[i].cuda(i) for i in range(nGPUs)]
    outputs = [torch.cuda.FloatTensor(out_size, device=i) for i in range(nGPUs)]
    nccl.reduce_scatter(inputs, outputs)
    for i in range(nGPUs):
        self.assertEqual(outputs[i], expected[i])
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
    return torch.stack(imgs, 0), targets
def test(netG, opt):
    assert opt.netG != ''
    test_dir = opt.testdata_dir
    for f in os.listdir(test_dir):
        fname, ext = os.path.splitext(f)
        if ext == '.cmp':
            print(fname)
            cmp_file = os.path.join(test_dir, f)
            ac_data = read_binary_file(cmp_file, dim=47)
            ac_data = torch.FloatTensor(ac_data)
            noise = torch.FloatTensor(ac_data.size(0), nz)
            if opt.cuda:
                ac_data, noise = ac_data.cuda(), noise.cuda()
            ac_data = Variable(ac_data)
            noise = Variable(noise)
            noise.data.normal_(0, 1)
            generated_pulses = netG(noise, ac_data)
            generated_pulses = generated_pulses.data.cpu().numpy()
            generated_pulses = generated_pulses.reshape(ac_data.size(0), -1)
            out_file = os.path.join(test_dir, fname + '.pls')
            with open(out_file, 'wb') as fid:
                generated_pulses.tofile(fid)
def prepare_split(self, X, y, validation_data=None, validation_split=None):
    # Preparing validation data
    assert validation_split or validation_data
    if validation_data is not None:
        trainX, trainy = X, y
        devX, devy = validation_data
    else:
        permutation = np.random.permutation(len(X))
        trainidx = permutation[int(validation_split * len(X)):]
        devidx = permutation[0:int(validation_split * len(X))]
        trainX, trainy = X[trainidx], y[trainidx]
        devX, devy = X[devidx], y[devidx]

    if not self.cudaEfficient:
        trainX = torch.FloatTensor(trainX).cuda()
        trainy = torch.LongTensor(trainy).cuda()
        devX = torch.FloatTensor(devX).cuda()
        devy = torch.LongTensor(devy).cuda()
    else:
        trainX = torch.FloatTensor(trainX)
        trainy = torch.LongTensor(trainy)
        devX = torch.FloatTensor(devX)
        devy = torch.LongTensor(devy)
    return trainX, trainy, devX, devy
def score(self, devX, devy):
    self.model.eval()
    correct = 0
    if not isinstance(devX, torch.cuda.FloatTensor) or self.cudaEfficient:
        devX = torch.FloatTensor(devX).cuda()
        devy = torch.LongTensor(devy).cuda()
    for i in range(0, len(devX), self.batch_size):
        Xbatch = Variable(devX[i:i + self.batch_size], volatile=True)
        ybatch = Variable(devy[i:i + self.batch_size], volatile=True)
        if self.cudaEfficient:
            Xbatch = Xbatch.cuda()
            ybatch = ybatch.cuda()
        output = self.model(Xbatch)
        pred = output.data.max(1)[1]
        correct += pred.long().eq(ybatch.data.long()).sum()
    accuracy = 1.0 * correct / len(devX)
    return accuracy
def test_forward_gives_correct_output(self):
    params = Params({
        'input_dim': 2,
        'hidden_dims': 3,
        'activations': 'relu',
        'num_layers': 2
    })
    feedforward = FeedForward.from_params(params)

    constant_init = lambda tensor: torch.nn.init.constant(tensor, 1.)
    initializer = InitializerApplicator([(".*", constant_init)])
    initializer(feedforward)

    input_tensor = Variable(torch.FloatTensor([[-3, 1]]))
    output = feedforward(input_tensor).data.numpy()
    assert output.shape == (1, 3)
    # This output was checked by hand - ReLU makes output after first hidden layer [0, 0, 0],
    # which then gets a bias added in the second layer to be [1, 1, 1].
    assert_almost_equal(output, [[1, 1, 1]])
def test_no_mask(self):
    attention = Attention()

    # Testing general non-batched case.
    vector = Variable(torch.FloatTensor([[0.3, 0.1, 0.5]]))
    matrix = Variable(torch.FloatTensor([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2]]]))
    result = attention(vector, matrix).data.numpy()
    assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162]]))

    # Testing non-batched case where inputs are all 0s.
    vector = Variable(torch.FloatTensor([[0, 0, 0]]))
    matrix = Variable(torch.FloatTensor([[[0, 0, 0], [0, 0, 0]]]))
    result = attention(vector, matrix).data.numpy()
    assert_almost_equal(result, numpy.array([[0.5, 0.5]]))
def test_batched_masked(self):
    attention = Attention()

    # Testing general masked batched case.
    vector = Variable(torch.FloatTensor([[0.3, 0.1, 0.5], [0.3, 0.1, 0.5]]))
    matrix = Variable(torch.FloatTensor([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
                                         [[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
    mask = Variable(torch.FloatTensor([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]]))
    result = attention(vector, matrix, mask).data.numpy()
    assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162, 0.0],
                                             [0.50749944, 0.0, 0.49250056]]))

    # Test the case where a mask is all 0s and an input is all 0s.
    vector = Variable(torch.FloatTensor([[0.0, 0.0, 0.0], [0.3, 0.1, 0.5]]))
    matrix = Variable(torch.FloatTensor([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
                                         [[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
    mask = Variable(torch.FloatTensor([[1.0, 1.0, 0.0], [0.0, 0.0, 0.0]]))
    result = attention(vector, matrix, mask).data.numpy()
    assert_almost_equal(result, numpy.array([[0.5, 0.5, 0.0], [0.0, 0.0, 0.0]]))
def test_logsumexp(self):
    # First a simple example where we add probabilities in log space.
    tensor = Variable(torch.FloatTensor([[.4, .1, .2]]))
    log_tensor = tensor.log()
    log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=False)
    assert_almost_equal(log_summed.exp().data.numpy(), [.7])
    log_summed = util.logsumexp(log_tensor, dim=-1, keepdim=True)
    assert_almost_equal(log_summed.exp().data.numpy(), [[.7]])

    # Then some more atypical examples, and making sure this will work with how we handle
    # log masks.
    tensor = Variable(torch.FloatTensor([[float('-inf'), 20.0]]))
    assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
    tensor = Variable(torch.FloatTensor([[-200.0, 20.0]]))
    assert_almost_equal(util.logsumexp(tensor).data.numpy(), [20.0])
    tensor = Variable(torch.FloatTensor([[20.0, 20.0], [-200.0, 200.0]]))
    assert_almost_equal(util.logsumexp(tensor, dim=0).data.numpy(), [20.0, 200.0])
def _prune_and_sort_spans(mention_scores: torch.FloatTensor,
                          num_spans_to_keep: int) -> torch.IntTensor:
    """
    The indices of the top-k scoring spans according to span_scores. We return the
    indices in their original order, not ordered by score, so that we can rely on
    the ordering to consider the previous k spans as antecedents for each span later.

    Parameters
    ----------
    mention_scores : ``torch.FloatTensor``, required.
        The mention score for every candidate, with shape (batch_size, num_spans, 1).
    num_spans_to_keep : ``int``, required.
        The number of spans to keep when pruning.

    Returns
    -------
    top_span_indices : ``torch.IntTensor``, required.
        The indices of the top-k scoring spans.
        Has shape (batch_size, num_spans_to_keep).
    """
    # Shape: (batch_size, num_spans_to_keep, 1)
    _, top_span_indices = mention_scores.topk(num_spans_to_keep, 1)
    top_span_indices, _ = torch.sort(top_span_indices, 1)
    # Shape: (batch_size, num_spans_to_keep)
    top_span_indices = top_span_indices.squeeze(-1)
    return top_span_indices
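A toy run of that pruning logic with my own numbers: the top 3 of 5 spans are selected by score, then re-sorted back into document order.

import torch

mention_scores = torch.FloatTensor([[[0.1], [0.9], [0.3], [0.8], [0.2]]])  # (1, 5, 1)
_, top = mention_scores.topk(3, 1)   # best spans by score: indices 1, 3, 2
top, _ = torch.sort(top, 1)          # back to document order: 1, 2, 3
print(top.squeeze(-1))               # shape (1, 3)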
def _read_pretrained_hdf5_format_embedding_file(embeddings_filename: str,  # pylint: disable=invalid-name
                                                embedding_dim: int,
                                                vocab: Vocabulary,
                                                namespace: str = "tokens") -> torch.FloatTensor:
    """
    Reads from a hdf5 formatted file. The embedding matrix is assumed to
    be keyed by 'embedding' and of size ``(num_tokens, embedding_dim)``.
    """
    with h5py.File(embeddings_filename, 'r') as fin:
        embeddings = fin['embedding'][...]

    if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
        raise ConfigurationError(
            "Read shape {0} embeddings from the file, but expected {1}".format(
                list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]))

    return torch.FloatTensor(embeddings)
def __init__(self,
             num_heads: int,
             input_dim: int,
             attention_dim: int,
             values_dim: int,
             output_projection_dim: int = None,
             attention_dropout_prob: float = 0.1) -> None:
    super(MultiHeadSelfAttention, self).__init__()

    self._num_heads = num_heads
    self._input_dim = input_dim
    self._output_dim = output_projection_dim or input_dim
    self._attention_dim = attention_dim
    self._values_dim = values_dim

    self._query_projections = Parameter(torch.FloatTensor(num_heads, input_dim, attention_dim))
    self._key_projections = Parameter(torch.FloatTensor(num_heads, input_dim, attention_dim))
    self._value_projections = Parameter(torch.FloatTensor(num_heads, input_dim, values_dim))

    self._scale = input_dim ** 0.5
    self._output_projection = Linear(num_heads * values_dim, self._output_dim)
    self._attention_dropout = Dropout(attention_dropout_prob)

    self.reset_parameters()
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
    """
    Computes and returns an element-wise dropout mask for a given tensor, where
    each element in the mask is dropped out with probability dropout_probability.
    Note that the mask is NOT applied to the tensor - the tensor is passed to retain
    the correct CUDA tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping a dimension of the input.
    tensor_for_masking : torch.Variable, required.

    Returns
    -------
    A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
    This scaling ensures expected values and variances of the output of applying this mask
    and the original tensor are the same.
    """
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(torch.rand(tensor_for_masking.size()) > dropout_probability)
    # Scale mask by 1/keep_prob to preserve output statistics.
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
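Applying such a mask is a one-liner; a quick sketch against the same old Variable API used above, assuming the function is in scope:

import torch
from torch.autograd import Variable

x = Variable(torch.ones(4, 5))
mask = get_dropout_mask(0.5, x)   # ~half the entries zeroed, the rest scaled by 2
dropped = x * mask                # inverted dropout: E[dropped] equals x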
def logsumexp(tensor: torch.Tensor,
              dim: int = -1,
              keepdim: bool = False) -> torch.Tensor:
    """
    A numerically stable computation of logsumexp. This is mathematically equivalent to
    `tensor.exp().sum(dim, keepdim=keepdim).log()`. This function is typically used for
    summing log probabilities.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A tensor of arbitrary size.
    dim : int, optional (default = -1)
        The dimension of the tensor to apply the logsumexp to.
    keepdim : bool, optional (default = False)
        Whether to retain a dimension of size one at the dimension we reduce over.
    """
    max_score, _ = tensor.max(dim, keepdim=keepdim)
    if keepdim:
        stable_vec = tensor - max_score
    else:
        stable_vec = tensor - max_score.unsqueeze(dim)
    return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
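A quick numeric check of why the max-subtraction matters (my own example; assumes the function above is in scope and a reasonably recent PyTorch):

import torch

t = torch.FloatTensor([[1000.0, 1000.0]])
print(t.exp().sum(-1).log())   # inf: exp(1000) overflows in float32
print(logsumexp(t))            # 1000.6931 (= 1000 + log 2), computed stably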
def accuracy(self, predicted, ground_truth):
    """
    Utility function for calculating the accuracy of the model.

    Params
    ------
    - predicted: (torch.FloatTensor)
    - ground_truth: (torch.LongTensor)

    Returns
    -------
    - acc: (float) % accuracy.
    """
    predicted = torch.max(predicted, 1)[1]
    total = len(ground_truth)
    correct = (predicted == ground_truth).sum()
    acc = 100 * (correct / total)
    return acc
def mlpg(means, variances, windows):
    """Maximum Likelihood Parameter Generation (MLPG).

    The parameters are almost the same as :func:`nnmnkwii.paramgen.mlpg` expects.
    The differences are:

    - The function assumes ``means`` as :obj:`torch.autograd.Variable`
      instead of :obj:`numpy.ndarray`.
    - The function assumes ``variances_frames`` as :obj:`torch.FloatTensor`
      instead of :obj:`numpy.ndarray`.

    Args:
        means (torch.autograd.Variable): Means
        variances (torch.FloatTensor): Variances
        windows (list): A sequence of window specification

    See also:
        :obj:`nnmnkwii.autograd.MLPG`, :func:`nnmnkwii.paramgen.mlpg`
    """
    T, D = means.size()
    if variances.dim() == 1 and variances.shape[0] == D:
        variances = variances.expand(T, D)
    assert means.size() == variances.size()
    return MLPG(variances, windows)(means)
def unit_variance_mlpg(R, means):
    """Special case of MLPG assuming data is normalized to have unit variance.

    Args:
        means (torch.autograd.Variable): Means, of shape (``T x D``) or
          (``T*num_windows x static_dim``). See
          :func:`nnmnkwii.paramgen.reshape_means` to reshape means from
          (``T x D``) to (``T*num_windows x static_dim``).
        R (torch.FloatTensor): MLPG matrix.

    See also:
        :obj:`nnmnkwii.autograd.UnitVarianceMLPG`,
        :func:`nnmnkwii.paramgen.unit_variance_mlpg_matrix`,
        :func:`reshape_means`.
    """
    return UnitVarianceMLPG(R)(means)
def getnotes(self, voice=None):
    # global mat
    if voice is None:
        return [self.getnotes(v) for v in xrange(len(self.s.parts))]
    if self.notes[voice] is None:
        endtimes = self.s.flat.notesAndRests.stream()._uniqueOffsetsAndEndTimes(endTimesOnly=True)
        self.notes[voice] = [None] * len(endtimes)
        notes = list(self.s.parts[voice].flat.notesAndRests)
        j = 0  # index of current note
        curr = 0.0
        for i in xrange(len(endtimes)):
            self.notes[voice][i] = map(lambda k: (isinstance(notes[j], note.Note) and
                                                  k == pitchtoid(notes[j].pitch, self.key)) *
                                                 (endtimes[i] - curr), range(Din))
            # if current note ends here, go to next note
            if endtimes[i] == notes[j].offset + notes[j].quarterLength:
                j += 1
            curr = endtimes[i]
        self.notes[voice] = torch.FloatTensor(self.notes[voice])
        n = self.notes[voice].clone().apply_(lambda n: int(n != 0))
        # mat += n[:-1].t().mm(n[1:])
    return self.notes[voice]
def getchords(self):
    if self.chords is None:
        self.cs = self.s.chordify()
        self.chords = []
        for c in self.cs.flat.notesAndRests:
            self.chords.append(
                map(lambda k: (isinstance(c, chord.Chord) and
                               k == chordtoid(c, self.key)) * float(c.quarterLength),
                    range(Dout)))
        self.chords = torch.FloatTensor(self.chords)
    return self.chords

# for c in sc.cs.flat.notesAndRests:
#     if isinstance(c, note.Note):
#         c = chord.Chord(c)
#     sc.notes.append(map(lambda n: (isinstance(c, chord.Chord) and
#         (n + lstm.pitchtoid(sc.key.tonic)) % 12 in c.normalOrder) * float(c.quarterLength), range(12)))
def __call__(self, x_mu):
    """
    Args:
        x_mu (FloatTensor/LongTensor or ndarray)

    Returns:
        x (FloatTensor or ndarray)
    """
    mu = self.qc - 1.
    if isinstance(x_mu, np.ndarray):
        x = ((x_mu) / mu) * 2 - 1.
        x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
    elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
        if isinstance(x_mu, torch.LongTensor):
            x_mu = x_mu.float()
        mu = torch.FloatTensor([mu])
        x = ((x_mu) / mu) * 2 - 1.
        x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
    return x
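A hypothetical round trip through this expander and the matching encoder from the second example, both assumed to expose a qc (quantization channels) attribute; the class names here are my own placeholders:

import numpy as np

enc, dec = MuLawEncode(), MuLawDecode()   # hypothetical names, e.g. qc = 256
x = np.linspace(-1.0, 1.0, num=5)
x_mu = enc(x)       # integers in [0, qc - 1]
x_hat = dec(x_mu)   # approximately x, up to mu-law quantization error
assert np.abs(x - x_hat).max() < 0.05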
def test_torch(self):
    try:
        import torch
    except ImportError:
        # pass by default if no torch available
        return
    st = SharedTable({'a': torch.FloatTensor([1]), 'b': torch.LongTensor(2)})
    assert st['a'][0] == 1.0
    assert len(st) == 2
    assert 'b' in st
    del st['b']
    assert 'b' not in st
    assert len(st) == 1

    if torch.cuda.is_available():
        st = SharedTable({'a': torch.cuda.FloatTensor([1]), 'b': torch.cuda.LongTensor(2)})
        assert st['a'][0] == 1.0
        assert len(st) == 2
        assert 'b' in st
        del st['b']
        assert 'b' not in st
        assert len(st) == 1
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
    super(MultiHeadAttention, self).__init__()

    self.n_head = n_head
    self.d_k = d_k
    self.d_v = d_v

    self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
    self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
    self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_v))

    self.attention = ScaledDotProductAttention(d_model)
    self.layer_norm = LayerNormalization(d_model)
    self.proj = Linear(n_head * d_v, d_model)
    self.dropout = nn.Dropout(dropout)

    init.xavier_normal(self.w_qs)
    init.xavier_normal(self.w_ks)
    init.xavier_normal(self.w_vs)
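Per-head weights of shape (n_head, d_model, d_k) like w_qs above are typically applied with a single batched matmul; a minimal sketch of that pattern (my own illustration, not this repository's forward pass):

import torch

n_head, d_model, d_k, seq_len = 8, 512, 64, 10
w_qs = torch.randn(n_head, d_model, d_k)
x = torch.randn(seq_len, d_model)
# Repeat the input once per head, then project all heads in one bmm.
q = torch.bmm(x.repeat(n_head, 1, 1), w_qs)
print(q.size())   # (n_head, seq_len, d_k)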
def setUttEncoder(module):  # set utterance encoder to the module
    if SharedModel.args.utt_enc_noise == True:
        module.uttEncNoise = Variable(torch.FloatTensor(), volatile=True)
        if SharedModel.args.no_cuda == False:
            module.uttEncNoise = module.uttEncNoise.cuda()

    if SharedModel.args.utt_enc_type >= 2:
        module.uttEncoder = nn.ModuleList()
        for i in [int(x) for x in SharedModel.args.conv_filters.split('_')]:
            module.uttEncoder.append(
                nn.Conv1d(2 * SharedModel.args.hid_dim * (2 if SharedModel.args.attn == 2 else 1),
                          SharedModel.args.conv_out_dim, i, 1, int(math.ceil((i - 1) / 2))))

    if SharedModel.args.utt_enc_bn == True:
        uttEncOutSize = 2 * SharedModel.args.hid_dim
        if SharedModel.args.utt_enc_type >= 2:
            uttEncOutSize = 3 * SharedModel.args.conv_out_dim
        elif SharedModel.args.attn == 2:
            uttEncOutSize = 4 * SharedModel.args.hid_dim
        module.uttBn = nn.BatchNorm1d(uttEncOutSize)
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    image_ids = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
        image_ids.append(sample[2])
    return torch.stack(imgs, 0), targets, image_ids
def __init__(self, num_features, max_length, eps=1e-5, momentum=0.1, affine=True):
    """
    Most parts are copied from torch.nn.modules.batchnorm._BatchNorm.
    """
    super(SeparatedBatchNorm1d, self).__init__()
    self.num_features = num_features
    self.max_length = max_length
    self.affine = affine
    self.eps = eps
    self.momentum = momentum
    if self.affine:
        self.weight = nn.Parameter(torch.FloatTensor(num_features))
        self.bias = nn.Parameter(torch.FloatTensor(num_features))
    else:
        self.register_parameter('weight', None)
        self.register_parameter('bias', None)
    for i in range(max_length):
        self.register_buffer(
            'running_mean_{}'.format(i), torch.zeros(num_features))
        self.register_buffer(
            'running_var_{}'.format(i), torch.ones(num_features))
    self.reset_parameters()
def __init__(self, input_size, hidden_size, use_bias=True):
    """
    Most parts are copied from torch.nn.LSTMCell.
    """
    super(LSTMCell, self).__init__()
    self.input_size = input_size
    self.hidden_size = hidden_size
    self.use_bias = use_bias
    self.weight_ih = nn.Parameter(
        torch.FloatTensor(input_size, 4 * hidden_size))
    self.weight_hh = nn.Parameter(
        torch.FloatTensor(hidden_size, 4 * hidden_size))
    if use_bias:
        self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
    else:
        self.register_parameter('bias', None)
    self.reset_parameters()
def __init__(self, input_size, hidden_size, max_length, use_bias=True):
    super(BNLSTMCell, self).__init__()
    self.input_size = input_size
    self.hidden_size = hidden_size
    self.max_length = max_length
    self.use_bias = use_bias
    self.weight_ih = nn.Parameter(
        torch.FloatTensor(input_size, 4 * hidden_size))
    self.weight_hh = nn.Parameter(
        torch.FloatTensor(hidden_size, 4 * hidden_size))
    if use_bias:
        self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
    else:
        self.register_parameter('bias', None)
    # BN parameters
    self.bn_ih = SeparatedBatchNorm1d(
        num_features=4 * hidden_size, max_length=max_length)
    self.bn_hh = SeparatedBatchNorm1d(
        num_features=4 * hidden_size, max_length=max_length)
    self.bn_c = SeparatedBatchNorm1d(
        num_features=hidden_size, max_length=max_length)
    self.reset_parameters()
def __getitem__(self, index):
    support_set_x = torch.FloatTensor(self.n_samples, 3, 84, 84)
    support_set_y = np.zeros((self.n_samples), dtype=np.int)
    target_x = torch.FloatTensor(self.n_samples_eval, 3, 84, 84)
    target_y = np.zeros((self.n_samples_eval), dtype=np.int)

    flatten_support_set_x_batch = [os.path.join(self.miniImagenetImagesDir, item)
                                   for sublist in self.support_set_x_batch[index]
                                   for item in sublist]
    support_set_y = np.array([self.classes_dict[item[:9]]
                              for sublist in self.support_set_x_batch[index]
                              for item in sublist])
    flatten_target_x = [os.path.join(self.miniImagenetImagesDir, item)
                        for sublist in self.target_x_batch[index]
                        for item in sublist]
    target_y = np.array([self.classes_dict[item[:9]]
                         for sublist in self.target_x_batch[index]
                         for item in sublist])

    for i, path in enumerate(flatten_support_set_x_batch):
        if self.transform is not None:
            support_set_x[i] = self.transform(path)
    for i, path in enumerate(flatten_target_x):
        if self.transform is not None:
            target_x[i] = self.transform(path)
    return support_set_x, torch.IntTensor(support_set_y), target_x, torch.IntTensor(target_y)
def forward(self, pos_u, pos_v, neg_u, neg_v):
    losses = []
    emb_u = []
    for i in range(len(pos_u)):
        emb_ui = self.u_embeddings(Variable(torch.LongTensor(pos_u[i])))
        emb_u.append(np.sum(emb_ui.data.numpy(), axis=0).tolist())
    emb_u = Variable(torch.FloatTensor(emb_u))
    emb_v = self.v_embeddings(Variable(torch.LongTensor(pos_v)))
    score = torch.mul(emb_u, emb_v)
    score = torch.sum(score, dim=1)
    score = F.logsigmoid(score)
    losses.append(sum(score))

    neg_emb_u = []
    for i in range(len(neg_u)):
        neg_emb_ui = self.u_embeddings(Variable(torch.LongTensor(neg_u[i])))
        neg_emb_u.append(np.sum(neg_emb_ui.data.numpy(), axis=0).tolist())
    neg_emb_u = Variable(torch.FloatTensor(neg_emb_u))
    neg_emb_v = self.v_embeddings(Variable(torch.LongTensor(neg_v)))
    neg_score = torch.mul(neg_emb_u, neg_emb_v)
    neg_score = torch.sum(neg_score, dim=1)
    neg_score = F.logsigmoid(-1 * neg_score)
    losses.append(sum(neg_score))
    return -1 * sum(losses)
def prepare_message(self, target_features, source_features, select_mat, gate_module):
    feature_data = []
    transfer_list = np.where(select_mat > 0)
    source_indices = Variable(torch.from_numpy(transfer_list[1]).type(torch.LongTensor)).cuda()
    target_indices = Variable(torch.from_numpy(transfer_list[0]).type(torch.LongTensor)).cuda()
    source_f = torch.index_select(source_features, 0, source_indices)
    target_f = torch.index_select(target_features, 0, target_indices)
    transferred_features = gate_module(target_f, source_f)

    for f_id in range(target_features.size()[0]):
        if len(np.where(select_mat[f_id, :] > 0)[0]) > 0:
            feature_indices = np.where(transfer_list[0] == f_id)[0]
            indices = Variable(torch.from_numpy(feature_indices).type(torch.LongTensor)).cuda()
            features = torch.index_select(transferred_features, 0, indices).mean(0).view(-1)
            feature_data.append(features)
        else:
            temp = Variable(torch.zeros(target_features.size()[1:]),
                            requires_grad=True).type(torch.FloatTensor).cuda()
            feature_data.append(temp)
    return torch.stack(feature_data, 0)
def value(self):
    """Returns the model's average precision for each class

    Return:
        ap (FloatTensor): 1xK tensor, with avg precision for each class k
    """
    if self.scores.numel() == 0:
        return 0
    ap = torch.zeros(self.scores.size(1))
    rg = torch.arange(1, self.scores.size(0)).float()
    # compute average precision for each class
    for k in range(self.scores.size(1)):
        # sort scores
        scores = self.scores[:, k]
        targets = self.targets[:, k]
        # compute average precision
        ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
    return ap
def MAP(ground_label: torch.FloatTensor, predict_label: torch.FloatTensor):
    map = 0
    map_idx = 0
    extracted = {}
    for idx_, glab in enumerate(ground_label):
        if ground_label[idx_] != 0:
            extracted[idx_] = 1
    val, key = torch.sort(predict_label, 0, True)
    for i, idx_ in enumerate(key):
        if idx_ in extracted:
            map_idx += 1
            map += map_idx / (i + 1)
    assert (map_idx != 0)
    map = map / map_idx
    return map
def MRR(ground_label: torch.FloatTensor, predict_label: torch.FloatTensor):
    mrr = 0
    map_idx = 0
    extracted = {}
    for idx_, glab in enumerate(ground_label):
        if ground_label[idx_] != 0:
            extracted[idx_] = 1
    val, key = torch.sort(predict_label, 0, True)
    for i, idx_ in enumerate(key):
        if idx_ in extracted:
            mrr = 1.0 / (i + 1)
            break
    assert (mrr != 0)
    return mrr
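A toy check of both metrics (my own numbers, written against older PyTorch, where iterating a 1-D tensor yields plain Python numbers, which the membership test `idx_ in extracted` relies on):

import torch

relevance = torch.FloatTensor([1, 0, 1, 0])      # items 0 and 2 are relevant
scores = torch.FloatTensor([0.9, 0.8, 0.7, 0.1])
# Ranking by score is 0, 1, 2, 3, so hits land at ranks 1 and 3:
# MAP = (1/1 + 2/3) / 2 = 0.8333..., MRR = 1/1 = 1.0
print(MAP(relevance, scores), MRR(relevance, scores))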
def uniform_weights(n_sample):
    """Return uniform weights (almost for debug).

    EXAMPLE
    -------
    >>> weights = uniform_weights(3)
    >>> print(weights)
    <BLANKLINE>
     0.3333
     0.3333
     0.3333
    [torch.FloatTensor of size 3]
    <BLANKLINE>

    :return:
    """
    weights = torch.ones(n_sample)
    return weights / weights.sum()
def __init__(self, in_channels, out_channels, kernel_size, bias=True):
    super().__init__()
    self.conv_t = nn.ConvTranspose1d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=kernel_size,
        bias=False)
    if bias:
        self.bias = nn.Parameter(
            torch.FloatTensor(out_channels, kernel_size))
    else:
        self.register_parameter('bias', None)
    self.reset_parameters()
def set_cuda(cuda, device=0):
    global cuda_enabled
    global cuda_device
    global Tensor
    global beta_integration_domain
    if torch.cuda.is_available() and cuda:
        cuda_enabled = True
        cuda_device = device
        torch.cuda.set_device(device)
        torch.backends.cudnn.enabled = True
        Tensor = torch.cuda.FloatTensor
        beta_integration_domain = beta_integration_domain.cuda()
    else:
        cuda_enabled = False
        Tensor = torch.FloatTensor
        beta_integration_domain = beta_integration_domain.cpu()
def checkOneHot(self):
    v = torch.LongTensor([1, 2, 1, 2, 0])
    v_length = torch.LongTensor([2, 3])
    v_onehot = utils.oneHot(v, v_length, 4)
    target = torch.FloatTensor([[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
                                [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]])
    assert target.equal(v_onehot)
def oneHot(v, v_length, nc):
    batchSize = v_length.size(0)
    maxLength = v_length.max()
    v_onehot = torch.FloatTensor(batchSize, maxLength, nc).fill_(0)
    acc = 0
    for i in range(batchSize):
        length = v_length[i]
        label = v[acc:acc + length].view(-1, 1).long()
        v_onehot[i, :length].scatter_(1, label, 1.0)
        acc += length
    return v_onehot
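Mirroring checkOneHot above (again under old-PyTorch semantics, where v_length.max() and v_length[i] come back as Python ints):

import torch

v = torch.LongTensor([1, 2, 1, 2, 0])   # two label sequences, concatenated
v_length = torch.LongTensor([2, 3])
print(oneHot(v, v_length, 4))           # FloatTensor of shape (2, 3, 4)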
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle=True):
    tokens = np.asarray(tokens)[np.where(labels != 0.5)[0]]
    if type(features) is np.ndarray:
        features = np.asarray(features)[np.where(labels != 0.5)[0]]
    else:
        features = np.asarray(features.todense())[np.where(labels != 0.5)[0]]
    labels = np.asarray(labels)[np.where(labels != 0.5)[0]]
    # print tokens.shape
    # print tokens[0]
    for token, feature, label in iterate_minibatches(tokens, features, labels,
                                                     mini_batch_size, shuffle=shuffle):
        # print 'token', type(token)
        # print token
        token = [_ for _ in pad_batch(token)]
        # print len(token), token[0].size(), token[1].size()
        yield token, Variable(torch.from_numpy(feature)), \
            Variable(torch.FloatTensor(label), requires_grad=False)