The following 34 code examples, extracted from open-source Python projects, illustrate how to use torch.gt().
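As a baseline before the project code: torch.gt(input, other) compares elementwise and returns a mask tensor (bool in current PyTorch; the 0.x releases most of these examples target returned uint8). A minimal sketch:

import torch

x = torch.tensor([0.2, 0.7, 1.5])
mask = torch.gt(x, 0.5)   # elementwise x > 0.5
print(mask)               # tensor([False,  True,  True])
print(x[mask])            # tensor([0.7000, 1.5000]) -- masks double as boolean indexes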
def preProc2(x):
    # Access the global variables
    global P, expP, negExpP
    P = P.type_as(x)
    expP = expP.type_as(x)
    negExpP = negExpP.type_as(x)
    # Create a variable of zeros (both branches are filled in below)
    z = Variable(torch.zeros(x.size())).type_as(x)
    absX = torch.abs(x)
    cond1 = torch.gt(absX, negExpP)
    cond2 = torch.le(absX, negExpP)
    if (torch.sum(cond1) > 0).data.all():
        x1 = torch.sign(x[cond1])
        z[cond1] = x1
    if (torch.sum(cond2) > 0).data.all():
        x2 = x[cond2] * expP
        z[cond2] = x2
    return z
def test_accuracy_mini_batch(tokens, features, labels, word_attn, sent_attn):
    y_pred = get_predictions(tokens, features, word_attn, sent_attn)
    y_pred = torch.gt(y_pred, 0.5)
    correct = np.ndarray.flatten(y_pred.data.cpu().numpy())
    labels = torch.gt(labels, 0.5)
    labels = np.ndarray.flatten(labels.data.cpu().numpy())
    num_correct = sum(correct == labels)
    return float(num_correct) / len(correct)
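get_predictions above is project code, but the thresholding idiom is easy to isolate: torch.gt turns probabilities into hard 0/1 predictions, which are then compared against the thresholded labels. A self-contained sketch with invented values:

import torch

probs  = torch.tensor([0.9, 0.3, 0.6, 0.1])
labels = torch.tensor([1.0, 0.0, 0.0, 0.0])

pred = torch.gt(probs, 0.5)    # tensor([ True, False,  True, False])
true = torch.gt(labels, 0.5)   # tensor([ True, False, False, False])
print((pred == true).float().mean().item())   # 0.75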
def backward(self, grad_output):
    input, target = self.saved_tensors
    grad_input = input.new().resize_as_(input).copy_(target)
    grad_input[torch.mul(torch.eq(target, -1), torch.gt(input, self.margin))] = 0
    if self.size_average:
        grad_input.mul_(1. / input.nelement())
    if grad_output[0] != 1:
        grad_input.mul_(grad_output[0])
    return grad_input, None
def updateGradInput(self, input, y):
    self.gradInput.resize_as_(input).copy_(y)
    self.gradInput[torch.mul(torch.eq(y, -1), torch.gt(input, self.margin))] = 0
    if self.sizeAverage:
        self.gradInput.mul_(1. / input.nelement())
    return self.gradInput
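Both gradient routines above share one idiom: the gradient starts as a copy of the ±1 target and is zeroed where the target is -1 and the input already exceeds the margin, i.e. where the hinge term max(0, margin - x) is flat. A standalone sketch with invented values, using & where the legacy code multiplied byte masks:

import torch

margin = 1.0
input  = torch.tensor([0.5,  2.0, -0.3,  1.5])
target = torch.tensor([1.0, -1.0, -1.0, -1.0])

grad = target.clone()
flat = torch.eq(target, -1) & torch.gt(input, margin)   # hinge is flat here
grad[flat] = 0
print(grad)   # tensor([ 1.,  0., -1.,  0.])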
def test_renorm(self):
    m1 = torch.randn(10, 5)
    res1 = torch.Tensor()

    def renorm(matrix, value, dim, max_norm):
        m1 = matrix.transpose(dim, 0).contiguous()
        # collapse non-dim dimensions.
        m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
        norms = m2.norm(value, 1)
        # clip
        new_norms = norms.clone()
        new_norms[torch.gt(norms, max_norm)] = max_norm
        new_norms.div_(norms.add_(1e-7))
        # renormalize
        m1.mul_(new_norms.expand_as(m1))
        return m1.transpose(dim, 0)

    # note that the axis fed to torch.renorm is different (2~=1)
    maxnorm = m1.norm(2, 1).mean()
    m2 = renorm(m1, 2, 1, maxnorm)
    m1.renorm_(2, 1, maxnorm)
    self.assertEqual(m1, m2, 1e-5)
    self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), 1e-5)

    m1 = torch.randn(3, 4, 5)
    m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
    maxnorm = m2.norm(2, 0).mean()
    m2 = renorm(m2, 2, 1, maxnorm)
    m1.renorm_(2, 1, maxnorm)
    m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
    self.assertEqual(m3, m2)
    self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
def test_logical(self):
    x = torch.rand(100, 100) * 2 - 1
    xx = x.clone()

    xgt = torch.gt(x, 1)
    xlt = torch.lt(x, 1)
    xeq = torch.eq(x, 1)
    xne = torch.ne(x, 1)

    neqs = xgt + xlt
    all = neqs + xeq
    self.assertEqual(neqs.sum(), xne.sum(), 0)
    self.assertEqual(x.nelement(), all.sum())
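The test relies on comparison results being summable masks (uint8 back then, bool today, so an explicit cast is needed before adding). The partition argument itself still holds: every element satisfies exactly one of gt, lt, eq, and ne is the union of gt and lt. A present-day sketch:

import torch

x = torch.tensor([-0.5, 1.0, 2.0])
gt, lt, eq = torch.gt(x, 1), torch.lt(x, 1), torch.eq(x, 1)

print(gt.long() + lt.long() + eq.long())     # tensor([1, 1, 1]) -- exactly one bucket each
print(torch.equal(torch.ne(x, 1), gt | lt))  # True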
def forward(self, enc_outputs, enc_input, dec_input, dec_pos):
    dec_output = self.dec_ebd(dec_input) + self.pos_ebd(dec_pos)
    dec_slf_attn_mask = torch.gt(
        get_attn_padding_mask(dec_input, dec_input) +
        get_attn_subsequent_mask(dec_input), 0)
    dec_enc_attn_pad_mask = get_attn_padding_mask(dec_input, enc_input)

    for layer, enc_output in zip(self.decodes, enc_outputs):
        dec_output = layer(dec_output, enc_output,
                           dec_slf_attn_mask, dec_enc_attn_pad_mask)
    return dec_output
def forward(self, tgt_seq, tgt_pos, src_seq, enc_output, return_attns=False):
    # Word embedding look up
    dec_input = self.tgt_word_emb(tgt_seq)

    # Position Encoding addition
    dec_input += self.position_enc(tgt_pos)

    # Decode
    dec_slf_attn_pad_mask = get_attn_padding_mask(tgt_seq, tgt_seq)
    dec_slf_attn_sub_mask = get_attn_subsequent_mask(tgt_seq)
    dec_slf_attn_mask = torch.gt(dec_slf_attn_pad_mask + dec_slf_attn_sub_mask, 0)

    dec_enc_attn_pad_mask = get_attn_padding_mask(tgt_seq, src_seq)

    if return_attns:
        dec_slf_attns, dec_enc_attns = [], []

    dec_output = dec_input
    for dec_layer in self.layer_stack:
        dec_output, dec_slf_attn, dec_enc_attn = dec_layer(
            dec_output, enc_output,
            slf_attn_mask=dec_slf_attn_mask,
            dec_enc_attn_mask=dec_enc_attn_pad_mask)

        if return_attns:
            dec_slf_attns += [dec_slf_attn]
            dec_enc_attns += [dec_enc_attn]

    if return_attns:
        return dec_output, dec_slf_attns, dec_enc_attns
    else:
        return dec_output,
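In both decoder snippets, torch.gt(pad_mask + subsequent_mask, 0) is simply a logical OR of two 0/1 masks: a position is blocked if either the padding mask or the causal mask blocks it. A minimal sketch with hand-built masks standing in for the project's get_attn_* helpers:

import torch

seq_len = 4
# causal mask: 1 above the diagonal (future positions are blocked)
sub_mask = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.uint8), diagonal=1)
# padding mask: pretend the last position is padding
pad_mask = torch.zeros(seq_len, seq_len, dtype=torch.uint8)
pad_mask[:, -1] = 1

attn_mask = torch.gt(pad_mask + sub_mask, 0)   # blocked if either mask says so
print(attn_mask)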
def preProc1(x):
    # Access the global variables
    global P, expP, negExpP
    P = P.type_as(x)
    expP = expP.type_as(x)
    negExpP = negExpP.type_as(x)
    # Create a variable filled with -1. Second part of the condition
    z = Variable(torch.zeros(x.size()).fill_(-1)).type_as(x)
    absX = torch.abs(x)
    cond1 = torch.gt(absX, negExpP)
    if (torch.sum(cond1) > 0).data.all():
        x1 = torch.log(torch.abs(x[cond1])) / P
        z[cond1] = x1
    return z
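preProc1 and preProc2 appear to implement the two components of the gradient preprocessing from Andrychowicz et al. (2016), "Learning to learn by gradient descent by gradient descent"; that attribution is a reading of the code, not stated in the source. With expP = e^p and negExpP = e^{-p}, the pair computes

\[
\mathrm{preproc}(x) =
\begin{cases}
\left(\dfrac{\log|x|}{p},\ \operatorname{sgn}(x)\right) & \text{if } |x| > e^{-p},\\
\left(-1,\ e^{p}x\right) & \text{otherwise,}
\end{cases}
\]

where preProc1 returns the first component (defaulting to -1) and preProc2 the second (its torch.gt/torch.le pair selects between the two branches).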
def test_comparison_ops(self):
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)

    eq = x == y
    for idx in iter_indices(x):
        self.assertIs(x[idx] == y[idx], eq[idx] == 1)

    ne = x != y
    for idx in iter_indices(x):
        self.assertIs(x[idx] != y[idx], ne[idx] == 1)

    lt = x < y
    for idx in iter_indices(x):
        self.assertIs(x[idx] < y[idx], lt[idx] == 1)

    le = x <= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] <= y[idx], le[idx] == 1)

    gt = x > y
    for idx in iter_indices(x):
        self.assertIs(x[idx] > y[idx], gt[idx] == 1)

    ge = x >= y
    for idx in iter_indices(x):
        self.assertIs(x[idx] >= y[idx], ge[idx] == 1)
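The operators exercised here are sugar for the functional forms: x > y dispatches to the same comparison as torch.gt(x, y), and likewise for ge, lt, le, eq, ne. A quick check:

import torch

x = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([2.0, 2.0, 2.0])

print(torch.equal(x > y, torch.gt(x, y)))    # True
print(torch.equal(x >= y, torch.ge(x, y)))   # True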
def test_renorm(self):
    m1 = torch.randn(10, 5)
    res1 = torch.Tensor()

    def renorm(matrix, value, dim, max_norm):
        m1 = matrix.transpose(dim, 0).contiguous()
        # collapse non-dim dimensions.
        m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
        norms = m2.norm(value, 1, True)
        # clip
        new_norms = norms.clone()
        new_norms[torch.gt(norms, max_norm)] = max_norm
        new_norms.div_(norms.add_(1e-7))
        # renormalize
        m1.mul_(new_norms.expand_as(m1))
        return m1.transpose(dim, 0)

    # note that the axis fed to torch.renorm is different (2~=1)
    maxnorm = m1.norm(2, 1).mean()
    m2 = renorm(m1, 2, 1, maxnorm)
    m1.renorm_(2, 1, maxnorm)
    self.assertEqual(m1, m2, 1e-5)
    self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), 1e-5)

    m1 = torch.randn(3, 4, 5)
    m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
    maxnorm = m2.norm(2, 0).mean()
    m2 = renorm(m2, 2, 1, maxnorm)
    m1.renorm_(2, 1, maxnorm)
    m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
    self.assertEqual(m3, m2)
    self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
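In the renorm tests, the torch.gt mask clips the per-slice norms at max_norm before the rescaling factors are computed. The same clipping can be written with clamp; a small sketch with arbitrary values:

import torch

norms = torch.tensor([0.5, 3.0, 1.2])
max_norm = 1.0

clipped = norms.clone()
clipped[torch.gt(norms, max_norm)] = max_norm            # mask-based, as in the tests
print(torch.equal(clipped, norms.clamp(max=max_norm)))   # True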
def forward(self, input, target):
    # input = input.view(-1)
    # assert input.dim() == target.dim()
    for i in range(input.dim()):
        assert input.size(i) == target.size(i)
    #
    output = self.margin - torch.mul(target, input)
    #
    if 'cuda' in input.data.type():
        mask = torch.cuda.FloatTensor(input.size()).zero_()
    else:
        mask = torch.FloatTensor(input.size()).zero_()
    mask = Variable(mask)
    mask[torch.gt(output, 0.0)] = 1.0
    #
    output = torch.mul(output, mask)
    # size average
    if self.size_average:
        output = torch.mul(output, 1.0 / input.nelement())
    # sum
    output = output.sum()
    # apply sign
    output = torch.mul(output, self.sign)
    return output
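Here torch.gt(output, 0.0) selects the margin-violating terms; multiplying by the resulting 0/1 mask is equivalent to clamping the hinge at zero. A minimal check with invented values:

import torch

margin = 1.0
input  = torch.tensor([0.3, 2.0, -1.0])
target = torch.tensor([1.0, 1.0,  1.0])

hinge = margin - target * input
mask = torch.gt(hinge, 0.0).float()
print(torch.equal(hinge * mask, hinge.clamp(min=0)))   # True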
def forward(ctx, input, target, grad_output, margin, size_average):
    ctx.margin = margin
    ctx.size_average = size_average
    ctx.save_for_backward(input, target, grad_output)
    grad_input = input.new().resize_as_(input).copy_(target)
    grad_input[torch.mul(torch.eq(target, -1), torch.gt(input, ctx.margin))] = 0
    if ctx.size_average:
        grad_input.mul_(1. / input.nelement())
    if grad_output[0] != 1:
        grad_input.mul_(grad_output[0])
    return grad_input
def greater(x: T.FloatTensor, y: T.FloatTensor) -> T.ByteTensor:
    """
    Elementwise test if x > y.

    Args:
        x: A tensor.
        y: A tensor.

    Returns:
        tensor (of bools): Elementwise test of x > y.

    """
    return torch.gt(x, y)
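Like the other binary comparison ops, torch.gt broadcasts its arguments, so a wrapper like the one above also accepts tensors of compatible but different shapes:

import torch

row = torch.tensor([1.0, 2.0, 3.0])
col = torch.tensor([[1.0], [2.0]])
print(torch.gt(row, col))
# tensor([[False,  True,  True],
#         [False, False,  True]])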
def forward(self, input, context, src_pad_mask, tgt_pad_mask):
    # Args Checks
    input_batch, input_len, _ = input.size()
    contxt_batch, contxt_len, _ = context.size()
    aeq(input_batch, contxt_batch)

    src_batch, t_len, s_len = src_pad_mask.size()
    tgt_batch, t_len_, t_len__ = tgt_pad_mask.size()
    aeq(input_batch, contxt_batch, src_batch, tgt_batch)
    aeq(t_len, t_len_, t_len__, input_len)
    aeq(s_len, contxt_len)
    # END Args Checks

    dec_mask = torch.gt(tgt_pad_mask
                        + self.mask[:, :tgt_pad_mask.size(1), :tgt_pad_mask.size(1)]
                          .expand_as(tgt_pad_mask), 0)
    input_norm = self.layer_norm_1(input)
    query, attn = self.self_attn(input_norm, input_norm, input_norm,
                                 mask=dec_mask)
    query_norm = self.layer_norm_2(query + input)
    mid, attn = self.context_attn(context, context, query_norm,
                                  mask=src_pad_mask)
    output = self.feed_forward(mid + query + input)

    # CHECKS
    output_batch, output_len, _ = output.size()
    aeq(input_len, output_len)
    aeq(contxt_batch, output_batch)

    n_batch_, t_len_, s_len_ = attn.size()
    aeq(input_batch, n_batch_)
    aeq(contxt_len, s_len_)
    aeq(input_len, t_len_)
    # END CHECKS

    return output, attn
def reward(sample_solution, USE_CUDA=False):
    """
    The reward for the sorting task is defined as the
    length of the longest sorted consecutive subsequence.

    Input sequences must all be the same length.

    Example:

    input       | output
    ====================
    [1 4 3 5 2] | [5 1 2 3 4]

    The output gets a reward of 4/5, or 0.8

    The range is [1/sourceL, 1]

    Args:
        sample_solution: list of len sourceL of [batch_size] Tensors
    Returns:
        [batch_size] containing trajectory rewards
    """
    batch_size = sample_solution[0].size(0)
    sourceL = len(sample_solution)

    longest = Variable(torch.ones(batch_size), requires_grad=False)
    current = Variable(torch.ones(batch_size), requires_grad=False)

    if USE_CUDA:
        longest = longest.cuda()
        current = current.cuda()

    for i in range(1, sourceL):
        # compare solution[i-1] < solution[i]
        res = torch.lt(sample_solution[i-1], sample_solution[i])
        # if res[i,j] == 1, increment length of current sorted subsequence
        current += res.float()
        # else, reset current to 1
        current[torch.eq(res, 0)] = 1
        # current[torch.eq(res, 0)] -= 1
        # if, for any, current > longest, update longest
        mask = torch.gt(current, longest)
        longest[mask] = current[mask]
    return -torch.div(longest, sourceL)
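A quick check of the running-maximum trick with plain tensors (Variable is a no-op wrapper in modern PyTorch, so the loop body carries over directly), reproducing the docstring's [5 1 2 3 4] example for a batch of one:

import torch

steps = [torch.tensor([5.0]), torch.tensor([1.0]), torch.tensor([2.0]),
         torch.tensor([3.0]), torch.tensor([4.0])]

longest = torch.ones(1)
current = torch.ones(1)
for i in range(1, len(steps)):
    res = torch.lt(steps[i - 1], steps[i])   # did the sequence keep ascending?
    current += res.float()
    current[torch.eq(res, 0)] = 1            # reset broken runs
    mask = torch.gt(current, longest)        # extend the running maximum
    longest[mask] = current[mask]

print(longest / len(steps))   # tensor([0.8000]), negated by reward() for minimization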