We have extracted the following 27 code examples from open-source Python projects to illustrate how to use torch.nonzero().
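Before the project examples, here is a minimal, self-contained sketch of what torch.nonzero() returns; the tensor values are made up for illustration and do not come from any of the projects below. The call produces a LongTensor of shape (number of non-zero elements, input dimensions), with one row of indices per non-zero element.

import torch

# Minimal sketch of torch.nonzero() semantics (illustrative values only).
t = torch.Tensor([[0, 1],
                  [2, 0]])

idx = torch.nonzero(t)  # LongTensor of shape (num_nonzero, t.dim())
# Here idx == [[0, 1],
#              [1, 0]]: one row of (row, column) indices per non-zero element.

# Each row of idx addresses one non-zero element of t:
for row in idx:
    assert t[row[0], row[1]] != 0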
def m_ggnn(self, h_v, h_w, e_vw, opt={}):
    m = Variable(torch.zeros(h_w.size(0), h_w.size(1), self.args['out']).type_as(h_w.data))

    for w in range(h_w.size(1)):
        # numel() is 0 when there are no non-zero edge features for this neighbor
        if torch.nonzero(e_vw[:, w, :].data).numel():
            for i, el in enumerate(self.args['e_label']):
                ind = (el == e_vw[:, w, :]).type_as(self.learn_args[0][i])
                parameter_mat = self.learn_args[0][i][None, ...].expand(h_w.size(0),
                                                                        self.learn_args[0][i].size(0),
                                                                        self.learn_args[0][i].size(1))
                m_w = torch.transpose(torch.bmm(torch.transpose(parameter_mat, 1, 2),
                                                torch.transpose(torch.unsqueeze(h_w[:, w, :], 1), 1, 2)), 1, 2)
                m_w = torch.squeeze(m_w)
                m[:, w, :] = ind.expand_as(m_w) * m_w
    return m
def compute_loss(self, batch, output, target):
    """ See base class for args description. """
    scores = self.generator(self.bottle(output))

    gtruth = target.view(-1)
    if self.confidence < 1:
        tdata = gtruth.data
        mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()
        likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1))
        tmp_ = self.one_hot.repeat(gtruth.size(0), 1)
        tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)
        if mask.dim() > 0:
            likelihood.index_fill_(0, mask, 0)
            tmp_.index_fill_(0, mask, 0)
        gtruth = Variable(tmp_, requires_grad=False)

    loss = self.criterion(scores, gtruth)
    if self.confidence < 1:
        loss_data = -likelihood.sum(0)
    else:
        loss_data = loss.data.clone()

    stats = self.stats(loss_data, scores.data, target.view(-1).data)
    return loss, stats
def test_has_storage(self):
    self.assertIsNotNone(torch.Tensor().storage())
    self.assertIsNotNone(torch.Tensor(0).storage())
    self.assertIsNotNone(torch.Tensor([]).storage())
    self.assertIsNotNone(torch.Tensor().clone().storage())
    self.assertIsNotNone(torch.Tensor([0, 0, 0]).nonzero().storage())
def _find_support(B, ns, supp_thresh):
    """Find features with non-zero coefficients."""
    try:
        support = (B.norm(p=2, dim=1) >= supp_thresh).expand_as(B)
        support = torch.cat([s_j[:n_j] for s_j, n_j in zip(support, ns)])
        return torch.nonzero(support)[:, 0]
    except IndexError:
        return None
def test_dirac_properties(self):
    for as_variable in [True, False]:
        for dims in [3, 4, 5]:
            input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5, as_variable=as_variable)
            init.dirac(input_tensor)
            if as_variable:
                input_tensor = input_tensor.data

            c_out, c_in = input_tensor.size(0), input_tensor.size(1)
            min_d = min(c_out, c_in)
            # Check that the number of non-zeros equals the smallest dimension
            assert torch.nonzero(input_tensor).size(0) == min_d
            # Check that the sum of values also equals it (assertEqual is used
            # because of possible precision issues)
            self.assertEqual(input_tensor.sum(), min_d)
def test_dirac_identity(self):
    batch, in_c, out_c, size, kernel_size = 8, 3, 4, 5, 3

    # Test 1D
    input_var = Variable(torch.randn(batch, in_c, size))
    filter_var = Variable(torch.zeros(out_c, in_c, kernel_size))
    init.dirac(filter_var)
    output_var = F.conv1d(input_var, filter_var)
    input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
    self.assertEqual(input_tensor[:, :, 1:-1], output_tensor[:, :in_c, :])  # Assert in_c outputs are preserved
    assert torch.nonzero(output_tensor[:, in_c:, :]).numel() == 0  # Assert extra outputs are 0

    # Test 2D
    input_var = Variable(torch.randn(batch, in_c, size, size))
    filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size))
    init.dirac(filter_var)
    output_var = F.conv2d(input_var, filter_var)
    input_tensor, output_tensor = input_var.data, output_var.data
    self.assertEqual(input_tensor[:, :, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
    assert torch.nonzero(output_tensor[:, in_c:, :, :]).numel() == 0

    # Test 3D
    input_var = Variable(torch.randn(batch, in_c, size, size, size))
    filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size, kernel_size))
    init.dirac(filter_var)
    output_var = F.conv3d(input_var, filter_var)
    input_tensor, output_tensor = input_var.data, output_var.data
    self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
    assert torch.nonzero(output_tensor[:, in_c:, :, :, :]).numel() == 0
def test_nonzero(self):
    num_src = 12

    types = [
        'torch.ByteTensor',
        'torch.CharTensor',
        'torch.ShortTensor',
        'torch.IntTensor',
        'torch.FloatTensor',
        'torch.DoubleTensor',
        'torch.LongTensor',
    ]

    shapes = [
        torch.Size((12,)),
        torch.Size((12, 1)),
        torch.Size((1, 12)),
        torch.Size((6, 2)),
        torch.Size((3, 2, 2)),
    ]

    for t in types:
        while True:
            tensor = torch.rand(num_src).mul(2).floor().type(t)
            if tensor.sum() > 0:
                break
        for shape in shapes:
            tensor = tensor.clone().resize_(shape)
            dst1 = torch.nonzero(tensor)
            dst2 = tensor.nonzero()
            dst3 = torch.LongTensor()
            torch.nonzero(tensor, out=dst3)
            if len(shape) == 1:
                dst = []
                for i in range(num_src):
                    if tensor[i] != 0:
                        dst += [i]

                self.assertEqual(dst1.select(1, 0), torch.LongTensor(dst), 0)
                self.assertEqual(dst2.select(1, 0), torch.LongTensor(dst), 0)
                self.assertEqual(dst3.select(1, 0), torch.LongTensor(dst), 0)
            elif len(shape) == 2:
                # This test will allow through some false positives. It only checks
                # that the elements flagged positive are indeed non-zero.
                for i in range(dst1.size(0)):
                    self.assertNotEqual(tensor[dst1[i, 0], dst1[i, 1]], 0)
            elif len(shape) == 3:
                # This test will allow through some false positives. It only checks
                # that the elements flagged positive are indeed non-zero.
                for i in range(dst1.size(0)):
                    self.assertNotEqual(tensor[dst1[i, 0], dst1[i, 1], dst1[i, 2]], 0)
def form_mixtures(digit1, digit2, loader, arguments):
    dataset1, dataset2 = [], []
    for i, (ft, tar) in enumerate(loader):
        # digit 1
        mask = torch.eq(tar, digit1)
        inds = torch.nonzero(mask).squeeze()
        ft1 = torch.index_select(ft, dim=0, index=inds)
        dataset1.append(ft1)

        # digit 2
        mask = torch.eq(tar, digit2)
        inds = torch.nonzero(mask).squeeze()
        ft2 = torch.index_select(ft, dim=0, index=inds)
        dataset2.append(ft2)
        print(i)

    dataset1 = torch.cat(dataset1, dim=0)
    dataset2 = torch.cat(dataset2, dim=0)

    if arguments.input_type == 'noise':
        inp1 = torch.randn(dataset1.size(0), arguments.L1)
        inp2 = torch.randn(dataset2.size(0), arguments.L1)
    elif arguments.input_type == 'autoenc':
        inp1 = dataset1
        inp2 = dataset2
    else:
        raise ValueError('Unknown input_type')

    N1, N2 = dataset1.size(0), dataset2.size(0)
    Nmix = min([N1, N2])

    dataset_mix = dataset1[:Nmix] + dataset2[:Nmix]

    dataset1 = data_utils.TensorDataset(data_tensor=inp1, target_tensor=dataset1)
    dataset2 = data_utils.TensorDataset(data_tensor=inp2, target_tensor=dataset2)
    dataset_mix = data_utils.TensorDataset(data_tensor=dataset_mix, target_tensor=torch.ones(Nmix))

    kwargs = {'num_workers': 1, 'pin_memory': True} if arguments.cuda else {}
    loader1 = data_utils.DataLoader(dataset1, batch_size=arguments.batch_size, shuffle=False, **kwargs)
    loader2 = data_utils.DataLoader(dataset2, batch_size=arguments.batch_size, shuffle=False, **kwargs)
    loader_mix = data_utils.DataLoader(dataset_mix, batch_size=arguments.batch_size, shuffle=False, **kwargs)

    return loader1, loader2, loader_mix
def scores(output, target, threshold=0.5):
    # Count true positives, true negatives, false positives and false negatives.
    outputr = (output > threshold).long()
    target = target.long()
    a_sum = 0.0
    p_sum = 0.0
    r_sum = 0.0
    f2_sum = 0.0

    def _safe_size(t, n=0):
        if n < len(t.size()):
            return t.size(n)
        else:
            return 0

    count = 0
    for o, t in zip(outputr, target):
        tp = _safe_size(torch.nonzero(o * t))
        tn = _safe_size(torch.nonzero((o - 1) * (t - 1)))
        fp = _safe_size(torch.nonzero(o * (t - 1)))
        fn = _safe_size(torch.nonzero((o - 1) * t))
        a = (tp + tn) / (tp + fp + fn + tn)
        if tp == 0 and fp == 0 and fn == 0:
            p = 1.0
            r = 1.0
            f2 = 1.0
        elif tp == 0 and (fp > 0 or fn > 0):
            p = 0.0
            r = 0.0
            f2 = 0.0
        else:
            p = tp / (tp + fp)
            r = tp / (tp + fn)
            f2 = (5 * p * r) / (4 * p + r)
        a_sum += a
        p_sum += p
        r_sum += r
        f2_sum += f2
        count += 1

    accuracy = a_sum / count
    precision = p_sum / count
    recall = r_sum / count
    fmeasure = f2_sum / count
    return accuracy, precision, recall, fmeasure
def _get_bbox_regression_labels_pytorch(self, bbox_target_data, labels_batch, num_classes):
    """Bounding-box regression targets (bbox_target_data) are stored in a
    compact form b x N x (class, tx, ty, tw, th)

    This function expands those targets into the 4-of-4*K representation used
    by the network (i.e. only one class has non-zero targets).

    Returns:
        bbox_target (ndarray): b x N x 4K blob of regression targets
        bbox_inside_weights (ndarray): b x N x 4K blob of loss weights
    """
    batch_size = labels_batch.size(0)
    rois_per_image = labels_batch.size(1)
    clss = labels_batch
    bbox_targets = bbox_target_data.new(batch_size, rois_per_image, 4).zero_()
    bbox_inside_weights = bbox_target_data.new(bbox_targets.size()).zero_()

    for b in range(batch_size):
        # assert clss[b].sum() > 0
        if clss[b].sum() == 0:
            continue
        inds = torch.nonzero(clss[b] > 0).view(-1)
        for i in range(inds.numel()):
            ind = inds[i]
            bbox_targets[b, ind, :] = bbox_target_data[b, ind, :]
            bbox_inside_weights[b, ind, :] = self.BBOX_INSIDE_WEIGHTS

    return bbox_targets, bbox_inside_weights
def interpolate(self, x_grid, x_target):
    interp_points = range(-2, 2)
    num_grid_points = len(x_grid)
    num_target_points = len(x_target)
    num_coefficients = len(interp_points)

    grid_delta = x_grid[1] - x_grid[0]

    lower_grid_pt_idxs = torch.floor((x_target - x_grid[0]) / grid_delta).squeeze()
    lower_pt_rel_dists = (x_target - x_grid[0]) / grid_delta - lower_grid_pt_idxs
    lower_grid_pt_idxs = lower_grid_pt_idxs - interp_points[-1]

    C = x_target.new(num_target_points, num_coefficients).zero_()
    for i in range(num_coefficients):
        scaled_dist = lower_pt_rel_dists + interp_points[-i - 1]
        C[:, i] = self._cubic_interpolation_kernel(scaled_dist)

    # Find points whose closest lower grid point is the first grid point.
    # This corresponds to a boundary condition that we must fix manually.
    left_boundary_pts = torch.nonzero(lower_grid_pt_idxs < 1)
    num_left = len(left_boundary_pts)

    if num_left > 0:
        left_boundary_pts.squeeze_(1)
        x_grid_first = x_grid[:num_coefficients].unsqueeze(1).t().expand(num_left, num_coefficients)
        grid_targets = x_target[left_boundary_pts].unsqueeze(1).expand(num_left, num_coefficients)
        dists = torch.abs(x_grid_first - grid_targets)
        closest_from_first = torch.min(dists, 1)[1]

        for i in range(num_left):
            C[left_boundary_pts[i], :] = 0
            C[left_boundary_pts[i], closest_from_first[i]] = 1
            lower_grid_pt_idxs[left_boundary_pts[i]] = 0

    right_boundary_pts = torch.nonzero(lower_grid_pt_idxs > num_grid_points - num_coefficients)
    num_right = len(right_boundary_pts)

    if num_right > 0:
        right_boundary_pts.squeeze_(1)
        x_grid_last = x_grid[-num_coefficients:].unsqueeze(1).t().expand(num_right, num_coefficients)
        grid_targets = x_target[right_boundary_pts].unsqueeze(1).expand(num_right, num_coefficients)
        dists = torch.abs(x_grid_last - grid_targets)
        closest_from_last = torch.min(dists, 1)[1]

        for i in range(num_right):
            C[right_boundary_pts[i], :] = 0
            C[right_boundary_pts[i], closest_from_last[i]] = 1
            lower_grid_pt_idxs[right_boundary_pts[i]] = num_grid_points - num_coefficients

    J = x_grid.new(num_target_points, num_coefficients).zero_()
    for i in range(num_coefficients):
        J[:, i] = lower_grid_pt_idxs + i
    J = J.long()

    return J, C
def cat_kkt(Qi, Qv, Qsz, Gi, Gv, Gsz, Ai, Av, Asz, Di, Dv, Dsz, eps):
    nBatch = Qv.size(0)
    nineq, nz = Gsz
    neq, _ = Asz

    Di = Di + nz

    Gi_L = Gi.clone()
    Gi_L[0, :] += nz + nineq
    Gv_L = Gv

    Gi_U = torch.stack([Gi[1, :], Gi[0, :]])
    Gi_U[1, :] += nz + nineq
    Gv_U = Gv

    Ai_L = Ai.clone()
    Ai_L[0, :] += nz + 2 * nineq
    Av_L = Av

    Ai_U = torch.stack([Ai[1, :], Ai[0, :]])
    Ai_U[1, :] += nz + 2 * nineq
    Av_U = Av

    Ii_L = type(Qi)([range(nineq), range(nineq)])
    Ii_U = Ii_L.clone()
    Ii_L[0, :] += nz + nineq
    Ii_L[1, :] += nz
    Ii_U[0, :] += nz
    Ii_U[1, :] += nz + nineq
    Iv_L = type(Qv)(nBatch, nineq).fill_(1.0)
    Iv_U = Iv_L.clone()

    Ii_11 = type(Qi)([range(nz + nineq), range(nz + nineq)])
    Iv_11 = type(Qv)(nBatch, nz + nineq).fill_(eps)

    Ii_22 = type(Qi)([range(nz + nineq, nz + 2 * nineq + neq),
                      range(nz + nineq, nz + 2 * nineq + neq)])
    Iv_22 = type(Qv)(nBatch, nineq + neq).fill_(-eps)

    Ki = torch.cat((Qi, Di, Gi_L, Gi_U, Ai_L, Ai_U, Ii_L, Ii_U, Ii_11, Ii_22), 1)
    Kv = torch.cat((Qv, Dv, Gv_L, Gv_U, Av_L, Av_U, Iv_L, Iv_U, Iv_11, Iv_22), 1)

    k = nz + 2 * nineq + neq
    Ksz = torch.Size([k, k])

    I = torch.LongTensor(np.lexsort((Ki[1].cpu().numpy(), Ki[0].cpu().numpy()))).cuda()
    Ki = Ki.t()[I].t().contiguous()
    Kv = Kv.t()[I].t().contiguous()

    Ks = [torch.cuda.sparse.DoubleTensor(Ki, Kv[i], Ksz).coalesce() for i in range(nBatch)]
    Ki = Ks[0]._indices()
    Kv = torch.stack([Ks[i]._values() for i in range(nBatch)])

    Didx = torch.nonzero((Ki[0] == Ki[1]).__and__(nz <= Ki[0]).__and__(Ki[0] < nz + nineq)).squeeze()

    return Ks, [Ki, Kv, Ksz], Didx