We extracted the following 50 code examples from open-source Python projects to illustrate how to use torch.sqrt().
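As a quick orientation before the project examples, here is a minimal, self-contained sketch of torch.sqrt() itself; the tensor values are made up for illustration and assume a current PyTorch release:

import torch

x = torch.tensor([1.0, 4.0, 9.0])
print(torch.sqrt(x))                      # element-wise square root: tensor([1., 2., 3.])

# Several examples below clamp before the square root to avoid NaN values
# from inputs that are zero or slightly negative due to rounding.
d = torch.tensor([4.0, 0.0, -1e-7])
print(torch.sqrt(torch.clamp(d, min=1e-18)))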
def bn_hat_z_layers(self, hat_z_layers, z_pre_layers):
    # TODO: Calculate batchnorm using GPU Tensors.
    assert len(hat_z_layers) == len(z_pre_layers)
    hat_z_layers_normalized = []
    for i, (hat_z, z_pre) in enumerate(zip(hat_z_layers, z_pre_layers)):
        if self.use_cuda:
            ones = Variable(torch.ones(z_pre.size()[0], 1).cuda())
        else:
            ones = Variable(torch.ones(z_pre.size()[0], 1))
        mean = torch.mean(z_pre, 0)
        noise_var = np.random.normal(loc=0.0, scale=1 - 1e-10, size=z_pre.size())
        if self.use_cuda:
            var = np.var(z_pre.data.cpu().numpy() + noise_var, axis=0).reshape(1, z_pre.size()[1])
        else:
            var = np.var(z_pre.data.numpy() + noise_var, axis=0).reshape(1, z_pre.size()[1])
        var = Variable(torch.FloatTensor(var))
        if self.use_cuda:
            hat_z = hat_z.cpu()
            ones = ones.cpu()
            mean = mean.cpu()
        hat_z_normalized = torch.div(hat_z - ones.mm(mean), ones.mm(torch.sqrt(var + 1e-10)))
        if self.use_cuda:
            hat_z_normalized = hat_z_normalized.cuda()
        hat_z_layers_normalized.append(hat_z_normalized)
    return hat_z_layers_normalized
def sym_distance_matrix(A, B, eps=1e-18, self_similarity=False):
    """
    Defines the symbolic matrix that contains the distances between the vectors of A and B
    :param A: the first data matrix
    :param B: the second data matrix
    :param self_similarity: zeros the diagonal to improve the stability
    :param eps: the minimum distance between two vectors (set to a very small number to improve stability)
    :return: the distance matrix of size (A.size(0), B.size(0))
    """
    # Compute the squared distances
    AA = torch.sum(A * A, 1).view(-1, 1)
    BB = torch.sum(B * B, 1).view(1, -1)
    AB = torch.mm(A, B.transpose(0, 1))
    D = AA + BB - 2 * AB

    # Zero the diagonal
    if self_similarity:
        D = D.view(-1)
        D[::B.size(0) + 1] = 0
        D = D.view(A.size(0), B.size(0))

    # Return the square root
    D = torch.sqrt(torch.clamp(D, min=eps))
    return D
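A hedged usage sketch for sym_distance_matrix above; the input sizes are made up for illustration:

A = torch.randn(5, 3)
B = torch.randn(7, 3)
D = sym_distance_matrix(A, B)                             # (5, 7) pairwise Euclidean distances
D_self = sym_distance_matrix(A, A, self_similarity=True)  # (5, 5), diagonal zeroed before the sqrt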
def setUp(self):
    pyro.clear_param_store()

    def model():
        mu = pyro.sample("mu", Normal(Variable(torch.zeros(1)),
                                      Variable(torch.ones(1))))
        xd = Normal(mu, Variable(torch.ones(1)), batch_size=50)
        pyro.observe("xs", xd, self.data)
        return mu

    def guide():
        return pyro.sample("mu", Normal(Variable(torch.zeros(1)),
                                        Variable(torch.ones(1))))

    # data
    self.data = Variable(torch.zeros(50, 1))
    self.mu_mean = Variable(torch.zeros(1))
    self.mu_stddev = torch.sqrt(Variable(torch.ones(1)) / 51.0)

    # model and guide
    self.model = model
    self.guide = guide
def setUp(self):
    # normal-normal; known covariance
    self.lam0 = Variable(torch.Tensor([0.1, 0.1]))  # precision of prior
    self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
    # known precision of observation noise
    self.lam = Variable(torch.Tensor([6.0, 4.0]))
    self.n_outer = 3
    self.n_inner = 3
    self.n_data = Variable(torch.Tensor([self.n_outer * self.n_inner]))
    self.data = []
    self.sum_data = ng_zeros(2)
    for _out in range(self.n_outer):
        data_in = []
        for _in in range(self.n_inner):
            data_in.append(Variable(torch.Tensor([-0.1, 0.3]) +
                                    torch.randn(2) / torch.sqrt(self.lam.data)))
            self.sum_data += data_in[-1]
        self.data.append(data_in)
    self.analytic_lam_n = self.lam0 + self.n_data.expand_as(self.lam) * self.lam
    self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
    self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) + \
        self.mu0 * (self.lam0 / self.analytic_lam_n)
    self.verbose = True

# this tests rao-blackwellization in elbo for nested list map_datas
def weights_init(m):
    """ Not actually using this but let's keep it here in case that changes """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        weight_shape = list(m.weight.data.size())
        fan_in = np.prod(weight_shape[1:4])
        fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        m.weight.data.uniform_(-w_bound, w_bound)
        m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        weight_shape = list(m.weight.data.size())
        fan_in = weight_shape[1]
        fan_out = weight_shape[0]
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        m.weight.data.uniform_(-w_bound, w_bound)
        m.bias.data.fill_(0)
def forward(self, input1):
    self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())

    for i in range(input1.size(0)):
        self.batchgrid3d[i] = self.grid3d

    self.batchgrid3d = Variable(self.batchgrid3d)
    #print(self.batchgrid3d)

    x = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,0:4]), 3)
    y = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,4:8]), 3)
    z = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,8:]), 3)
    #print(x)
    r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5
    #print(r)
    theta = torch.acos(z/r)/(np.pi/2) - 1
    #phi = torch.atan(y/x)
    phi = torch.atan(y/(x + 1e-5)) + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
    phi = phi/np.pi

    output = torch.cat([theta, phi], 3)
    return output
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        weight_shape = list(m.weight.data.size())
        fan_in = np.prod(weight_shape[1:4])
        fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        m.weight.data.uniform_(-w_bound, w_bound)
        m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        weight_shape = list(m.weight.data.size())
        fan_in = weight_shape[1]
        fan_out = weight_shape[0]
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        m.weight.data.uniform_(-w_bound, w_bound)
        m.bias.data.fill_(0)
def init_weights(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        weight_shape = list(m.weight.data.size())
        fan_in = np.prod(weight_shape[1:4])
        fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        m.weight.data.uniform_(-w_bound, w_bound)
        m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        weight_shape = list(m.weight.data.size())
        fan_in = weight_shape[1]
        fan_out = weight_shape[0]
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        m.weight.data.uniform_(-w_bound, w_bound)
        m.bias.data.fill_(0)
def step(self):
    super(Adam, self).step()
    self.t += 1
    if len(self.m) == 0:
        for p in self.params:
            self.m[p] = torch.zeros(p.size())
            self.v[p] = torch.zeros(p.size())

    for p in self.params:
        mt = self.beta1 * self.m[p] + (1 - self.beta1) * p.grad.data
        vt = self.beta2 * self.v[p] + (1 - self.beta2) * p.grad.data**2

        m = mt / (1 - self.beta1**self.t)
        v = vt / (1 - self.beta2**self.t)

        rate = self.lr / (torch.sqrt(v) + self.epsilon)
        p.data.sub_(rate * m)

        self.m[p] = mt
        self.v[p] = vt
    self.clear_gradients()

# Alias
def pdist(x: T.FloatTensor, y: T.FloatTensor) -> T.FloatTensor:
    """
    Compute the pairwise distance matrix between the rows of x and y.

    Args:
        x (tensor (num_samples_1, num_units))
        y (tensor (num_samples_2, num_units))

    Returns:
        tensor (num_samples_1, num_samples_2)
    """
    inner = dot(x, transpose(y))
    x_mag = norm(x, axis=1) ** 2
    y_mag = norm(y, axis=1) ** 2
    squared = add(unsqueeze(y_mag, axis=0), add(unsqueeze(x_mag, axis=1), -2*inner))
    return torch.sqrt(clip(squared, a_min=0))
def __init__(self, state_size, action_size):
    super(BaselineActor, self).__init__()
    self.fc1 = nn.Linear(state_size, 64, bias=True)
    self.fc2 = nn.Linear(64, 64, bias=True)
    self.mean = nn.Linear(64, action_size, bias=True)
    # Init
    for p in [self.fc1, self.fc2, self.mean]:
        p.weight.data.normal_(0, 1)
        p.weight.data *= 1.0 / th.sqrt(p.weight.data.pow(2).sum(1, keepdim=True))
        p.bias.data.mul_(0.0)
    self.mean.weight.data.mul_(0.01)
def __init__(self, state_size):
    super(BaselineCritic, self).__init__()
    self.fc1 = nn.Linear(state_size, 64, bias=True)
    self.fc2 = nn.Linear(64, 64, bias=True)
    self.value = nn.Linear(64, 1, bias=True)
    # Init
    for p in [self.fc1, self.fc2, self.value]:
        p.weight.data.normal_(0, 1)
        p.weight.data *= 1.0 / th.sqrt(p.weight.data.pow(2).sum(1, keepdim=True))
        p.bias.data.mul_(0.0)
def __init__(self, d_in, d_out, use_cuda):
    super(Decoder, self).__init__()
    self.d_in = d_in
    self.d_out = d_out
    self.use_cuda = use_cuda

    if self.use_cuda:
        self.a1 = Parameter(0. * torch.ones(1, d_in).cuda())
        self.a2 = Parameter(1. * torch.ones(1, d_in).cuda())
        self.a3 = Parameter(0. * torch.ones(1, d_in).cuda())
        self.a4 = Parameter(0. * torch.ones(1, d_in).cuda())
        self.a5 = Parameter(0. * torch.ones(1, d_in).cuda())
        self.a6 = Parameter(0. * torch.ones(1, d_in).cuda())
        self.a7 = Parameter(1. * torch.ones(1, d_in).cuda())
        self.a8 = Parameter(0. * torch.ones(1, d_in).cuda())
        self.a9 = Parameter(0. * torch.ones(1, d_in).cuda())
        self.a10 = Parameter(0. * torch.ones(1, d_in).cuda())
    else:
        self.a1 = Parameter(0. * torch.ones(1, d_in))
        self.a2 = Parameter(1. * torch.ones(1, d_in))
        self.a3 = Parameter(0. * torch.ones(1, d_in))
        self.a4 = Parameter(0. * torch.ones(1, d_in))
        self.a5 = Parameter(0. * torch.ones(1, d_in))
        self.a6 = Parameter(0. * torch.ones(1, d_in))
        self.a7 = Parameter(1. * torch.ones(1, d_in))
        self.a8 = Parameter(0. * torch.ones(1, d_in))
        self.a9 = Parameter(0. * torch.ones(1, d_in))
        self.a10 = Parameter(0. * torch.ones(1, d_in))

    if self.d_out is not None:
        self.V = torch.nn.Linear(d_in, d_out, bias=False)
        self.V.weight.data = torch.randn(self.V.weight.data.size()) / np.sqrt(d_in)
        # batch-normalization for u
        self.bn_normalize = torch.nn.BatchNorm1d(d_out, affine=False)

    # buffer for hat_z_l to be used for cost calculation
    self.buffer_hat_z_l = None
def test_sqrt(self):
    self._testMath(torch.sqrt, lambda x: math.sqrt(x) if x > 0 else float('nan'))
def test_rsqrt(self):
    self._testMath(torch.rsqrt, lambda x: 1 / math.sqrt(x) if x > 0 else float('nan'))
def forward(self, img, sent, imgc, sentc):
    # imgc : (bsize, ncontrast, imgdim)
    # sentc : (bsize, ncontrast, sentdim)
    # img : (bsize, imgdim)
    # sent : (bsize, sentdim)
    img = img.unsqueeze(1).expand_as(imgc).contiguous()
    img = img.view(-1, self.imgdim)
    imgc = imgc.view(-1, self.imgdim)
    sent = sent.unsqueeze(1).expand_as(sentc).contiguous()
    sent = sent.view(-1, self.sentdim)
    sentc = sentc.view(-1, self.sentdim)

    imgproj = self.imgproj(img)
    imgproj = imgproj / torch.sqrt(torch.pow(imgproj, 2).sum(1, keepdim=True)).expand_as(imgproj)
    imgcproj = self.imgproj(imgc)
    imgcproj = imgcproj / torch.sqrt(torch.pow(imgcproj, 2).sum(1, keepdim=True)).expand_as(imgcproj)
    sentproj = self.sentproj(sent)
    sentproj = sentproj / torch.sqrt(torch.pow(sentproj, 2).sum(1, keepdim=True)).expand_as(sentproj)
    sentcproj = self.sentproj(sentc)
    sentcproj = sentcproj / torch.sqrt(torch.pow(sentcproj, 2).sum(1, keepdim=True)).expand_as(sentcproj)
    # (bsize*ncontrast, projdim)

    anchor1 = torch.sum((imgproj*sentproj), 1)
    anchor2 = torch.sum((sentproj*imgproj), 1)
    img_sentc = torch.sum((imgproj*sentcproj), 1)
    sent_imgc = torch.sum((sentproj*imgcproj), 1)
    # (bsize*ncontrast)
    return anchor1, anchor2, img_sentc, sent_imgc
def proj_sentence(self, sent):
    output = self.sentproj(sent)
    output = output / torch.sqrt(torch.pow(output, 2).sum(1, keepdim=True)).expand_as(output)
    return output  # (bsize, projdim)
def proj_image(self, img):
    output = self.imgproj(img)
    output = output / torch.sqrt(torch.pow(output, 2).sum(1, keepdim=True)).expand_as(output)
    return output  # (bsize, projdim)
def bivariate_normal_pdf(self, dx, dy):
    z_x = ((dx - self.mu_x) / self.sigma_x)**2
    z_y = ((dy - self.mu_y) / self.sigma_y)**2
    z_xy = (dx - self.mu_x) * (dy - self.mu_y) / (self.sigma_x * self.sigma_y)
    z = z_x + z_y - 2 * self.rho_xy * z_xy
    exp = torch.exp(-z / (2 * (1 - self.rho_xy**2)))
    norm = 2 * np.pi * self.sigma_x * self.sigma_y * torch.sqrt(1 - self.rho_xy**2)
    return exp / norm
def sample_bivariate_normal(mu_x, mu_y, sigma_x, sigma_y, rho_xy, greedy=False):
    # inputs must be floats
    if greedy:
        return mu_x, mu_y
    mean = [mu_x, mu_y]
    sigma_x *= np.sqrt(hp.temperature)
    sigma_y *= np.sqrt(hp.temperature)
    cov = [[sigma_x * sigma_x, rho_xy * sigma_x * sigma_y],
           [rho_xy * sigma_x * sigma_y, sigma_y * sigma_y]]
    x = np.random.multivariate_normal(mean, cov, 1)
    return x[0][0], x[0][1]
def forward(self, x0, x1, y):
    # euclidean distance
    diff = x0 - x1
    dist_sq = torch.sum(torch.pow(diff, 2), 1)
    dist = torch.sqrt(dist_sq)

    mdist = self.margin - dist
    dist = torch.clamp(mdist, min=0.0)
    loss = y * dist_sq + (1 - y) * torch.pow(dist, 2)
    loss = torch.sum(loss) / 2.0 / x0.size()[0]
    return loss
def _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    mmd2, var_est = _mmd2_and_variance(K_XX, K_XY, K_YY,
                                       const_diagonal=const_diagonal, biased=biased)
    loss = mmd2 / torch.sqrt(torch.clamp(var_est, min=min_var_est))
    return loss, mmd2, var_est
def weights_init_mlp(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        m.weight.data.normal_(0, 1)
        m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
        if m.bias is not None:
            m.bias.data.fill_(0)
def normalized_columns_initializer(weights, std=1.0):
    out = torch.randn(weights.size())
    out *= std / torch.sqrt(out.pow(2).sum(1).expand_as(out))
    return out
def save_conv_shrink_bn(fp, conv_model, bn_model, eps=1e-5):
    if bn_model.bias.is_cuda:
        bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)
        convert2cpu(bias).numpy().tofile(fp)
        s = conv_model.weight.data.size()
        weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1,1,1,1).repeat(1, s[1], s[2], s[3])
        convert2cpu(weight).numpy().tofile(fp)
    else:
        bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)
        bias.numpy().tofile(fp)
        s = conv_model.weight.data.size()
        weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1,1,1,1).repeat(1, s[1], s[2], s[3])
        weight.numpy().tofile(fp)
def forward(self, x):
    n = x.size(2) * x.size(3)
    t = x.view(x.size(0), x.size(1), n)
    mean = torch.mean(t, 2).unsqueeze(2).unsqueeze(3).expand_as(x)
    # Calculate the biased var. torch.var returns unbiased var
    var = torch.var(t, 2).unsqueeze(2).unsqueeze(3).expand_as(x) * ((n - 1) / float(n))
    scale_broadcast = self.scale.unsqueeze(1).unsqueeze(1).unsqueeze(0)
    scale_broadcast = scale_broadcast.expand_as(x)
    shift_broadcast = self.shift.unsqueeze(1).unsqueeze(1).unsqueeze(0)
    shift_broadcast = shift_broadcast.expand_as(x)
    out = (x - mean) / torch.sqrt(var + self.eps)
    out = out * scale_broadcast + shift_broadcast
    return out
def set_sig(self, X, Y):
    Y_pred = self.lin(X) + self.net(X)
    var = torch.mean((Y_pred - Y)**2, 0)
    self.sig.data = torch.sqrt(var).cuda().data
def arclength_param(line):
    "Arclength parametrisation of a piecewise affine curve."
    vel = line[1:, :] - line[:-1, :]
    vel = np.sqrt(np.sum(vel ** 2, 1))
    return np.hstack(([0], np.cumsum(vel, 0)))
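A small worked example for arclength_param above; the polyline coordinates are made up for illustration:

import numpy as np

line = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
print(arclength_param(line))  # [0. 1. 2.]: cumulative arclength along the two unit segments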
def to_measure(self):
    """
    Outputs the sum-of-diracs measure associated to the curve.
    Each segment from the connectivity matrix self.c
    is represented as a weighted dirac located at its center,
    with weight equal to the segment length.
    """
    segments = self.segments()
    centers = [.5 * (seg[0] + seg[1]) for seg in segments]
    lengths = [np.sqrt(np.sum((seg[1] - seg[0])**2)) for seg in segments]
    return (np.array(centers), np.array(lengths))
def _vertices_to_measure(q, connec):
    """
    Transforms a torch array 'q1' into a measure, assuming a connectivity matrix connec.
    It is the Torch equivalent of 'to_measure'.
    """
    a = q[connec[:, 0]] ; b = q[connec[:, 1]]
    # A curve is represented as a sum of diracs, one for each segment
    x = .5 * (a + b)                        # Mean
    mu = torch.sqrt(((b - a)**2).sum(1))    # Length
    return (x, mu)
def reset_parameters(self):
    std = 1.0 / math.sqrt(self.input_size)
    for w in self.parameters():
        w.data.uniform_(-std, std)
def forward(self, x):
    size = x.size()
    x = x.view(x.size(0), -1)
    x = (x - th.mean(x, 1).unsqueeze(1)) / th.sqrt(th.var(x, 1).unsqueeze(1) + self.epsilon)
    if self.learnable:
        x = self.alpha.expand_as(x) * x + self.beta.expand_as(x)
    return x.view(size)
def reset_parameters(self):
    std = math.sqrt(3 / self.in_features)
    nn.init.uniform(self.weight, -std, std)
    nn.init.uniform(self.bias, -std, std)
def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
    super(NoisyFactorizedLinear, self).__init__(in_features, out_features, bias=bias)
    sigma_init = sigma_zero / math.sqrt(in_features)
    self.sigma_weight = nn.Parameter(torch.Tensor(out_features, in_features).fill_(sigma_init))
    self.register_buffer("epsilon_input", torch.zeros(1, in_features))
    self.register_buffer("epsilon_output", torch.zeros(out_features, 1))
    if bias:
        self.sigma_bias = nn.Parameter(torch.Tensor(out_features).fill_(sigma_init))
def forward(self, input):
    torch.randn(self.epsilon_input.size(), out=self.epsilon_input)
    torch.randn(self.epsilon_output.size(), out=self.epsilon_output)

    func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
    eps_in = func(self.epsilon_input)
    eps_out = func(self.epsilon_output)

    bias = self.bias
    if bias is not None:
        bias = bias + self.sigma_bias * Variable(eps_out.t())
    noise_v = Variable(torch.mul(eps_in, eps_out))
    return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
def forward(self, input1, input2):
    self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())

    for i in range(input1.size(0)):
        self.batchgrid3d[i] = self.grid3d

    self.batchgrid3d = Variable(self.batchgrid3d)

    self.batchgrid = torch.zeros(torch.Size([input1.size(0)]) + self.grid.size())

    for i in range(input1.size(0)):
        self.batchgrid[i] = self.grid

    self.batchgrid = Variable(self.batchgrid)
    #print(self.batchgrid3d)

    x = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,0:4]), 3)
    y = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,4:8]), 3)
    z = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,8:]), 3)
    #print(x)
    r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5
    #print(r)
    theta = torch.acos(z/r)/(np.pi/2) - 1
    #phi = torch.atan(y/x)
    phi = torch.atan(y/(x + 1e-5)) + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
    phi = phi/np.pi

    input_u = input2.view(-1,1,1,1).repeat(1, self.height, self.width, 1)

    output = torch.cat([theta, phi], 3)

    output1 = torch.atan(torch.tan(np.pi/2.0*(output[:,:,:,1:2] + self.batchgrid[:,:,:,2:] * input_u[:,:,:,:]))) / (np.pi/2)
    output2 = torch.cat([output[:,:,:,0:1], output1], 3)

    return output2
def forward(self, depth, trans0, trans1, rotate):
    self.batchgrid3d = torch.zeros(torch.Size([depth.size(0)]) + self.grid3d.size())

    for i in range(depth.size(0)):
        self.batchgrid3d[i] = self.grid3d

    self.batchgrid3d = Variable(self.batchgrid3d)

    self.batchgrid = torch.zeros(torch.Size([depth.size(0)]) + self.grid.size())

    for i in range(depth.size(0)):
        self.batchgrid[i] = self.grid

    self.batchgrid = Variable(self.batchgrid)

    x = self.batchgrid3d[:,:,:,0:1] * depth + trans0.view(-1,1,1,1).repeat(1, self.height, self.width, 1)
    y = self.batchgrid3d[:,:,:,1:2] * depth + trans1.view(-1,1,1,1).repeat(1, self.height, self.width, 1)
    z = self.batchgrid3d[:,:,:,2:3] * depth
    #print(x.size(), y.size(), z.size())

    r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5
    #print(r)
    theta = torch.acos(z/r)/(np.pi/2) - 1
    #phi = torch.atan(y/x)
    phi = torch.atan(y/(x + 1e-5)) + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
    phi = phi/np.pi
    #print(theta.size(), phi.size())

    input_u = rotate.view(-1,1,1,1).repeat(1, self.height, self.width, 1)

    output = torch.cat([theta, phi], 3)
    #print(output.size())

    output1 = torch.atan(torch.tan(np.pi/2.0*(output[:,:,:,1:2] + self.batchgrid[:,:,:,2:] * input_u[:,:,:,:]))) / (np.pi/2)
    output2 = torch.cat([output[:,:,:,0:1], output1], 3)

    return output2