Python torch.nn.init module: uniform() example source code

We extracted the following 24 code examples from open-source Python projects to illustrate how to use torch.nn.init.uniform().
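As a quick orientation before the project excerpts: init.uniform(tensor, a, b) fills a tensor in place with values drawn from the uniform distribution U(a, b). In PyTorch 0.4 and later the function is spelled init.uniform_ (the trailing underscore marks in-place operations); the older name used throughout the snippets below survived for a while as a deprecated alias. A minimal, self-contained sketch against the current API:

import torch
from torch import nn
from torch.nn import init

# Fill a weight matrix in place with values drawn from U(-0.1, 0.1).
linear = nn.Linear(128, 64)
init.uniform_(linear.weight, a=-0.1, b=0.1)  # spelled init.uniform(...) in PyTorch < 0.4
init.constant_(linear.bias, 0.0)
print(linear.weight.min().item(), linear.weight.max().item())  # both within (-0.1, 0.1)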

Project: Dynamic-memory-networks-plus-Pytorch    Author: dandelin    | Project source | File source
def __init__(self, hidden_size, vocab_size, num_hop=3, qa=None):
        super(DMNPlus, self).__init__()
        self.num_hop = num_hop
        self.qa = qa
        self.word_embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0, sparse=True).cuda()
        init.uniform(self.word_embedding.state_dict()['weight'], a=-(3**0.5), b=3**0.5)
        self.criterion = nn.CrossEntropyLoss(size_average=False)

        self.input_module = InputModule(vocab_size, hidden_size)
        self.question_module = QuestionModule(vocab_size, hidden_size)
        self.memory = EpisodicMemory(hidden_size)
        self.answer_module = AnswerModule(vocab_size, hidden_size)
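The bounds ±√3 here are not arbitrary: a uniform distribution on [-a, a] has variance a²/3, so a = √3 yields embedding weights with unit variance. A minimal sketch of the same initialization with the current in-place API (the sizes are hypothetical):

import torch.nn as nn
from torch.nn import init

vocab_size, hidden_size = 10000, 80  # hypothetical sizes
embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0)
# U(-sqrt(3), sqrt(3)) has variance (sqrt(3))**2 / 3 = 1
init.uniform_(embedding.weight, a=-(3 ** 0.5), b=3 ** 0.5)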
Project: pytorch    Author: tylergenter    | Project source | File source
def _is_uniform(self, tensor, a, b):
        if isinstance(tensor, Variable):
            tensor = tensor.data
        samples = list(tensor.view(-1))
        p_value = stats.kstest(samples, 'uniform', args=(a, (b - a))).pvalue
        return p_value > 0.0001
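This helper flattens the tensor and runs a Kolmogorov-Smirnov goodness-of-fit test against the uniform CDF. SciPy parameterizes its 'uniform' distribution by (loc, scale), i.e. U(loc, loc + scale), which is why the call passes args=(a, b - a). A standalone sketch of the same check:

import torch
from scipy import stats
from torch.nn import init

tensor = torch.empty(40, 40)
init.uniform_(tensor, a=-2.0, b=3.0)
# SciPy's 'uniform' is U(loc, loc + scale), so pass loc=a and scale=b - a
p_value = stats.kstest(tensor.view(-1).tolist(), 'uniform', args=(-2.0, 5.0)).pvalue
assert p_value > 0.0001  # same acceptance threshold as the tests in this collection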
Project: pytorch    Author: tylergenter    | Project source | File source
def test_uniform(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                a = self._random_float(-3, 3)
                b = a + self._random_float(1, 5)
                init.uniform(input_tensor, a=a, b=b)
                assert self._is_uniform(input_tensor, a, b)
Project: covfefe    Author: deepnn    | Project source | File source
def uniform(w, a=0, b=1):
    # torch.nn itself has no uniform(); `nn` here must alias a module that
    # wraps torch.nn.init, as this thin forwarding helper suggests
    return nn.uniform(w, a=a, b=b)
Project: unsupervised-treelstm    Author: jihunchoi    | Project source | File source
def reset_parameters(self):
        if self.use_batchnorm:
            self.bn_mlp_input.reset_parameters()
            self.bn_mlp_output.reset_parameters()
        for i in range(self.num_layers):
            linear_layer = self.mlp[i][0]
            init.kaiming_normal(linear_layer.weight.data)
            init.constant(linear_layer.bias.data, val=0)
        init.uniform(self.clf_linear.weight.data, -0.005, 0.005)
        init.constant(self.clf_linear.bias.data, val=0)
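The pattern here (Kaiming initialization for the hidden MLP layers, a very small uniform range for the final classifier weights, zero biases) carries over directly to the underscore API. A hedged sketch for the classifier layer alone, with hypothetical sizes:

from torch import nn
from torch.nn import init

clf_linear = nn.Linear(300, 5)  # hypothetical classifier layer
init.uniform_(clf_linear.weight, -0.005, 0.005)
init.constant_(clf_linear.bias, 0.0)  # init.constant(..., val=0) in older PyTorch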
Project: unsupervised-treelstm    Author: jihunchoi    | Project source | File source
def reset_parameters(self):
        if self.use_batchnorm:
            self.bn_mlp_input.reset_parameters()
            self.bn_mlp_output.reset_parameters()
        for i in range(self.num_layers):
            linear_layer = self.mlp[i][0]
            init.kaiming_normal(linear_layer.weight.data)
            init.constant(linear_layer.bias.data, val=0)
        init.uniform(self.clf_linear.weight.data, -0.002, 0.002)
        init.constant(self.clf_linear.bias.data, val=0)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def _is_uniform(self, tensor, a, b):
        if isinstance(tensor, Variable):
            tensor = tensor.data
        samples = list(tensor.view(-1))
        p_value = stats.kstest(samples, 'uniform', args=(a, (b - a))).pvalue
        return p_value > 0.0001
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_uniform(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                a = self._random_float(-3, 3)
                b = a + self._random_float(1, 5)
                init.uniform(input_tensor, a=a, b=b)
                assert self._is_uniform(input_tensor, a, b)
Project: pytorch    Author: ezyang    | Project source | File source
def _is_uniform(self, tensor, a, b):
        if isinstance(tensor, Variable):
            tensor = tensor.data
        samples = list(tensor.view(-1))
        p_value = stats.kstest(samples, 'uniform', args=(a, (b - a))).pvalue
        return p_value > 0.0001
Project: pytorch    Author: ezyang    | Project source | File source
def test_uniform(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                a = self._random_float(-3, 3)
                b = a + self._random_float(1, 5)
                init.uniform(input_tensor, a=a, b=b)
                assert self._is_uniform(input_tensor, a, b)
Project: squad_rasor_nn    Author: hsgodhia    | Project source | File source
def init_param(self, param):
        if len(param.size()) < 2:
            init.uniform(param)
        else:            
            init.xavier_uniform(param)
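Dispatching on dimensionality (plain uniform for 1-D parameters such as biases, Xavier uniform for weight matrices) is a common catch-all initializer; note that init.xavier_uniform also gained a trailing underscore in PyTorch 0.4. A sketch applying the same rule across a module's parameters:

from torch import nn
from torch.nn import init

def init_params(module):
    for param in module.parameters():
        if param.dim() < 2:
            init.uniform_(param)         # biases and other vectors, default U(0, 1)
        else:
            init.xavier_uniform_(param)  # weight matrices

init_params(nn.Linear(100, 50))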
Project: squad_rasor_nn    Author: hsgodhia    | Project source | File source
def init_param(self, param):
        if len(param.size()) < 2:
            init.uniform(param)
        else:            
            init.xavier_uniform(param)
Project: URNN-PyTorch    Author: jingli9111    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters
        """
        init.uniform(self.thetaA, a=-0.1, b=0.1)
        init.uniform(self.thetaB, a=-0.1, b=0.1)
        init.uniform(self.U, a=-0.1, b=0.1)
        init.constant(self.bias.data, val=0)
Project: URNN-PyTorch    Author: jingli9111    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters  TO DO
        """
        init.uniform(self.thetaA, a=-0.1, b=0.1)
        init.uniform(self.thetaB, a=-0.1, b=0.1)
        init.uniform(self.U, a=-0.1, b=0.1)
        init.orthogonal(self.gate_U.data)

        gate_W_data = torch.eye(self.hidden_size)
        gate_W_data = gate_W_data.repeat(1, 2)
        self.gate_W.data.set_(gate_W_data)

        init.constant(self.bias.data, val=0)
        init.constant(self.gate_bias.data, val=0)
Project: NoisyNet-A3C    Author: Kaixhin    | Project source | File source
def reset_parameters(self):
    if hasattr(self, 'sigma_weight'):  # Only init after all params added (otherwise super().__init__() fails)
      init.uniform(self.weight, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))
      init.uniform(self.bias, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))
      init.constant(self.sigma_weight, self.sigma_init)
      init.constant(self.sigma_bias, self.sigma_init)
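The bound √(3/in_features) gives U(-b, b) a variance of b²/3 = 1/in_features, i.e. fan-in scaling of the mean weights (the sigma parameters then start at a constant sigma_init). A sketch of the same bounds with the in-place API and hypothetical sizes:

import math
import torch
from torch.nn import init

in_features, out_features = 64, 32  # hypothetical layer sizes
weight = torch.empty(out_features, in_features)
bound = math.sqrt(3 / in_features)  # U(-b, b) has variance b**2 / 3 = 1/fan_in
init.uniform_(weight, -bound, bound)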
Project: pytorch    Author: pytorch    | Project source | File source
def _is_uniform(self, tensor, a, b):
        if isinstance(tensor, Variable):
            tensor = tensor.data
        samples = list(tensor.view(-1))
        p_value = stats.kstest(samples, 'uniform', args=(a, (b - a)))[1]
        return p_value > 0.0001
Project: pytorch    Author: pytorch    | Project source | File source
def test_uniform(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                a = self._random_float(-3, 3)
                b = a + self._random_float(1, 5)
                init.uniform(input_tensor, a=a, b=b)
                assert self._is_uniform(input_tensor, a, b)
Project: DCN    Author: alexnowakvila    | Project source | File source
def fwd_split(self, input, batch, depth,
                  mergesort_split=False, mode='train', epoch=0):
        length = self.split.n
        var = 0.0
        # Iterate over scales
        e = Variable(torch.zeros((self.batch_size, length)).type(dtype),
                     requires_grad=False)
        mask = (input[:, :, 0] >= 0).type(dtype).squeeze()
        Phis, Bs, Inputs_N, Samples = ([] for ii in range(4))
        for scale in range(depth):
            logits, probs, input_n, Phi = self.split(e, input,
                                                     mask, scale=scale)
            # Sample from probabilities and update embeddings
            rand = (Variable(torch.zeros(self.batch_size, length))
                    .type(dtype))
            init.uniform(rand)
            sample = (probs > rand).type(dtype)
            e = 2 * e + sample
            # Appends
            Samples.append(sample)
            Phis.append(Phi)
            Bs.append(probs)
            Inputs_N.append(input_n)
            # variance of Bernoulli probabilities
            var += self.compute_variance(probs, mask)
        # computes log probabilities of binary actions for the policy gradient
        Log_Probs = self.log_probabilities(Bs, Samples, mask, depth)
        # pad embeddings with infinity to not affect embeddings argsort
        infty = 1e6
        e = e * mask + (1 - mask) * infty
        return var, Phis, Bs, Inputs_N, e, Log_Probs

    ###########################################################################
    #                            Forward pass                                 #
    ###########################################################################
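Filling a zero tensor with init.uniform and comparing it elementwise to probs is a hand-rolled Bernoulli sampler: for rand ~ U(0, 1), P(probs > rand) = probs. In current PyTorch the same step is usually written with torch.rand_like or torch.bernoulli; a minimal sketch:

import torch

probs = torch.full((4, 10), 0.3)  # hypothetical Bernoulli probabilities
rand = torch.rand_like(probs)     # replaces init.uniform on a zero tensor
sample = (probs > rand).float()
# equivalently: sample = torch.bernoulli(probs)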
Project: DCN    Author: alexnowakvila    | Project source | File source
def fwd_split(self, input, batch, depth,
                  random_split=False, mode='train', epoch=0):
        length = self.split.n
        var = 0.0
        # Iterate over scales
        e = Variable(torch.zeros(self.batch_size, length)).type(dtype)
        mask = (input[:, :, 0] >= 0).type(dtype).squeeze()
        Phis, Bs, Inputs_N, Samples = ([] for ii in range(4))
        for scale in range(depth):
            logits, probs, input_n, Phi = self.split(e, input,
                                                     mask, scale=scale)
            # Sample from probabilities and update embeddings
            if random_split:
                rand = (Variable(torch.zeros(self.batch_size, length))
                        .type(dtype))
                init.uniform(rand)
                sample = (rand > 0.5).type(dtype)
            else:
                rand = (Variable(torch.zeros(self.batch_size, length))
                        .type(dtype))
                init.uniform(rand)
                sample = (probs > rand).type(dtype)
            e = 2 * e + sample
            # Appends
            Samples.append(sample)
            Phis.append(Phi)
            Bs.append(probs)
            Inputs_N.append(input_n)
            # variance of Bernoulli probabilities
            var += self.compute_variance(probs, mask)
        # computes log probabilities of binary actions for the policy gradient
        Log_Probs = self.log_probabilities(Bs, Samples, mask, depth)
        # pad embeddings with infinity to not affect embeddings argsort
        infty = 1e6
        e = e * mask + (1 - mask) * infty
        return var, Phis, Bs, Inputs_N, e, Log_Probs

    ###########################################################################
    #                             Merge Phase                                 #
    ###########################################################################
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo    | Project source | File source
def __init__(self, args):
        super(DEEP_CNN_MUI, self).__init__()
        self.args = args

        V = args.embed_num
        V_mui = args.embed_num_mui
        D = args.embed_dim
        C = args.class_num
        Ci = 2
        Co = args.kernel_num
        Ks = args.kernel_sizes
        if args.max_norm is not None:
            print("max_norm = {} ".format(args.max_norm))
            self.embed_no_static = nn.Embedding(V, D, max_norm=args.max_norm, scale_grad_by_freq=True)
            self.embed_static = nn.Embedding(V_mui, D, max_norm=args.max_norm, scale_grad_by_freq=True)
        else:
            print("max_norm = {} ".format(args.max_norm))
            self.embed_no_static = nn.Embedding(V, D, scale_grad_by_freq=True)
            self.embed_static = nn.Embedding(V_mui, D, scale_grad_by_freq=True)

        if args.word_Embedding:
            pretrained_weight = np.array(args.pretrained_weight)
            self.embed_no_static.weight.data.copy_(torch.from_numpy(pretrained_weight))
            pretrained_weight_static = np.array(args.pretrained_weight_static)
            self.embed_static.weight.data.copy_(torch.from_numpy(pretrained_weight_static))
            # whether to fix the word embedding
            self.embed_no_static.weight.requires_grad = True
        # conv layers
        self.convs1 = [nn.Conv2d(Ci, D, (K, D), stride=1, padding=(K//2, 0), bias=True) for K in Ks]
        self.convs2 = [nn.Conv2d(1, Co, (K, D), stride=1, padding=(K//2, 0), bias=True) for K in Ks]
        print(self.convs1)
        print(self.convs2)

        if args.init_weight:
            print("Initing W .......")
            for (conv1, conv2) in zip(self.convs1, self.convs2):
                init.xavier_normal(conv1.weight.data, gain=np.sqrt(args.init_weight_value))
                init.uniform(conv1.bias, 0, 0)
                init.xavier_normal(conv2.weight.data, gain=np.sqrt(args.init_weight_value))
                init.uniform(conv2.bias, 0, 0)

        # dropout
        self.dropout = nn.Dropout(args.dropout)
        # linear
        in_fea = len(Ks) * Co
        self.fc1 = nn.Linear(in_features=in_fea, out_features=in_fea // 2, bias=True)
        self.fc2 = nn.Linear(in_features=in_fea // 2, out_features=C, bias=True)
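Note that init.uniform(conv.bias, 0, 0) samples from a zero-width interval, so it is simply an indirect way of zeroing the bias; init.constant_ or init.zeros_ states that intent directly. A sketch of one iteration of the initialization loop with the current names and hypothetical shapes:

import numpy as np
from torch import nn
from torch.nn import init

conv = nn.Conv2d(2, 100, (3, 300))  # hypothetical Ci, Co, (K, D)
init.xavier_normal_(conv.weight, gain=np.sqrt(2.0))
init.zeros_(conv.bias)              # clearer than uniform(bias, 0, 0)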
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo    | Project source | File source
def __init__(self, args):
        super(DEEP_CNN, self).__init__()
        self.args = args

        V = args.embed_num
        D = args.embed_dim
        C = args.class_num
        Ci = 1
        Co = args.kernel_num
        Ks = args.kernel_sizes
        if args.max_norm is not None:
            print("max_norm = {} ".format(args.max_norm))
            self.embed = nn.Embedding(V, D, max_norm=args.max_norm, scale_grad_by_freq=True)
            # self.embed.weight.data.uniform_(-0.1, 0.1)
        else:
            print("max_norm = {} ".format(args.max_norm))
            self.embed = nn.Embedding(V, D, scale_grad_by_freq=True)
        # word embedding
        if args.word_Embedding:
            pretrained_weight = np.array(args.pretrained_weight)
            self.embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
            # keep the word embedding trainable (not fixed)
            self.embed.weight.requires_grad = True
        # conv layers
        self.convs1 = [nn.Conv2d(Ci, D, (K, D), stride=1, padding=(K//2, 0), bias=True) for K in Ks]
        self.convs2 = [nn.Conv2d(Ci, Co, (K, D), stride=1, padding=(K//2, 0), bias=True) for K in Ks]
        print(self.convs1)
        print(self.convs2)

        if args.init_weight:
            print("Initing W .......")
            for (conv1, conv2) in zip(self.convs1, self.convs2):
                init.xavier_normal(conv1.weight.data, gain=np.sqrt(args.init_weight_value))
                init.uniform(conv1.bias, 0, 0)
                init.xavier_normal(conv2.weight.data, gain=np.sqrt(args.init_weight_value))
                init.uniform(conv2.bias, 0, 0)

        # dropout
        self.dropout = nn.Dropout(args.dropout)
        # linear
        in_fea = len(Ks) * Co
        self.fc1 = nn.Linear(in_features=in_fea, out_features=in_fea // 2, bias=True)
        self.fc2 = nn.Linear(in_features=in_fea // 2, out_features=C, bias=True)
Project: samplernn-pytorch    Author: deepsound-project    | Project source | File source
def __init__(self, frame_size, n_frame_samples, n_rnn, dim,
                 learn_h0, weight_norm):
        super().__init__()

        self.frame_size = frame_size
        self.n_frame_samples = n_frame_samples
        self.dim = dim

        h0 = torch.zeros(n_rnn, dim)
        if learn_h0:
            self.h0 = torch.nn.Parameter(h0)
        else:
            self.register_buffer('h0', torch.autograd.Variable(h0))

        self.input_expand = torch.nn.Conv1d(
            in_channels=n_frame_samples,
            out_channels=dim,
            kernel_size=1
        )
        init.kaiming_uniform(self.input_expand.weight)
        init.constant(self.input_expand.bias, 0)
        if weight_norm:
            self.input_expand = torch.nn.utils.weight_norm(self.input_expand)

        self.rnn = torch.nn.GRU(
            input_size=dim,
            hidden_size=dim,
            num_layers=n_rnn,
            batch_first=True
        )
        for i in range(n_rnn):
            nn.concat_init(
                getattr(self.rnn, 'weight_ih_l{}'.format(i)),
                [nn.lecun_uniform, nn.lecun_uniform, nn.lecun_uniform]
            )
            init.constant(getattr(self.rnn, 'bias_ih_l{}'.format(i)), 0)

            nn.concat_init(
                getattr(self.rnn, 'weight_hh_l{}'.format(i)),
                [nn.lecun_uniform, nn.lecun_uniform, init.orthogonal]
            )
            init.constant(getattr(self.rnn, 'bias_hh_l{}'.format(i)), 0)

        self.upsampling = nn.LearnedUpsampling1d(
            in_channels=dim,
            out_channels=dim,
            kernel_size=frame_size
        )
        init.uniform(
            self.upsampling.conv_t.weight, -np.sqrt(6 / dim), np.sqrt(6 / dim)
        )
        init.constant(self.upsampling.bias, 0)
        if weight_norm:
            self.upsampling.conv_t = torch.nn.utils.weight_norm(
                self.upsampling.conv_t
            )
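The upsampling weights get a hand-computed uniform bound of √(6/dim) rather than one of the named schemes (for a square linear map the Xavier bound would be √(3/dim)). With the in-place API the call is otherwise unchanged; a sketch with dim as an assumed size:

import numpy as np
import torch
from torch.nn import init

dim = 512  # hypothetical hidden size
conv_t = torch.nn.ConvTranspose1d(dim, dim, kernel_size=16)
bound = np.sqrt(6 / dim)
init.uniform_(conv_t.weight, -bound, bound)
init.zeros_(conv_t.bias)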
Project: sourceseparation_misc    Author: ycemsubakan    | Project source | File source
def initializationhelper(param, nltype):

    c = 0.1 
    torchinit.uniform(param.weight, a=-c, b=c)

    #torchinit.xavier_uniform(param.weight, gain=c*torchinit.calculate_gain(nltype))
    c = 0.1
    torchinit.uniform(param.bias, a=-c, b=c)
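A small symmetric uniform range for both weight and bias is the simplest pattern in this collection; the helper above applies it to any module exposing .weight and .bias. A sketch of the same idea with the current API (the helper name is ours):

from torch import nn
from torch.nn import init

def init_uniform_small(module, c=0.1):
    # fill weight and bias in place with U(-c, c)
    init.uniform_(module.weight, a=-c, b=c)
    init.uniform_(module.bias, a=-c, b=c)

init_uniform_small(nn.Linear(20, 10))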
Project: DCN    Author: alexnowakvila    | Project source | File source
def fwd_split(self, input, batch, depth,
                  mergesort_split=False, mode='train', epoch=0):
        length = self.split.n
        var = 0.0
        # Iterate over scales
        e = Variable(torch.zeros((self.batch_size, length)).type(dtype),
                     requires_grad=False)
        mask = (input[:, :, 0] >= 0).type(dtype).squeeze()
        Phis, Bs, Inputs_N, Samples = ([] for ii in range(4))
        if not mergesort_split:
            for scale in range(depth):
                logits, probs, input_n, Phi = self.split(e, input,
                                                         mask, scale=scale)
                # Sample from probabilities and update embeddings
                rand = (Variable(torch.zeros(self.batch_size, length))
                        .type(dtype))
                init.uniform(rand)
                sample = (probs > rand).type(dtype)
                e = 2 * e + sample
                # Appends
                Samples.append(sample)
                Phis.append(Phi)
                Bs.append(probs)
                Inputs_N.append(input_n)
                # variance of Bernoulli probabilities
                var += self.compute_variance(probs, mask)
        else:
            e, Bs, Phis = utils.mergesort_split(mask.data.cpu().numpy(),
                                                depth)
            Inputs_N = [input for ii in range(depth)]
            Samples = Bs
            var = Variable(torch.zeros(1)).type(dtype)
        # computes log probabilities of binary actions for the policy gradient
        Log_Probs = self.log_probabilities(Bs, Samples, mask, depth)
        # pad embeddings with infinity to not affect embeddings argsort
        infty = 1e6
        e = e * mask + (1 - mask) * infty
        return var, Phis, Bs, Inputs_N, e, Log_Probs

    ###########################################################################
    #                             Merge Phase                                 #
    ###########################################################################