Python torch.nn.init module: constant() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how torch.nn.init.constant() is used. All of them rely on the legacy (pre-0.4) spelling; since PyTorch 0.4 the in-place variant torch.nn.init.constant_() is the preferred form.
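
As a quick orientation, here is a minimal self-contained sketch (not taken from any of the projects below) showing the call and its modern in-place replacement:

import torch.nn as nn
from torch.nn import init

linear = nn.Linear(4, 2)
# Legacy spelling used throughout the examples below (PyTorch <= 0.3):
#     init.constant(linear.bias, 0.5)
# In-place replacement available since PyTorch 0.4:
init.constant_(linear.bias, 0.5)
print(linear.bias)  # tensor([0.5000, 0.5000], requires_grad=True)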

Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        init.constant(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
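
The identity-per-gate initialization above is easy to check in isolation; a minimal sketch with an arbitrary hidden size:

import torch

h = 3
w = torch.eye(h).repeat(1, 4)  # shape (h, 4*h): one h-by-h block per LSTM gate
for block in w.chunk(4, dim=1):
    assert torch.equal(block, torch.eye(h))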
Project: samplernn-pytorch    Author: deepsound-project    | Project source | File source
def __init__(self, frame_size, dim, q_levels, weight_norm):
        super().__init__()

        self.q_levels = q_levels

        self.embedding = torch.nn.Embedding(
            self.q_levels,
            self.q_levels
        )

        self.input = torch.nn.Conv1d(
            in_channels=q_levels,
            out_channels=dim,
            kernel_size=frame_size,
            bias=False
        )
        init.kaiming_uniform(self.input.weight)
        if weight_norm:
            self.input = torch.nn.utils.weight_norm(self.input)

        self.hidden = torch.nn.Conv1d(
            in_channels=dim,
            out_channels=dim,
            kernel_size=1
        )
        init.kaiming_uniform(self.hidden.weight)
        init.constant(self.hidden.bias, 0)
        if weight_norm:
            self.hidden = torch.nn.utils.weight_norm(self.hidden)

        self.output = torch.nn.Conv1d(
            in_channels=dim,
            out_channels=q_levels,
            kernel_size=1
        )
        nn.lecun_uniform(self.output.weight)  # `nn` here is the project's local helper module, not torch.nn
        init.constant(self.output.bias, 0)
        if weight_norm:
            self.output = torch.nn.utils.weight_norm(self.output)
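
Note that `nn.lecun_uniform` above is not part of torch.nn; it is a helper defined in the project's own nn module. A minimal sketch of LeCun-uniform initialization, assuming the usual bound sqrt(3 / fan_in) (it leans on a private PyTorch fan helper):

import math
import torch

def lecun_uniform(tensor):
    # LeCun scaling targets Var(w) = 1 / fan_in, so the uniform bound is sqrt(3 / fan_in)
    fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(tensor)
    bound = math.sqrt(3.0 / fan_in)
    with torch.no_grad():
        return tensor.uniform_(-bound, bound)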
Project: benchmark    Author: pytorch    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        init.constant(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
Project: unsupervised-treelstm    Author: jihunchoi    | Project source | File source
def reset_parameters(self):
        if self.use_leaf_rnn:
            init.kaiming_normal(self.leaf_rnn_cell.weight_ih.data)
            init.orthogonal(self.leaf_rnn_cell.weight_hh.data)
            init.constant(self.leaf_rnn_cell.bias_ih.data, val=0)
            init.constant(self.leaf_rnn_cell.bias_hh.data, val=0)
            # Set forget bias to 1
            self.leaf_rnn_cell.bias_ih.data.chunk(4)[1].fill_(1)
            if self.bidirectional:
                init.kaiming_normal(self.leaf_rnn_cell_bw.weight_ih.data)
                init.orthogonal(self.leaf_rnn_cell_bw.weight_hh.data)
                init.constant(self.leaf_rnn_cell_bw.bias_ih.data, val=0)
                init.constant(self.leaf_rnn_cell_bw.bias_hh.data, val=0)
                # Set forget bias to 1
                self.leaf_rnn_cell_bw.bias_ih.data.chunk(4)[1].fill_(1)
        else:
            init.kaiming_normal(self.word_linear.weight.data)
            init.constant(self.word_linear.bias.data, val=0)
        self.treelstm_layer.reset_parameters()
        init.normal(self.comp_query.data, mean=0, std=0.01)
Project: DeepLab    Author: 2prime    | Project source | File source
def msra_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight)
            # Modified by lzh @ 201707251408:
            # <<< Old:
            # if m.bias:
            # >>> New:
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            # Modified by lzh @ 201707241734:
            # <<< Old:
            # if m.bias:
            # >>> New:
            if m.bias is not None:
            # --- End
                init.constant(m.bias, 0)

# Added by lzh @ 201707251404:
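
The change lzh documents above matters: truth-testing a multi-element tensor is ambiguous (recent PyTorch raises a RuntimeError), so `m.bias is not None` is the reliable way to detect a missing bias. A quick sketch:

import torch.nn as nn

conv = nn.Conv2d(3, 8, 3)        # bias=True by default, so conv.bias has 8 elements
try:
    if conv.bias:                # ambiguous truth value of a multi-element tensor
        pass
except RuntimeError as e:
    print(e)
if conv.bias is not None:        # the correct check
    nn.init.constant_(conv.bias, 0)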
Project: YellowFin_Pytorch    Author: JianGoForIt    | Project source | File source
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: a3c-mujoco    Author: Feryal    | Project source | File source
def __init__(self, observation_space, non_rgb_rgb_state_size, action_space,
                 hidden_size):
        super(ActorCritic, self).__init__()
        self.rgb_state_size = (6, 128, 128)
        self.action_size = 5
        self.relu = nn.ReLU(inplace=True)
        self.softmax = nn.Softmax()

        # the architecture is adapted from Sim2Real (Rusu et al., 2016)
        self.conv1 = nn.Conv2d(
            self.rgb_state_size[0], 16, 8, stride=4, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 5, stride=2)
        self.fc1 = nn.Linear(1152 + non_rgb_rgb_state_size, hidden_size)
        self.lstm = nn.LSTMCell(hidden_size, hidden_size)
        self.fc_actor1 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor2 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor3 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor4 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor5 = nn.Linear(hidden_size, self.action_size)
        self.fc_actor6 = nn.Linear(hidden_size, self.action_size)
        self.fc_critic = nn.Linear(hidden_size, 1)

        # Orthogonal weight initialisation
        for name, p in self.named_parameters():
            if 'weight' in name:
                init.orthogonal(p)
            elif 'bias' in name:
                init.constant(p, 0)
Project: ssd.pytorch    Author: amdegroot    | Project source | File source
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
Project: allennlp    Author: allenai    | Project source | File source
def test_l1_regularization(self):
        model = torch.nn.Sequential(
                torch.nn.Linear(5, 10),
                torch.nn.Linear(10, 5)
        )
        initializer = InitializerApplicator([(".*", lambda tensor: constant(tensor, -1))])
        initializer(model)
        value = RegularizerApplicator([("", L1Regularizer(1.0))])(model)
        # 115 because of biases: the two Linear layers hold 5*10 + 10*5 = 100
        # weights plus 10 + 5 = 15 biases, and each of the 115 parameters
        # contributes |-1| = 1 to the L1 penalty.
        assert value.data.numpy() == 115.0
Project: allennlp    Author: allenai    | Project source | File source
def test_l2_regularization(self):
        model = torch.nn.Sequential(
                torch.nn.Linear(5, 10),
                torch.nn.Linear(10, 5)
        )
        initializer = InitializerApplicator([(".*", lambda tensor: constant(tensor, 0.5))])
        initializer(model)
        value = RegularizerApplicator([("", L2Regularizer(1.0))])(model)
        # 28.75 = 115 parameters * 0.5^2, with an L2 coefficient of 1.0
        assert value.data.numpy() == 28.75
Project: allennlp    Author: allenai    | Project source | File source
def test_regularizer_applicator_respects_regex_matching(self):
        model = torch.nn.Sequential(
                torch.nn.Linear(5, 10),
                torch.nn.Linear(10, 5)
        )
        initializer = InitializerApplicator([(".*", lambda tensor: constant(tensor, 1.))])
        initializer(model)
        value = RegularizerApplicator([("weight", L2Regularizer(0.5)),
                                       ("bias", L1Regularizer(1.0))])(model)
        # 65 = 0.5 * (100 weights * 1^2) + 1.0 * (15 biases * 1)
        assert value.data.numpy() == 65.0
Project: textobjdetection    Author: andfoy    | Project source | File source
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
Project: open-reid    Author: Cysu    | Project source | File source
def reset_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
Project: open-reid    Author: Cysu    | Project source | File source
def reset_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
Project: braindecode    Author: robintibor    | Project source | File source
def glorot_weight_zero_bias(model):
    """
    Initialize the parameters of all modules: weights are initialized with
    Glorot/Xavier uniform initialization, biases are set to zero, and
    batch norm weights are set to 1.

    Parameters
    ----------
    model: Module
    """
    for module in model.modules():
        if hasattr(module, 'weight'):
            if not ('BatchNorm' in module.__class__.__name__):
                init.xavier_uniform(module.weight, gain=1)
            else:
                init.constant(module.weight, 1)
        if hasattr(module, 'bias'):
            if module.bias is not None:
                init.constant(module.bias, 0)
Project: wide-resnet.pytorch    Author: meliketoy    | Project source | File source
def conv_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform(m.weight, gain=np.sqrt(2))
        init.constant(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant(m.weight, 1)
        init.constant(m.bias, 0)
Project: realtime-action-detection    Author: gurkirt    | Project source | File source
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
Project: FewShotLearning    Author: gitabcworld    | Project source | File source
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: unet-pytorch    Author: jaxony    | Project source | File source
def weight_init(m):
        if isinstance(m, nn.Conv2d):
            init.xavier_normal(m.weight)
            init.constant(m.bias, 0)
Project: pytorch-cifar    Author: kuangliu    | Project source | File source
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: malmo-challenge    Author: Kaixhin    | Project source | File source
def __init__(self, hidden_size):
    super(ActorCritic, self).__init__()
    self.state_size = STATE_SIZE[0] * STATE_SIZE[1] * STATE_SIZE[2]

    self.elu = nn.ELU(inplace=True)
    self.softmax = nn.Softmax()
    self.sigmoid = nn.Sigmoid()

    # Pass state into model body
    self.conv1 = nn.Conv2d(STATE_SIZE[0], 32, 4, stride=2)
    self.conv2 = nn.Conv2d(32, 32, 3)
    self.fc1 = nn.Linear(1152, hidden_size)
    # Pass previous action, reward and timestep directly into LSTM
    self.lstm = nn.LSTMCell(hidden_size + ACTION_SIZE + 2, hidden_size)
    self.fc_actor1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_actor2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_class = nn.Linear(hidden_size, 1)

    # Orthogonal weight initialisation
    for name, p in self.named_parameters():
      if 'weight' in name:
        init.orthogonal(p)
      elif 'bias' in name:
        init.constant(p, 0)
    # Set LSTM forget gate bias to 1
    for name, p in self.lstm.named_parameters():
      if 'bias' in name:
        n = p.size(0)
        forget_start_idx, forget_end_idx = n // 4, n // 2
        init.constant(p[forget_start_idx:forget_end_idx], 1)
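
PyTorch stacks LSTM bias terms as four gate blocks in the order (input, forget, cell, output), so the slice from n // 4 to n // 2 above is exactly the forget gate. The same idea with the modern in-place API (a sketch):

import torch.nn as nn
from torch.nn import init

lstm = nn.LSTMCell(16, 16)
for name, p in lstm.named_parameters():
    if 'bias' in name:
        n = p.size(0)                        # n = 4 * hidden_size
        init.constant_(p[n // 4:n // 2], 1)  # forget-gate block only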
Project: ShuffleNet    Author: jaxony    | Project source | File source
def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
Project: pytorch    Author: tylergenter    | Project source | File source
def test_constant(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5, as_variable=as_variable)
                val = self._random_float(1, 10)
                init.constant(input_tensor, val)
                if as_variable:
                    input_tensor = input_tensor.data

                self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
Project: benchmark    Author: pytorch    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        init.orthogonal(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
Project: covfefe    Author: deepnn    | Project source | File source
def constant(w, val):
    return nn.constant(w, val=val)  # `nn` presumably aliases torch.nn.init here, not torch.nn
Project: unsupervised-treelstm    Author: jihunchoi    | Project source | File source
def reset_parameters(self):
        if self.use_batchnorm:
            self.bn_mlp_input.reset_parameters()
            self.bn_mlp_output.reset_parameters()
        for i in range(self.num_layers):
            linear_layer = self.mlp[i][0]
            init.kaiming_normal(linear_layer.weight.data)
            init.constant(linear_layer.bias.data, val=0)
        init.uniform(self.clf_linear.weight.data, -0.005, 0.005)
        init.constant(self.clf_linear.bias.data, val=0)
Project: unsupervised-treelstm    Author: jihunchoi    | Project source | File source
def reset_parameters(self):
        init.kaiming_normal(self.comp_linear.weight.data)
        init.constant(self.comp_linear.bias.data, val=0)
Project: unsupervised-treelstm    Author: jihunchoi    | Project source | File source
def reset_parameters(self):
        if self.use_batchnorm:
            self.bn_mlp_input.reset_parameters()
            self.bn_mlp_output.reset_parameters()
        for i in range(self.num_layers):
            linear_layer = self.mlp[i][0]
            init.kaiming_normal(linear_layer.weight.data)
            init.constant(linear_layer.bias.data, val=0)
        init.uniform(self.clf_linear.weight.data, -0.002, 0.002)
        init.constant(self.clf_linear.bias.data, val=0)
Project: ssd_pytorch    Author: miraclebiu    | Project source | File source
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_constant(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5, as_variable=as_variable)
                val = self._random_float(1, 10)
                init.constant(input_tensor, val)
                if as_variable:
                    input_tensor = input_tensor.data

                self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
Project: yolov2    Author: zhangkaij    | Project source | File source
def reset_parameters(self):
        init.constant(self.weight, self.gamma)
Project: MatchingNetworks    Author: gitabcworld    | Project source | File source
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: DeepLab    Author: 2prime    | Project source | File source
def msra_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight)
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: DeepLab    Author: 2prime    | Project source | File source
def xavier_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.xavier_normal(m.weight)
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: DeepLab    Author: 2prime    | Project source | File source
def gauss_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.normal(m.weight, 0.0, 0.01)  # the original omitted m.weight, which is a bug
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
# --- End
Project: pytorch    Author: ezyang    | Project source | File source
def test_constant(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5, as_variable=as_variable)
                val = self._random_float(1, 10)
                init.constant(input_tensor, val)
                if as_variable:
                    input_tensor = input_tensor.data

                self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz    | Project source | File source
def weights_init_normal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
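
Functions like weights_init_normal receive a single module, so they are meant to be driven by Module.apply, which visits every submodule. A minimal usage sketch with a toy network (using the legacy init API from above):

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
net.apply(weights_init_normal)  # calls weights_init_normal(m) on every submodule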
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz    | Project source | File source
def weights_init_xavier(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz    | Project source | File source
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz    | Project source | File source
def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: URNN-PyTorch    Author: jingli9111    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters
        """
        init.uniform(self.thetaA, a=-0.1, b=0.1)
        init.uniform(self.thetaB, a=-0.1, b=0.1)
        init.uniform(self.U, a=-0.1, b=0.1)
        init.constant(self.bias.data, val=0)
Project: URNN-PyTorch    Author: jingli9111    | Project source | File source
def reset_parameters(self):
        """
        Initialize parameters (TODO).
        """
        init.uniform(self.thetaA, a=-0.1, b=0.1)
        init.uniform(self.thetaB, a=-0.1, b=0.1)
        init.uniform(self.U, a=-0.1, b=0.1)
        init.orthogonal(self.gate_U.data)

        gate_W_data = torch.eye(self.hidden_size)
        gate_W_data = gate_W_data.repeat(1, 2)
        self.gate_W.data.set_(gate_W_data)

        init.constant(self.bias.data, val=0)
        init.constant(self.gate_bias.data, val=0)
Project: NoisyNet-A3C    Author: Kaixhin    | Project source | File source
def reset_parameters(self):
    if hasattr(self, 'sigma_weight'):  # Only init after all params added (otherwise super().__init__() fails)
      init.uniform(self.weight, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))
      init.uniform(self.bias, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))
      init.constant(self.sigma_weight, self.sigma_init)
      init.constant(self.sigma_bias, self.sigma_init)
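
The bound sqrt(3 / in_features) gives each weight variance bound^2 / 3 = 1 / in_features, i.e. LeCun-style scaling. A quick numeric check (a standalone sketch, not from the project):

import math
import torch

n = 100
bound = math.sqrt(3 / n)
samples = torch.empty(1_000_000).uniform_(-bound, bound)
print(samples.var().item(), 1 / n)  # both approximately 0.01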
Project: pytorch    Author: pytorch    | Project source | File source
def test_constant(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5, as_variable=as_variable)
                val = self._random_float(1, 10)
                init.constant(input_tensor, val)
                if as_variable:
                    input_tensor = input_tensor.data

                self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
Project: generative_models    Author: j-min    | Project source | File source
def weights_init_normal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: generative_models    Author: j-min    | Project source | File source
def weights_init_xavier(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: generative_models    Author: j-min    | Project source | File source
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: SRU    Author: akuzeee    | Project source | File source
def initWeight(self, init_forget_bias=1):
        # See details in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py
        for name, params in self.named_parameters():
            # Initialize weights with Xavier uniform
            if 'weight' in name:
                init.xavier_uniform(params)

            # GRU has no forget gate; bias the update-gate terms b_iz and b_hz instead
            elif 'gru.bias_ih_l' in name:
                b_ir, b_iz, b_in = params.chunk(3, 0)
                init.constant(b_iz, init_forget_bias)
            elif 'gru.bias_hh_l' in name:
                b_hr, b_hz, b_hn = params.chunk(3, 0)
                init.constant(b_hz, init_forget_bias)

            # Initialize all remaining biases to zero
            else:
                init.constant(params, 0)
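
For reference, PyTorch stacks GRU bias terms as three gate blocks in the order (reset, update, new), which is why chunk(3, 0) above unpacks to (b_ir, b_iz, b_in). The same pattern with the modern in-place API (a sketch):

import torch.nn as nn
from torch.nn import init

gru = nn.GRU(input_size=8, hidden_size=8)
for name, params in gru.named_parameters():
    if 'bias_ih' in name or 'bias_hh' in name:
        b_r, b_z, b_n = params.chunk(3, 0)  # (reset, update, new) gate blocks
        init.constant_(b_z, 1)              # bias the update gate, as above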