Python torch.nn.init module: xavier_uniform() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use torch.nn.init.xavier_uniform().
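Before the examples, here is a minimal self-contained sketch of the call (the layer sizes are arbitrary). In recent PyTorch releases the initializer is spelled xavier_uniform_ (in-place); the xavier_uniform name used throughout the snippets below is kept as a deprecated alias:

import torch.nn as nn
import torch.nn.init as init

# A toy linear layer; any module whose weight has at least 2 dimensions works.
layer = nn.Linear(128, 64)

# Fill the weight in place with values drawn from U(-a, a),
# where a = gain * sqrt(6 / (fan_in + fan_out)).
init.xavier_uniform_(layer.weight, gain=init.calculate_gain('relu'))

# Xavier initialization does not cover biases; zero them explicitly.
init.constant_(layer.bias, 0.0)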

Project: torch_light | Author: ne7ermore
def _init_weight(self):
        init.xavier_uniform(self.transitions)
        self.transitions.data[START, :].fill_(-10000.)
        self.transitions.data[:, STOP].fill_(-10000.)
Project: torch_light | Author: ne7ermore
def _init_weight(self, scope=1.):
        init.xavier_uniform(self.char_ebd.weight)
Project: torch_light | Author: ne7ermore
def _init_weights(self, scope=1.):
        self.embedded_chars_left.weight.data.uniform_(-scope, scope)
        self.embedded_chars_right.weight.data.uniform_(-scope, scope)
        init.xavier_uniform(self.simi_weight)
        init.xavier_uniform(self.out_lr.weight)
        init.xavier_uniform(self.logistic.weight)
Project: torch_light | Author: ne7ermore
def _init_weights(self, scope=0.25):
        self.lookup_table.weight.data.uniform_(-scope, scope)
        init.xavier_uniform(self.logistic.weight)
Project: torch_light | Author: ne7ermore
def _init_weights(self, scope=0.25):
        self.lookup_table.weight.data.uniform_(-scope, scope)
        init.xavier_uniform(self.logistic.weight)
Project: textobjdetection | Author: andfoy
def xavier(param):
    init.xavier_uniform(param)
Project: textobjdetection | Author: andfoy
def xavier(param):
    init.xavier_uniform(param)
Project: textobjdetection | Author: andfoy
def xavier(param):
    init.xavier_uniform(param)
Project: braindecode | Author: robintibor
def glorot_weight_zero_bias(model):
    """
    Initialize parameters of all modules
    by initializing weights with glorot uniform/xavier initialization,
    and setting biases to zero.
    Weights from batch norm layers are set to 1.

    Parameters
    ----------
    model: Module
    """
    for module in model.modules():
        if hasattr(module, 'weight'):
            if not ('BatchNorm' in module.__class__.__name__):
                init.xavier_uniform(module.weight, gain=1)
            else:
                init.constant(module.weight, 1)
        if hasattr(module, 'bias'):
            if module.bias is not None:
                init.constant(module.bias, 0)
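For context, a minimal usage sketch for a helper like glorot_weight_zero_bias above, against a small made-up model (the layer shapes are arbitrary assumptions):

import torch.nn as nn

# A small hypothetical model; any Module works.
model = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(),
    nn.Linear(16, 10),
)

# Xavier-uniform weights, BatchNorm weights set to 1, all biases zeroed.
glorot_weight_zero_bias(model)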
Project: wide-resnet.pytorch | Author: meliketoy
def conv_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform(m.weight, gain=np.sqrt(2))
        init.constant(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant(m.weight, 1)
        init.constant(m.bias, 0)
Project: FewShotLearning | Author: gitabcworld
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: FewShotLearning | Author: gitabcworld
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: love-letter | Author: user01
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.xavier_uniform(m.weight.data)
        m.bias.data.fill_(0)
Project: pyprob | Author: probprog
def __init__(self, input_dim, dropout=0, softplus_boost=1.0):
        super(ProposalBeta, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, 2)
        self.drop = nn.Dropout(dropout)
        self.softplus_boost = softplus_boost
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
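The gain argument in the snippet above rescales the Xavier bound for the nonlinearity that follows the layer. A quick sketch of the standard gains returned by init.calculate_gain; the relu gain is exactly sqrt(2), which is why several projects below pass np.sqrt(2) directly:

import math
import torch.nn.init as init

for nonlinearity in ['linear', 'sigmoid', 'tanh', 'relu']:
    print(nonlinearity, init.calculate_gain(nonlinearity))

# The relu gain matches the np.sqrt(2) used elsewhere on this page.
assert init.calculate_gain('relu') == math.sqrt(2.0)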
Project: pyprob | Author: probprog
def __init__(self, input_dim, output_dim, dropout=0, softplus_boost=1.0):
        super(ProposalMultivariateNormal, self).__init__()
        self.mean_lin1 = nn.Linear(input_dim, input_dim)
        self.mean_drop = nn.Dropout(dropout)
        self.mean_lin2 = nn.Linear(input_dim, output_dim)

        self.vars_lin1 = nn.Linear(input_dim, input_dim)
        self.vars_drop = nn.Dropout(dropout)
        self.vars_lin2 = nn.Linear(input_dim, output_dim)

        self.softplus_boost = softplus_boost

        init.xavier_uniform(self.mean_lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.mean_lin2.weight)
        init.xavier_uniform(self.vars_lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.vars_lin2.weight)
Project: pyprob | Author: probprog
def __init__(self, input_example_non_batch, output_dim, dropout=0):
        super(ObserveEmbeddingFC, self).__init__()
        self.input_dim = input_example_non_batch.nelement()
        self.lin1 = nn.Linear(self.input_dim, output_dim)
        self.lin2 = nn.Linear(output_dim, output_dim)
        self.drop = nn.Dropout(dropout)
        init.xavier_uniform(self.lin1.weight, gain=np.sqrt(2.0))
        init.xavier_uniform(self.lin2.weight, gain=np.sqrt(2.0))
Project: pytorch | Author: tylergenter
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
        for as_variable in [True, False]:
            for dims in [0, 1]:
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable)
                with self.assertRaises(ValueError):
                    init.xavier_uniform(tensor)
Project: pytorch | Author: tylergenter
def test_xavier_uniform(self):
        for as_variable in [True, False]:
            for use_gain in [True, False]:
                for dims in [2, 4]:
                    input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25,
                                                                 as_variable=as_variable)
                    gain = 1

                    if use_gain:
                        gain = self._random_float(0.1, 2)
                        init.xavier_uniform(input_tensor, gain=gain)
                    else:
                        init.xavier_uniform(input_tensor)

                    if as_variable:
                        input_tensor = input_tensor.data

                    fan_in = input_tensor.size(1)
                    fan_out = input_tensor.size(0)
                    if input_tensor.dim() > 2:
                        fan_in *= input_tensor[0, 0].numel()
                        fan_out *= input_tensor[0, 0].numel()

                    expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
                    bounds = expected_std * math.sqrt(3)
                    assert self._is_uniform(input_tensor, -bounds, bounds)
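The test above encodes the textbook Xavier-uniform bound: values are drawn from U(-a, a) with a = gain * sqrt(6 / (fan_in + fan_out)), i.e. expected_std * sqrt(3). A small sanity-check sketch for a plain 2-D weight (sizes are arbitrary):

import math
import torch
import torch.nn.init as init

fan_out, fan_in = 64, 128          # rows = fan_out, columns = fan_in for a 2-D weight
w = torch.empty(fan_out, fan_in)   # uninitialized; filled by the call below
init.xavier_uniform_(w, gain=1.0)

bound = 1.0 * math.sqrt(6.0 / (fan_in + fan_out))  # gain * sqrt(3) * sqrt(2 / (fan_in + fan_out))
assert w.min().item() >= -bound and w.max().item() <= bound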
Project: covfefe | Author: deepnn
def xavier_uniform(w, gain=1):
    return nn.xavier_uniform(w, gain=gain)
Project: ml-utils | Author: LinxiFan
def conv_fc_init(layer,
                 weight_init=init.xavier_uniform,
                 bias_init=zero_init):
    """
    Initialize a layer's filter weights by xavier and bias weights to zero
    The layer can be either nn.ConvNd or nn.Linear
    """
    if isinstance(layer, (list, nn.ModuleList)):
        return type(layer)([conv_fc_init(l, weight_init=weight_init, bias_init=bias_init)
                            for l in layer])
    assert is_conv_layer(layer) or isinstance(layer, nn.Linear)
    weight_init(layer.weight)
    bias_init(layer.bias)
    return layer
Project: ssd_pytorch | Author: miraclebiu
def xavier(param):
    init.xavier_uniform(param)
Project: pytorch-coriander | Author: hughperkins
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
        for as_variable in [True, False]:
            for dims in [0, 1]:
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable)
                with self.assertRaises(ValueError):
                    init.xavier_uniform(tensor)
Project: pytorch-coriander | Author: hughperkins
def test_xavier_uniform(self):
        for as_variable in [True, False]:
            for use_gain in [True, False]:
                for dims in [2, 4]:
                    input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25,
                                                                 as_variable=as_variable)
                    gain = 1

                    if use_gain:
                        gain = self._random_float(0.1, 2)
                        init.xavier_uniform(input_tensor, gain=gain)
                    else:
                        init.xavier_uniform(input_tensor)

                    if as_variable:
                        input_tensor = input_tensor.data

                    fan_in = input_tensor.size(1)
                    fan_out = input_tensor.size(0)
                    if input_tensor.dim() > 2:
                        fan_in *= input_tensor[0, 0].numel()
                        fan_out *= input_tensor[0, 0].numel()

                    expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
                    bounds = expected_std * math.sqrt(3)
                    assert self._is_uniform(input_tensor, -bounds, bounds)
Project: MatchingNetworks | Author: gitabcworld
def weights_init(self,module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight, gain=np.sqrt(2))
                init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: pytorch | Author: ezyang
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
        for as_variable in [True, False]:
            for dims in [0, 1]:
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable)
                with self.assertRaises(ValueError):
                    init.xavier_uniform(tensor)
Project: pytorch | Author: ezyang
def test_xavier_uniform(self):
        for as_variable in [True, False]:
            for use_gain in [True, False]:
                for dims in [2, 4]:
                    input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25,
                                                                 as_variable=as_variable)
                    gain = 1

                    if use_gain:
                        gain = self._random_float(0.1, 2)
                        init.xavier_uniform(input_tensor, gain=gain)
                    else:
                        init.xavier_uniform(input_tensor)

                    if as_variable:
                        input_tensor = input_tensor.data

                    fan_in = input_tensor.size(1)
                    fan_out = input_tensor.size(0)
                    if input_tensor.dim() > 2:
                        fan_in *= input_tensor[0, 0].numel()
                        fan_out *= input_tensor[0, 0].numel()

                    expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
                    bounds = expected_std * math.sqrt(3)
                    assert self._is_uniform(input_tensor, -bounds, bounds)
Project: squad_rasor_nn | Author: hsgodhia
def init_param(self, param):
        if len(param.size()) < 2:
            init.uniform(param)
        else:            
            init.xavier_uniform(param)
Project: squad_rasor_nn | Author: hsgodhia
def init_param(self, param):
        if len(param.size()) < 2:
            init.uniform(param)
        else:            
            init.xavier_uniform(param)
Project: pytorch | Author: pytorch
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
        for as_variable in [True, False]:
            for dims in [0, 1]:
                tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1, as_variable=as_variable)
                with self.assertRaises(ValueError):
                    init.xavier_uniform(tensor)
Project: pytorch | Author: pytorch
def test_xavier_uniform(self):
        for as_variable in [True, False]:
            for use_gain in [True, False]:
                for dims in [2, 4]:
                    input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25,
                                                                 as_variable=as_variable)
                    gain = 1

                    if use_gain:
                        gain = self._random_float(0.1, 2)
                        init.xavier_uniform(input_tensor, gain=gain)
                    else:
                        init.xavier_uniform(input_tensor)

                    if as_variable:
                        input_tensor = input_tensor.data

                    fan_in = input_tensor.size(1)
                    fan_out = input_tensor.size(0)
                    if input_tensor.dim() > 2:
                        fan_in *= input_tensor[0, 0].numel()
                        fan_out *= input_tensor[0, 0].numel()

                    expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
                    bounds = expected_std * math.sqrt(3)
                    assert self._is_uniform(input_tensor, -bounds, bounds)
Project: pytorch-vqa | Author: Cyanogenoid
def __init__(self, embedding_tokens):
        super(Net, self).__init__()
        question_features = 1024
        vision_features = config.output_features
        glimpses = 2

        self.text = TextProcessor(
            embedding_tokens=embedding_tokens,
            embedding_features=300,
            lstm_features=question_features,
            drop=0.5,
        )
        self.attention = Attention(
            v_features=vision_features,
            q_features=question_features,
            mid_features=512,
            glimpses=2,
            drop=0.5,
        )
        self.classifier = Classifier(
            in_features=glimpses * vision_features + question_features,
            mid_features=1024,
            out_features=config.max_answers,
            drop=0.5,
        )

        for m in self.modules():
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.xavier_uniform(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
Project: pytorch-vqa | Author: Cyanogenoid
def __init__(self, embedding_tokens, embedding_features, lstm_features, drop=0.0):
        super(TextProcessor, self).__init__()
        self.embedding = nn.Embedding(embedding_tokens, embedding_features, padding_idx=0)
        self.drop = nn.Dropout(drop)
        self.tanh = nn.Tanh()
        self.lstm = nn.LSTM(input_size=embedding_features,
                            hidden_size=lstm_features,
                            num_layers=1)
        self.features = lstm_features

        self._init_lstm(self.lstm.weight_ih_l0)
        self._init_lstm(self.lstm.weight_hh_l0)
        self.lstm.bias_ih_l0.data.zero_()
        self.lstm.bias_hh_l0.data.zero_()

        init.xavier_uniform(self.embedding.weight)
Project: pytorch-vqa | Author: Cyanogenoid
def _init_lstm(self, weight):
        for w in weight.chunk(4, 0):
            init.xavier_uniform(w)
Project: SRU | Author: akuzeee
def initWeight(self, init_forget_bias=1):
        # See details in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py
        for name, params in self.named_parameters():
            # Initialize weights with Xavier initialization
            if 'weight' in name:
                init.xavier_uniform(params)

            # Set the GRU update-gate biases b_iz and b_hz (the forget-gate analogue) to init_forget_bias
            elif 'gru.bias_ih_l' in name:
                b_ir, b_iz, b_in = params.chunk(3, 0)
                init.constant(b_iz, init_forget_bias)
            elif 'gru.bias_hh_l' in name:
                b_hr, b_hz, b_hn = params.chunk(3, 0)
                init.constant(b_hz, init_forget_bias)

            # Initialize the remaining biases to 0
            else:
                init.constant(params, 0)
Project: SRU | Author: akuzeee
def initWeight(self, init_forget_bias=1):
        # See https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py
        for name, params in self.named_parameters():
            # Initialize weights with Xavier initialization
            if 'weight' in name:
                init.xavier_uniform(params)

            # Set the LSTM forget-gate biases b_if and b_hf to init_forget_bias
            elif 'lstm.bias_ih_l' in name:
                b_ii, b_if, b_ig, b_i0 = params.chunk(4, 0)
                init.constant(b_if, init_forget_bias)
            elif 'lstm.bias_hh_l' in name:
                b_hi, b_hf, b_hg, b_h0 = params.chunk(4, 0)
                init.constant(b_hf, init_forget_bias)

            # Initialize the remaining biases to 0
            else:
                init.constant(params, 0)
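The two snippets above depend on the gate ordering of PyTorch's recurrent layers (reset, update, new for GRU; input, forget, cell, output for LSTM), as documented in the rnn.py module they link to. A minimal sketch of the same forget-bias trick against a plain nn.LSTM, assuming that standard layout (sizes are arbitrary):

import torch.nn as nn
import torch.nn.init as init

lstm = nn.LSTM(input_size=32, hidden_size=64, num_layers=1)

for name, param in lstm.named_parameters():
    if 'weight' in name:
        # weight_ih_l0 / weight_hh_l0 are 2-D, so Xavier applies directly.
        init.xavier_uniform_(param)
    else:
        # bias_ih_l0 / bias_hh_l0 are laid out as [b_i | b_f | b_g | b_o].
        init.constant_(param, 0.0)
        h = lstm.hidden_size
        param.data[h:2 * h].fill_(1.0)  # forget-gate slice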
Project: intel-cervical-cancer | Author: wangg12
def weights_init(m):
  # classname = m.__class__.__name__
  if isinstance(m, nn.Conv2d):
    #print('init conv2d')
    #init.xavier_uniform(m.weight.data, gain=np.sqrt(2.0))
    init.kaiming_uniform(m.weight.data, mode='fan_in')
    # m.weight.data.normal_(0.0, 0.02)
  if isinstance(m, nn.Linear):
    #print('init fc')
    init.kaiming_uniform(m.weight.data, mode='fan_in')
    # size = m.weight.size()
    # fan_out = size[0] # number of rows
    # fan_in = size[1] # number of columns
    # variance = np.sqrt(2.0/(fan_in + fan_out))
    # m.weight.data.uniform_(0.0, variance)
Project: OpenNMT-py | Author: OpenNMT
def __init__(self, input_size, width=3, dropout=0.2, nopad=False):
        super(GatedConv, self).__init__()
        self.conv = WeightNormConv2d(input_size, 2 * input_size,
                                     kernel_size=(width, 1), stride=(1, 1),
                                     padding=(width // 2 * (1 - nopad), 0))
        init.xavier_uniform(self.conv.weight, gain=(4 * (1 - dropout))**0.5)
        self.dropout = nn.Dropout(dropout)
Project: pytorch-nec | Author: mjacar
def initialize_weights(self):
    conv_layers = [v for k,v in self._modules.iteritems() if 'conv' in k]
    for layer in conv_layers:
      init.xavier_uniform(layer.weight)
    init.xavier_uniform(self.head.weight)
    init.xavier_uniform(self.fc.weight)
Project: pytorch-nec | Author: mjacar
def initialize_weights(self):
    conv_layers = [v for k,v in self._modules.iteritems() if 'conv' in k]
    for layer in conv_layers:
      init.xavier_uniform(layer.weight)
    init.xavier_uniform(self.head.weight)
Project: ssd.pytorch | Author: amdegroot
def xavier(param):
    init.xavier_uniform(param)
Project: pytorch-tutorials | Author: tfygg
def initNetParams(net):
    '''Init net parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.xavier_uniform(m.weight)
            if m.bias:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias:
                init.constant(m.bias, 0)
Project: pyprob | Author: probprog
def __init__(self, input_dim, output_dim, dropout=0, softmax_boost=1.0):
        super(ProposalUniformDiscrete, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, output_dim)
        self.drop = nn.Dropout(dropout)
        self.softmax_boost = softmax_boost
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
Project: pyprob | Author: probprog
def __init__(self, input_dim, dropout=0):
        super(ProposalNormal, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, 2)
        self.drop = nn.Dropout(dropout)
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
Project: pyprob | Author: probprog
def __init__(self, input_dim, dropout=0):
        super(ProposalLaplace, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, 2)
        self.drop = nn.Dropout(dropout)
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
Project: pyprob | Author: probprog
def __init__(self, input_dim, dropout=0, softmax_boost=1.0):
        super(ProposalFlip, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, 1)
        self.drop = nn.Dropout(dropout)
        self.softmax_boost = softmax_boost
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
Project: pyprob | Author: probprog
def __init__(self, input_dim, output_dim, dropout=0, softmax_boost=1.0):
        super(ProposalDiscrete, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, output_dim)
        self.drop = nn.Dropout(dropout)
        self.softmax_boost = softmax_boost
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
Project: pyprob | Author: probprog
def __init__(self, input_dim, dropout=0, softplus_boost=1.0):
        super(ProposalUniformContinuous, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, 2)
        self.drop = nn.Dropout(dropout)
        self.softplus_boost = softplus_boost
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
Project: pyprob | Author: probprog
def __init__(self, input_dim, mixture_components=10, dropout=0):
        super(ProposalUniformContinuousAlt, self).__init__()
        self.mixture_components = mixture_components
        self.output_dim = 3 * mixture_components
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, self.output_dim)
        self.drop = nn.Dropout(dropout)
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
Project: pyprob | Author: probprog
def __init__(self, input_dim, dropout=0, softplus_boost=1.0):
        super(ProposalGamma, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.lin2 = nn.Linear(input_dim, 2)
        self.drop = nn.Dropout(dropout)
        self.softplus_boost = softplus_boost
        init.xavier_uniform(self.lin1.weight, gain=init.calculate_gain('relu'))
        init.xavier_uniform(self.lin2.weight)
Project: sourceseparation_misc | Author: ycemsubakan
def initializationhelper(param, nltype):

    c = 0.1 
    torchinit.uniform(param.weight, a=-c, b=c)

    #torchinit.xavier_uniform(param.weight, gain=c*torchinit.calculate_gain(nltype))
    c = 0.1
    torchinit.uniform(param.bias, a=-c, b=c)