Python torch.nn.init module: normal() example source code

We collected the following 50 code examples from open-source Python projects to illustrate how to use torch.nn.init.normal().
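
A note on API age: these snippets use the pre-0.4 spellings (init.normal, init.constant, init.kaiming_normal, ...), which later PyTorch releases deprecated in favor of the trailing-underscore, in-place variants (init.normal_, init.constant_, ...). A minimal sketch of the call in both styles, assuming a freshly constructed layer named fc:

from torch import nn
from torch.nn import init

fc = nn.Linear(128, 10)
init.normal(fc.weight, mean=0, std=0.01)      # legacy spelling used throughout this page
# init.normal_(fc.weight, mean=0.0, std=0.01) # equivalent call in PyTorch >= 0.4
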

Project: unsupervised-treelstm    Author: jihunchoi
def reset_parameters(self):
        if self.use_leaf_rnn:
            init.kaiming_normal(self.leaf_rnn_cell.weight_ih.data)
            init.orthogonal(self.leaf_rnn_cell.weight_hh.data)
            init.constant(self.leaf_rnn_cell.bias_ih.data, val=0)
            init.constant(self.leaf_rnn_cell.bias_hh.data, val=0)
            # Set forget bias to 1
            self.leaf_rnn_cell.bias_ih.data.chunk(4)[1].fill_(1)
            if self.bidirectional:
                init.kaiming_normal(self.leaf_rnn_cell_bw.weight_ih.data)
                init.orthogonal(self.leaf_rnn_cell_bw.weight_hh.data)
                init.constant(self.leaf_rnn_cell_bw.bias_ih.data, val=0)
                init.constant(self.leaf_rnn_cell_bw.bias_hh.data, val=0)
                # Set forget bias to 1
                self.leaf_rnn_cell_bw.bias_ih.data.chunk(4)[1].fill_(1)
        else:
            init.kaiming_normal(self.word_linear.weight.data)
            init.constant(self.word_linear.bias.data, val=0)
        self.treelstm_layer.reset_parameters()
        init.normal(self.comp_query.data, mean=0, std=0.01)
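
The chunk(4)[1].fill_(1) trick above relies on how PyTorch packs LSTM biases: bias_ih concatenates the input, forget, cell, and output gate biases, each hidden_size long, so the second chunk is exactly the forget-gate slice. A minimal standalone sketch (the cell here is illustrative):

from torch import nn

cell = nn.LSTMCell(input_size=10, hidden_size=20)
# bias_ih is packed as [b_i | b_f | b_g | b_o]
b_i, b_f, b_g, b_o = cell.bias_ih.data.chunk(4)
b_f.fill_(1)  # chunks are views, so this writes the forget bias in place
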
Project: DeepLab    Author: 2prime
def msra_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight)
            # Modified by lzh @ 201707251408:
            # <<< Old:
            # if m.bias:
            # >>> New:
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            # Modified by lzh @ 201707241734:
            # <<< Old:
            # if m.bias:
            # >>> New:
            if m.bias is not None:
            # --- End
                init.constant(m.bias, 0)

# Added by lzh @ 201707251404:
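
The lzh diff above fixes a real pitfall rather than a style nit: truth-testing a multi-element tensor is ambiguous (and raises an error in later PyTorch versions), while a Conv2d built with bias=False has m.bias set to None. A small illustration of the safe presence check:

from torch import nn
from torch.nn import init

conv = nn.Conv2d(3, 8, kernel_size=3)                  # bias is a Parameter here
no_bias = nn.Conv2d(3, 8, kernel_size=3, bias=False)   # bias is None here
for m in (conv, no_bias):
    if m.bias is not None:  # `if m.bias:` would truth-test the tensor itself
        init.constant(m.bias, 0)
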
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz
def define_D(input_nc, ndf, which_model_netD,
             n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)

    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'pixel':
        netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' %
                                  which_model_netD)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
Project: seqmod    Author: emanjavacas
def make_lm_hook(d, seed_texts=None, max_seq_len=25, gpu=False,
                 method='sample', temperature=1, width=5,
                 early_stopping=None, validate=True):
    """
    Make a generator hook for a normal language model
    """

    def hook(trainer, epoch, batch_num, checkpoint):
        trainer.log("info", "Checking training...")
        if validate:
            loss = sum(trainer.validate_model().pack())
            trainer.log("info", "Valid loss: {:g}".format(loss))
            trainer.log("info", "Registering early stopping loss...")
            if early_stopping is not None:
                early_stopping.add_checkpoint(loss)
        trainer.log("info", "Generating text...")
        scores, hyps = trainer.model.generate(
            d, seed_texts=seed_texts, max_seq_len=max_seq_len, gpu=gpu,
            method=method, temperature=temperature, width=width)
        hyps = [format_hyp(score, hyp, hyp_num + 1, d)
                for hyp_num, (score, hyp) in enumerate(zip(scores, hyps))]
        trainer.log("info", '\n***' + ''.join(hyps) + "\n***")

    return hook
Project: seqGAN    Author: suragnair
def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, gpu=False, oracle_init=False):
        super(Generator, self).__init__()
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.gpu = gpu

        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.gru = nn.GRU(embedding_dim, hidden_dim)
        self.gru2out = nn.Linear(hidden_dim, vocab_size)

        # initialise oracle network with N(0,1)
        # otherwise variance of initialisation is very small => high NLL for data sampled from the same model
        if oracle_init:
            for p in self.parameters():
                init.normal(p, 0, 1)
Project: YellowFin_Pytorch    Author: JianGoForIt
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: pytorch-skipthoughts    Author: kaniblu
def reset_parameters(self):
        I.normal(self.embeddings.weight.data, mean=0, std=0.01)
        I.xavier_normal(self.W_i.weight.data)
        I.xavier_normal(self.W_o.weight.data)

        init_rnn_cell(self.encoder)

        for i in range(self.n_decoders):
            decoder = getattr(self, "decoder{}".format(i))
            init_rnn_cell(decoder)
Project: open-reid    Author: Cysu
def reset_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
Project: open-reid    Author: Cysu
def reset_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
Project: pytorch-cifar    Author: kuangliu
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: ShuffleNet    Author: jaxony
def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
Project: pytorch    Author: tylergenter
def test_normal(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                mean = self._random_float(-3, 3)
                std = self._random_float(1, 5)
                init.normal(input_tensor, mean=mean, std=std)

                assert self._is_normal(input_tensor, mean, std)
Project: covfefe    Author: deepnn
def normal(w, mean=0, std=1):
    return nn.init.normal(w, mean=mean, std=std)  # torch.nn has no normal(); delegate to torch.nn.init
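
Assuming nn here is torch.nn, the wrapper simply forwards to torch.nn.init; a quick usage sketch with an illustrative tensor:

import torch

w = torch.Tensor(3, 4)       # uninitialized storage
normal(w, mean=0, std=0.02)  # filled in place with draws from N(0, 0.02**2)
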
Project: unsupervised-treelstm    Author: jihunchoi
def reset_parameters(self):
        init.normal(self.word_embedding.weight.data, mean=0, std=0.01)
        self.encoder.reset_parameters()
        self.classifier.reset_parameters()
Project: unsupervised-treelstm    Author: jihunchoi
def reset_parameters(self):
        init.normal(self.word_embedding.weight.data, mean=0, std=0.01)
        self.encoder.reset_parameters()
        self.classifier.reset_parameters()
Project: pytorch-coriander    Author: hughperkins
def test_normal(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                mean = self._random_float(-3, 3)
                std = self._random_float(1, 5)
                init.normal(input_tensor, mean=mean, std=std)

                assert self._is_normal(input_tensor, mean, std)
Project: DeepLab    Author: 2prime
def msra_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight)
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: DeepLab    Author: 2prime
def xavier_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.xavier_normal(m.weight)
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: DeepLab    Author: 2prime
def gauss_init(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.normal(m.weight, 0.0, 0.01)
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
# --- End
Project: Efficient-Dynamic-Batching    Author: jsuarez5341
def initWeights(net, scheme='orthogonal'):
   print('Initializing weights. Warning: may overwrite sensitive bias parameters (e.g. batchnorm)')
   for e in net.parameters():
      if scheme == 'orthogonal':
         if len(e.size()) >= 2:
            init.orthogonal(e)
      elif scheme == 'normal':
         init.normal(e, std=1e-2)
      elif scheme == 'xavier':
         init.xavier_normal(e)
Project: pytorch    Author: ezyang
def test_normal(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                mean = self._random_float(-3, 3)
                std = self._random_float(1, 5)
                init.normal(input_tensor, mean=mean, std=std)

                assert self._is_normal(input_tensor, mean, std)
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz
def weights_init_normal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz
def weights_init_xavier(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz
def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz
def init_weights(net, init_type='normal'):
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
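
init_weights relies on Module.apply, which visits every submodule recursively, so each weights_init_* function sees the Conv, Linear, and BatchNorm2d layers one at a time. A minimal sketch (the toy model is illustrative):

from torch import nn

model = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8))
init_weights(model, init_type='xavier')  # prints the method, then applies weights_init_xavier to each layer
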
Project: seqmod    Author: emanjavacas
def make_mlm_hook(d, seed_texts=None, max_seq_len=25, gpu=False,
                  method='sample', temperature=1, width=5,
                  early_stopping=None, validate=True):
    """
    Make a generator hook for a multi-headed language model
    """

    def hook(trainer, epoch, batch_num, checkpoint):
        trainer.log("info", "Checking training...")
        if validate:
            loss = sum(trainer.validate_model().pack())
            trainer.log("info", "Valid loss: {:g}".format(loss))
            trainer.log("info", "Registering early stopping loss...")
            if early_stopping is not None:
                early_stopping.add_checkpoint(loss)
        trainer.log("info", "Generating text...")
        for head in trainer.model.project:
            trainer.log("info", "Head: {}".format(head))
            scores, hyps = trainer.model.generate(
                d, head=head, seed_texts=seed_texts, max_seq_len=max_seq_len,
                gpu=gpu, method=method, temperature=temperature, width=width)
            hyps = [format_hyp(score, hyp, hyp_num + 1, d)
                    for hyp_num, (score, hyp) in enumerate(zip(scores, hyps))]
            trainer.log("info", '\n***' + ''.join(hyps) + "\n***")

    return hook
Project: pytorch    Author: pytorch
def test_normal(self):
        for as_variable in [True, False]:
            for dims in [1, 2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50, as_variable=as_variable)
                mean = self._random_float(-3, 3)
                std = self._random_float(1, 5)
                init.normal(input_tensor, mean=mean, std=std)

                assert self._is_normal(input_tensor, mean, std)
Project: generative_models    Author: j-min
def weights_init_normal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: generative_models    Author: j-min
def weights_init_xavier(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: generative_models    Author: j-min
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: generative_models    Author: j-min
def init_weights(net, init_type='normal'):
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
Project: training    Author: bddmodelcar
def __init__(self):
        super(SqueezeNet, self).__init__()

        self.lr = 0.01
        self.momentum = 0.01
        self.N_FRAMES = 2
        self.N_STEPS = 10
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(12, 64, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(64, 16, 64, 64),
        )
        self.post_metadata_features = nn.Sequential(
            Fire(256, 16, 64, 64),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(128, 32, 128, 128),
            Fire(256, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            Fire(512, 64, 256, 256),
        )
        final_conv = nn.Conv2d(512, 66, kernel_size=1)
        self.final_output_Aruco = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=6)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
Project: training    Author: bddmodelcar
def __init__(self, n_steps=10, n_frames=2):
        super(SqueezeNet, self).__init__()

        self.n_steps = n_steps
        self.n_frames = n_frames
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 64, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(64, 16, 64, 64)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(256, 16, 64, 64),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(128, 32, 128, 128),
            Fire(256, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            Fire(512, 64, 256, 256),
        )
        final_conv = nn.Conv2d(512, self.n_steps * 4, kernel_size=1)
        self.final_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=6)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
Project: training    Author: bddmodelcar
def __init__(self, n_steps=10, n_frames=2):
        super(SqueezeNet, self).__init__()

        self.n_steps = n_steps
        self.n_frames = n_frames
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, self.n_steps * 2, kernel_size=1)
        self.final_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=5)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
Project: training    Author: bddmodelcar
def __init__(self, n_steps=10, n_frames=2):
        super(Feedforward, self).__init__()

        self.n_steps = n_steps
        self.n_frames = n_frames
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * n_frames, 8, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(8, 8, kernel_size=3, padding=1)
        )
        self.post_metadata_features = nn.Sequential(
            nn.Conv2d(16, 12, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(12, 12, kernel_size=3, padding=1),
            nn.Conv2d(12, 16, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.Conv2d(16, 24, kernel_size=3, padding=1),
            nn.Conv2d(24, 24, kernel_size=3, padding=1)
        )
        final_conv = nn.Conv2d(24, self.n_steps * 2, kernel_size=1)
        self.final_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=5)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
Project: pytorch-classification    Author: bearpaw
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: pytorch-bilstmcrf    Author: kaniblu
def reset_parameters(self):
        I.normal(self.transitions.data, 0, 1)
Project: open-reid    Author: Cysu
def __init__(self, depth, pretrained=True, cut_at_pooling=False,
                 num_features=0, norm=False, dropout=0, num_classes=0):
        super(ResNet, self).__init__()

        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling

        # Construct base (pretrained) resnet
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)

        if not self.cut_at_pooling:
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes

            out_planes = self.base.fc.in_features

            # Append new layers
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal(self.feat.weight, mode='fan_out')
                init.constant(self.feat.bias, 0)
                init.constant(self.feat_bn.weight, 1)
                init.constant(self.feat_bn.bias, 0)
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                self.classifier = nn.Linear(self.num_features, self.num_classes)
                init.normal(self.classifier.weight, std=0.001)
                init.constant(self.classifier.bias, 0)

        if not self.pretrained:
            self.reset_params()
Project: pytorch-tutorials    Author: tfygg
def initNetParams(net):
    '''Init net parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.xavier_uniform(m.weight)
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: pytorch    Author: tylergenter
def test_calculate_gain_leaky_relu(self):
        for param in [None, 0, 0.01, 10]:
            gain = init.calculate_gain('leaky_relu', param)
            if param is None:  # Default slope is 0.01
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 0:  # No slope = same gain as normal ReLU
                self.assertEqual(gain, 1.4142135623730951)
            elif param == 0.01:
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 10:
                self.assertEqual(gain, 0.14071950894605836)
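
The expected constants in this test follow directly from the leaky-ReLU gain formula, gain = sqrt(2 / (1 + negative_slope**2)); a quick check (not part of the test suite):

import math

def leaky_relu_gain(slope):
    return math.sqrt(2.0 / (1 + slope ** 2))

print(leaky_relu_gain(0.01))  # 1.4141428569978354 (also the default-slope value)
print(leaky_relu_gain(0))     # 1.4142135623730951, i.e. sqrt(2), plain ReLU
print(leaky_relu_gain(10))    # 0.14071950894605836
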
Project: pytorch-coriander    Author: hughperkins
def test_calculate_gain_leaky_relu(self):
        for param in [None, 0, 0.01, 10]:
            gain = init.calculate_gain('leaky_relu', param)
            if param is None:  # Default slope is 0.01
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 0:  # No slope = same gain as normal ReLU
                self.assertEqual(gain, 1.4142135623730951)
            elif param == 0.01:
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 10:
                self.assertEqual(gain, 0.14071950894605836)
Project: pytorch    Author: ezyang
def test_calculate_gain_leaky_relu(self):
        for param in [None, 0, 0.01, 10]:
            gain = init.calculate_gain('leaky_relu', param)
            if param is None:  # Default slope is 0.01
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 0:  # No slope = same gain as normal ReLU
                self.assertEqual(gain, 1.4142135623730951)
            elif param == 0.01:
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 10:
                self.assertEqual(gain, 0.14071950894605836)
Project: seqmod    Author: emanjavacas
def make_initializer(
        linear={'type': 'uniform', 'args': {'a': -0.05, 'b': 0.05}},
        linear_bias={'type': 'constant', 'args': {'val': 0.}},
        rnn={'type': 'xavier_uniform', 'args': {'gain': 1.}},
        rnn_bias={'type': 'constant', 'args': {'val': 0.}},
        cnn_bias={'type': 'constant', 'args': {'val': 0.}},
        emb={'type': 'normal', 'args': {'mean': 0, 'std': 1}},
        default={'type': 'uniform', 'args': {'a': -0.05, 'b': 0.05}}):

    rnns = (torch.nn.LSTM, torch.nn.GRU,
            torch.nn.LSTMCell, torch.nn.GRUCell,
            StackedGRU, StackedLSTM, NormalizedGRU,
            NormalizedGRUCell, StackedNormalizedGRU)

    convs = (torch.nn.Conv1d, torch.nn.Conv2d)

    def initializer(m):

        if isinstance(m, (rnns)):  # RNNs
            for p_name, p in m.named_parameters():
                if hasattr(p, 'custom'):
                    continue
                if is_bias(p_name):
                    getattr(init, rnn_bias['type'])(p, **rnn_bias['args'])
                else:
                    getattr(init, rnn['type'])(p, **rnn['args'])

        elif isinstance(m, torch.nn.Linear):  # linear
            for p_name, p in m.named_parameters():
                if hasattr(p, 'custom'):
                    continue
                if is_bias(p_name):
                    getattr(init, linear_bias['type'])(p, **linear_bias['args'])
                else:
                    getattr(init, linear['type'])(p, **linear['args'])

        elif isinstance(m, torch.nn.Embedding):  # embedding
            for p in m.parameters():
                if hasattr(p, 'custom'):
                    continue
                getattr(init, emb['type'])(p, **emb['args'])

        elif isinstance(m, convs):
            for p_name, p in m.named_parameters():
                if hasattr(p, 'custom'):
                    continue
                if is_bias(p_name):
                    getattr(init, cnn_bias['type'])(p, **cnn_bias['args'])
                else:
                    # Karpathy: http://cs231n.github.io/neural-networks-2/#init
                    # -> scale weight vector by square root of its fan-in...
                    # fan_in, _ = init._calculate_fan_in_and_fan_out(p)
                    # init.normal(p, mean=0, std=math.sqrt(fan_in))
                    init.xavier_normal(p)

    return initializer
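
Because make_initializer returns a closure shaped for Module.apply, wiring it into a model is one call; a hypothetical usage sketch (the LSTM is illustrative):

from torch import nn

model = nn.LSTM(32, 64)
initializer = make_initializer(
    rnn={'type': 'orthogonal', 'args': {'gain': 1.0}})
model.apply(initializer)  # applies the per-type rules to every submodule
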
Project: pytorch    Author: pytorch
def test_calculate_gain_leaky_relu(self):
        for param in [None, 0, 0.01, 10]:
            gain = init.calculate_gain('leaky_relu', param)
            if param is None:  # Default slope is 0.01
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 0:  # No slope = same gain as normal ReLU
                self.assertEqual(gain, 1.4142135623730951)
            elif param == 0.01:
                self.assertEqual(gain, 1.4141428569978354)
            elif param == 10:
                self.assertEqual(gain, 0.14071950894605836)
Project: generative_models    Author: j-min
def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Project: training    Author: bddmodelcar
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers"""
        super(SqueezeNetTimeLSTM, self).__init__()

        self.is_cuda = False

        self.n_frames = n_frames
        self.n_steps = n_steps
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstm_encoder = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True)
        ])
        self.lstm_decoder = nn.ModuleList([
            nn.LSTM(1, 32, 1, batch_first=True),
            nn.LSTM(32, 8, 1, batch_first=True),
            nn.LSTM(8, 16, 1, batch_first=True),
            nn.LSTM(16, 4, 1, batch_first=True),
        ])

        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
Project: training    Author: bddmodelcar
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers"""
        super(SqueezeNetLSTM, self).__init__()

        self.n_frames = n_frames
        self.n_steps = n_steps
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, self.n_steps * 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstms = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True),
            nn.LSTM(32, 4, 1, batch_first=True)
        ])

        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
Project: training    Author: bddmodelcar
def __init__(self, n_steps=10, n_frames=2):
        """Sets up layers"""
        super(SqueezeNetSqueezeLSTM, self).__init__()

        self.n_frames = n_frames
        self.n_steps = n_steps
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, self.n_steps * 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstms = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True),
            nn.LSTM(32, 8, 1, batch_first=True),
            nn.LSTM(8, 16, 1, batch_first=True),
            nn.LSTM(16, 4, 1, batch_first=True)
        ])

        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
Project: training    Author: bddmodelcar
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers"""
        super(SqueezeNetTimeLSTM, self).__init__()

        self.is_cuda = False

        self.n_frames = n_frames
        self.n_steps = n_steps
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2, 8, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(8, 8, kernel_size=3, padding=1)
        )
        self.post_metadata_features = nn.Sequential(
            nn.Conv2d(16, 12, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(12, 12, kernel_size=3, padding=1),
            nn.Conv2d(12, 16, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.Conv2d(16, 24, kernel_size=3, padding=1),
            nn.Conv2d(24, 8, kernel_size=3, padding=1)
        )
        final_conv = nn.Conv2d(8, 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstm_encoder = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True)
        ])
        self.lstm_decoder = nn.ModuleList([
            nn.LSTM(1, 32, 1, batch_first=True),
            nn.LSTM(32, 4, 1, batch_first=True)
        ])

        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()