Python torch.nn.init 模块,kaiming_uniform() 实例源码

我们从Python开源项目中,提取了以下24个代码示例,用于说明如何使用torch.nn.init.kaiming_uniform()。

项目:samplernn-pytorch    作者:deepsound-project    | 项目源码 | 文件源码
def __init__(self, frame_size, dim, q_levels, weight_norm):
        """Build an embedding followed by input/hidden/output 1-D convolutions.

        Args:
            frame_size: kernel size of the input convolution.
            dim: channel width of the hidden layers.
            q_levels: number of quantization levels; used both as the
                embedding vocabulary size and as the embedding dimension.
            weight_norm: when True, wrap each conv layer in weight
                normalization (torch.nn.utils.weight_norm).
        """
        super().__init__()

        self.q_levels = q_levels

        # Maps each of the q_levels symbols to a q_levels-dimensional vector.
        self.embedding = torch.nn.Embedding(
            self.q_levels,
            self.q_levels
        )

        # Collapses a frame of embedded samples down to `dim` channels.
        # bias=False: only the weight is initialized below.
        self.input = torch.nn.Conv1d(
            in_channels=q_levels,
            out_channels=dim,
            kernel_size=frame_size,
            bias=False
        )
        # NOTE(review): init.kaiming_uniform / init.constant are the
        # non-underscore pre-0.4.1 torch APIs, deprecated on newer torch.
        init.kaiming_uniform(self.input.weight)
        if weight_norm:
            self.input = torch.nn.utils.weight_norm(self.input)

        # 1x1 conv acting as a per-timestep fully-connected hidden layer.
        self.hidden = torch.nn.Conv1d(
            in_channels=dim,
            out_channels=dim,
            kernel_size=1
        )
        init.kaiming_uniform(self.hidden.weight)
        init.constant(self.hidden.bias, 0)
        if weight_norm:
            self.hidden = torch.nn.utils.weight_norm(self.hidden)

        # 1x1 conv projecting back to q_levels output logits.
        self.output = torch.nn.Conv1d(
            in_channels=dim,
            out_channels=q_levels,
            kernel_size=1
        )
        # lecun_uniform is presumably a project-local initializer on `nn`
        # (it is not part of torch.nn) — verify against the project's module.
        nn.lecun_uniform(self.output.weight)
        init.constant(self.output.bias, 0)
        if weight_norm:
            self.output = torch.nn.utils.weight_norm(self.output)
项目:pytorch    作者:tylergenter    | 项目源码 | 文件源码
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
    # Tensors with fewer than two dimensions have no fan-in/fan-out,
    # so kaiming initialization must reject them with ValueError.
    for wrap in (True, False):
        for ndim in (0, 1):
            with self.assertRaises(ValueError):
                small = self._create_random_nd_tensor(ndim, size_min=1, size_max=1, as_variable=wrap)
                init.kaiming_uniform(small)
项目:pytorch    作者:tylergenter    | 项目源码 | 文件源码
def test_kaiming_uniform(self):
    """All initialized values must fall inside +/- std*sqrt(3) for the chosen fan."""
    for wrap_in_variable in [True, False]:
        for pass_a in [True, False]:
            for ndim in [2, 4]:
                for fan_mode in ['fan_in', 'fan_out']:
                    tensor = self._create_random_nd_tensor(ndim, size_min=20, size_max=25,
                                                           as_variable=wrap_in_variable)
                    if pass_a:
                        slope = self._random_float(0.1, 2)
                        init.kaiming_uniform(tensor, a=slope, mode=fan_mode)
                    else:
                        slope = 0
                        init.kaiming_uniform(tensor, mode=fan_mode)

                    if wrap_in_variable:
                        tensor = tensor.data

                    # Receptive-field size per channel (1 for plain 2-D tensors).
                    receptive = tensor[0, 0].numel() if tensor.dim() > 2 else 1
                    fan_in = tensor.size(1) * receptive
                    fan_out = tensor.size(0) * receptive

                    n = fan_in if fan_mode == 'fan_in' else fan_out

                    std = math.sqrt(2.0 / ((1 + slope ** 2) * n))
                    limit = std * math.sqrt(3.0)
                    assert self._is_uniform(tensor, -limit, limit)
项目:covfefe    作者:deepnn    | 项目源码 | 文件源码
def he_uniform(w, a=0, mode='fan_in'):
    """He (Kaiming) uniform initialization; thin alias over nn.kaiming_uniform.

    Args:
        w: weight tensor to initialize in place.
        a: negative slope of the following rectifier (0 for plain ReLU).
        mode: 'fan_in' or 'fan_out'.
    """
    initialized = nn.kaiming_uniform(w, a=a, mode=mode)
    return initialized
项目:pytorch-coriander    作者:hughperkins    | 项目源码 | 文件源码
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
    """kaiming_uniform requires at least a 2-D tensor; 0-D/1-D must raise."""
    for as_var in [True, False]:
        for nd in [0, 1]:
            with self.assertRaises(ValueError):
                t = self._create_random_nd_tensor(nd, size_min=1, size_max=1, as_variable=as_var)
                init.kaiming_uniform(t)
项目:pytorch-coriander    作者:hughperkins    | 项目源码 | 文件源码
def test_kaiming_uniform(self):
    """Check that kaiming_uniform draws from U(-b, b) with b = std * sqrt(3)."""
    for as_var in [True, False]:
        for explicit_a in [True, False]:
            for nd in [2, 4]:
                for fmode in ['fan_in', 'fan_out']:
                    tens = self._create_random_nd_tensor(nd, size_min=20, size_max=25,
                                                         as_variable=as_var)
                    if explicit_a:
                        a_val = self._random_float(0.1, 2)
                        init.kaiming_uniform(tens, a=a_val, mode=fmode)
                    else:
                        a_val = 0
                        init.kaiming_uniform(tens, mode=fmode)

                    if as_var:
                        tens = tens.data

                    out_ch, in_ch = tens.size(0), tens.size(1)
                    if tens.dim() > 2:
                        per_channel = tens[0, 0].numel()
                        in_ch *= per_channel
                        out_ch *= per_channel

                    n = in_ch if fmode == 'fan_in' else out_ch

                    expected_std = math.sqrt(2.0 / ((1 + a_val ** 2) * n))
                    bound = expected_std * math.sqrt(3.0)
                    assert self._is_uniform(tens, -bound, bound)
项目:pytorch    作者:ezyang    | 项目源码 | 文件源码
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
    # Kaiming init needs both a fan-in and a fan-out dimension.
    for use_variable in (True, False):
        for dimensionality in (0, 1):
            with self.assertRaises(ValueError):
                tiny = self._create_random_nd_tensor(dimensionality, size_min=1, size_max=1, as_variable=use_variable)
                init.kaiming_uniform(tiny)
项目:pytorch    作者:ezyang    | 项目源码 | 文件源码
def test_kaiming_uniform(self):
    """Initialized values must lie within the kaiming-uniform bound for the chosen fan."""
    for variable_wrapped in [True, False]:
        for with_slope in [True, False]:
            for rank in [2, 4]:
                for direction in ['fan_in', 'fan_out']:
                    t = self._create_random_nd_tensor(rank, size_min=20, size_max=25,
                                                      as_variable=variable_wrapped)
                    if with_slope:
                        neg_slope = self._random_float(0.1, 2)
                        init.kaiming_uniform(t, a=neg_slope, mode=direction)
                    else:
                        neg_slope = 0
                        init.kaiming_uniform(t, mode=direction)

                    if variable_wrapped:
                        t = t.data

                    # Spatial size of one kernel slice (1 for plain 2-D tensors).
                    kernel = t[0, 0].numel() if t.dim() > 2 else 1
                    fans = {'fan_in': t.size(1) * kernel, 'fan_out': t.size(0) * kernel}
                    n = fans[direction]

                    std = math.sqrt(2.0 / ((1 + neg_slope ** 2) * n))
                    bound = std * math.sqrt(3.0)
                    assert self._is_uniform(t, -bound, bound)
项目:pytorch    作者:pytorch    | 项目源码 | 文件源码
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
    """0-D and 1-D tensors cannot supply fan-in/fan-out and must be rejected."""
    for variable_flag in [True, False]:
        for rank in [0, 1]:
            with self.assertRaises(ValueError):
                candidate = self._create_random_nd_tensor(rank, size_min=1, size_max=1, as_variable=variable_flag)
                init.kaiming_uniform(candidate)
项目:pytorch    作者:pytorch    | 项目源码 | 文件源码
def test_kaiming_uniform(self):
    # Verify the uniform bound b = sqrt(3) * sqrt(2 / ((1 + a^2) * fan)).
    for as_variable in (True, False):
        for use_custom_a in (True, False):
            for num_dims in (2, 4):
                for mode in ('fan_in', 'fan_out'):
                    data = self._create_random_nd_tensor(num_dims, size_min=20, size_max=25,
                                                         as_variable=as_variable)
                    slope = self._random_float(0.1, 2) if use_custom_a else 0
                    if use_custom_a:
                        init.kaiming_uniform(data, a=slope, mode=mode)
                    else:
                        init.kaiming_uniform(data, mode=mode)

                    if as_variable:
                        data = data.data

                    fan_in = data.size(1)
                    fan_out = data.size(0)
                    if data.dim() > 2:
                        extra = data[0, 0].numel()
                        fan_in *= extra
                        fan_out *= extra

                    n = fan_in if mode == 'fan_in' else fan_out

                    expected_std = math.sqrt(2.0 / ((1 + slope ** 2) * n))
                    limit = expected_std * math.sqrt(3.0)
                    assert self._is_uniform(data, -limit, limit)
项目:intel-cervical-cancer    作者:wangg12    | 项目源码 | 文件源码
def weights_init(m):
  """Initialize conv and linear weights with Kaiming (He) uniform, fan-in mode.

  Intended to be applied module-by-module via ``model.apply(weights_init)``.
  Biases are left at their library default initialization.

  Args:
      m: a module; only nn.Conv2d and nn.Linear instances are touched.
  """
  # init.kaiming_uniform() is the deprecated pre-0.4.1 API; the trailing
  # underscore variant is the supported in-place call and draws from the
  # same U(-b, b) distribution with b = sqrt(2) * sqrt(3 / fan_in).
  if isinstance(m, nn.Conv2d):
    init.kaiming_uniform_(m.weight.data, mode='fan_in')
  if isinstance(m, nn.Linear):
    init.kaiming_uniform_(m.weight.data, mode='fan_in')
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self):
        """Build a SqueezeNet-style trunk with a 66-channel Aruco output head."""
        super(SqueezeNet, self).__init__()

        # Hyperparameters stored as plain attributes; presumably read by the
        # training loop — verify against the caller.
        self.lr = 0.01
        self.momentum = 0.01
        self.N_FRAMES = 2
        self.N_STEPS = 10
        # Stem applied before metadata is merged in (12 input channels).
        # Fire(...) is a project-local SqueezeNet "fire" module, not torch.nn.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(12, 64, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(64, 16, 64, 64),
        )
        self.post_metadata_features = nn.Sequential(
            Fire(256, 16, 64, 64),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(128, 32, 128, 128),
            Fire(256, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            Fire(512, 64, 256, 256),
        )
        final_conv = nn.Conv2d(512, 66, kernel_size=1)
        self.final_output_Aruco = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=6)
        )

        # Weight init: small gaussian for the final conv, kaiming uniform for
        # every other conv; biases zeroed. (init.normal / init.kaiming_uniform
        # are the deprecated non-underscore torch APIs.)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_steps=10, n_frames=2):
        """SqueezeNet trunk emitting n_steps * 4 output channels.

        Args:
            n_steps: number of prediction steps (scales the head width).
            n_frames: number of stacked input frames (scales input channels).
        """
        super(SqueezeNet, self).__init__()

        self.n_steps = n_steps
        self.n_frames = n_frames
        # Input: 3 RGB channels x 2 x n_frames stacked frames.
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 64, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(64, 16, 64, 64)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(256, 16, 64, 64),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(128, 32, 128, 128),
            Fire(256, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            Fire(512, 64, 256, 256),
        )
        final_conv = nn.Conv2d(512, self.n_steps * 4, kernel_size=1)
        self.final_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=6)
        )

        # Small gaussian for the head conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_steps=10, n_frames=2):
        """Narrow SqueezeNet variant emitting n_steps * 2 output channels.

        Args:
            n_steps: number of prediction steps (scales the head width).
            n_frames: number of stacked input frames (scales input channels).
        """
        super(SqueezeNet, self).__init__()

        self.n_steps = n_steps
        self.n_frames = n_frames
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, self.n_steps * 2, kernel_size=1)
        self.final_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=5)
        )

        # Small gaussian for the head conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_steps=10, n_frames=2):
        """Plain-convolution feedforward net (no Fire modules) with an
        n_steps * 2 channel head.

        Args:
            n_steps: number of prediction steps (scales the head width).
            n_frames: number of stacked input frames (scales input channels).
        """
        super(Feedforward, self).__init__()

        self.n_steps = n_steps
        self.n_frames = n_frames
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * n_frames, 8, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(8, 8, kernel_size=3, padding=1)
        )
        self.post_metadata_features = nn.Sequential(
            nn.Conv2d(16, 12, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(12, 12, kernel_size=3, padding=1),
            nn.Conv2d(12, 16, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.Conv2d(16, 24, kernel_size=3, padding=1),
            nn.Conv2d(24, 24, kernel_size=3, padding=1)
        )
        final_conv = nn.Conv2d(24, self.n_steps * 2, kernel_size=1)
        self.final_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            # nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=5, stride=5)
        )

        # Small gaussian for the head conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
项目:samplernn-pytorch    作者:deepsound-project    | 项目源码 | 文件源码
def __init__(self, frame_size, n_frame_samples, n_rnn, dim,
                 learn_h0, weight_norm):
        """Frame-level module: 1x1 input conv -> stacked GRU -> learned upsampling.

        Args:
            frame_size: upsampling factor (kernel size of the upsampler).
            n_frame_samples: samples per input frame (input conv channels).
            n_rnn: number of stacked GRU layers.
            dim: hidden width shared by all layers.
            learn_h0: if True, the initial GRU state is a trainable Parameter;
                otherwise it is registered as a fixed buffer.
            weight_norm: if True, wrap the conv layers in weight normalization.
        """
        super().__init__()

        self.frame_size = frame_size
        self.n_frame_samples = n_frame_samples
        self.dim = dim

        h0 = torch.zeros(n_rnn, dim)
        if learn_h0:
            self.h0 = torch.nn.Parameter(h0)
        else:
            self.register_buffer('h0', torch.autograd.Variable(h0))

        # 1x1 conv expanding each frame of samples to `dim` channels.
        self.input_expand = torch.nn.Conv1d(
            in_channels=n_frame_samples,
            out_channels=dim,
            kernel_size=1
        )
        # NOTE(review): non-underscore init functions are the deprecated
        # pre-0.4.1 torch API.
        init.kaiming_uniform(self.input_expand.weight)
        init.constant(self.input_expand.bias, 0)
        if weight_norm:
            self.input_expand = torch.nn.utils.weight_norm(self.input_expand)

        self.rnn = torch.nn.GRU(
            input_size=dim,
            hidden_size=dim,
            num_layers=n_rnn,
            batch_first=True
        )
        # Per-layer GRU init. concat_init / lecun_uniform are presumably
        # project-local helpers on `nn` (not torch.nn) that initialize the
        # three concatenated gate sub-matrices individually — verify against
        # the project's nn module.
        for i in range(n_rnn):
            nn.concat_init(
                getattr(self.rnn, 'weight_ih_l{}'.format(i)),
                [nn.lecun_uniform, nn.lecun_uniform, nn.lecun_uniform]
            )
            init.constant(getattr(self.rnn, 'bias_ih_l{}'.format(i)), 0)

            nn.concat_init(
                getattr(self.rnn, 'weight_hh_l{}'.format(i)),
                [nn.lecun_uniform, nn.lecun_uniform, init.orthogonal]
            )
            init.constant(getattr(self.rnn, 'bias_hh_l{}'.format(i)), 0)

        # Learned upsampling back toward sample rate; LearnedUpsampling1d is
        # project-local. The +/- sqrt(6/dim) range is a uniform fan-based
        # heuristic.
        self.upsampling = nn.LearnedUpsampling1d(
            in_channels=dim,
            out_channels=dim,
            kernel_size=frame_size
        )
        init.uniform(
            self.upsampling.conv_t.weight, -np.sqrt(6 / dim), np.sqrt(6 / dim)
        )
        init.constant(self.upsampling.bias, 0)
        if weight_norm:
            self.upsampling.conv_t = torch.nn.utils.weight_norm(
                self.upsampling.conv_t
            )
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers: SqueezeNet conv trunk feeding an LSTM
        encoder/decoder pair.

        Args:
            n_frames: number of input frames (stored; the stem here uses a
                fixed 3 * 2 = 6 input channels).
            n_steps: number of prediction steps (stored only in this variant).
        """
        super(SqueezeNetTimeLSTM, self).__init__()

        # Tracks device placement; presumably toggled by a cuda() override.
        self.is_cuda = False

        self.n_frames = n_frames
        self.n_steps = n_steps
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstm_encoder = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True)
        ])
        self.lstm_decoder = nn.ModuleList([
            nn.LSTM(1, 32, 1, batch_first=True),
            nn.LSTM(32, 8, 1, batch_first=True),
            nn.LSTM(8, 16, 1, batch_first=True),
            nn.LSTM(16, 4, 1, batch_first=True),
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers: SqueezeNet conv trunk followed by a two-stage LSTM stack.

        Args:
            n_frames: number of stacked input frames (scales input channels).
            n_steps: number of prediction steps (scales the pre-LSTM head).
        """
        super(SqueezeNetLSTM, self).__init__()

        self.n_frames = n_frames
        self.n_steps = n_steps
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, self.n_steps * 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstms = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True),
            nn.LSTM(32, 4, 1, batch_first=True)
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_steps=10, n_frames=2):
        """Sets up layers: SqueezeNet conv trunk followed by a four-stage
        "squeeze" LSTM stack.

        Args:
            n_steps: number of prediction steps (scales the pre-LSTM head).
            n_frames: number of stacked input frames (scales input channels).
        """
        super(SqueezeNetSqueezeLSTM, self).__init__()

        self.n_frames = n_frames
        self.n_steps = n_steps
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 4, 8, 8)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(24, 6, 12, 12),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(24, 8, 16, 16),
            Fire(32, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
        )
        final_conv = nn.Conv2d(64, self.n_steps * 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        # LSTM widths narrow then widen (32 -> 8 -> 16 -> 4), mirroring the
        # squeeze/expand idea of the conv trunk.
        self.lstms = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True),
            nn.LSTM(32, 8, 1, batch_first=True),
            nn.LSTM(8, 16, 1, batch_first=True),
            nn.LSTM(16, 4, 1, batch_first=True)
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers: plain-conv trunk (no Fire modules) feeding an LSTM
        encoder/decoder pair.

        Args:
            n_frames: number of input frames (stored; stem uses fixed 3 * 2
                input channels).
            n_steps: number of prediction steps (stored only in this variant).
        """
        super(SqueezeNetTimeLSTM, self).__init__()

        # Tracks device placement; presumably toggled by a cuda() override.
        self.is_cuda = False

        self.n_frames = n_frames
        self.n_steps = n_steps
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2, 8, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(8, 8, kernel_size=3, padding=1)
        )
        self.post_metadata_features = nn.Sequential(
            nn.Conv2d(16, 12, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(12, 12, kernel_size=3, padding=1),
            nn.Conv2d(12, 16, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.Conv2d(16, 24, kernel_size=3, padding=1),
            nn.Conv2d(24, 8, kernel_size=3, padding=1)
        )
        final_conv = nn.Conv2d(8, 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstm_encoder = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True)
        ])
        self.lstm_decoder = nn.ModuleList([
            nn.LSTM(1, 32, 1, batch_first=True),
            nn.LSTM(32, 4, 1, batch_first=True)
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers: wider SqueezeNet trunk (up to 96 channels) feeding
        an LSTM encoder/decoder pair.

        Args:
            n_frames: number of input frames (stored; stem uses fixed 3 * 2
                input channels).
            n_steps: number of prediction steps (stored only in this variant).
        """
        super(SqueezeNetTimeLSTM, self).__init__()

        # Tracks device placement; presumably toggled by a cuda() override.
        self.is_cuda = False

        self.n_frames = n_frames
        self.n_steps = n_steps
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 6, 12, 12)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(36, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
            Fire(64, 24, 48, 48),
            Fire(96, 24, 48, 48),
        )
        final_conv = nn.Conv2d(96, 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstm_encoder = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True)
        ])
        self.lstm_decoder = nn.ModuleList([
            nn.LSTM(1, 32, 1, batch_first=True),
            nn.LSTM(32, 8, 1, batch_first=True),
            nn.LSTM(8, 16, 1, batch_first=True),
            nn.LSTM(16, 4, 1, batch_first=True),
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers: wider SqueezeNet trunk (up to 96 channels) followed
        by a two-stage LSTM stack.

        Args:
            n_frames: number of stacked input frames (scales input channels).
            n_steps: number of prediction steps (scales the pre-LSTM head).
        """
        super(SqueezeNetLSTM, self).__init__()

        self.n_frames = n_frames
        self.n_steps = n_steps
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 6, 12, 12)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(36, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
            Fire(64, 24, 48, 48),
            Fire(96, 24, 48, 48),
        )
        final_conv = nn.Conv2d(96, self.n_steps * 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        # Note: the first LSTM here is 2 layers deep, unlike sibling variants.
        self.lstms = nn.ModuleList([
            nn.LSTM(16, 32, 2, batch_first=True),
            nn.LSTM(32, 4, 1, batch_first=True)
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_frames=2, n_steps=10):
        """Sets up layers: wide SqueezeNet trunk feeding an LSTM encoder and
        a two-stage LSTM decoder.

        Args:
            n_frames: number of input frames (stored; stem uses fixed 3 * 2
                input channels).
            n_steps: number of prediction steps (stored only in this variant).
        """
        super(SqueezeNetTimeLSTM, self).__init__()

        # Tracks device placement; presumably toggled by a cuda() override.
        self.is_cuda = False

        self.n_frames = n_frames
        self.n_steps = n_steps
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 6, 12, 12)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(36, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
            Fire(64, 24, 48, 48),
            Fire(96, 24, 48, 48),
        )
        final_conv = nn.Conv2d(96, 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        self.lstm_encoder = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True)
        ])
        self.lstm_decoder = nn.ModuleList([
            nn.LSTM(1, 32, 1, batch_first=True),
            nn.LSTM(32, 4, 1, batch_first=True)
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
项目:training    作者:bddmodelcar    | 项目源码 | 文件源码
def __init__(self, n_steps=10, n_frames=2):
        """Sets up layers: wide SqueezeNet trunk followed by a four-stage
        "squeeze" LSTM stack.

        Args:
            n_steps: number of prediction steps (scales the pre-LSTM head).
            n_frames: number of stacked input frames (scales input channels).
        """
        super(SqueezeNetSqueezeLSTM, self).__init__()

        self.n_frames = n_frames
        self.n_steps = n_steps
        # Fire(...) is a project-local SqueezeNet "fire" module.
        self.pre_metadata_features = nn.Sequential(
            nn.Conv2d(3 * 2 * self.n_frames, 16, kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(16, 6, 12, 12)
        )
        self.post_metadata_features = nn.Sequential(
            Fire(36, 8, 16, 16),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(32, 12, 24, 24),
            Fire(48, 12, 24, 24),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(48, 16, 32, 32),
            Fire(64, 16, 32, 32),
            Fire(64, 24, 48, 48),
            Fire(96, 24, 48, 48),
        )
        final_conv = nn.Conv2d(96, self.n_steps * 2, kernel_size=1)
        self.pre_lstm_output = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.AvgPool2d(kernel_size=3, stride=2),
        )
        # LSTM widths narrow then widen (32 -> 8 -> 16 -> 4), mirroring the
        # squeeze/expand idea of the conv trunk.
        self.lstms = nn.ModuleList([
            nn.LSTM(16, 32, 1, batch_first=True),
            nn.LSTM(32, 8, 1, batch_first=True),
            nn.LSTM(8, 16, 1, batch_first=True),
            nn.LSTM(16, 4, 1, batch_first=True)
        ])

        # Small gaussian for the final conv, kaiming uniform elsewhere,
        # biases zeroed (deprecated non-underscore init APIs).
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                if mod is final_conv:
                    init.normal(mod.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()