The following 50 code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.leaky_relu().
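Before the project snippets, here is a minimal, self-contained sketch of the call itself (the input values and the slope of 0.1 are arbitrary, chosen only for illustration): F.leaky_relu(x, slope) returns x where x >= 0 and slope * x elsewhere, with slope defaulting to 0.2.

import numpy as np
import chainer.functions as F

# Toy input; Chainer functions accept raw NumPy arrays and return a Variable.
x = np.array([[-1.0, 0.5], [2.0, -3.0]], dtype=np.float32)
y = F.leaky_relu(x, slope=0.1)  # negative entries are multiplied by 0.1
print(y.data)                   # [[-0.1  0.5] [ 2.  -0.3]]
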
def predict(self, x):
    h = F.leaky_relu(self.c1(x), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.c3(h), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.c5(h), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.c7(h), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.c9(h), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.c11(h), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.c13(h), slope=0.1)
    h = F.leaky_relu(self.c14(h), slope=0.1)
    h = F.leaky_relu(self.c15(h), slope=0.1)
    h = F.leaky_relu(self.l16(h), slope=0.1)
    h = F.leaky_relu(self.l17(h), slope=0.1)
    # skip dropout
    h = self.l19(h)
    return h

def to_function(self):
    if self.nonlinearity.lower() == "clipped_relu":
        return clipped_relu()
    if self.nonlinearity.lower() == "crelu":
        return crelu()
    if self.nonlinearity.lower() == "elu":
        return elu()
    if self.nonlinearity.lower() == "hard_sigmoid":
        return hard_sigmoid()
    if self.nonlinearity.lower() == "leaky_relu":
        return leaky_relu()
    if self.nonlinearity.lower() == "relu":
        return relu()
    if self.nonlinearity.lower() == "sigmoid":
        return sigmoid()
    if self.nonlinearity.lower() == "softmax":
        return softmax()
    if self.nonlinearity.lower() == "softplus":
        return softplus()
    if self.nonlinearity.lower() == "tanh":
        return tanh()
    if self.nonlinearity.lower() == "bst":
        return bst()
    raise NotImplementedError()

def __call__(self, x, test=False, dropout=True):
    e1 = self.c1(x)
    e2 = self.b2(self.c2(F.leaky_relu(e1)), test=test)
    e3 = self.b3(self.c3(F.leaky_relu(e2)), test=test)
    e4 = self.b4(self.c4(F.leaky_relu(e3)), test=test)
    e5 = self.b5(self.c5(F.leaky_relu(e4)), test=test)
    e6 = self.b6(self.c6(F.leaky_relu(e5)), test=test)
    e7 = self.b7(self.c7(F.leaky_relu(e6)), test=test)
    e8 = self.b8(self.c8(F.leaky_relu(e7)), test=test)
    d1 = F.concat((F.dropout(self.b1_d(self.dc1(F.relu(e8)), test=test), train=dropout), e7))
    d2 = F.concat((F.dropout(self.b2_d(self.dc2(F.relu(d1)), test=test), train=dropout), e6))
    d3 = F.concat((F.dropout(self.b3_d(self.dc3(F.relu(d2)), test=test), train=dropout), e5))
    d4 = F.concat((self.b4_d(self.dc4(F.relu(d3)), test=test), e4))
    d5 = F.concat((self.b5_d(self.dc5(F.relu(d4)), test=test), e3))
    d6 = F.concat((self.b6_d(self.dc6(F.relu(d5)), test=test), e2))
    d7 = F.concat((self.b7_d(self.dc7(F.relu(d6)), test=test), e1))
    y = F.tanh(self.dc8(F.relu(d7)))
    return y

def main():
    class PoleModel(Chain):

        def __init__(self, input_num, action_num):
            print(input_num, action_num)
            super(PoleModel, self).__init__(
                l1=L.Linear(input_num, 32),
                l2=L.Linear(32, 32),
                l3=L.Linear(32, action_num)
            )

        def q_function(self, state):
            h1 = F.leaky_relu(self.l1(state))
            h2 = F.leaky_relu(self.l2(h1))
            return self.l3(h2)

    dqn = DeepQNet(state_shape=(3, 32, 32), action_num=2, image_num_per_state=12,
                   model=PoleModel(3*12*32*32, action_num=2))

def to_function(self):
    if self.nonlinearity.lower() == "clipped_relu":
        return clipped_relu()
    if self.nonlinearity.lower() == "crelu":
        return crelu()
    if self.nonlinearity.lower() == "elu":
        return elu()
    if self.nonlinearity.lower() == "hard_sigmoid":
        return hard_sigmoid()
    if self.nonlinearity.lower() == "leaky_relu":
        return leaky_relu()
    if self.nonlinearity.lower() == "relu":
        return relu()
    if self.nonlinearity.lower() == "sigmoid":
        return sigmoid()
    if self.nonlinearity.lower() == "softmax":
        return softmax()
    if self.nonlinearity.lower() == "softplus":
        return softplus()
    if self.nonlinearity.lower() == "tanh":
        return tanh()
    raise NotImplementedError()

def __call__(self, x, t=None):
    self.clear()
    #x = Variable(x_data)  # x_data.astype(np.float32)
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.leaky_relu(self.conv3(h), slope=0.1)
    h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.leaky_relu(self.conv5(h), slope=0.1)
    h = F.leaky_relu(self.conv6(h), slope=0.1)
    h = F.clipped_relu(self.conv7(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h

def differentiable_backward(self, g):
    if self.normalize_input:
        raise NotImplementedError
    if self.activation is F.leaky_relu:
        g = backward_leaky_relu(self.x, g)
    elif self.activation is F.relu:
        g = backward_relu(self.x, g)
    elif self.activation is F.tanh:
        g = backward_tanh(self.x, g)
    elif self.activation is F.sigmoid:
        g = backward_sigmoid(self.x, g)
    elif self.activation is not None:
        raise NotImplementedError
    if self.norm == 'ln':
        g = backward_layernormalization(self.nx, g, self.n)
    elif self.norm is not None:
        raise NotImplementedError
    if self.nn == 'down_conv' or self.nn == 'conv':
        g = backward_convolution(None, g, self.c)
    elif self.nn == 'linear':
        g = backward_linear(None, g, self.c)
    elif self.nn == 'up_deconv':
        g = backward_deconvolution(None, g, self.c)
    else:
        raise NotImplementedError
    return g

def __init__(self, in_ch=3, base_size=128, down_layers=4, use_bn=True,
             noise_all_layers=False, conv_as_last=False, w_init=None):
    layers = {}
    self.down_layers = down_layers
    self.conv_as_last = conv_as_last
    if use_bn:
        norm = 'bn'
    else:
        norm = None
    act = F.leaky_relu
    if w_init is None:
        w_init = chainer.initializers.Normal(0.02)
    layers['c_first'] = NNBlock(in_ch, base_size, nn='down_conv', norm=None,
                                activation=act, noise=noise_all_layers, w_init=w_init)
    base = base_size
    for i in range(down_layers - 1):
        layers['c' + str(i)] = NNBlock(base, base*2, nn='down_conv', norm=norm,
                                       activation=act, noise=noise_all_layers, w_init=w_init)
        base *= 2
    if conv_as_last:
        layers['c_last'] = NNBlock(base, 1, nn='conv', norm=None, activation=None, w_init=w_init)
    else:
        layers['c_last'] = NNBlock(None, 1, nn='linear', norm=None, activation=None, w_init=w_init)
    super(DCGANDiscriminator, self).__init__(**layers)

def __init__(self, in_ch=3, base_size=128, down_layers=4, use_bn=True,
             w_init=None, output_len=38):
    layers = {}
    self.down_layers = down_layers
    if use_bn:
        norm = 'bn'
    else:
        norm = None
    act = F.leaky_relu
    if w_init is None:
        w_init = chainer.initializers.Normal(0.02)
    layers['c_first'] = NNBlock(in_ch, base_size, nn='down_conv', norm=None,
                                activation=act, w_init=w_init)
    base = base_size
    for i in range(down_layers - 1):
        layers['c' + str(i)] = NNBlock(base, base*2, nn='down_conv', norm=norm,
                                       activation=act, w_init=w_init)
        base *= 2
    layers['c_last_0'] = NNBlock(None, 1, nn='linear', norm=None, activation=None, w_init=w_init)
    layers['c_last_1_0'] = NNBlock(None, output_len, nn='linear', norm=None, activation=None, w_init=None)
    #layers['c_last_1_1'] = NNBlock(1024, 1024, nn='linear', norm=None, activation=F.leaky_relu, w_init=None)
    #layers['c_last_1_2'] = NNBlock(1024, output_len, nn='linear', norm=None, activation=None, w_init=None)
    super(ACGANDiscriminator, self).__init__(**layers)

def __init__(self, hidden_size=768, output_size=1, use_bn=True):
    if use_bn:
        norm = 'bn'
        w_init = None
    else:
        norm = None
        w_init = None
        #w_init = chainer.initializers.HeNormal()
    super(ThreeLayersMLP, self).__init__(
        l0=NNBlock(None, hidden_size, norm=norm, nn='linear', w_init=w_init, activation=F.leaky_relu),
        l1=NNBlock(hidden_size, hidden_size, norm=norm, nn='linear', w_init=w_init, activation=F.leaky_relu),
        l2=NNBlock(hidden_size, output_size, norm=None, activation=None, nn='linear', w_init=w_init),
    )

def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    high_resolution_feature = h
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), finetune=self.finetune)), slope=0.1)
    h2 = high_resolution_feature
    h2 = F.leaky_relu(self.bias21(self.bn21(self.conv21(h2), finetune=self.finetune)), slope=0.1)
    h2 = reorg(h2)
    h = F.concat((h2, h), axis=1)
    h = F.leaky_relu(self.bias22(self.bn22(self.conv22(h), finetune=self.finetune)), slope=0.1)
    h = self.bias23(self.conv23(h))
    return h

def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.average_pooling_2d(h, h.shape[-2:])
    h = self.fc19(h)
    return h

def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    high_resolution_feature = h
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias19(self.bn19(self.conv19(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias20(self.bn20(self.conv20(h), finetune=self.finetune)), slope=0.1)
    h2 = high_resolution_feature
    h2 = F.leaky_relu(self.bias21(self.bn21(self.conv21(h2), finetune=self.finetune)), slope=0.1)
    h2 = reorg(h2)
    h = F.concat((h2, h), axis=1)
    h = F.leaky_relu(self.bias22(self.bn22(self.conv22(h), finetune=self.finetune)), slope=0.1)
    h = self.bias23(self.conv23(h))
    return h

def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.average_pooling_2d(h, h.shape[-2:])
    h = self.fc19(h)
    return h

def parse_activation(activation_str):
    if activation_str == 'relu':
        return F.relu
    elif activation_str == 'elu':
        return F.elu
    elif activation_str == 'lrelu':
        return F.leaky_relu
    else:
        raise RuntimeError(
            'Not supported activation: {}'.format(activation_str))

def __init__(self, in_ch=3, n_down_layers=4):
    layers = {}
    w = chainer.initializers.Normal(0.02)
    self.n_down_layers = n_down_layers
    layers['c0'] = CBR(in_ch, 64, bn=False, sample='down', activation=F.leaky_relu,
                       dropout=False, noise=True)
    base = 64
    for i in range(1, n_down_layers):
        layers['c' + str(i)] = CBR(base, base*2, bn=True, sample='down',
                                   activation=F.leaky_relu, dropout=False, noise=True)
        base *= 2
    layers['c' + str(n_down_layers)] = CBR(base, 1, bn=False, sample='none',
                                           activation=None, dropout=False, noise=True)
    super(Discriminator, self).__init__(**layers)

def __call__(self, x):
    h = add_noise(x)
    h = F.leaky_relu(add_noise(self.c0_0(h)))
    h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h))))
    h = F.leaky_relu(add_noise(self.bn1_0(self.c1_0(h))))
    h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h))))
    h = F.leaky_relu(add_noise(self.bn2_0(self.c2_0(h))))
    h = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h))))
    h = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h))))
    return self.l4(h)

def __call__(self, x, train=True):
    return F.leaky_relu(self.bn(self.conv(x), test=not train))

def __call__(self, x, train=True):
    links = self.children()
    h = F.leaky_relu(next(links)(x))
    for link in links:
        h = link(h, train)
    return h

def __call__(self, x):
    return functions.leaky_relu(x, self.slope)

def _setup_relu(self, layer):
    slope = layer.relu_param.negative_slope
    if slope != 0:
        fw = _SingleArgumentFunction(functions.leaky_relu, slope=slope)
    else:
        fw = functions.relu
    self.forwards[layer.name] = fw
    self._add_layer(layer)

def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.leaky_relu(x, slope=self.slope)
    self.assertEqual(y.data.dtype, self.dtype)
    expected = self.x.copy()
    for i in numpy.ndindex(self.x.shape):
        if self.x[i] < 0:
            expected[i] *= self.slope
    gradient_check.assert_allclose(
        expected, y.data, **self.check_forward_options)

def __call__(self, x, test=False):
    h = F.leaky_relu(self.c0(x))
    h = F.leaky_relu(self.bn_c1(self.c1(h), test=test))
    h = F.leaky_relu(self.bn_c2(self.c2(h), test=test))
    h = self.c3(h)
    h = F.sum(h) / h.size  # Mean
    return h

def __init__(self, slope=0.2):
    self._function = "leaky_relu"
    self.slope = slope

def __call__(self, x):
    return F.leaky_relu(x, self.slope)

def __call__(self, x):
    h = F.leaky_relu(self.c0(x))
    h = F.leaky_relu(self.bn1(self.c1(h)))
    h = F.leaky_relu(self.bn2(self.c2(h)))
    h = F.leaky_relu(self.bn3(self.c3(h)))
    l = self.l4l(h)
    return l

def __call__(self, x, test=False):
    h = F.leaky_relu(self.c0(x))
    for idx in range(1, self.n_layers):
        h = F.leaky_relu(self['b{}'.format(idx)](self['c{}'.format(idx)](h), test=test))
    h = F.leaky_relu(self['b{}'.format(self.n_layers)](self['c{}'.format(self.n_layers)](h), test=test))
    h = F.sigmoid(self.c(h))
    return h

def __init__(self, in_ch):
    layers = {}
    w = chainer.initializers.Normal(0.02)
    layers['c0'] = L.Convolution2D(in_ch, 64, 3, 1, 1, initialW=w)
    layers['c1'] = CBR(64, 128, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c2'] = CBR(128, 256, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c3'] = CBR(256, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c4'] = CBR(512, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c5'] = CBR(512, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c6'] = CBR(512, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c7'] = CBR(512, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    super(Encoder, self).__init__(**layers)

def __call__(self, x):
    hs = [F.leaky_relu(self.c0(x))]
    for i in range(1, 8):
        hs.append(self['c%d' % i](hs[i-1]))
    return hs

def __init__(self, in_ch, out_ch):
    layers = {}
    w = chainer.initializers.Normal(0.02)
    layers['c0_0'] = CBR(in_ch, 32, bn=False, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c0_1'] = CBR(out_ch, 32, bn=False, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c1'] = CBR(64, 128, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c2'] = CBR(128, 256, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c3'] = CBR(256, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c4'] = L.Convolution2D(512, 1, 3, 1, 1, initialW=w)
    super(Discriminator, self).__init__(**layers)

def __call__(self, x):
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
    h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
    h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
    h = F.leaky_relu(self.bn3_0(self.c3_0(h)))
    return self.l4(h)

def __call__(self, x):
    h = x
    h = F.leaky_relu(self.c0(h))
    h = F.leaky_relu(self.c1(h))
    h = F.leaky_relu(self.c2(h))
    h = F.leaky_relu(self.c3(h))
    h = F.leaky_relu(self.l4(h))
    h = F.reshape(F.leaky_relu(self.l5(h)), (x.data.shape[0], self.ch, 4, 4))
    h = F.leaky_relu(self.dc3(h))
    h = F.leaky_relu(self.dc2(h))
    h = F.leaky_relu(self.dc1(h))
    h = F.tanh(self.dc0(h))
    return F.mean_absolute_error(h, x)

def __call__(self, x):
    h = self.bn1(F.leaky_relu(self.l1(x)), test=not self.train)
    h = self.bn2(F.leaky_relu(self.l2(h)), test=not self.train)
    h = self.bn3(F.leaky_relu(self.l3(h)), test=not self.train)
    h = self.bn4(F.leaky_relu(self.l4(h)), test=not self.train)
    y = self.l5(h)
    return y

def __call__(self, x):
    hs = [F.leaky_relu(self.c0(x))]
    for i in range(1, 8):
        hs.append(self['c%d' % i](hs[i - 1]))
    return hs

def __init__(self, in_ch, out_ch, will_concat=True, layers={}):
    self.will_concat = will_concat
    channel_expansion = 2 if will_concat else 1
    w = chainer.initializers.Normal(0.02)
    layers['c0_0'] = CBR(in_ch, 32, bn=False, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c0_1'] = CBR(out_ch, 32, bn=False, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c1'] = CBR(32 * channel_expansion, 128, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c2'] = CBR(128, 256, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c3'] = CBR(256, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False)
    layers['c4'] = L.Convolution2D(512, 1, 3, 1, 1, initialW=w)
    super(Discriminator, self).__init__(**layers)

def __call__(self, x):
    l1 = F.leaky_relu(self.l1(x))
    l2 = F.leaky_relu(self.l2(l1))
    out = self.l3(l2)
    return out

def __call__(self, x, train=True):
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h), test=not train))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h), test=not train))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h), test=not train))
    h = F.leaky_relu(self.bn3_0(self.c3_0(h), test=not train))
    h = self.l4(h)
    return F.sum(h) / h.size

def __call__(self, x, train=True):
    h = add_noise(x, test=not train)
    h = F.leaky_relu(add_noise(self.c0_0(h), test=not train))
    h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h), test=not train), test=not train))
    h = self.l4(h)
    return F.sum(h) / h.size

def __call__(self, x, train=True):
    h = add_noise(x, test=not train)
    h = F.leaky_relu(add_noise(self.c0_0(h), test=not train))
    h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn1_0(self.c1_0(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn2_0(self.c2_0(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h), test=not train), test=not train))
    h = self.l4(h)
    return F.sum(h) / h.size

def __call__(self, x, train=True):
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.c0_1(h))
    h = F.leaky_relu(self.c1_0(h))
    h = F.leaky_relu(self.c1_1(h))
    h = F.leaky_relu(self.c2_0(h))
    h = F.leaky_relu(self.c2_1(h))
    h = F.leaky_relu(self.c3_0(h))
    h = self.l4(h)
    return F.sum(h) / h.size

def __call__(self, x, train=True):
    h1 = F.leaky_relu(self.enc1(x))
    h2 = F.leaky_relu(self.norm2(self.enc2(h1), test=not train))
    h3 = F.leaky_relu(self.norm3(self.enc3(h2), test=not train))
    h4 = F.leaky_relu(self.norm4(self.enc4(h3), test=not train))
    mean = self.mean(h4)
    ln_var = self.ln_var(h4)
    return mean, ln_var

def __call__(self, x, train=True):
    h = add_noise(x, test=not train)
    h = F.leaky_relu(add_noise(self.c0_0(h), test=not train))
    h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h), test=not train), test=not train))
    h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h), test=not train), test=not train))
    h2 = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h), test=not train), test=not train))
    h3 = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h2), test=not train), test=not train))
    h = self.l4(h3)
    return F.sum(h) / h.size, h2, h3

def __call__(self, x, train=True):
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h), test=not train))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h), test=not train))
    h2 = F.leaky_relu(self.bn2_1(self.c2_1(h), test=not train))
    h3 = F.leaky_relu(self.bn3_0(self.c3_0(h2), test=not train))
    h = self.l4(h3)
    return F.sum(h) / h.size, h2, h3

def __call__(self, x, train=True):
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h), test=not train))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h), test=not train))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h), test=not train))
    h = F.leaky_relu(self.bn3_0(self.c3_0(h), test=not train))
    mean = self.mean(h)
    ln_var = self.ln_var(h)
    return mean, ln_var