Python keras.layers.convolutional module, Deconvolution2D() example source code

The following 11 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.convolutional.Deconvolution2D().
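
Before diving into the project snippets, here is a minimal, hedged sketch of the Keras 1.x call signature this page documents (the shapes and filter counts below are illustrative assumptions; the commented lines show the rough Keras 2 equivalent, Conv2DTranspose):

# Minimal sketch (assumption: Keras 1.x functional API, Theano-style 'th' dim ordering).
from keras.layers import Input
from keras.layers.convolutional import Deconvolution2D
from keras.models import Model

inp = Input(shape=(64, 7, 7))                           # (channels, rows, cols)
out = Deconvolution2D(32, 5, 5,                         # nb_filter, nb_row, nb_col
                      output_shape=(None, 32, 14, 14),  # target shape, batch dim included
                      subsample=(2, 2), border_mode='same',
                      dim_ordering='th')(inp)
model = Model(input=inp, output=out)

# Rough Keras 2 equivalent (no output_shape argument; the output shape is inferred):
#   from keras.layers import Conv2DTranspose
#   out = Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same',
#                         data_format='channels_first')(inp)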

Project: tf-wgan    Author: kuleshov
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
  n_g_hid1 = 1024 # size of hidden layer in generator layer 1
  n_g_hid2 = 128  # size of hidden layer in generator layer 2

  x = Dense(n_g_hid1)(Xk_g)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)

  x = Dense(n_g_hid2*7*7)(x)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)
  x = Reshape((n_g_hid2, 7, 7))(x)

  x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14), 
        border_mode='same', activation=None, subsample=(2,2), 
        init='orthogonal', dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28), 
        border_mode='same', activation='sigmoid', subsample=(2,2), 
        init='orthogonal', dim_ordering='th')(x)

  return g
Project: tf-wgan    Author: kuleshov
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
  n_g_hid1 = 1024 # size of hidden layer in generator layer 1
  n_g_hid2 = 128  # size of hidden layer in generator layer 2

  x = Dense(n_g_hid1, init=conv2D_init)(Xk_g)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)

  x = Dense(n_g_hid2*7*7, init=conv2D_init)(x)
  x = Reshape((n_g_hid2, 7, 7))(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14), 
        border_mode='same', activation=None, subsample=(2,2), 
        init=conv2D_init, dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28), 
        border_mode='same', activation='sigmoid', subsample=(2,2), 
        init=conv2D_init, dim_ordering='th')(x)

  return g
Project: keras-face-attribute-manipulation    Author: wkcw
def transform_model(weight_loss_pix=5e-4):
    inputs = Input(shape=(128, 128, 3))
    x1 = Convolution2D(64, 5, 5, border_mode='same')(inputs)
    x2 = LeakyReLU(alpha=0.3, name='wkcw')(x1)
    x3 = BatchNormalization()(x2)
    x4 = Convolution2D(128, 4, 4, border_mode='same', subsample=(2,2))(x3)
    x5 = LeakyReLU(alpha=0.3)(x4)
    x6 = BatchNormalization()(x5)
    x7 = Convolution2D(256, 4, 4, border_mode='same', subsample=(2,2))(x6)
    x8 = LeakyReLU(alpha=0.3)(x7)
    x9 = BatchNormalization()(x8)
    x10 = Deconvolution2D(128, 3, 3, output_shape=(None, 64, 64, 128), border_mode='same', subsample=(2,2))(x9)
    x11 = BatchNormalization()(x10)
    x12 = Deconvolution2D(64, 3, 3, output_shape=(None, 128, 128, 64), border_mode='same', subsample=(2,2))(x11)
    x13 = BatchNormalization()(x12)
    x14 = Deconvolution2D(3, 4, 4, output_shape=(None, 128, 128, 3), border_mode='same', activity_regularizer=activity_l1(weight_loss_pix))(x13)
    output = merge([inputs, x14], mode='sum')
    model = Model(input=inputs, output=output)

    return model
Project: dem    Author: hengyuan-hu
def _deconv_shortcut(x, residual, output_shape):
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height).
    # Should be int if network architecture is correctly configured.
    stride_width = residual._keras_shape[1] // x._keras_shape[1]
    stride_height = residual._keras_shape[2] // x._keras_shape[2]
    equal_channels = residual._keras_shape[3] == x._keras_shape[3]

    shortcut = x
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Deconvolution2D(
            residual._keras_shape[3], 1, 1,
            subsample=(stride_width, stride_height),
            output_shape=output_shape,
            init="he_normal", border_mode="valid")(x)
    return merge([shortcut, residual], mode="sum")
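
To make the stride arithmetic above concrete, a hypothetical worked example (the shapes are assumptions; channels-last indexing, matching the _keras_shape[1..3] lookups in the function):

# Hypothetical shapes (assumption), channels-last as in the indexing above:
#   x._keras_shape        = (None, 8, 8, 64)
#   residual._keras_shape = (None, 16, 16, 32)
# stride_width   = 16 // 8 = 2
# stride_height  = 16 // 8 = 2
# equal_channels = (32 == 64) -> False
# => the shortcut becomes a 1x1 Deconvolution2D with subsample=(2, 2) and the
#    supplied output_shape, so it can be summed with residual.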


# Builds a residual block with repeating bottleneck blocks.
Project: keras    Author: GeekLiB
def test_deconvolution_2d():
    nb_samples = 2
    nb_filter = 2
    stack_size = 3
    nb_row = 10
    nb_col = 6

    for border_mode in _convolution_border_modes:
        for subsample in [(1, 1), (2, 2)]:
            if border_mode == 'same' and subsample != (1, 1):
                continue

            rows = conv_input_length(nb_row, 3, border_mode, subsample[0])
            cols = conv_input_length(nb_col, 3, border_mode, subsample[1])
            layer_test(convolutional.Deconvolution2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'output_shape': (nb_samples, nb_filter, rows, cols),
                               'border_mode': border_mode,
                               'subsample': subsample,
                               'dim_ordering': 'th'},
                       input_shape=(nb_samples, stack_size, nb_row, nb_col),
                       fixed_batch_size=True)

            layer_test(convolutional.Deconvolution2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'output_shape': (nb_samples, nb_filter, rows, cols),
                               'border_mode': border_mode,
                               'dim_ordering': 'th',
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample': subsample},
                       input_shape=(nb_samples, stack_size, nb_row, nb_col),
                       fixed_batch_size=True)
Project: keras-tf-Super-Resolution    Author: olgaliak
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
            Creates a model to remove / reduce noise from upscaled images.
        """
        from keras.layers.convolutional import Deconvolution2D

        # Perform check that model input shape is divisible by 4
        init = super(DenoisingAutoEncoderSR, self).create_model(height, width, channels, load_weights, batch_size)

        if K.image_dim_ordering() == "th":
            output_shape = (None, channels, width, height)
        else:
            output_shape = (None, width, height, channels)

        level1_1 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(init)
        level2_1 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(level1_1)

        level2_2 = Deconvolution2D(self.n1, 3, 3, activation='relu', output_shape=output_shape, border_mode='same')(level2_1)
        level2 = merge([level2_1, level2_2], mode='sum')

        level1_2 = Deconvolution2D(self.n1, 3, 3, activation='relu', output_shape=output_shape, border_mode='same')(level2)
        level1 = merge([level1_1, level1_2], mode='sum')

        decoded = Convolution2D(channels, 5, 5, activation='linear', border_mode='same')(level1)

        model = Model(init, decoded)
        adam = optimizers.Adam(lr=1e-3)
        model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights(self.weight_path)

        self.model = model
        return model
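
A hedged usage sketch of this method; the DenoisingAutoEncoderSR constructor is not shown in this snippet, so the instantiation below is an assumption:

# Hypothetical usage (assumption: the subclass can be constructed with defaults).
sr = DenoisingAutoEncoderSR()
model = sr.create_model(height=32, width=32, channels=3,
                        load_weights=False, batch_size=128)
model.summary()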
Project: vess2ret    Author: costapt
def Deconvolution(f, output_shape, k=2, s=2, **kwargs):
    """Convenience method for Transposed Convolutions."""
    if KERAS_2:
        # Note: Keras 2's Conv2DTranspose infers the output shape itself and does
        # not accept an output_shape argument; output_shape is only used by the
        # Keras 1 branch below.
        return Conv2DTranspose(f,
                               kernel_size=(k, k),
                               strides=(s, s),
                               data_format=K.image_data_format(),
                               **kwargs)
    else:
        return Deconvolution2D(f, k, k, output_shape=output_shape,
                               subsample=(s, s), **kwargs)
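
A hedged usage sketch of this wrapper (the tensor, filter count, and shapes are assumptions):

# Hypothetical call (assumption): upsample a (None, 16, 16, 128) tensor x to
# (None, 32, 32, 64) with the default 2x2 kernel and stride.
up = Deconvolution(64, output_shape=(None, 32, 32, 64))(x)
# Under Keras 1 this is Deconvolution2D(64, 2, 2, output_shape=..., subsample=(2, 2));
# under Keras 2, Conv2DTranspose infers the output shape and output_shape is unused.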
Project: keras-customized    Author: ambrite
def test_deconvolution_2d():
    nb_samples = 2
    nb_filter = 2
    stack_size = 3
    nb_row = 10
    nb_col = 6

    for border_mode in _convolution_border_modes:
        for subsample in [(1, 1), (2, 2)]:
            if border_mode == 'same' and subsample != (1, 1):
                continue

            rows = conv_input_length(nb_row, 3, border_mode, subsample[0])
            cols = conv_input_length(nb_col, 3, border_mode, subsample[1])
            layer_test(convolutional.Deconvolution2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'output_shape': (nb_samples, nb_filter, rows, cols),
                               'border_mode': border_mode,
                               'subsample': subsample,
                               'dim_ordering': 'th'},
                       input_shape=(nb_samples, stack_size, nb_row, nb_col),
                       fixed_batch_size=True)

            layer_test(convolutional.Deconvolution2D,
                       kwargs={'nb_filter': nb_filter,
                               'nb_row': 3,
                               'nb_col': 3,
                               'output_shape': (nb_samples, nb_filter, rows, cols),
                               'border_mode': border_mode,
                               'dim_ordering': 'th',
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample': subsample},
                       input_shape=(nb_samples, stack_size, nb_row, nb_col),
                       fixed_batch_size=True)
Project: keras    Author: NVIDIA
def test_deconvolution_2d():
    nb_samples = 2
    nb_filter = 2
    stack_size = 3
    nb_row = 10
    nb_col = 6

    for batch_size in [None, nb_samples]:
        for border_mode in _convolution_border_modes:
            for subsample in [(1, 1), (2, 2)]:
                if border_mode == 'same' and subsample != (1, 1):
                    continue
                print(batch_size, border_mode, subsample)
                rows = conv_input_length(nb_row, 3, border_mode, subsample[0])
                cols = conv_input_length(nb_col, 3, border_mode, subsample[1])
                layer_test(convolutional.Deconvolution2D,
                           kwargs={'nb_filter': nb_filter,
                                   'nb_row': 3,
                                   'nb_col': 3,
                                   'output_shape': (batch_size, nb_filter, rows, cols),
                                   'border_mode': border_mode,
                                   'subsample': subsample,
                                   'dim_ordering': 'th'},
                           input_shape=(nb_samples, stack_size, nb_row, nb_col),
                           fixed_batch_size=True)

                layer_test(convolutional.Deconvolution2D,
                           kwargs={'nb_filter': nb_filter,
                                   'nb_row': 3,
                                   'nb_col': 3,
                                   'output_shape': (batch_size, nb_filter, rows, cols),
                                   'border_mode': border_mode,
                                   'dim_ordering': 'th',
                                   'W_regularizer': 'l2',
                                   'b_regularizer': 'l2',
                                   'activity_regularizer': 'activity_l2',
                                   'subsample': subsample},
                           input_shape=(nb_samples, stack_size, nb_row, nb_col),
                           fixed_batch_size=True)
Project: Fully-Connected-DenseNets-Semantic-Segmentation    Author: titu1994
def __transition_up_block(ip, nb_filters, type='upsampling', output_shape=None, weight_decay=1E-4):
    ''' Upscaling block (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of filters
        type: can be 'upsampling', 'subpixel', 'deconv', or 'atrous'. Determines type of upsampling performed
        output_shape: required if type = 'deconv'. Output shape of tensor
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Convolution2D(nb_filters, 3, 3, activation="relu", border_mode='same', W_regularizer=l2(weight_decay),
                          bias=False, init='he_uniform')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Convolution2D(nb_filters, 3, 3, activation="relu", border_mode='same', W_regularizer=l2(weight_decay),
                          bias=False, init='he_uniform')(x)
    elif type == 'atrous':
        # waiting on https://github.com/fchollet/keras/issues/4018
        x = AtrousConvolution2D(nb_filters, 3, 3, activation="relu", W_regularizer=l2(weight_decay),
                                bias=False, atrous_rate=(2, 2), init='he_uniform')(ip)
    else:
        x = Deconvolution2D(nb_filters, 3, 3, output_shape, activation='relu', border_mode='same',
                            subsample=(2, 2), init='he_uniform')(ip)

    return x
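
A hedged usage sketch of this block (the tensor, filter count, and output_shape are assumptions; output_shape is only needed for the 'deconv' branch):

# Hypothetical call (assumption): upsample a (None, 16, 16, 64) feature map x.
x = __transition_up_block(x, nb_filters=64, type='deconv',
                          output_shape=(None, 32, 32, 64))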
Project: dem    Author: hengyuan-hu
def _bn_relu_deconv(nb_filter, nb_row, nb_col, subsample, output_shape):
    def f(x):
        norm = BatchNormalization(mode=2, axis=3)(x)
        activation = Activation("relu")(norm)
        return Deconvolution2D(
            nb_filter, nb_row, nb_col, W_regularizer=l2(1e-4),
            subsample=subsample, output_shape=output_shape,
            init="he_normal", border_mode="same")(activation)
    return f


# Bottleneck architecture for > 34 layer resnet.
# Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
# Returns a final conv layer of nb_filters * 4
# def _bottleneck(nb_filters, init_subsample=(1, 1)):
#     def f(x):
#         conv_1_1 = _bn_relu_conv(nb_filters, 1, 1, subsample=init_subsample)(x)
#         conv_3_3 = _bn_relu_conv(nb_filters, 3, 3)(conv_1_1)
#         residual = _bn_relu_conv(nb_filters * 4, 1, 1)(conv_3_3)
#         return _shortcut(x, residual)
#     return f


# Basic 3 X 3 convolution blocks.
# Use for resnet with layers <= 34
# Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf