Python keras.layers.convolutional module: Conv2DTranspose() example source code

We extracted the following 11 code examples from open-source Python projects to illustrate how to use keras.layers.convolutional.Conv2DTranspose().
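Before the project code, a minimal standalone sketch (not taken from any of the projects below) shows the core behavior: with padding='same', Conv2DTranspose multiplies each spatial dimension by its stride, which is why it appears so often in GAN generators and segmentation decoders.

from keras.models import Sequential
from keras.layers.convolutional import Conv2DTranspose

model = Sequential()
# With padding='same' and strides=(2, 2), a 7x7x128 input becomes 14x14x64:
# output spatial size = input size * stride.
model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same',
                          input_shape=(7, 7, 128)))
model.summary()  # final output shape: (None, 14, 14, 64)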

Project: keras-contrib    Author: farizrahman4u    | project source | file source
def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a "seed", and outputs images
    of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator to ensure its output
    # also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model
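A quick smoke test for this generator (a usage sketch, assuming NumPy is imported as np and a channels_last image_data_format):

import numpy as np

generator = make_generator()
noise = np.random.normal(size=(16, 100))  # 16 random "seed" vectors
fake = generator.predict(noise)           # shape (16, 28, 28, 1), values in [-1, 1]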
Project: DenseNet    Author: titu1994    | project source | file source
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' Upscaling block (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of filters
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
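The 'deconv' path is a single strided Conv2DTranspose, while 'subpixel' wraps a SubPixelUpscaling (pixel-shuffle) layer between two 3x3 convolutions, an alternative some authors prefer to reduce the checkerboard artifacts transposed convolutions can produce. A hypothetical call, assuming a decoder tensor x from the surrounding DenseNet code:

x = __transition_up_block(x, nb_filters=128, type='deconv')  # doubles H and W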
Project: WGAN_GP    Author: daigo0927    | project source | file source
def GeneratorDeconv(image_size=64):
    # `init` is a weight initializer defined at module scope in the
    # original project; it is not shown in this snippet.
    L = int(image_size)

    inputs = Input(shape=(100,))
    x = Dense(512 * int(L / 16) ** 2)(inputs)  # shape (512*(L/16)**2,)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((int(L / 16), int(L / 16), 512))(x)  # shape (L/16, L/16, 512)
    x = Conv2DTranspose(256, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)  # shape (L/8, L/8, 256)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)  # shape (L/4, L/4, 128)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(64, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)  # shape (L/2, L/2, 64)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(3, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)  # shape (L, L, 3)
    images = Activation('tanh')(x)

    model = Model(inputs=inputs, outputs=images)
    model.summary()
    return model
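A usage sketch (assuming NumPy is imported as np; image_size must be divisible by 16, since the seed feature map is L/16 pixels on each side):

gen = GeneratorDeconv(image_size=64)
z = np.random.uniform(-1, 1, size=(8, 100))
images = gen.predict(z)  # shape (8, 64, 64, 3), values in [-1, 1]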
Project: enet-keras    Author: PavlosMelissinos    | project source | file source
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = UpSampling2D(size=(2, 2))(other)

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        decoder = Activation('relu')(decoder)

    return decoder
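This follows ENet's bottleneck layout: a 1x1 projection down to output // 4 channels, a 3x3 convolution (replaced by a strided 3x3 Conv2DTranspose when upsampling), and a 1x1 expansion back to output channels, summed with a shortcut branch that is itself projected and upsampled whenever the main branch changes shape.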
Project: enet-keras    Author: PavlosMelissinos    | project source | file source
def build(encoder, nc):
    enet = bottleneck(encoder, 64, upsample=True, reverse_module=True)  # bottleneck 4.0
    enet = bottleneck(enet, 64)  # bottleneck 4.1
    enet = bottleneck(enet, 64)  # bottleneck 4.2
    enet = bottleneck(enet, 16, upsample=True, reverse_module=True)  # bottleneck 5.0
    enet = bottleneck(enet, 16)  # bottleneck 5.1

    enet = Conv2DTranspose(filters=nc, kernel_size=(2, 2), strides=(2, 2), padding='same')(enet)
    return enet
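Bottlenecks 4.0 and 5.0 each double the spatial resolution, and the final strided Conv2DTranspose doubles it once more, so the decoder emits a map at 8x the encoder's resolution with nc channels, one per segmentation class.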
Project: enet-keras    Author: PavlosMelissinos    | project source | file source
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = MaxUnpooling2D()([other, reverse_module])

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        # decoder = Activation('relu')(decoder)
        decoder = PReLU(shared_axes=[1, 2])(decoder)

    return decoder
Project: enet-keras    Author: PavlosMelissinos    | project source | file source
def build(encoder, nc):
    network, index_stack = encoder
    enet = bottleneck(network, 64, upsample=True, reverse_module=index_stack.pop())  # bottleneck 4.0
    enet = bottleneck(enet, 64)  # bottleneck 4.1
    enet = bottleneck(enet, 64)  # bottleneck 4.2
    enet = bottleneck(enet, 16, upsample=True, reverse_module=index_stack.pop())  # bottleneck 5.0
    enet = bottleneck(enet, 16)  # bottleneck 5.1

    enet = Conv2DTranspose(filters=nc, kernel_size=(2, 2), strides=(2, 2), padding='same')(enet)
    return enet
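This variant differs from the decoder above in two ways: PReLU activations (with weights shared across the spatial axes) replace ReLU, and the shortcut branch upsamples with MaxUnpooling2D, driven by the pooling indices the encoder saved into index_stack, as in the ENet paper, rather than with parameter-free UpSampling2D.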
Project: vess2ret    Author: costapt    | project source | file source
def Deconvolution(f, output_shape, k=2, s=2, **kwargs):
    """Convenience method for Transposed Convolutions."""
    if KERAS_2:
        # Keras 2's Conv2DTranspose infers its output shape, so
        # `output_shape` is only needed by the Keras 1 branch below.
        return Conv2DTranspose(f,
                               kernel_size=(k, k),
                               strides=(s, s),
                               data_format=K.image_data_format(),
                               **kwargs)
    else:
        return Deconvolution2D(f, k, k, output_shape=output_shape,
                               subsample=(s, s), **kwargs)
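This wrapper papers over the Keras 1 to Keras 2 API change: Deconvolution2D(f, k, k, output_shape=..., subsample=...) became Conv2DTranspose(f, kernel_size=..., strides=...), and Keras 2 infers the output shape on its own, so the output_shape argument only matters in the Keras 1 branch.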
Project: WGAN-in-Keras    Author: tonyabracadabra    | project source | file source
def __call__(self):
        model = Sequential()

        model.add(Dense(1024, kernel_initializer=self.initializer,
                        kernel_regularizer=self.regularizer,
                        input_shape=(self.z_dim,)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

        model.add(Dense(7 * 7 * 128, kernel_initializer=self.initializer,
                        kernel_regularizer=self.regularizer))
        model.add(Reshape((7, 7, 128)))

        model.add(BatchNormalization())
        model.add(Activation('relu'))

        # Transposed-convolution layers: 7x7 -> 14x14 -> 28x28
        model.add(Conv2DTranspose(64, kernel_size=(4, 4), strides=(2, 2), padding='same',
                                  kernel_initializer=self.initializer,
                                  kernel_regularizer=self.regularizer))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

        model.add(Conv2DTranspose(1, kernel_size=(4, 4), strides=(2, 2), padding='same',
                                  kernel_initializer=self.initializer,
                                  kernel_regularizer=self.regularizer))
        model.add(Activation('sigmoid'))
        model.add(Reshape((784,)))

        return model
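Unlike the tanh generators above, this one ends with a sigmoid and Reshape((784,)): it emits flattened 28x28 MNIST images with pixel values in [0, 1].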
Project: Keras-GAN-Animeface-Character    Author: forcecore    | project source | file source
def bilinear2x(x, nfilters):
    '''
    Ugh, I don't like making layers.
    My credit goes to: https://kivantium.net/keras-bilinear
    '''
    return Conv2DTranspose(nfilters, (4, 4),
        strides=(2, 2),
        padding='same',
        kernel_initializer=Constant(bilinear_upsample_weights(2, nfilters)))(x)
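This depends on a bilinear_upsample_weights helper defined elsewhere in the project. A typical implementation, following the standard FCN bilinear-kernel construction (a sketch, not the project's exact code; Conv2DTranspose kernels have shape (height, width, out_channels, in_channels)):

import numpy as np

def bilinear_upsample_weights(factor, nfilters):
    """Weights for a Conv2DTranspose that performs bilinear upsampling."""
    size = 2 * factor - factor % 2                # kernel size; 4 when factor=2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    og = np.ogrid[:size, :size]
    # 2D bilinear kernel: outer product of two triangle filters
    kernel = ((1 - abs(og[0] - center) / factor) *
              (1 - abs(og[1] - center) / factor))
    weights = np.zeros((size, size, nfilters, nfilters), dtype=np.float32)
    for i in range(nfilters):                     # one bilinear kernel per channel
        weights[:, :, i, i] = kernel
    return weights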
Project: Keras-GAN-Animeface-Character    Author: forcecore    | project source | file source
def build_gen( shape ) :
    def deconv2d( x, filters, shape=(4, 4) ) :
        '''
        Conv2DTranspose gives me checkerboard artifacts...
        Select one of the 3.
        '''
        # Simple Conv2DTranspose
        # Not good, compared to upsample + conv2d below.
        x = Conv2DTranspose( filters, shape, padding='same',
            strides=(2, 2), kernel_initializer=Args.kernel_initializer )(x)

        # simple and works
        #x = UpSampling2D( (2, 2) )( x )
        #x = Conv2D( filters, shape, padding='same' )( x )

        # Bilinear2x... Not sure if it is without bug, not tested yet.
        # Tend to make output blurry though
        #x = bilinear2x( x, filters )
        #x = Conv2D( filters, shape, padding='same' )( x )

        x = BatchNormalization(momentum=Args.bn_momentum)( x )
        x = LeakyReLU(alpha=Args.alpha_G)( x )
        return x

    # https://github.com/tdrussell/IllustrationGAN  z predictor...?
    # might help. Not sure.

    noise = Input( shape=Args.noise_shape )
    x = noise
    # 1x1x256
    # noise is not useful for generating images.

    x = Conv2DTranspose( 512, (4, 4),
        kernel_initializer=Args.kernel_initializer )(x)
    x = BatchNormalization(momentum=Args.bn_momentum)( x )
    x = LeakyReLU(alpha=Args.alpha_G)( x )
    # 4x4
    x = deconv2d( x, 256 )
    # 8x8
    x = deconv2d( x, 128 )
    # 16x16
    x = deconv2d( x, 64 )
    # 32x32

    # Extra layer
    x = Conv2D( 64, (3, 3), padding='same',
        kernel_initializer=Args.kernel_initializer )( x )
    x = BatchNormalization(momentum=Args.bn_momentum)( x )
    x = LeakyReLU(alpha=Args.alpha_G)( x )
    # 32x32

    x = Conv2DTranspose( 3, (4, 4), padding='same', activation='tanh',
        strides=(2, 2), kernel_initializer=Args.kernel_initializer )(x)
    # 64x64

    return models.Model( inputs=noise, outputs=x )
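Two details worth noting: the shape argument of build_gen is unused (the input size comes from Args.noise_shape, a 1x1x256 tensor per the comment), and the first Conv2DTranspose keeps the default padding='valid', so the 4x4 kernel expands the 1x1 seed to 4x4 before the strided deconv2d blocks walk the resolution up through 8, 16, and 32 to the final 64x64 output.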