Python keras.layers.convolutional module, UpSampling2D() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.layers.convolutional.UpSampling2D().
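Before the project examples, here is a minimal standalone sketch of what the layer does (not taken from any of the projects below, and assuming a TensorFlow backend with 'channels_last' ordering): UpSampling2D performs nearest-neighbour upsampling, repeating the rows and columns of its input by the given factors, so a 7x7 feature map becomes 14x14 with the default size=(2, 2).

import numpy as np
from keras.models import Sequential
from keras.layers.convolutional import UpSampling2D

# one-layer model that doubles the spatial resolution of its input
# (the input_shape assumes channels_last ordering; illustrative sketch only)
model = Sequential()
model.add(UpSampling2D(size=(2, 2), input_shape=(7, 7, 128)))
print(model.output_shape)  # (None, 14, 14, 128)

# nearest-neighbour upsampling repeats every value along both spatial axes,
# which np.repeat reproduces exactly
x = np.random.rand(1, 7, 7, 128).astype('float32')
y = model.predict(x)
assert np.allclose(y, x.repeat(2, axis=1).repeat(2, axis=2))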

Project: DenseNet | Author: titu1994
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of filters
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
Project: lsun_2017 | Author: ternaus
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model
Project: Deep-Learning-with-Keras | Author: PacktPublishing
def model_generator():
    model = Sequential()
    nch = 256
    reg = lambda: l1l2(l1=1e-7, l2=1e-7)
    h = 5
    model.add(Dense(nch * 4 * 4, input_dim=100, W_regularizer=reg()))
    model.add(BatchNormalization(mode=0))
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(Convolution2D(nch // 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(nch // 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(nch // 4, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
Project: Deep-Learning-with-Keras | Author: PacktPublishing
def model_generator():
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V)
Project: Keras-GAN | Author: eriklindernoren
def build_generator(self):

        model = Sequential()

        model.add(Dense(1024, activation='relu', input_dim=self.latent_dim))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(128 * 7 * 7, activation="relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(self.channels, kernel_size=4, padding='same'))
        model.add(Activation("tanh"))

        model.summary()

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        return Model(gen_input, img)
Project: Keras-GAN | Author: eriklindernoren
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=100))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(100,))
        img = model(noise)

        return Model(noise, img)
Project: Super-Resolution-using-Generative-Adversarial-Networks | Author: titu1994
def _upscale_block(self, ip, id):
        '''
        As per the suggestion from http://distill.pub/2016/deconv-checkerboard/, I am swapping out
        SubPixelConvolution for simple Nearest Neighbour Upsampling
        '''
        init = ip

        x = Convolution2D(128, 3, 3, activation="linear", border_mode='same', name='sr_res_upconv1_%d' % id,
                          init=self.init)(init)
        x = LeakyReLU(alpha=0.25, name='sr_res_up_lr_%d_1_1' % id)(x)
        x = UpSampling2D(name='sr_res_upscale_%d' % id)(x)
        #x = SubPixelUpscaling(r=2, channels=32)(x)
        x = Convolution2D(128, 3, 3, activation="linear", border_mode='same', name='sr_res_filter1_%d' % id,
                          init=self.init)(x)
        x = LeakyReLU(alpha=0.3, name='sr_res_up_lr_%d_1_2' % id)(x)

        return x
Project: deeplearning_keras | Author: gazzola
def model_generator():
    model = Sequential()
    nch = 256
    reg = lambda: l1l2(l1=1e-7, l2=1e-7)
    h = 5
    model.add(Dense(nch * 4 * 4, input_dim=100, W_regularizer=reg()))
    model.add(BatchNormalization(mode=0))
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(Convolution2D(nch // 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(nch // 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(nch // 4, h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
Project: deeplearning_keras | Author: gazzola
def model_generator():
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V)
Project: jamespy_py3 | Author: jskDr
def generator_model_r1():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))

    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model_r1():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))

    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: dcgan | Author: kyloon
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((128,7,7), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2,2), dim_ordering="th"))
    model.add(Convolution2D(64,5,5, border_mode='same', dim_ordering="th"))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2,2), dim_ordering="th"))
    model.add(Convolution2D(1,5,5, border_mode='same', dim_ordering="th"))
    model.add(Activation('tanh'))
    return model
Project: lsun_2017 | Author: ternaus
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model
Project: Deep-Learning-with-Keras | Author: PacktPublishing
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((128, 7, 7), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: enet-keras | Author: PavlosMelissinos
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = UpSampling2D(size=(2, 2))(other)

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        decoder = Activation('relu')(decoder)

    return decoder
Project: keras | Author: GeekLiB
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 1, 28, 28)
    cnn = Sequential()

    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    cnn.add(Reshape((128, 7, 7)))

    # upsample to (..., 14, 14)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Convolution2D(256, 5, 5, border_mode='same',
                          activation='relu', init='glorot_normal'))

    # upsample to (..., 28, 28)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Convolution2D(128, 5, 5, border_mode='same',
                          activation='relu', init='glorot_normal'))

    # take a channel axis reduction
    cnn.add(Convolution2D(1, 2, 2, border_mode='same',
                          activation='tanh', init='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    # 10 classes in MNIST
    cls = Flatten()(Embedding(10, latent_size,
                              init='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = merge([latent, cls], mode='mul')

    fake_image = cnn(h)

    return Model(input=[latent, image_class], output=fake_image)
Project: keras | Author: GeekLiB
def test_upsampling_2d():
    nb_samples = 2
    stack_size = 2
    input_nb_row = 11
    input_nb_col = 12

    for dim_ordering in ['th', 'tf']:
        if dim_ordering == 'th':
            input = np.random.rand(nb_samples, stack_size, input_nb_row,
                                   input_nb_col)
        else:  # tf
            input = np.random.rand(nb_samples, input_nb_row, input_nb_col,
                                   stack_size)

        for length_row in [2, 3, 9]:
            for length_col in [2, 3, 9]:
                layer = convolutional.UpSampling2D(
                    size=(length_row, length_col),
                    dim_ordering=dim_ordering)
                layer.build(input.shape)
                output = layer(K.variable(input))
                np_output = K.eval(output)
                if dim_ordering == 'th':
                    assert np_output.shape[2] == length_row * input_nb_row
                    assert np_output.shape[3] == length_col * input_nb_col
                else:  # tf
                    assert np_output.shape[1] == length_row * input_nb_row
                    assert np_output.shape[2] == length_col * input_nb_col

                # compare with numpy
                if dim_ordering == 'th':
                    expected_out = np.repeat(input, length_row, axis=2)
                    expected_out = np.repeat(expected_out, length_col, axis=3)
                else:  # tf
                    expected_out = np.repeat(input, length_row, axis=1)
                    expected_out = np.repeat(expected_out, length_col, axis=2)

                assert_allclose(np_output, expected_out)
Project: keras-tf-Super-Resolution | Author: olgaliak
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        # Perform check that model input shape is divisible by 4
        init = super(DeepDenoiseSR, self).create_model(height, width, channels, load_weights, batch_size)

        c1 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(init)
        c1 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(c1)

        x = MaxPooling2D((2, 2))(c1)

        c2 = Convolution2D(self.n2, 3, 3, activation='relu', border_mode='same')(x)
        c2 = Convolution2D(self.n2, 3, 3, activation='relu', border_mode='same')(c2)

        x = MaxPooling2D((2, 2))(c2)

        c3 = Convolution2D(self.n3, 3, 3, activation='relu', border_mode='same')(x)

        x = UpSampling2D()(c3)

        c2_2 = Convolution2D(self.n2, 3, 3, activation='relu', border_mode='same')(x)
        c2_2 = Convolution2D(self.n2, 3, 3, activation='relu', border_mode='same')(c2_2)

        m1 = merge([c2, c2_2], mode='sum')
        m1 = UpSampling2D()(m1)

        c1_2 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(m1)
        c1_2 = Convolution2D(self.n1, 3, 3, activation='relu', border_mode='same')(c1_2)

        m2 = merge([c1, c1_2], mode='sum')

        decoded = Convolution2D(channels, 5, 5, activation='linear', border_mode='same')(m2)

        model = Model(init, decoded)
        adam = optimizers.Adam(lr=1e-3)
        model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights(self.weight_path)

        self.model = model
        return model
Project: Keras-FCN | Author: theduynguyen
def testnet_fcn(n_classes):
    stride = 32
    input_tensor = Input(shape=(None, None, 3))
    x = Convolution2D(4,5,5,name='conv',
                    activation = 'relu', border_mode='same', subsample= (stride,stride))(input_tensor)

    x = Softmax4D(axis=-1)(x)
    x = UpSampling2D(size=(stride,stride))(x)
    x = Convolution2D(n_classes,3,3,name = 'pred_up',border_mode = 'same')(x)

    model = Model(input=input_tensor,output=x)

    return model, stride
Project: Keras-GAN | Author: eriklindernoren
def build_generator(self):

        noise_shape = (100,)

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_shape=noise_shape))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, kernel_size=4, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=noise_shape)
        img = model(noise)

        return Model(noise, img)
Project: Keras-GAN | Author: eriklindernoren
def build_generator(self):


        model = Sequential()

        # Encoder
        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Conv2D(512, kernel_size=1, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))

        # Decoder
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation('tanh'))

        model.summary()

        masked_img = Input(shape=self.img_shape)
        gen_missing = model(masked_img)

        return Model(masked_img, gen_missing)
Project: Keras-GAN | Author: eriklindernoren
def build_generator(self):


        model = Sequential()

        # Encoder
        model.add(Conv2D(64, kernel_size=4, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(Activation('relu'))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding="same"))
        model.add(Activation('relu'))
        model.add(Conv2D(256, kernel_size=4, strides=2, padding="same"))
        model.add(Activation('relu'))

        # Decoder
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(Activation('relu'))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(Activation('relu'))
        model.add(UpSampling2D())
        model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
        model.add(Activation('tanh'))

        model.summary()

        masked_img = Input(shape=self.img_shape)
        img = model(masked_img)

        return Model(masked_img, img)
Project: Keras-GAN | Author: eriklindernoren
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=100))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(100,))
        label = Input(shape=(1,), dtype='int32')

        label_embedding = Flatten()(Embedding(self.num_classes, 100)(label))

        input = multiply([noise, label_embedding])

        img = model(input)

        return Model([noise, label], img)
Project: image-segmentation-keras | Author: divamgupta
def Unet(nClasses, optimizer=None, input_width=360, input_height=480, nChannels=1):

    inputs = Input((nChannels, input_height, input_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)

    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)

    conv6 = Convolution2D(nClasses, 1, 1, activation='relu',border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses,input_height*input_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)


    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Project: keras-customized | Author: ambrite
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 1, 28, 28)
    cnn = Sequential()

    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    cnn.add(Reshape((128, 7, 7)))

    # upsample to (..., 14, 14)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Convolution2D(256, 5, 5, border_mode='same',
                          activation='relu', init='glorot_normal'))

    # upsample to (..., 28, 28)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Convolution2D(128, 5, 5, border_mode='same',
                          activation='relu', init='glorot_normal'))

    # take a channel axis reduction
    cnn.add(Convolution2D(1, 2, 2, border_mode='same',
                          activation='tanh', init='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    # 10 classes in MNIST
    cls = Flatten()(Embedding(10, latent_size,
                              init='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = merge([latent, cls], mode='mul')

    fake_image = cnn(h)

    return Model(input=[latent, image_class], output=fake_image)
Project: keras-customized | Author: ambrite
def test_upsampling_2d():
    nb_samples = 2
    stack_size = 2
    input_nb_row = 11
    input_nb_col = 12

    for dim_ordering in ['th', 'tf']:
        if dim_ordering == 'th':
            input = np.random.rand(nb_samples, stack_size, input_nb_row,
                                   input_nb_col)
        else:  # tf
            input = np.random.rand(nb_samples, input_nb_row, input_nb_col,
                                   stack_size)

        for length_row in [2, 3, 9]:
            for length_col in [2, 3, 9]:
                layer = convolutional.UpSampling2D(
                    size=(length_row, length_col),
                    dim_ordering=dim_ordering)
                layer.build(input.shape)
                output = layer(K.variable(input))
                np_output = K.eval(output)
                if dim_ordering == 'th':
                    assert np_output.shape[2] == length_row * input_nb_row
                    assert np_output.shape[3] == length_col * input_nb_col
                else:  # tf
                    assert np_output.shape[1] == length_row * input_nb_row
                    assert np_output.shape[2] == length_col * input_nb_col

                # compare with numpy
                if dim_ordering == 'th':
                    expected_out = np.repeat(input, length_row, axis=2)
                    expected_out = np.repeat(expected_out, length_col, axis=3)
                else:  # tf
                    expected_out = np.repeat(input, length_row, axis=1)
                    expected_out = np.repeat(expected_out, length_col, axis=2)

                assert_allclose(np_output, expected_out)
Project: keras-dcgan | Author: jacobgil
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((7, 7, 128), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(1, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    return model
Project: deblocking | Author: yydlmzyz
def upscale(input,filters,kernel_size):#128 64 64
    conv_1 = Conv2D(filters, (kernel_size, kernel_size), padding='same', kernel_initializer='glorot_uniform')(input)
    upscale_1=UpSampling2D(size=(2, 2))(conv_1)
    relu_1 = LeakyReLU(alpha=0.25)(upscale_1)
    conv_2=Conv2D(filters,(kernel_size, kernel_size), padding='same',kernel_initializer='glorot_uniform')(relu_1)
    upscale_2=UpSampling2D(size=(2, 2))(conv_2)
    relu_2 = LeakyReLU(alpha=0.25)(upscale_2)
    return relu_2
Project: qtim_ROP | Author: QTIM-Lab
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((128, 7, 7), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: keras | Author: NVIDIA
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 1, 28, 28)
    cnn = Sequential()

    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    cnn.add(Reshape((128, 7, 7)))

    # upsample to (..., 14, 14)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Convolution2D(256, 5, 5, border_mode='same',
                          activation='relu', init='glorot_normal'))

    # upsample to (..., 28, 28)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Convolution2D(128, 5, 5, border_mode='same',
                          activation='relu', init='glorot_normal'))

    # take a channel axis reduction
    cnn.add(Convolution2D(1, 2, 2, border_mode='same',
                          activation='tanh', init='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    # 10 classes in MNIST
    cls = Flatten()(Embedding(10, latent_size,
                              init='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = merge([latent, cls], mode='mul')

    fake_image = cnn(h)

    return Model(input=[latent, image_class], output=fake_image)
Project: keras | Author: NVIDIA
def test_upsampling_2d():
    nb_samples = 2
    stack_size = 2
    input_nb_row = 11
    input_nb_col = 12

    for dim_ordering in ['th', 'tf']:
        if dim_ordering == 'th':
            input = np.random.rand(nb_samples, stack_size, input_nb_row,
                                   input_nb_col)
        else:  # tf
            input = np.random.rand(nb_samples, input_nb_row, input_nb_col,
                                   stack_size)

        for length_row in [2, 3, 9]:
            for length_col in [2, 3, 9]:
                layer = convolutional.UpSampling2D(
                    size=(length_row, length_col),
                    dim_ordering=dim_ordering)
                layer.build(input.shape)
                output = layer(K.variable(input))
                np_output = K.eval(output)
                if dim_ordering == 'th':
                    assert np_output.shape[2] == length_row * input_nb_row
                    assert np_output.shape[3] == length_col * input_nb_col
                else:  # tf
                    assert np_output.shape[1] == length_row * input_nb_row
                    assert np_output.shape[2] == length_col * input_nb_col

                # compare with numpy
                if dim_ordering == 'th':
                    expected_out = np.repeat(input, length_row, axis=2)
                    expected_out = np.repeat(expected_out, length_col, axis=3)
                else:  # tf
                    expected_out = np.repeat(input, length_row, axis=1)
                    expected_out = np.repeat(expected_out, length_col, axis=2)

                assert_allclose(np_output, expected_out)
Project: deeplearning_keras | Author: gazzola
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((128, 7, 7), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: Fully-Connected-DenseNets-Semantic-Segmentation | Author: titu1994
def __transition_up_block(ip, nb_filters, type='upsampling', output_shape=None, weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of filters
        type: can be 'upsampling', 'subpixel', 'deconv', or 'atrous'. Determines type of upsampling performed
        output_shape: required if type = 'deconv'. Output shape of tensor
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Convolution2D(nb_filters, 3, 3, activation="relu", border_mode='same', W_regularizer=l2(weight_decay),
                          bias=False, init='he_uniform')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Convolution2D(nb_filters, 3, 3, activation="relu", border_mode='same', W_regularizer=l2(weight_decay),
                          bias=False, init='he_uniform')(x)
    elif type == 'atrous':
        # waiting on https://github.com/fchollet/keras/issues/4018
        x = AtrousConvolution2D(nb_filters, 3, 3, activation="relu", W_regularizer=l2(weight_decay),
                                bias=False, atrous_rate=(2, 2), init='he_uniform')(ip)
    else:
        x = Deconvolution2D(nb_filters, 3, 3, output_shape, activation='relu', border_mode='same',
                            subsample=(2, 2), init='he_uniform')(ip)

    return x
Project: keras_zoo | Author: david-vazquez
def build_generator(img_shape=[100], n_channels=200, l2_reg=0.):

    # Build Generative model ...
    g_input = Input(shape=img_shape)
    H = Dense(n_channels*14*14, init='glorot_normal')(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    if K.image_dim_ordering() == 'th':
        H = Reshape([n_channels, 14, 14])(H)
    else:
        H = Reshape([14, 14, n_channels])(H)
    H = UpSampling2D(size=(2, 2))(H)

    H = Convolution2D(n_channels // 2, 3, 3, border_mode='same',
                      init='glorot_uniform')(H)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)

    H = Convolution2D(n_channels // 4, 3, 3, border_mode='same',
                      init='glorot_uniform')(H)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)

    H = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(H)
    g_output = Activation('sigmoid')(H)

    model = Model(g_input, g_output)

    return model
Project: jamespy_py3 | Author: jskDr
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    #model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model_rate2():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    #model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    #model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(Activation('tanh'))
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model_r0():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    #model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3 | Author: jskDr
def generator_model():  # CDNN Model
    model = Sequential()
    model.add(Convolution2D(
        1, 5, 5,
        border_mode='same',
        input_shape=(1, 14, 14)))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    #model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(1, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: Kerasimo | Author: s-macke
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 1, 28, 28)
    cnn = Sequential()

    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    cnn.add(Reshape((128, 7, 7)))

    # upsample to (..., 14, 14)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(256, 5, padding='same',
                   activation='relu',
                   kernel_initializer='glorot_normal'))

    # upsample to (..., 28, 28)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(128, 5, padding='same',
                   activation='relu',
                   kernel_initializer='glorot_normal'))

    # take a channel axis reduction
    cnn.add(Conv2D(1, 2, padding='same',
                   activation='tanh',
                   kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    # 10 classes in MNIST
    cls = Flatten()(Embedding(10, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = layers.multiply([latent, cls])

    fake_image = cnn(h)

    return Model([latent, image_class], fake_image)
Project: pCVR | Author: xjtushilei
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 1, 28, 28)
    cnn = Sequential()

    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    cnn.add(Reshape((128, 7, 7)))

    # upsample to (..., 14, 14)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(256, 5, padding='same',
                   activation='relu',
                   kernel_initializer='glorot_normal'))

    # upsample to (..., 28, 28)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(128, 5, padding='same',
                   activation='relu',
                   kernel_initializer='glorot_normal'))

    # take a channel axis reduction
    cnn.add(Conv2D(1, 2, padding='same',
                   activation='tanh',
                   kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    # 10 classes in MNIST
    cls = Flatten()(Embedding(10, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = layers.multiply([latent, cls])

    fake_image = cnn(h)

    return Model([latent, image_class], fake_image)
Project: Keras-FCN | Author: theduynguyen
def resnet50_fcn(n_classes):
    # load ResNet
    input_tensor = Input(shape=(None, None, 3))
    base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)

    # add classifier
    x = base_model.get_layer('activation_49').output
    x = Dropout(0.5)(x)
    x = Convolution2D(n_classes,1,1,name = 'pred_32',init='zero',border_mode = 'valid')(x)

    # add upsampler
    stride = 32
    x = UpSampling2D(size=(stride,stride))(x)
    x = Convolution2D(n_classes,5,5,name = 'pred_32s',init='zero',border_mode = 'same')(x)
    x = Softmax4D(axis=-1)(x)

    model = Model(input=base_model.input,output=x)

    # create bilinear interpolation
    w = model.get_layer('pred_32s').get_weights()
    model.get_layer('pred_32s').set_weights([bilinear_interpolation(w), w[1]])

    # fine-tune 
    train_layers = ['pred_32',
                    'pred_32s',

                    'bn5c_branch2c', 
                    'res5c_branch2c',
                    'bn5c_branch2b', 
                    'res5c_branch2b',
                    'bn5c_branch2a', 
                    'res5c_branch2a',

                    'bn5b_branch2c', 
                    'res5b_branch2c',
                    'bn5b_branch2b', 
                    'res5b_branch2b',
                    'bn5b_branch2a', 
                    'res5b_branch2a',

                    'bn5a_branch2c', 
                    'res5a_branch2c',
                    'bn5a_branch2b', 
                    'res5a_branch2b',
                    'bn5a_branch2a', 
                    'res5a_branch2a']

    for l in model.layers:
        if l.name in train_layers:
            l.trainable = True
        else :
            l.trainable = False

    return model, stride
Project: Keras-FCN | Author: theduynguyen
def resnet50_16s_fcn(n_classes,model_input = ''):
    # load 32s base model
    base_model, stride = resnet50_fcn(n_classes)

    if model_input != '':
        base_model.load_weights(model_input)

    # add 16s classifier
    x = base_model.get_layer('activation_40').output
    x = Dropout(0.5)(x)
    x = Convolution2D(n_classes,1,1,name = 'pred_16',init='zero',border_mode = 'valid')(x)
    x = UpSampling2D(name='upsampling_16', size=(stride // 2, stride // 2))(x)
    x = Convolution2D(n_classes,5,5,name = 'pred_up_16',init='zero',border_mode = 'same')(x)

    # merge classifiers
    x = merge([x, base_model.get_layer('pred_32s').output],mode = 'sum')
    x = Softmax4D(name='pred_16s',axis=-1)(x)

    model = Model(input=base_model.input,output=x)

    # create bilinear interpolation
    w = model.get_layer('pred_up_16').get_weights()
    model.get_layer('pred_up_16').set_weights([bilinear_interpolation(w), w[1]])

    # fine-tune 
    train_layers = ['pred_32',
                    'pred_32s',
                    'pred_16',
                    'pred_up_16',

                    'bn5c_branch2c', 
                    'res5c_branch2c',
                    'bn5c_branch2b', 
                    'res5c_branch2b',
                    'bn5c_branch2a', 
                    'res5c_branch2a',

                    'bn5b_branch2c', 
                    'res5b_branch2c',
                    'bn5b_branch2b', 
                    'res5b_branch2b',
                    'bn5b_branch2a', 
                    'res5b_branch2a',

                    'bn5a_branch2c', 
                    'res5a_branch2c',
                    'bn5a_branch2b', 
                    'res5a_branch2b',
                    'bn5a_branch2a', 
                    'res5a_branch2a']

    for l in model.layers:
        if l.name in train_layers:
            l.trainable = True
        else :
            l.trainable = False

    return model, stride
Project: Keras-FCN | Author: theduynguyen
def resnet50_8s_fcn(n_classes,model_input = ''):
    # load 16s base model
    base_model, stride = resnet50_16s_fcn(n_classes)

    if model_input != '':
        base_model.load_weights(model_input)

    # add 16s classifier
    x = base_model.get_layer('activation_22').output
    x = Dropout(0.5)(x)
    x = Convolution2D(n_classes,1,1,name = 'pred_8',init='zero',border_mode = 'valid')(x)
    x = UpSampling2D(name='upsampling_8', size=(stride // 4, stride // 4))(x)
    x = Convolution2D(n_classes,5,5,name = 'pred_up_8',init='zero',border_mode = 'same')(x)

    # merge classifiers
    x = merge([x, base_model.get_layer('pred').output],mode = 'sum')
    x = Softmax4D(name='pred_8s',axis=-1)(x)

    model = Model(input=base_model.input,output=x)

    # create bilinear interpolation
    w = model.get_layer('pred_up_8').get_weights()
    model.get_layer('pred_up_8').set_weights([bilinear_interpolation(w), w[1]])

    # fine-tune 
    train_layers = ['pred_32',
                    'pred_32s',
                    'pred_16',
                    'pred_up_16',
                    'pred_8',
                    'pred_up_8',

                    'bn5c_branch2c', 
                    'res5c_branch2c',
                    'bn5c_branch2b', 
                    'res5c_branch2b',
                    'bn5c_branch2a', 
                    'res5c_branch2a',

                    'bn5b_branch2c', 
                    'res5b_branch2c',
                    'bn5b_branch2b', 
                    'res5b_branch2b',
                    'bn5b_branch2a', 
                    'res5b_branch2a',

                    'bn5a_branch2c', 
                    'res5a_branch2c',
                    'bn5a_branch2b', 
                    'res5a_branch2b',
                    'bn5a_branch2a', 
                    'res5a_branch2a']

    for l in model.layers:
        if l.name in train_layers:
            l.trainable = True
        else :
            l.trainable = False

    return model, stride
Project: Keras-GAN-Animeface-Character | Author: forcecore
def build_gen( shape ) :
    def deconv2d( x, filters, shape=(4, 4) ) :
        '''
        Conv2DTranspose gives me checkerboard artifacts...
        Select one of the 3.
        '''
        # Simple Conv2DTranspose
        # Not good, compared to upsample + conv2d below.
        x= Conv2DTranspose( filters, shape, padding='same',
            strides=(2, 2), kernel_initializer=Args.kernel_initializer )(x)

        # simple and works
        #x = UpSampling2D( (2, 2) )( x )
        #x = Conv2D( filters, shape, padding='same' )( x )

        # Bilinear2x... Not sure whether it is bug-free; not tested yet.
        # Tends to make the output blurry, though
        #x = bilinear2x( x, filters )
        #x = Conv2D( filters, shape, padding='same' )( x )

        x = BatchNormalization(momentum=Args.bn_momentum)( x )
        x = LeakyReLU(alpha=Args.alpha_G)( x )
        return x

    # https://github.com/tdrussell/IllustrationGAN  z predictor...?
    # might help. Not sure.

    noise = Input( shape=Args.noise_shape )
    x = noise
    # 1x1x256
    # noise is not useful for generating images.

    x= Conv2DTranspose( 512, (4, 4),
        kernel_initializer=Args.kernel_initializer )(x)
    x = BatchNormalization(momentum=Args.bn_momentum)( x )
    x = LeakyReLU(alpha=Args.alpha_G)( x )
    # 4x4
    x = deconv2d( x, 256 )
    # 8x8
    x = deconv2d( x, 128 )
    # 16x16
    x = deconv2d( x, 64 )
    # 32x32

    # Extra layer
    x = Conv2D( 64, (3, 3), padding='same',
        kernel_initializer=Args.kernel_initializer )( x )
    x = BatchNormalization(momentum=Args.bn_momentum)( x )
    x = LeakyReLU(alpha=Args.alpha_G)( x )
    # 32x32

    x= Conv2DTranspose( 3, (4, 4), padding='same', activation='tanh',
        strides=(2, 2), kernel_initializer=Args.kernel_initializer )(x)
    # 64x64

    return models.Model( inputs=noise, outputs=x )
Project: neural_markov | Author: LuxxxLucy
def build_model():
    # Build Generative model ...
    # nch = 200
    # g_input = Input(shape=[100])
    # H = Dense(nch*14*14, init='glorot_normal')(g_input)
    # H = BatchNormalization(mode=2)(H)
    # H = Activation('relu')(H)
    # H = Reshape( [ 14, 14,nch] )(H)
    # H = UpSampling2D(size=(2, 2))(H)
    # H = Convolution2D(int(nch/2), 3, 3, border_mode='same', init='glorot_uniform')(H)
    # H = BatchNormalization(mode=2)(H)
    # H = Activation('relu')(H)
    # H = Convolution2D(int(nch/4), 3, 3, border_mode='same', init='glorot_uniform')(H)
    # H = BatchNormalization(mode=2)(H)
    # H = Activation('relu')(H)
    # H = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(H)
    # H = Reshape( [ 1,28,28] )(H)
    # g_V = Activation('sigmoid')(H)
    # generator = Model(g_input,g_V)
    # generator.compile(loss='binary_crossentropy', optimizer=opt)
    # generator.summary()

    # now start building network model
    dropout_rate=0.25
    input_1 = Input(shape=(CONTEXT_WINDOW_SIZE*VECTOR_DIMENSION,),name="state_context_input")
    input_2 = Input(shape=(OBSERVATION_WINDOW_SIZE*VECTOR_DIMENSION,),name="observation_context_input")
    input_3 = Input(shape=(VECTOR_DIMENSION,),name="current_input")

    encoded_y = Dense(VECTOR_DIMENSION,activation='relu')(input_1)
    encoded_x_y = Dense(VECTOR_DIMENSION)(input_2)

    merged_vector = keras.layers.concatenate([encoded_x_y, encoded_y,input_3], axis=-1)
    merged_vector = Dropout(dropout_rate)(merged_vector)
    hidden = Dense(VECTOR_DIMENSION,activation='relu')(merged_vector)
    hidden = Dropout(dropout_rate)(hidden)
    prediction = Dense(1,activation='relu',name="predictions")(hidden)

    model = Model(inputs=[input_1,input_2,input_3],outputs=prediction)

    opt = Adam(lr=1e-4)
    model.compile(loss='binary_crossentropy',metrics=[metrics.mae,metrics.binary_accuracy],optimizer=opt)
    model.summary()
    return model