Python keras.layers.convolutional module: Cropping2D() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use keras.layers.convolutional.Cropping2D().

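Before the project examples, here is a minimal, self-contained sketch of the layer itself (written against the Keras 2 functional API; the input size and crop amounts are chosen only for illustration). Cropping2D trims rows and columns from the borders of a 4D image tensor:

import numpy as np
from keras.layers import Input, Cropping2D
from keras.models import Model

# ((top, bottom), (left, right)): output spatial size is (32-2-2, 32-3-3) = (28, 26)
inputs = Input(shape=(32, 32, 3))
cropped = Cropping2D(cropping=((2, 2), (3, 3)))(inputs)
model = Model(inputs=inputs, outputs=cropped)

x = np.random.rand(1, 32, 32, 3)
print(model.predict(x).shape)  # (1, 28, 26, 3)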
Project: lsun_2017 | Author: ternaus
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model
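The function above depends on module-level names (img_rows, img_cols, num_channels, num_mask_channels) and a ConvBN2 helper that are defined elsewhere in the project and not shown in this excerpt. Below is a minimal sketch of plausible stand-ins, assumed purely for illustration (the actual ternaus/lsun_2017 definitions may differ); the helper mirrors the Conv2D + BatchNormalization + SELU pattern used in the final block of the function:

from keras.layers import (Input, Conv2D, MaxPooling2D, UpSampling2D,
                          BatchNormalization, Activation, Cropping2D,
                          concatenate)
from keras.models import Model

# Hypothetical values; the real project sets these elsewhere.
img_rows, img_cols = 128, 128
num_channels = 3
num_mask_channels = 1

def ConvBN2(x, num_filters):
    # Assumed helper: two Conv2D -> BatchNorm -> SELU blocks.
    for _ in range(2):
        x = Conv2D(num_filters, (3, 3), padding='same',
                   kernel_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = Activation('selu')(x)
    return x

model = get_unet0()
model.summary()  # final spatial size is (img_rows - 32, img_cols - 32) because of the 16-pixel crop on each side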
Project: keras | Author: GeekLiB
def test_cropping_2d():
    nb_samples = 2
    stack_size = 2
    input_len_dim1 = 8
    input_len_dim2 = 8
    cropping = ((2, 2), (3, 3))
    dim_ordering = K.image_dim_ordering()

    if dim_ordering == 'th':
        input = np.random.rand(nb_samples, stack_size,
                               input_len_dim1, input_len_dim2)
    else:
        input = np.random.rand(nb_samples,
                               input_len_dim1, input_len_dim2,
                               stack_size)
    # basic test
    layer_test(convolutional.Cropping2D,
               kwargs={'cropping': cropping,
                       'dim_ordering': dim_ordering},
               input_shape=input.shape)
    # correctness test
    layer = convolutional.Cropping2D(cropping=cropping,
                                     dim_ordering=dim_ordering)
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    # compare with numpy
    if dim_ordering == 'th':
        expected_out = input[:,
                             :,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1]]
    else:
        expected_out = input[:,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1],
                             :]
    assert_allclose(np_output, expected_out)
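With cropping=((2, 2), (3, 3)) on an 8x8 input, the test above keeps rows 2-5 and columns 3-4, i.e. a 4x2 spatial map. A numpy-only restatement of that slice, just to make the shape arithmetic explicit:

import numpy as np

cropping = ((2, 2), (3, 3))
x = np.random.rand(2, 8, 8, 2)  # 'tf' ordering: (samples, rows, cols, channels)
expected = x[:, cropping[0][0]:-cropping[0][1],
             cropping[1][0]:-cropping[1][1], :]
print(expected.shape)  # (2, 4, 2, 2): 8 - 2 - 2 rows, 8 - 3 - 3 cols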
Project: keras-customized | Author: ambrite
def test_cropping_2d():
    nb_samples = 2
    stack_size = 2
    input_len_dim1 = 8
    input_len_dim2 = 8
    cropping = ((2, 2), (3, 3))
    dim_ordering = K.image_dim_ordering()

    if dim_ordering == 'th':
        input = np.random.rand(nb_samples, stack_size,
                               input_len_dim1, input_len_dim2)
    else:
        input = np.random.rand(nb_samples,
                               input_len_dim1, input_len_dim2,
                               stack_size)
    # basic test
    layer_test(convolutional.Cropping2D,
               kwargs={'cropping': cropping,
                       'dim_ordering': dim_ordering},
               input_shape=input.shape)
    # correctness test
    layer = convolutional.Cropping2D(cropping=cropping,
                                     dim_ordering=dim_ordering)
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    # compare with numpy
    if dim_ordering == 'th':
        expected_out = input[:,
                             :,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1]]
    else:
        expected_out = input[:,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1],
                             :]
    assert_allclose(np_output, expected_out)
Project: keras | Author: NVIDIA
def test_cropping_2d():
    nb_samples = 2
    stack_size = 2
    input_len_dim1 = 8
    input_len_dim2 = 8
    cropping = ((2, 2), (3, 3))
    dim_ordering = K.image_dim_ordering()

    if dim_ordering == 'th':
        input = np.random.rand(nb_samples, stack_size,
                               input_len_dim1, input_len_dim2)
    else:
        input = np.random.rand(nb_samples,
                               input_len_dim1, input_len_dim2,
                               stack_size)
    # basic test
    layer_test(convolutional.Cropping2D,
               kwargs={'cropping': cropping,
                       'dim_ordering': dim_ordering},
               input_shape=input.shape)
    # correctness test
    layer = convolutional.Cropping2D(cropping=cropping,
                                     dim_ordering=dim_ordering)
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    # compare with numpy
    if dim_ordering == 'th':
        expected_out = input[:,
                             :,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1]]
    else:
        expected_out = input[:,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1],
                             :]
    assert_allclose(np_output, expected_out)
    # another correctness test (no cropping)
    cropping = ((0, 0), (0, 0))
    layer = convolutional.Cropping2D(cropping=cropping,
                                     dim_ordering=dim_ordering)
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    # compare with input
    assert_allclose(np_output, input)
Project: CarND-Behavioral-Cloning | Author: dventimi
def CarND(input_shape, crop_shape):
    model = Sequential()

    # Crop
    # model.add(Cropping2D(((80,20),(1,1)), input_shape=input_shape, name="Crop"))
    model.add(Cropping2D(crop_shape, input_shape=input_shape, name="Crop"))

    # Resize
    model.add(AveragePooling2D(pool_size=(1,4), name="Resize", trainable=False))

    # Normalize input.
    model.add(BatchNormalization(axis=1, name="Normalize"))

    # Reduce dimensions through trainable convolution, activation, and
    # pooling layers.
    model.add(Convolution2D(24, 3, 3, subsample=(2,2), name="Convolution2D1", activation="relu"))
    model.add(MaxPooling2D(name="MaxPool1"))
    model.add(Convolution2D(36, 3, 3, subsample=(1,1), name="Convolution2D2", activation="relu"))
    model.add(MaxPooling2D(name="MaxPool2"))
    model.add(Convolution2D(48, 3, 3, subsample=(1,1), name="Convolution2D3", activation="relu"))
    model.add(MaxPooling2D(name="MaxPool3"))

    # Dropout for regularization
    model.add(Dropout(0.1, name="Dropout"))

    # Flatten input in a non-trainable layer before feeding into
    # fully-connected layers.
    model.add(Flatten(name="Flatten"))

    # Model steering through trainable fully-connected layers with ReLU
    # activations (the Dropout layer above provides regularization).
    model.add(Dense(100, activation="relu", name="FC2"))
    model.add(Dense(50, activation="relu", name="FC3"))
    model.add(Dense(10, activation="relu", name="FC4"))

    # Generate output (steering angles) with a single non-trainable
    # node.
    model.add(Dense(1, name="Readout", trainable=False))
    return model

# #+RESULTS:

#       Here is a summary of the actual model, as generated directly by
#       =model.summary= in Keras.