The following 3 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.SpatialDropout2D().
def build_shallow_weight(channels, width, height, output_size, nb_classes):
    """Build and compile a shallow 4-conv-block CNN with per-position softmax heads.

    Args:
        channels: number of input channels (channels-first layout, axis=1).
        width: input image width.
        height: input image height.
        output_size: number of softmax heads. When > 1, the heads are
            concatenated and reshaped to (output_size, nb_classes) and the
            model is compiled with sample_weight_mode='temporal'.
        nb_classes: number of classes per head.

    Returns:
        A compiled Keras Model (categorical cross-entropy, Adam optimizer).
    """
    # input
    inputs = Input(shape=(channels, height, width))

    # conv block 1
    conv1_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
                            W_regularizer=l2(0.01))(inputs)
    bn1 = BatchNormalization(mode=0, axis=1)(conv1_1)
    pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(bn1)
    gn1 = GaussianNoise(0.5)(pool1)
    drop1 = SpatialDropout2D(0.5)(gn1)

    # conv block 2
    # BUGFIX: the original fed gn1 here, leaving drop1 (and later drop3,
    # drop4, drop6) computed but unused, silently disabling those dropout
    # layers. Block 3 already consumed drop2, so the consistent (intended)
    # wiring is dropout -> next stage throughout.
    conv2_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
                            W_regularizer=l2(0.01))(drop1)
    bn2 = BatchNormalization(mode=0, axis=1)(conv2_1)
    pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(bn2)
    gn2 = GaussianNoise(0.5)(pool2)
    drop2 = SpatialDropout2D(0.5)(gn2)

    # conv block 3
    conv3_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
                            W_regularizer=l2(0.01))(drop2)
    bn3 = BatchNormalization(mode=0, axis=1)(conv3_1)
    pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(bn3)
    gn3 = GaussianNoise(0.5)(pool3)
    drop3 = SpatialDropout2D(0.5)(gn3)

    # conv block 4 (was fed gn3; now consumes drop3)
    conv4_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
                            W_regularizer=l2(0.01))(drop3)
    bn4 = BatchNormalization(mode=0, axis=1)(conv4_1)
    pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(bn4)
    gn4 = GaussianNoise(0.5)(pool4)
    drop4 = SpatialDropout2D(0.5)(gn4)

    # flatten (was fed gn4; now consumes drop4)
    flat = Flatten()(drop4)

    # dense block
    dense1 = Dense(8, activation='relu', W_regularizer=l2(0.1))(flat)
    bn6 = BatchNormalization(mode=0, axis=1)(dense1)
    drop6 = Dropout(0.5)(bn6)

    # output heads (were fed bn6; now consume drop6)
    out = []
    for i in range(output_size):
        out.append(Dense(nb_classes, activation='softmax')(drop6))

    if output_size > 1:
        # Stack the heads into a (output_size, nb_classes) "temporal" output
        # so per-position sample weights can be applied.
        merged_out = merge(out, mode='concat')
        shaped_out = Reshape((output_size, nb_classes))(merged_out)
        sample_weight_mode = 'temporal'
    else:
        shaped_out = out
        sample_weight_mode = None

    model = Model(input=[inputs], output=shaped_out)
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[categorical_accuracy_per_sequence],
                  sample_weight_mode=sample_weight_mode)
    return model
def nvidia(img):
    """Steering-angle regression network after the Nvidia end-to-end paper.

    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf

    `img` is a (height, width) pair describing the incoming frame size;
    frames are assumed RGB (3 channels). Returns an uncompiled Sequential
    model whose single output is the predicted steering angle.
    """
    input_shape = (img[0], img[1], 3)

    def resize_frames(frames):
        # Import inside the Lambda body so the function is self-contained
        # when the model is serialized/deserialized.
        import tensorflow as tf
        return tf.image.resize_images(frames, (66, 200))

    model = Sequential()
    # Preprocessing: resize to the paper's 66x200, then center to [-0.5, 0.5].
    model.add(Lambda(resize_frames, input_shape=input_shape))
    model.add(Lambda(lambda x: x/255.-0.5))

    # Convolutional backbone: (filters, kernel, border_mode, subsample),
    # each followed by spatial dropout. subsample=(1, 1) is the Keras 1
    # default, so listing it explicitly does not change the 3x3 layers.
    conv_specs = (
        (24, 5, "same", (2, 2)),
        (36, 5, "same", (2, 2)),
        (48, 5, "valid", (2, 2)),
        (64, 3, "valid", (1, 1)),
        (64, 3, "valid", (1, 1)),
    )
    for filters, kernel, border, stride in conv_specs:
        model.add(Convolution2D(filters, kernel, kernel, border_mode=border,
                                subsample=stride, activation="elu"))
        model.add(SpatialDropout2D(0.2))

    # Regression head.
    model.add(Flatten())
    model.add(Dropout(0.5))
    for units in (100, 50, 10):
        model.add(Dense(units, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    return model

# 0 = center
# 1 = left
# 2 = right
# 3 = steering angle
def unet_model1():
    """Build and compile a U-Net for 1x512x512 segmentation (channels-first).

    NOTE(review): depends on module-level names defined elsewhere in the
    file: `width` (base filter count), `dropout_rate`, `dice_coef_loss`
    and `dice_coef`.

    Returns:
        A compiled Keras Model producing a single-channel sigmoid mask.
    """
    def encoder_block(tensor, filters):
        # Two 3x3 relu convs; batch-norm only after the first conv.
        x = Convolution2D(filters, 3, 3, activation='relu', border_mode='same')(tensor)
        x = BatchNormalization(axis=1)(x)
        return Convolution2D(filters, 3, 3, activation='relu', border_mode='same')(x)

    def decoder_block(tensor, skip, filters):
        # Upsample, concatenate the encoder skip connection on the channel
        # axis, apply spatial dropout, then two 3x3 relu convs.
        x = merge([UpSampling2D(size=(2, 2))(tensor), skip],
                  mode='concat', concat_axis=1)
        x = SpatialDropout2D(dropout_rate)(x)
        x = Convolution2D(filters, 3, 3, activation='relu', border_mode='same')(x)
        return Convolution2D(filters, 3, 3, activation='relu', border_mode='same')(x)

    inputs = Input((1, 512, 512))

    # Contracting path: filter count doubles at each downsampling level.
    enc1 = encoder_block(inputs, width)
    enc2 = encoder_block(MaxPooling2D(pool_size=(2, 2))(enc1), width*2)
    enc3 = encoder_block(MaxPooling2D(pool_size=(2, 2))(enc2), width*4)
    enc4 = encoder_block(MaxPooling2D(pool_size=(2, 2))(enc3), width*8)
    bottleneck = encoder_block(MaxPooling2D(pool_size=(2, 2))(enc4), width*16)

    # Expanding path: mirror the encoder, consuming skip connections.
    dec6 = decoder_block(bottleneck, enc4, width*8)
    dec7 = decoder_block(dec6, enc3, width*4)
    dec8 = decoder_block(dec7, enc2, width*2)
    dec9 = decoder_block(dec8, enc1, width)

    # 1x1 conv collapses channels to a single sigmoid probability map.
    mask = Convolution2D(1, 1, 1, activation='sigmoid')(dec9)

    model = Model(input=inputs, output=mask)
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model