我们从 Python 开源项目中,提取了以下 50 个代码示例,用于说明如何使用 keras.layers.MaxPooling2D()。
def tsinalis(input_shape, n_classes):
    """Build the Tsinalis sleep-staging CNN.

    Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1).
    The shape prints are intentional debugging output.
    """
    model = Sequential(name='Tsinalis')
    # 1-D temporal convolution over the raw signal.
    model.add(Conv1D(kernel_size=200, filters=20,
                     input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    # Stack the 20 feature maps as rows of a single-channel 2-D "image".
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    """Train a simple CNN as teacher model.

    Relies on module-level `input_shape` and `nb_class`.
    Returns (trained model, fit history).
    """
    model = Sequential()
    for layer in (
            Conv2D(64, 3, 3, input_shape=input_shape, border_mode='same', name='conv1'),
            MaxPooling2D(name='pool1'),
            Conv2D(64, 3, 3, border_mode='same', name='conv2'),
            MaxPooling2D(name='pool2'),
            Flatten(name='flatten'),
            Dense(64, activation='relu', name='fc1'),
            Dense(nb_class, activation='softmax', name='fc2')):
        model.add(layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    x, y = train_data
    history = model.fit(x, y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
def discriminator_model():
    """Build the discriminator network.

    NOTE(review): the original docstring claimed "(b, 1) logits", but the
    model ends with a tanh immediately followed by a sigmoid activation —
    presumably only the sigmoid was intended; confirm before changing.
    """
    model = Sequential()
    model.add(Convolution2D(64, 4, 4, border_mode='same',
                            input_shape=(IN_CH*2, img_cols, img_rows)))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    def _conv_bn_tanh(filters):
        # shared (conv -> batch norm -> tanh) stack
        model.add(Convolution2D(filters, 4, 4, border_mode='same'))
        model.add(BatchNormalization(mode=2))
        model.add(Activation('tanh'))

    _conv_bn_tanh(128)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    _conv_bn_tanh(512)
    _conv_bn_tanh(1)
    model.add(Activation('sigmoid'))
    return model
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    """Create a model of 2 conv layers followed by configurable dense layers.

    dense_layer_sizes: list of layer sizes, one entry per dense layer
    filters: number of convolutional filters per conv layer
    kernel_size: convolutional kernel size
    pool_size: size of the max-pooling window
    """
    model = Sequential()
    model.add(Conv2D(filters, kernel_size, padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())
    for width in dense_layer_sizes:
        model.add(Dense(width))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
def make_teacher_model(train_data, validation_data, epochs=3):
    """Train a simple CNN as teacher model (Keras 2 API variant).

    Relies on module-level `input_shape` and `num_class`.
    Returns (trained model, fit history).
    """
    model = Sequential()
    for layer in (
            Conv2D(64, 3, input_shape=input_shape, padding='same', name='conv1'),
            MaxPooling2D(2, name='pool1'),
            Conv2D(64, 3, padding='same', name='conv2'),
            MaxPooling2D(2, name='pool2'),
            Flatten(name='flatten'),
            Dense(64, activation='relu', name='fc1'),
            Dense(num_class, activation='softmax', name='fc2')):
        model.add(layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    x, y = train_data
    history = model.fit(x, y, epochs=epochs,
                        validation_data=validation_data)
    return model, history
def get_model():
    """Build an all-conv classifier ending in global average pooling.

    Relies on module-level `image_size` and `n_classes`.
    """
    input_shape = (image_size, image_size, 3)
    model = Sequential()
    # Four conv/relu/pool stages; only the first conv gets input_shape.
    for i, filters in enumerate((32, 64, 128, n_classes)):
        extra = {'input_shape': input_shape} if i == 0 else {}
        model.add(Conv2D(filters, kernel_size=(3, 3), padding='same', **extra))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(GlobalAveragePooling2D())
    print(model.summary())
    model.compile(loss=keras.losses.mean_squared_error,
                  optimizer=keras.optimizers.Adadelta())
    return model
def load_model(input_shape, num_classes):
    """Build (but do not compile) a small VGG-style CNN classifier.

    :param input_shape: shape of one input sample
    :param num_classes: number of softmax outputs
    Fixed: the third conv used the deprecated Keras-1 `border_mode=` while the
    rest of the function uses the Keras-2 `padding=` keyword — unified to
    `padding=` for a consistent, warning-free API (same behavior).
    """
    model = Sequential()
    model.add(Convolution2D(6, kernel_size=(3, 3), activation='relu',
                            input_shape=input_shape, padding="same"))
    model.add(Convolution2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Convolution2D(64, kernel_size=(3, 3), padding='same',
                            activation='relu'))
    model.add(Convolution2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    return model
def _conv_block(layer, num_conv_layers, num_filters):
    """Append a VGG-style conv block (3x3 convs then 2x2 max-pool) to `layer`.

    :param layer: Keras tensor representing the net built so far
    :param num_conv_layers: number of conv layers requested for this block
    :param num_filters: filters per conv layer
    :return: Keras tensor after the pooling layer

    NOTE(review): the loop runs num_conv_layers - 1 times, i.e. one conv
    fewer than the parameter name suggests — confirm the off-by-one is
    intentional before changing it.
    """
    for _ in range(num_conv_layers - 1):
        layer = Conv2D(filters=num_filters,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(layer)
    return MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(layer)
def cnn(height, width):
    """Single-branch CNN regressor over a (height, width, 1) question input."""
    question_input = Input(shape=(height, width, 1), name='question_input')
    conv_q = Conv2D(512, (2, 320), activation='sigmoid', padding='valid',
                    kernel_regularizer=regularizers.l2(0.01),
                    kernel_initializer=initializers.random_normal(
                        mean=0.0, stddev=0.02))(question_input)
    pooled_q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv_q)
    flat_q = Flatten()(pooled_q)
    dropped_q = Dropout(0.25)(flat_q)
    hidden = Dense(32, activation='relu',
                   kernel_regularizer=regularizers.l2(0.01),
                   kernel_initializer=initializers.random_normal(
                       mean=0.0, stddev=0.02))(dropped_q)
    hidden = Dropout(0.25)(hidden)
    predictions = Dense(1, activation='relu')(hidden)
    model = Model(inputs=[question_input], outputs=predictions)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0))
    return model
def double_conv_layer(x, size, dropout, batch_norm):
    """Two 3x3 convolutions (each optionally batch-normed) with ReLU,
    followed by optional dropout.

    :param x: input Keras tensor
    :param size: number of filters for both convolutions
    :param dropout: dropout rate; <= 0 disables dropout
    :param batch_norm: whether to insert BatchNormalization after each conv
    :return: output Keras tensor
    Fixed: removed unused function-scope imports (Model, Input, merge,
    MaxPooling2D, UpSampling2D) and replaced `== True` with plain truthiness.
    """
    from keras.layers import Convolution2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation

    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
def create_network():
    """Build a small convolutional autoencoder wrapped in a KerasNetwork."""
    input_img = Input(shape=INPUT_SHAPE)

    # --- encoder ---
    h = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    h = MaxPooling2D((2, 2), padding='same')(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    h = MaxPooling2D((2, 2), padding='same')(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    encoded = MaxPooling2D((2, 2), padding='same')(h)
    # at this point the representation is (4, 4, 8), i.e. 128-dimensional

    # --- decoder ---
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    h = UpSampling2D((2, 2))(h)
    h = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
    h = UpSampling2D((2, 2))(h)
    # no padding here: 'valid' trims the feature map back toward input size
    h = Conv2D(16, (3, 3), activation='relu')(h)
    h = UpSampling2D((2, 2))(h)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(h)

    model = Model(input_img, decoded)
    model.compile(optimizer='adadelta', loss='binary_crossentropy')
    return KerasNetwork(model, 'weights_conv_autoencoder.hd5')
def create_network():
    """Build a small MNIST-style CNN classifier wrapped in a KerasNetwork."""
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D

    model = Sequential()
    for layer in (
            Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE),
            Conv2D(64, (3, 3), activation='relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Flatten(),
            Dense(128, activation='relu'),
            Dropout(0.5),
            Dense(NUM_CLASSES, activation='softmax')):
        model.add(layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return KerasNetwork(model, 'cnn_weights.hd5')
def net_input(env): """ Create input part of the network with optional prescaling. :return: input_tensor, output_tensor """ in_t = Input(shape=env.observation_space.shape, name='input') out_t = Conv2D(32, 5, 5, activation='relu', border_mode='same')(in_t) out_t = MaxPooling2D((2, 2))(out_t) out_t = Conv2D(32, 5, 5, activation='relu', border_mode='same')(out_t) out_t = MaxPooling2D((2, 2))(out_t) out_t = Conv2D(64, 4, 4, activation='relu', border_mode='same')(out_t) out_t = MaxPooling2D((2, 2))(out_t) out_t = Conv2D(64, 3, 3, activation='relu', border_mode='same')(out_t) out_t = Flatten(name='flat')(out_t) out_t = Dense(512, name='l1', activation='relu')(out_t) return in_t, out_t
def build_model(self, dataset, nb_classes):
    """Assemble a CIFAR-style CNN on self.model and print its summary.

    :param dataset: object exposing `x_train` for the input shape
    :param nb_classes: number of softmax outputs
    """
    self.model = Sequential()
    for layer in (
            Convolution2D(32, (3, 3), padding='same',
                          input_shape=dataset.x_train.shape[1:]),
            Activation('relu'),
            Convolution2D(32, (3, 3)),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Convolution2D(64, (3, 3), padding='same'),
            Activation('relu'),
            Convolution2D(64, (3, 3)),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Flatten(),
            Dense(512),
            Activation('relu'),
            Dropout(0.5),
            Dense(nb_classes),
            Activation('softmax')):
        self.model.add(layer)
    self.model.summary()
def get_model(img_channels, img_width, img_height, dropout=0.5):
    """Three conv/pool stages plus a small dense head for binary output."""
    model = Sequential()
    model.add(Convolution2D(32, 3, 3,
                            input_shape=(img_channels, img_width, img_height)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # two more identical 32-filter conv/pool stages
    for _ in range(2):
        model.add(Convolution2D(32, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
def get_model(shape, dropout=0.5, path=None):
    """Two 512-filter convolutions plus a dense head (no final activation).

    NOTE(review): `path` is accepted but never used here — presumably kept
    for interface compatibility with callers; confirm.
    """
    print('building neural network')
    model = Sequential()
    model.add(Convolution2D(512, 3, 3, border_mode='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(dropout))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # linear output: a single unit with no activation
    model.add(Dense(1))
    return model
def test_tiny_mcrnn_music_tagger(self):
    """Exercise a tiny conv+GRU music-tagger model with random weights."""
    x_in = Input(shape=(4, 6, 1))
    h = ZeroPadding2D(padding=(0, 1))(x_in)
    h = BatchNormalization(axis=2, name='bn_0_freq')(h)
    # Conv block 1
    h = Conv2D(2, (3, 3), padding='same', name='conv1')(h)
    h = BatchNormalization(axis=3, name='bn1')(h)
    h = Activation('elu')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
    # Conv block 2
    h = Conv2D(4, (3, 3), padding='same', name='conv2')(h)
    h = BatchNormalization(axis=3, name='bn2')(h)
    h = Activation('elu')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
    # Should be (1, 1, 2, 4) here; flatten spatial dims into a sequence.
    h = Reshape((2, 4))(h)
    h = GRU(32, return_sequences=True, name='gru1')(h)
    h = GRU(32, return_sequences=False, name='gru2')(h)
    # Create the model and randomize its weights before conversion testing.
    model = Model(x_in, h)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)
def mnist_cnn(args, input_image):
    """Build and compile a small MNIST CNN on top of `input_image`.

    :param args: namespace providing channels/height/width/num_labels
    :param input_image: Keras Input tensor to attach the network to
    """
    shape = (args.channels, args.height, args.width)
    h = Convolution2D(32, 5, 5, activation='relu', border_mode='valid',
                      input_shape=shape)(input_image)
    h = MaxPooling2D((2, 2))(h)
    h = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(h)
    h = Dropout(0.2)(h)
    h = MaxPooling2D((2, 2))(h)
    h = Flatten()(h)
    h = Dense(128, activation='relu')(h)
    h = Dense(64, activation='relu')(h)
    predictions = Dense(args.num_labels, activation='softmax')(h)
    # Model spanning the Input layer through the dense classifier.
    model = Model(input=input_image, output=predictions)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    return model
def xtest_net(self):
    """Probe how far repeated 3x3 pooling shrinks a 28x28 input.

    Prefixed with x so the test runner skips it; prints are intentional.
    """
    input_shape = (28, 28, 1)
    model = Sequential()
    model.add(MaxPooling2D(pool_size=(3, 3), input_shape=input_shape))
    print("----->", model.layers[-1].output_shape)
    for _ in range(2):
        model.add(MaxPooling2D(pool_size=(3, 3)))
        print("----->", model.layers[-1].output_shape)
    # Add one last 2x2 pool only if there is still room to shrink.
    last_shape = model.layers[-1].output_shape
    if last_shape[1] >= 2 and last_shape[2] >= 2:
        model.add(MaxPooling2D(pool_size=(2, 2)))
        print("----->", model.layers[-1].output_shape)
    model.add(Flatten())
    model.summary()
def block_reduction_a(input):
    """Inception-v4 reduction-A block: three parallel branches concatenated."""
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    # Branch 0: strided 3x3 conv
    branch_0 = conv2d_bn(input, 384, 3, 3, subsample=(2, 2),
                         border_mode='valid')
    # Branch 1: 1x1 -> 3x3 -> strided 3x3
    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, subsample=(2, 2),
                         border_mode='valid')
    # Branch 2: strided max-pool
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2),
                            border_mode='valid')(input)

    return merge([branch_0, branch_1, branch_2],
                 mode='concat', concat_axis=channel_axis)
def block_reduction_b(input):
    """Inception-v4 reduction-B block: three parallel branches concatenated."""
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    # Branch 0: 1x1 -> strided 3x3
    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, subsample=(2, 2),
                         border_mode='valid')
    # Branch 1: 1x1 -> 1x7 -> 7x1 -> strided 3x3
    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, subsample=(2, 2),
                         border_mode='valid')
    # Branch 2: strided max-pool
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2),
                            border_mode='valid')(input)

    return merge([branch_0, branch_1, branch_2],
                 mode='concat', concat_axis=channel_axis)
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    """Train a simple CNN as teacher model.

    Compiles via the module-level `make_model` helper; relies on
    module-level `input_shape` and `nb_class`.
    Returns (trained model, fit history).
    """
    model = Sequential()
    for layer in (
            Conv2D(64, 3, 3, input_shape=input_shape, border_mode='same', name='conv1'),
            MaxPooling2D(name='pool1'),
            Conv2D(64, 3, 3, border_mode='same', name='conv2'),
            MaxPooling2D(name='pool2'),
            Flatten(name='flatten'),
            Dense(64, activation='relu', name='fc1'),
            Dense(nb_class, activation='softmax', name='fc2')):
        model.add(layer)
    model = make_model(model,
                       loss='categorical_crossentropy',
                       optimizer=SGD(lr=0.01, momentum=0.9),
                       metrics=['accuracy'])
    x, y = train_data
    history = model.fit(x, y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
def m6_1():
    """Append the m6_1 conv/dense stack to the module-level `model`.

    NOTE(review): mutates globals — relies on module-level `model`,
    `my_init`, `input_shape` and `nb_classes` defined elsewhere in the file.
    """
    for layer in (
            Convolution2D(32, 3, 3, init=my_init, input_shape=input_shape),
            Activation('relu'),
            Convolution2D(32, 3, 3, init=my_init),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.5),
            Convolution2D(64, 3, 3, init=my_init),
            Activation('relu'),
            Convolution2D(64, 3, 3, init=my_init),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.5),
            Flatten(),
            Dense(256, init=my_init),
            Activation('relu'),
            Dropout(0.5),
            Dense(nb_classes),
            Activation('softmax')):
        model.add(layer)
def getNN(n): """ ????????? ???????????VGG??? """ nn=Sequential() nn.add(Convolution2D(32,(3,3),input_shape=(30,30,1),activation='relu')) nn.add(MaxPooling2D(pool_size=(2, 2))) nn.add(Convolution2D(16,(3,3),activation='relu')) nn.add(Dropout(0.2)) nn.add(Convolution2D(8,(3,3),activation='relu')) nn.add(MaxPooling2D(pool_size=(2, 2))) nn.add(Convolution2D(8,(3,3),activation='relu')) nn.add(Dense(50,activation='tanh')) nn.add(Dropout(0.2)) nn.add(Dense(50,activation='tanh')) nn.add(Flatten()) nn.add(Dense(n,activation='sigmoid')) nn.compile(optimizer='rmsprop',loss='categorical_crossentropy') return nn
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    '''Adds an initial conv block, with batch norm and relu, for the DPN.

    Args:
        input: input tensor
        initial_conv_filters: number of filters for the initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    out = Conv2D(initial_conv_filters, (7, 7),
                 padding='same', use_bias=False, strides=(2, 2),
                 kernel_initializer='he_normal',
                 kernel_regularizer=l2(weight_decay))(input)
    out = BatchNormalization(axis=channel_axis)(out)
    out = Activation('relu')(out)
    out = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(out)
    return out
def VGG16ConvBlockFive(pretrained_weights):
    """Build block 5 of VGG16 as a standalone Model over (14, 14, 512) input.

    :param pretrained_weights: if truthy, load finetuned weights by name
    Fixed: Python-2 `print` statements replaced with the `print(...)` call
    form, which is valid under both Python 2 and 3 and matches the rest of
    the file.
    """
    input_vector = Input(shape=(14, 14, 512))
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block5_conv1')(input_vector)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    model = Model(input_vector, x)
    if pretrained_weights:
        print("finetuned conv_block_5 weights loading")
        model.load_weights('FCC-init-random-weights-on-finetuned-data.h5',
                           by_name=True)
    return model
def __init__(self, **kwargs):
    """Build and compile the LeNet-style classifier for this model wrapper."""
    super(KerasLenetModel, self).__init__(**kwargs)
    rows, cols = self.norm_shape[0], self.norm_shape[1]
    self.model = Sequential()
    self.model.add(Convolution2D(32, (3, 3), activation='relu',
                                 input_shape=(rows, cols, 1)))
    self.model.add(Convolution2D(32, (3, 3), activation='relu'))
    self.model.add(MaxPooling2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    self.model.add(Flatten())
    self.model.add(Dense(128, activation='relu'))
    self.model.add(Dropout(0.5))
    self.model.add(Dense(self.max_n_label, activation='softmax'))
    # Compile for multi-class classification.
    self.model.compile(loss='categorical_crossentropy',
                       optimizer='adam',
                       metrics=['accuracy'])
def build_model(self, dataset, nb_classes=2):
    """Assemble a CIFAR-style CNN (Keras 1 API) and print its summary.

    :param dataset: object exposing `X_train` for the input shape
    :param nb_classes: number of softmax outputs (default binary)
    """
    self.model = Sequential()
    for layer in (
            Convolution2D(32, 3, 3, border_mode='same',
                          input_shape=dataset.X_train.shape[1:]),
            Activation('relu'),
            Convolution2D(32, 3, 3),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Convolution2D(64, 3, 3, border_mode='same'),
            Activation('relu'),
            Convolution2D(64, 3, 3),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Flatten(),  # multi -> one dimension
            Dense(512),
            Activation('relu'),
            Dropout(0.5),
            Dense(nb_classes),
            Activation('softmax')):
        self.model.add(layer)
    self.model.summary()
def reduction_b(inputs):
    """17x17 -> 8x8 reduction block: pool + three conv branches, concatenated."""
    inputs_norm = BNA(inputs)

    # Branch 1: strided max-pool
    pool_branch = MaxPooling2D((3, 3), strides=(2, 2),
                               border_mode='same')(inputs_norm)
    # Branch 2: 1x1 -> strided 3x3
    branch_b = NConvolution2D(64, 1, 1, subsample=(1, 1),
                              border_mode='same')(inputs_norm)
    branch_b = Convolution2D(96, 3, 3, subsample=(2, 2),
                             border_mode='same')(branch_b)
    # Branch 3: 1x1 -> strided 3x3
    branch_c = NConvolution2D(64, 1, 1, subsample=(1, 1),
                              border_mode='same')(inputs_norm)
    branch_c = Convolution2D(72, 3, 3, subsample=(2, 2),
                             border_mode='same')(branch_c)
    # Branch 4: 1x1 -> 3x3 -> strided 3x3
    branch_d = NConvolution2D(64, 1, 1, subsample=(1, 1),
                              border_mode='same')(inputs_norm)
    branch_d = NConvolution2D(72, 3, 3, subsample=(1, 1),
                              border_mode='same')(branch_d)
    branch_d = Convolution2D(80, 3, 3, subsample=(2, 2),
                             border_mode='same')(branch_d)

    return merge([pool_branch, branch_b, branch_c, branch_d],
                 mode='concat', concat_axis=1)
def model_cnn(net_layers, input_shape):
    """Build a conv+dense binary classifier from a layer specification.

    net_layers['conv_layers'] entries:
        (filters, kernel_size, batch_norm?, dropout?, max_pool?)
    net_layers['dense_layers'] entries:
        (num_neurons, batch_norm?, dropout?)
    """
    inp = Input(shape=input_shape)
    h = inp
    for filters, kernel, use_bn, use_dropout, use_pool in net_layers['conv_layers']:
        h = Conv2D(filters=filters, kernel_size=kernel, activation='relu')(h)
        if use_pool:
            h = MaxPooling2D()(h)
        if use_bn:
            h = BatchNormalization()(h)
        if use_dropout:
            h = Dropout(0.2)(h)
    h = Flatten()(h)
    for neurons, use_bn, use_dropout in net_layers['dense_layers']:
        h = Dense(neurons)(h)
        h = Activation('relu')(h)
        if use_bn:
            h = BatchNormalization()(h)
        if use_dropout:
            h = Dropout(0.2)(h)
    h = Dense(1)(h)
    h = Activation('sigmoid')(h)
    return Model(inp, h)

# %%
# conv_layers -> [(filters, kernel_size, BatchNormaliztion, Dropout, MaxPooling)]
# dense_layers -> [(num_neurons, BatchNormaliztion, Dropout)]
def fcn_Resnet50(input_shape=None, weight_decay=0.0002, batch_momentum=0.9, batch_shape=None, classes=40):
    """FCN head on a ResNet-50 backbone, loading ImageNet no-top weights.

    NOTE(review): `batch_momentum`, `batch_shape` and `classes` are accepted
    but not referenced in this body (the class count is hard-coded to 40) —
    confirm before relying on them.
    """
    img_input = Input(shape=input_shape)
    bn_axis = 3

    # Stem: strided 7x7 conv, batch norm, relu, strided pool.
    x = Conv2D(64, kernel_size=(7, 7), subsample=(2, 2), border_mode='same',
               name='conv1', W_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Stage 2
    x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
    for blk in ('b', 'c'):
        x = identity_block(3, [64, 64, 256], stage=2, block=blk)(x)
    # Stage 3
    x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
    for blk in ('b', 'c', 'd'):
        x = identity_block(3, [128, 128, 512], stage=3, block=blk)(x)
    # Stage 4
    x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
    for blk in ('b', 'c', 'd', 'e', 'f'):
        x = identity_block(3, [256, 256, 1024], stage=4, block=blk)(x)
    # Stage 5
    x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
    for blk in ('b', 'c'):
        x = identity_block(3, [512, 512, 2048], stage=5, block=blk)(x)

    # Classifying layer: 1x1 conv, then 32x transposed-conv upsampling, crop.
    x = Conv2D(filters=40, kernel_size=(1, 1), strides=(1, 1),
               init='he_normal', activation='linear', border_mode='valid',
               W_regularizer=l2(weight_decay))(x)
    x = Conv2DTranspose(filters=40, kernel_initializer='he_normal',
                        kernel_size=(64, 64), strides=(32, 32),
                        padding='valid', use_bias=False, name='upscore2')(x)
    x = Cropping2D(cropping=((19, 36), (19, 29)), name='score')(x)

    model = Model(img_input, x)
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                            RES_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    model.load_weights(weights_path, by_name=True)
    return model
def build_neural_network(num_outputs, output_activation, path_to_pwms, window_size):
    """CNN whose first conv layer is frozen to known PWM filters.

    :return: (network, tfs) — the Keras model and the TF names from get_pwms
    """
    pwms, tfs = get_pwms(path_to_pwms)
    num_pwms, _, pwm_len = pwms.shape

    # First layer: one fixed (non-trainable) convolution filter per PWM,
    # with zero biases.
    network = Sequential([
        Convolution2D(num_pwms, 4, pwm_len,
                      input_shape=(1, 4, window_size),
                      weights=[pwms.reshape(num_pwms, 1, 4, pwm_len),
                               np.zeros((num_pwms,))],
                      trainable=False, activation='relu'),
    ])
    network.add(MaxPooling2D(pool_size=(1, 4)))
    # 200 trainable convolutional filters in the second layer.
    network.add(Convolution2D(200, 1, 6, activation='relu'))
    network.add(MaxPooling2D(pool_size=(1, 3)))
    # Dense head with 20% Gaussian dropout.
    network.add(Flatten())
    network.add(Dense(500, activation='relu'))
    network.add(GaussianDropout(0.2))
    network.add(Dense(num_outputs, activation=output_activation))
    return network, tfs
def vgg_variant(space):
    """Build a VGG-style variant from a hyperopt-like `space` dict.

    NOTE(review): input_shape is passed inside the first loop (so on every
    iteration); Keras only uses it on the model's first layer — kept as in
    the original.
    """
    model = Sequential()
    for outputs in space['conv0filters']:
        model.add(Convolution2D(outputs, 3, 3, border_mode='same',
                                input_shape=(1, 150, 130),
                                init='glorot_uniform', bias=True,
                                activation='relu'))
        model.add(Convolution2D(outputs, 3, 3, border_mode='same',
                                bias=True, activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    for outputs in space['conv1filters']:
        for _ in range(2):
            model.add(Convolution2D(outputs, 3, 3, border_mode='same',
                                    init='glorot_uniform', bias=True,
                                    activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    for outputs in space['conv2filters']:
        for _ in range(3):
            model.add(Convolution2D(outputs, 3, 3, border_mode='same',
                                    init='glorot_uniform', bias=True,
                                    activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    for _ in range(int(space['num_fc'])):
        model.add(Dense(int(space['fcoutput']), init='glorot_uniform',
                        bias=True, activation='relu'))
        model.add(Dropout(space['dropout']))
    model.add(Dense(1, init='glorot_uniform', bias=True))
    return model
def baseline_model():
    """Create and compile a CNN regressor for 68 (x, y) facial landmarks.

    Fixed: dropped the redundant `input_shape=` kwargs from the non-first
    conv layers — Keras Sequential only consumes input_shape on the first
    layer, so the extra kwargs were dead weight (behavior unchanged).
    """
    input_shape = (1, 50, 50)
    model = Sequential()
    model.add(Conv2D(16, (3, 3), activation='sigmoid', strides=(1, 1),
                     data_format='channels_first', padding='same',
                     input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    model.add(Conv2D(48, kernel_size=(3, 3), activation='sigmoid',
                     strides=(1, 1), data_format="channels_first",
                     padding="same"))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='sigmoid',
                     strides=(1, 1), data_format="channels_first",
                     padding="same"))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='sigmoid',
                     strides=(1, 1), data_format="channels_first",
                     padding="same"))
    model.add(Flatten())
    model.add(Dense(64, activation='sigmoid'))
    # 68 landmarks x 2 coordinates, tanh-bounded output.
    model.add(Dense(68*2, activation='tanh'))
    # Compile with plain SGD on mean squared error.
    sgd = SGD(lr=1e-4, momentum=0.9, decay=1e-6, nesterov=False)
    model.compile(loss='mean_squared_error', optimizer=sgd)
    return model
def nn_base(input_tensor=None, trainable=False):
    """Convolutional base network returning the block-3 feature tensor.

    NOTE(review): `trainable` is accepted but not used in this body —
    presumably consumed by callers or a later refactor; confirm.
    """
    # Determine the proper input shape for the current backend ordering.
    if K.image_dim_ordering() == 'th':
        input_shape = (3, None, None)
    else:
        input_shape = (None, None, 3)

    # Reuse a provided tensor when possible, otherwise create an Input.
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    elif not K.is_keras_tensor(input_tensor):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor

    # Block 1
    x = Convolution2D(96, (7, 7), strides=(2, 2), activation='relu',
                      padding='valid', name='block1_conv1')(img_input)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Convolution2D(256, (5, 5), strides=(2, 2), activation='relu',
                      padding='same', name='block2_conv1')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='block2_pool')(x)
    # Block 3
    x = Convolution2D(384, (3, 3), strides=(1, 1), activation='relu',
                      padding='same', name='block3_conv1')(x)
    x = Convolution2D(384, (3, 3), strides=(1, 1), activation='relu',
                      padding='same', name='block3_conv2')(x)
    x = Convolution2D(384, (3, 3), strides=(1, 1), activation='relu',
                      padding='same', name='block3_conv3')(x)
    return x
def build_CNN_LSTM(channels, width, height, lstm_output_size, nb_classes):
    """Build and compile a CNN feature extractor + LSTM sequence decoder.

    :param lstm_output_size: number of output timesteps the LSTM emits
    :param nb_classes: per-timestep softmax size
    Fixed: removed the dead assignment `a = model.add(Flatten())`
    (Sequential.add returns None, so the binding was meaningless).
    """
    model = Sequential()
    # 1-2: two 64-filter convs, each batch-normed, then pool.
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu',
                            input_shape=(channels, height, width)))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # 3-4: two 128-filter convs, each batch-normed, then pool.
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # Flatten into a feature vector.
    model.add(Flatten())
    # Two dense layers with batch norm and dropout.
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # LSTM decoder: repeat the feature vector per output step.
    model.add(RepeatVector(lstm_output_size))
    model.add(LSTM(512, return_sequences=True))
    model.add(TimeDistributed(Dropout(0.5)))
    model.add(TimeDistributed(Dense(nb_classes, activation='softmax')))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[categorical_accuracy_per_sequence],
                  sample_weight_mode='temporal'
                  )
    return model
def build_model(self, nb_classes):
    """Two conv/pool stages plus a dense head; layers are named '1'..'11'."""
    self.model = Sequential()
    for layer in (
            Convolution2D(filters=32,
                          kernel_size=(5, 5),
                          padding='same',
                          dim_ordering='tf',
                          input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                          name="1"),
            Activation('relu', name="2"),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                         padding='same', name="3"),
            Convolution2D(filters=64, kernel_size=(5, 5),
                          padding='same', name='4'),
            Activation('relu', name='5'),
            MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                         padding='same', name='6'),
            Flatten(name='7'),
            Dense(512, name="8"),
            Activation('relu', name='9'),
            Dense(nb_classes, name='10'),
            Activation('softmax', name='11')):
        self.model.add(layer)
    self.model.summary()
def make_model(dense_layer_sizes, nb_filters, nb_conv, nb_pool):
    """Create a model of 2 conv layers followed by configurable dense layers.

    dense_layer_sizes: list of layer sizes, one entry per dense layer
    nb_filters: number of convolutional filters per conv layer
    nb_conv: convolutional kernel size
    nb_pool: size of the max-pooling window
    """
    model = Sequential()
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    for width in dense_layer_sizes:
        model.add(Dense(width))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
def model_from_thumbnails(train_x, train_y, val_x, val_y):
    """Train a small CNN on thumbnail images with early stopping.

    :param train_x: training images, shape (n, channels, rows, cols)
    :param train_y: one-hot training labels, shape (n, n_classes)
    :param val_x, val_y: validation split passed to fit()
    :return: the trained model
    Fixed: n_classes was read from an undefined name `y`; it is now taken
    from `train_y`, the labels actually passed in.
    """
    _, n_channels, n_rows, n_cols = train_x.shape
    n_classes = train_y.shape[1]
    model = Sequential()
    model.add(Convolution2D(32, 2, 2, border_mode='valid', activation='relu',
                            input_shape=(n_channels, n_rows, n_cols)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 2, 2, border_mode='valid', activation='relu'))
    model.add(Convolution2D(64, 2, 2, border_mode='valid', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 2, 2, border_mode='valid', activation='relu'))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    optimizer = Adam()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    # Stop once validation loss plateaus for 15 epochs.
    stopper = EarlyStopping(monitor='val_loss', patience=15, verbose=0,
                            mode='auto')
    model.fit(train_x, train_y, shuffle=True, nb_epoch=100,
              validation_data=(val_x, val_y), callbacks=[stopper])
    return model
def build(input_shape, num_classes):
    """Create and compile a CIFAR-style CNN; resumes from weights.hdf5 if present.

    input_shape: shape of one input image, e.g. (32, 32, 3)
    num_classes: number of softmax output classes
    """
    model = Sequential()

    # Block 1: two 3x3 convolutions at 32 filters, then pool + dropout.
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Block 2: same pattern at 64 filters.
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Fully-connected classifier head.
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # RMSprop with a small learning rate and decay.
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    # Reload previously saved weights when a checkpoint file exists.
    if os.path.isfile('weights.hdf5'):
        model.load_weights('weights.hdf5')
    return model
def build_model():
    """Build an (uncompiled) MNIST-style CNN using the Keras functional API."""
    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Activation, Flatten, Reshape
    from keras.layers import Convolution2D, MaxPooling2D

    nb_classes = 10
    nb_filters = 32
    pool_size = (2, 2)
    kernel_size = (3, 3)

    v = Input(shape=(28, 28))
    # Add a leading channel axis: (28, 28) -> (1, 28, 28), channels-first.
    h = Reshape([1, 28, 28])(v)
    h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                      border_mode='valid')(h)
    h = Activation('relu')(h)
    h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1])(h)
    h = Activation('relu')(h)
    h = MaxPooling2D(pool_size=pool_size)(h)
    h = Dropout(0.25)(h)
    h = Flatten()(h)
    h = Dense(128)(h)
    h = Activation('relu')(h)
    h = Dropout(0.5)(h)
    h = Dense(nb_classes)(h)
    o = Activation('softmax')(h)

    return Model(input=v, output=o)
def build_model():
    # NOTE: this is NOT plain Python — the ``{{name,choice,[...]}}`` and
    # ``{{name,maybe, ...}}`` spans are template placeholders that a
    # hyper-parameter search driver expands before the code is executed.
    # Expanded, it builds the same (uncompiled) MNIST-style CNN as the
    # non-templated build_model variant in this file.
    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Activation, Flatten, Reshape
    from keras.layers import Convolution2D, MaxPooling2D
    nb_classes = 10
    # Searched hyper-parameter: number of conv filters, one of 64/32/16/8.
    nb_filters = {{nb_filters,choice,[64,32,16,8]}}
    pool_size = (2,2)
    kernel_size = (3,3)
    v = Input(shape=(28,28))
    # Add a leading channel axis: (28, 28) -> (1, 28, 28), channels-first.
    h = Reshape([1,28,28])(v)
    h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid')(h)
    h = Activation('relu')(h)
    # Searched structural choice: optionally include a second conv+relu block.
    {{cnn_layer2,maybe,
    h = Convolution2D(nb_filters, kernel_size[0], kernel_size[1])(h)
    h = Activation('relu')(h)
    }}
    h = MaxPooling2D(pool_size=pool_size)(h)
    h = Dropout(0.25)(h)
    h = Flatten()(h)
    h = Dense(128)(h)
    h = Activation('relu')(h)
    h = Dropout(0.5)(h)
    h = Dense(nb_classes)(h)
    o = Activation('softmax')(h)
    model = Model(input=v, output=o)
    return model
def conv_autoencoder(X):
    """Fit a tiny convolutional autoencoder on 28x28 single-channel images.

    X: image array; reshaped in place to (n, 28, 28, 1) before training.
    Returns (autoencoder, encoder), where the encoder maps inputs to the
    max-pooled bottleneck activations.
    """
    X = X.reshape(X.shape[0], 28, 28, 1)

    inputs = Input(shape=(28, 28, 1))
    # Encoder: conv -> 2x2 max-pool bottleneck.
    hidden = Conv2D(4, 3, 3, activation='relu', border_mode='same')(inputs)
    encoded = MaxPooling2D((2, 2))(hidden)
    # Decoder: conv -> upsample -> reconstruct one channel.
    hidden = Conv2D(4, 3, 3, activation='relu', border_mode='same')(encoded)
    hidden = UpSampling2D((2, 2))(hidden)
    outputs = Conv2D(1, 3, 3, activation='relu', border_mode='same')(hidden)

    model = Model(input=inputs, output=outputs)
    model.compile(optimizer='adam', loss='mse')
    # Reconstruction objective: inputs are also the targets.
    model.fit(X, X, batch_size=64, nb_epoch=5)

    encoder = Model(input=inputs, output=encoded)
    return model, encoder
def cnn(height_a, height_q, width_a, width_q):
    """Dot-product relevance model over question/answer embedding "images".

    Each input is a (height, width, 1) matrix of stacked word embeddings.
    The two branches share the same architecture (not weights):
    Conv2D -> MaxPooling2D over the remaining rows -> Flatten -> Dropout
    -> Dense(64). The relevance score is the dot product of the two
    64-d projections; trained with MSE.
    """
    # --- question branch ---
    question_input = Input(shape=(height_q, width_q, 1), name='question_input')
    q = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
               kernel_regularizer=regularizers.l2(0.01),
               kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(question_input)
    q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(q)
    q = Flatten()(q)
    q = Dropout(0.5)(q)
    predictQ = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(q)

    # --- answer branch (taller input, hence the larger pooling window) ---
    answer_input = Input(shape=(height_a, width_a, 1), name='answer_input')
    a = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
               kernel_regularizer=regularizers.l2(0.01),
               kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(answer_input)
    a = MaxPooling2D((319, 1), strides=(1, 1), padding='valid')(a)
    a = Flatten()(a)
    a = Dropout(0.5)(a)
    predictA = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(a)

    # Dot-product similarity between the two projections.
    predictions = merge([predictA, predictQ], mode='dot')

    model = Model(inputs=[question_input, answer_input], outputs=predictions)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0))
    return model
def cnn(height_a, height_q, width_a, width_q):
    """Question/answer relevance scorer (duplicate variant of the model above).

    Two structurally identical CNN branches — Conv2D over the embedding
    matrix, time-axis max-pooling, dropout, Dense(64) — whose outputs are
    combined with a dot product and trained against MSE.
    """
    # Question branch.
    question_input = Input(shape=(height_q, width_q, 1), name='question_input')
    q_feat = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
                    kernel_regularizer=regularizers.l2(0.01),
                    kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(question_input)
    q_feat = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(q_feat)
    q_feat = Flatten()(q_feat)
    q_feat = Dropout(0.5)(q_feat)
    predictQ = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(q_feat)

    # Answer branch.
    answer_input = Input(shape=(height_a, width_a, 1), name='answer_input')
    a_feat = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
                    kernel_regularizer=regularizers.l2(0.01),
                    kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(answer_input)
    a_feat = MaxPooling2D((319, 1), strides=(1, 1), padding='valid')(a_feat)
    a_feat = Flatten()(a_feat)
    a_feat = Dropout(0.5)(a_feat)
    predictA = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(a_feat)

    # Score is the dot product of the two projections.
    predictions = merge([predictA, predictQ], mode='dot')

    model = Model(inputs=[question_input, answer_input], outputs=predictions)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0))
    return model
def cnn(height_a, height_q, width_a, width_q, extra_len):
    """QA relevance model augmented with hand-crafted extra features.

    Same two-branch CNN as the plain variant, but a shared `extra_input`
    vector of length `extra_len` is concatenated onto both 64-d
    projections before the dot product, so the score also reflects the
    extra features.
    """
    # Question branch.
    question_input = Input(shape=(height_q, width_q, 1), name='question_input')
    q = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
               kernel_regularizer=regularizers.l2(0.01),
               kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(question_input)
    q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(q)
    q = Flatten()(q)
    q = Dropout(0.5)(q)
    predictQ = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(q)

    # Answer branch.
    answer_input = Input(shape=(height_a, width_a, 1), name='answer_input')
    a = Conv2D(512, (2, 128), activation='sigmoid', padding='valid',
               kernel_regularizer=regularizers.l2(0.01),
               kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(answer_input)
    a = MaxPooling2D((319, 1), strides=(1, 1), padding='valid')(a)
    a = Flatten()(a)
    a = Dropout(0.5)(a)
    predictA = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.01))(a)

    # Append the same extra feature vector to both projections.
    extra_input = Input(shape=(extra_len,), name='extra_input')
    q_full = concatenate([predictQ, extra_input], axis=1)
    a_full = concatenate([predictA, extra_input], axis=1)

    # Dot-product similarity of the augmented projections.
    predictions = merge([a_full, q_full], mode='dot')

    model = Model(inputs=[question_input, answer_input, extra_input],
                  outputs=predictions)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0))
    return model
def cnn(height_a, height_q, count):
    """QA relevance model over token-id sequences with learned embeddings.

    count: vocabulary size for the two (architecturally identical, not
    weight-shared) Embedding layers. Each branch embeds its token ids to
    128 dims, reshapes the result into a (length, 128, 1) "image",
    convolves, max-pools over time, and projects to 64 dims; the score is
    the dot product of the projections, trained with MSE.
    """
    # Question branch.
    question_input = Input(shape=(height_q, 1), name='question_input')
    q = Embedding(input_dim=count, output_dim=128,
                  input_length=height_q)(question_input)
    q = Reshape((height_q, 128, 1), input_shape=(height_q,))(q)
    q = Conv2D(128, (2, 128), activation='sigmoid', padding='valid',
               kernel_regularizer=regularizers.l2(0.02),
               kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(q)
    q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(q)
    q = Flatten()(q)
    q = Dropout(0.5)(q)
    predictQ = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.02),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(q)

    # Answer branch (longer sequence, hence the larger pooling window).
    answer_input = Input(shape=(height_a, 1), name='answer_input')
    a = Embedding(input_dim=count, output_dim=128,
                  input_length=height_a)(answer_input)
    a = Reshape((height_a, 128, 1), input_shape=(height_a,))(a)
    a = Conv2D(128, (2, 128), activation='sigmoid', padding='valid',
               kernel_regularizer=regularizers.l2(0.02),
               kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(a)
    a = MaxPooling2D((399, 1), strides=(1, 1), padding='valid')(a)
    a = Flatten()(a)
    a = Dropout(0.5)(a)
    predictA = Dense(64, activation='relu',
                     kernel_regularizer=regularizers.l2(0.02),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.05))(a)

    # Dot-product similarity between the two projections.
    predictions = merge([predictA, predictQ], mode='dot')

    model = Model(inputs=[question_input, answer_input], outputs=predictions)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0))
    return model
def get_model():
    """Build and compile a small VGG-like classifier for 150x150 RGB images.

    Two conv blocks feed a 128-unit dense head with a 2-unit sigmoid
    output, trained with binary cross-entropy under adadelta.
    """
    inputs = Input(shape=(150, 150, 3))

    # Block 1: two 64-filter 3x3 convolutions, then 2x2 pooling.
    x = Conv2D(64, (3, 3), strides=(1, 1))(inputs)
    x = Activation('relu')(x)
    x = Conv2D(64, (3, 3), strides=(1, 1))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # Block 2: one 128-filter convolution, then 2x2 pooling.
    x = Conv2D(128, (3, 3), strides=(1, 1))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # Dense head with dropout; 2 sigmoid outputs.
    x = Flatten()(x)
    x = Dense(128)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(2)(x)
    outputs = Activation('sigmoid')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='binary_crossentropy', optimizer='adadelta',
                  metrics=['accuracy'])
    return model