The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.convolutional.ZeroPadding2D().
def fire_module(x, squeeze=16, expand=64):
    """SqueezeNet fire module: a 1x1 squeeze conv feeding two parallel
    expand branches (1x1 and padded 3x3), concatenated along channels."""
    squeezed = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    squeezed = Activation('relu')(squeezed)

    # expand branch 1: 1x1 convolution
    expand_1x1 = Convolution2D(expand, 1, 1, border_mode='valid')(squeezed)
    expand_1x1 = Activation('relu')(expand_1x1)

    # expand branch 2: zero-pad so the 3x3 'valid' conv keeps spatial size
    expand_3x3 = ZeroPadding2D(padding=(1, 1))(squeezed)
    expand_3x3 = Convolution2D(expand, 3, 3, border_mode='valid')(expand_3x3)
    expand_3x3 = Activation('relu')(expand_3x3)

    # concatenate the two branches on the channel axis (channels-first)
    return merge([expand_1x1, expand_3x3], mode='concat', concat_axis=1)
def fire_module(x, squeeze=16, expand=64):
    """SqueezeNet fire module (duplicate variant): squeeze with a 1x1 conv,
    then expand via parallel 1x1 and padded-3x3 convs joined on channels."""
    # squeeze stage
    x = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    x = Activation('relu')(x)
    # left expand path: plain 1x1
    left_branch = Activation('relu')(
        Convolution2D(expand, 1, 1, border_mode='valid')(x))
    # right expand path: pad then 3x3 so spatial size is preserved
    right_branch = ZeroPadding2D(padding=(1, 1))(x)
    right_branch = Convolution2D(expand, 3, 3, border_mode='valid')(right_branch)
    right_branch = Activation('relu')(right_branch)
    # merge on the channel axis and hand the result back
    x = merge([left_branch, right_branch], mode='concat', concat_axis=1)
    return x
def get_simple_model():
    """Build and compile a small two-conv softmax classifier.

    Relies on module-level nb_input_layers / NB_ROWS / NB_COLS / nb_classes.
    """
    model = Sequential()
    # add the layer stack in order
    for layer in (
        ZeroPadding2D(padding=(3, 3),
                      input_shape=(nb_input_layers, NB_ROWS, NB_COLS)),
        Convolution2D(96, 5, 5),
        Activation('relu'),
        ZeroPadding2D(padding=(1, 1)),
        Convolution2D(192, 3, 3),
        Activation('relu'),
        Flatten(),
        Dense(nb_classes),
        Activation('softmax'),
    ):
        model.add(layer)
    print("Compiling model")
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    print("Compiled model")
    return model
def build_model(self):
    """Assemble a wide-ResNet-style classifier and store it on self.model.

    Uses module-level img_channels/img_rows/img_cols, n (blocks per stage),
    k (width factor) and nb_classes.
    """
    img_input = Input(shape=(img_channels, img_rows, img_cols))
    # stem conv (spatial size: 32x32)
    net = ZeroPadding2D((1, 1))(img_input)
    net = Convolution2D(16, nb_row=3, nb_col=3)(net)
    # three bottleneck stages; stages 2 and 3 halve the spatial size
    for n_in, n_out, stride in ((16, 16 * k, (1, 1)),
                                (16 * k, 32 * k, (2, 2)),
                                (32 * k, 64 * k, (2, 2))):
        net = bottleneck(net, n, n_in, n_out, dropout=0.3, subsample=stride)
    # final BN/ReLU, global 8x8 average pool, and softmax head
    net = BatchNormalization(mode=0, axis=1)(net)
    net = Activation('relu')(net)
    net = AveragePooling2D((8, 8), strides=(1, 1))(net)
    net = Flatten()(net)
    preds = Dense(nb_classes, activation='softmax')(net)
    self.model = Model(input=img_input, output=preds)
    self.keras_get_params()
def VGG_16(weights_path=None):
    """Truncated VGG-style network: one padded 64/64 conv block followed by
    a small dense head with 10 softmax outputs.

    Args:
        weights_path: optional path to a weights file to load into the model.

    Returns:
        Uncompiled Keras Sequential model.
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    # fixed: Python-2 print statements -> print() calls, consistent with
    # the rest of the file
    print("convolution")
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    print("FLATTEN")
    model.add(Dense(400, activation='relu'))
    model.add(Dropout(0.5))
    print("YO")
    model.add(Dense(10, activation='softmax'))
    # fixed: weights_path was accepted but silently ignored
    if weights_path:
        model.load_weights(weights_path)
    return model
def test_zero_padding_2d(self):
    """ZeroPadding2D((2, 2)) should frame the input with zeros on every side
    while leaving the interior untouched."""
    n_samples, n_channels = 9, 7
    n_rows, n_cols = 11, 12
    input = np.ones((n_samples, n_channels, n_rows, n_cols))
    layer = convolutional.ZeroPadding2D(padding=(2, 2))
    layer.input = theano.shared(value=input)
    for train in (True, False):
        out = layer.get_output(train).eval()
        # the two outermost rows/columns on each side must be all zeros
        for offset in (0, 1, -1, -2):
            assert_allclose(out[:, :, offset, :], 0.)
            assert_allclose(out[:, :, :, offset], 0.)
        # the interior is the original all-ones tensor
        assert_allclose(out[:, :, 2:-2, 2:-2], 1.)
    # config round-trip should not raise
    config = layer.get_config()
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    """Build a residual classifier: first conv, residual blocks, BN/ReLU,
    then a softmax head over nb_classes (module-level)."""
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist:
        # resize MNIST (28, 28) up to (32, 32) before the first conv
        model.add(ZeroPadding2D((2, 2),
                                input_shape=(img_channels, img_rows, img_cols)))
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    # residual-based conv layers
    model.add(design_for_residual_blocks(num_channel_input=first_layer_channel))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    # classifier
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    return model
def get_squeezenet(nb_classes, img_size = (64,64)):
    """SqueezeNet classifier with a global-average-pooling head.

    Args:
        nb_classes: number of output classes.
        img_size: (height, width) of the channels-first RGB input.
    """
    input_img = Input(shape=(3, img_size[0], img_size[1]))
    net = Convolution2D(96, 7, 7, subsample=(2, 2), border_mode='valid')(input_img)
    net = Activation('relu')(net)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    # fire stage 1
    for squeeze, expand in ((16, 64), (16, 64), (32, 128)):
        net = fire_module(net, squeeze, expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    # fire stage 2
    for squeeze, expand in ((32, 192), (48, 192), (48, 192), (64, 256)):
        net = fire_module(net, squeeze, expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    net = fire_module(net, 64, 256)
    net = Dropout(0.5)(net)
    # 1x1 conv to nb_classes channels, then pool globally
    net = ZeroPadding2D(padding=(1, 1))(net)
    net = Convolution2D(nb_classes, 1, 1, border_mode='valid')(net)
    net = GlobalAveragePooling2D()(net)
    out = Dense(nb_classes, activation='softmax')(net)
    return Model(input=input_img, output=[out])
def get_squeezenet(nb_classes):
    """SqueezeNet classifier for 227x227 channels-first RGB input, with a
    manual average-pool + Flatten in place of global pooling."""
    input_img = Input(shape=(3, 227, 227))
    net = Convolution2D(96, 7, 7, subsample=(2, 2), border_mode='valid')(input_img)
    net = Activation('relu')(net)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    for squeeze, expand in ((16, 64), (16, 64), (32, 128)):
        net = fire_module(net, squeeze, expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    for squeeze, expand in ((32, 192), (48, 192), (48, 192), (64, 256)):
        net = fire_module(net, squeeze, expand)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(net)
    net = fire_module(net, 64, 256)
    net = Dropout(0.5)(net)
    net = ZeroPadding2D(padding=(1, 1))(net)
    net = Convolution2D(nb_classes, 1, 1, border_mode='valid')(net)
    # global pooling not available in this Keras version: emulate it with a
    # full-size average pool followed by Flatten
    net = AveragePooling2D(pool_size=(15, 15))(net)
    net = Flatten()(net)
    out = Dense(nb_classes, activation='softmax')(net)
    return Model(input=input_img, output=[out])
def get_small_squeezenet(nb_classes):
    """Reduced SqueezeNet for 32x32 channels-first RGB images."""
    input_img = Input(shape=(3, 32, 32))
    net = Convolution2D(16, 3, 3, border_mode='same')(input_img)
    net = Activation('relu')(net)
    net = MaxPooling2D(pool_size=(3, 3))(net)
    # two fire stages, each followed by a 2x2 pool
    for squeeze, expand in ((32, 128), (32, 128)):
        net = fire_module(net, squeeze, expand)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    for squeeze, expand in ((48, 192), (48, 192)):
        net = fire_module(net, squeeze, expand)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = fire_module(net, 64, 256)
    net = Dropout(0.5)(net)
    net = ZeroPadding2D(padding=(1, 1))(net)
    net = Convolution2D(nb_classes, 1, 1, border_mode='valid')(net)
    # emulate global average pooling (not available here)
    net = AveragePooling2D(pool_size=(4, 4))(net)
    net = Flatten()(net)
    out = Dense(nb_classes, activation='softmax')(net)
    return Model(input=input_img, output=[out])
def Alexnet(height, width, weights_path=None):
    """AlexNet-flavoured binary classifier for channels-first RGB input.

    Args:
        height: input image height.
        width: input image width.
        weights_path: optional weights file to load after building.
    """
    model = Sequential()
    # stem: padded 11x11 conv + BN
    model.add(ZeroPadding2D((1, 1), input_shape=(3, height, width)))
    model.add(Convolution2D(64, 11, 11, border_mode="same", activation="relu"))
    model.add(BatchNormalization())
    # three pad -> pool -> conv -> BN stages with growing filter counts
    for n_filters, kernel in ((128, 7), (192, 3), (256, 3)):
        model.add(ZeroPadding2D((1, 1)))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        model.add(Convolution2D(n_filters, kernel, kernel,
                                border_mode="same", activation="relu"))
        model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3)))
    # dense head: 4096 -> 512 -> 2-way softmax
    model.add(Flatten())
    model.add(Dense(4096, init='normal', activation="relu"))
    model.add(BatchNormalization())
    model.add(Dense(512, init='normal', activation="relu"))
    model.add(BatchNormalization())
    model.add(Dense(2, init='normal', activation="softmax"))
    if weights_path:
        print("Loading weights...", end='\t')
        model.load_weights(weights_path)
        print("Finished.")
    return model
def get_simple_cnn(height, width):
    """
    A simple CNN that has the same input/output shapes as the VGG16 model.

    Args:
        height: input height
        width: input width
    Return:
        Keras model
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, height, width)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((4, 4), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((4, 4), strides=(2, 2)))
    # final conv block feeds a global-average-pooling classifier head
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Lambda(global_average_pooling,
                     output_shape=global_average_pooling_shape))
    model.add(Dense(2, activation="softmax", init="uniform"))
    return model
def ConvBlock(layers, model, filters):
    """Append a VGG-style block to `model` in place: `layers` padded 3x3 relu
    convs, then a 2x2 max-pool and 25% dropout."""
    for _ in range(layers):
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(filters, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))
def _masked_conv(self, x, filter_size, stack_name, layer_idx, mask_type='B'):
    """Masked convolution for one PixelCNN-style stack.

    Implements the masking via asymmetric zero-padding plus a 'valid' conv
    so each output pixel only sees allowed context.
    NOTE(review): the 4-tuple padding appears to use the old Keras
    (top, bottom, left, right) form — confirm against the Keras version in use.

    Args:
        x: input tensor.
        filter_size: (rows, cols) of the nominal filter.
        stack_name: 'vertical' or 'horizontal' — selects the stack.
        layer_idx: index used to build unique layer names.
        mask_type: 'A' excludes the current pixel (first layer),
            anything else ('B') includes it.

    Returns:
        Tensor with 2 * self.nb_filters channels.
    """
    if stack_name == 'vertical':
        # pad above and on both sides, then crop context with a
        # (rows//2 + 1) x cols valid conv: sees current row and rows above only
        res = ZeroPadding2D(padding=(filter_size[0]//2, 0, filter_size[1]//2, filter_size[1]//2), name='v_pad_'+str(layer_idx))(x)
        res = Convolution2D(2*self.nb_filters, filter_size[0]//2+1, filter_size[1], border_mode='valid', name='v_conv_'+str(layer_idx))(res)
    elif stack_name == 'horizontal':
        # pad to the left only: the conv sees pixels to the left
        res = ZeroPadding2D(padding=(0, 0, filter_size[1]//2, 0), name='h_pad_'+str(layer_idx))(x)
        if mask_type == 'A':
            # mask 'A': exclude the current pixel itself
            res = Convolution2D(2*self.nb_filters, 1, filter_size[1]//2, border_mode='valid', name='h_conv_'+str(layer_idx))(res)
        else:
            # mask 'B': include the current pixel
            res = Convolution2D(2*self.nb_filters, 1, filter_size[1]//2+1, border_mode='valid', name='h_conv_'+str(layer_idx))(res)
    return res
def _shift_down(x):
    """Shift the feature map one pixel down along axis 1, zero-filling the
    newly exposed row and cropping back to the original size."""
    n_rows = K.int_shape(x)[1]
    # pad one zero row at the top (old 4-tuple padding form)
    shifted = ZeroPadding2D(padding=(1, 0, 0, 0))(x)
    # drop the last row so the shape matches the input again
    return Lambda(lambda t: t[:, :n_rows, :, :])(shifted)
def __init__(self, d_size=(3, 128, 64), d_nb_filters=128, d_scales=4, d_FC=None, d_init=None, **kwargs):
    """DCGAN-style discriminator: `d_scales` strided conv stages, optional FC
    stack, and a sigmoid output.

    Args:
        d_size: (channels, height, width); h and w should be multiples of 16.
        d_nb_filters: base filter count, doubled at each scale.
        d_scales: number of downsampling conv stages.
        d_FC: optional iterable of fully-connected layer sizes.
        d_init: optional weight initializer callable; defaults to InitNormal().
    """
    super(Discriminator, self).__init__(**kwargs)
    self.d_size = d_size
    self.d_nb_filters = d_nb_filters
    self.d_scales = d_scales
    self.d_FC = d_FC
    self.d_init = d_init if d_init is not None else InitNormal()

    c, h, w = d_size  # h and w should be multiply of 16
    nf = d_nb_filters
    for s in range(d_scales):
        if s == 0:
            self.add(ZeroPadding2D((2, 2), input_shape=d_size))
        else:
            self.add(ZeroPadding2D((2, 2)))
        self.add(Convolution2D(nf * (2 ** s), 5, 5, subsample=(2, 2),
                               border_mode='valid',))
        self.add(BN())
        # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2, axis=1) )
        self.add(LeakyReLU(0.2))
    self.add(Flatten())
    if d_FC is not None:
        for fc_dim in d_FC:
            self.add(Dense(fc_dim,))
            self.add(LeakyReLU(0.2))
            self.add(BN())
            # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2) )
            self.add(LeakyReLU(0.2))
    self.add(Dense(1, activation='sigmoid'))
    # fixed: calling d_init(self) crashed when d_init was None (the default);
    # use the stored attribute, which falls back to InitNormal()
    self.d_init(self)
def __init__(self, d_size=(3, 128, 64), d_nb_filters=128, d_scales=4, d_FC=None, d_init=None, **kwargs):
    """WGAN critic: same conv trunk as the Discriminator but with a linear
    (unbounded) scalar output.

    Args:
        d_size: (channels, height, width); h and w should be multiples of 16.
        d_nb_filters: base filter count, doubled at each scale.
        d_scales: number of downsampling conv stages.
        d_FC: optional iterable of fully-connected layer sizes.
        d_init: optional weight initializer callable; defaults to InitNormal().
    """
    super(Critic, self).__init__(**kwargs)
    self.d_size = d_size
    self.d_nb_filters = d_nb_filters
    self.d_scales = d_scales
    self.d_FC = d_FC
    self.d_init = d_init if d_init is not None else InitNormal()

    c, h, w = d_size  # h and w should be multiply of 16
    nf = d_nb_filters
    for s in range(d_scales):
        if s == 0:
            self.add(ZeroPadding2D((2, 2), input_shape=d_size))
        else:
            self.add(ZeroPadding2D((2, 2)))
        self.add(Convolution2D(nf * (2 ** s), 5, 5, subsample=(2, 2),
                               border_mode='valid',))
        self.add(BN())
        # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2, axis=1) )
        self.add(LeakyReLU(0.2))
    self.add(Flatten())
    if d_FC is not None:
        for fc_dim in d_FC:
            self.add(Dense(fc_dim,))
            self.add(LeakyReLU(0.2))
            self.add(BN())
            # self.add( BatchNormalization(beta_init='zero', gamma_init='one', mode=2) )
            self.add(LeakyReLU(0.2))
    self.add(Dense(1, activation='linear',))
    # fixed: calling d_init(self) crashed when d_init was None (the default);
    # use the stored attribute, which falls back to InitNormal()
    self.d_init(self)
def __add_convolutional_layers(self):
    """Append three padded 3x3 conv layers (32, 48, 32 filters) to
    self.model; the last two are followed by 2x2 max-pooling."""
    # first convolutional layer (declares the 1x28x28 input)
    self.model.add(ZeroPadding2D((1, 1), input_shape=(1, 28, 28)))
    self.model.add(Convolution2D(32, 3, 3, activation='relu'))
    # second and third convolutional layers, each pooled
    for n_filters in (48, 32):
        self.model.add(ZeroPadding2D((1, 1)))
        self.model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
def conv2D_lrn2d(x, nb_filter, nb_row, nb_col, padding='same', strides=(1, 1), activation='relu', LRN2D_norm=True, bias_initializer='zeros',kernel_initializer='glorot_uniform', weight_decay=WEIGHT_DECAY, data_format="channels_first",name='conv'):
    '''
    Info: Function taken from the Inceptionv3.py script keras github

    Utility function to apply to a tensor a module Convolution + lrn2d
    with optional weight decay (L2 weight regularization).

    Fixed: bias_initializer, kernel_initializer and data_format were accepted
    but ignored — hard-coded literals were passed to the layers instead.
    Defaults are unchanged, so existing callers behave identically.
    '''
    if weight_decay:
        W_regularizer = regularizers.l2(weight_decay)
        b_regularizer = regularizers.l2(weight_decay)
    else:
        W_regularizer = None
        b_regularizer = None

    # NOTE(review): use_bias=False makes the bias initializer/regularizer
    # inert; kept as-is to preserve behavior — confirm intent.
    x = Conv2D(nb_filter, (nb_row, nb_col),
               bias_regularizer=b_regularizer,
               activation=activation,
               data_format=data_format,
               padding=padding,
               strides=strides,
               bias_initializer=bias_initializer,
               kernel_initializer=kernel_initializer,
               kernel_regularizer=W_regularizer,
               use_bias=False,
               name=name)(x)
    x = ZeroPadding2D(padding=(1, 1), data_format=data_format)(x)

    if LRN2D_norm:
        # local response normalization, padded again to restore size
        x = LRN2D(alpha=ALPHA, beta=BETA)(x)
        x = ZeroPadding2D(padding=(1, 1), data_format=data_format)(x)

    return x
def create(self):
    """Build a small visual-feature model (pad/conv/flatten/dense) and load
    weights from self._weights_path if one was provided."""
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=self._visual_dim))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Flatten())
    # record the feature dimensionality for downstream consumers
    output_dim = 4096
    self._model_output_dim = output_dim
    model.add(Dense(output_dim, activation='relu'))
    model.add(Dropout(0.5))
    if self._weights_path:
        model.load_weights(self._weights_path)
    return model
def vgg_16(weights_path=None):
    """VGG-16 (Keras 1 API) for 3x224x224 input; optionally loads weights."""
    model = Sequential()
    # block 1 declares the input shape, so it is written out explicitly
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # blocks 2-5: n padded 3x3 convs followed by a 2x2 max-pool
    for n_convs, n_filters in ((2, 128), (3, 256), (3, 512), (3, 512)):
        for _ in range(n_convs):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16(weights_path=None):
    """VGG-16 using the Keras 2 Conv2D tuple API; optionally loads weights."""
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # remaining conv blocks: repeated padded 3x3 convs, then a pool
    for repeats, filters in ((2, 128), (3, 256), (3, 512), (3, 512)):
        for _ in range(repeats):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(filters, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    # top layer of the VGG net
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet encoder bottleneck: a projected conv main branch added to a
    (possibly max-pooled and channel-padded) identity branch.

    Args:
        inp: input tensor (channels-last; channel count read from axis 3).
        output: number of output channels.
        internal_scale: channel reduction factor for the internal convs.
        asymmetric: if nonzero, use a 1xk then kx1 conv pair of this size.
        dilated: if nonzero, use a 3x3 conv with this dilation rate.
        downsample: if True, stride-2 projection plus max-pooled shortcut.
        dropout_rate: SpatialDropout2D rate on the main branch.
    """
    # main branch
    internal = output // internal_scale
    encoder = inp
    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # conv: regular, asymmetric (1xk then kx1), or dilated — exactly one
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # 1x1 expansion back up to `output` channels
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)
    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)
        # zero-pad the channel axis: swap channels into the width position,
        # pad on the right, swap back
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)
    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet (unpooling variant) encoder bottleneck.

    Same as the plain ENet bottleneck, but the downsampling shortcut uses
    MaxPoolingWithArgmax2D and the pooling indices are returned alongside
    the tensor so the decoder can unpool later.

    Returns:
        The output tensor, or (tensor, argmax_indices) when downsample=True.
    """
    # main branch
    internal = output // internal_scale
    encoder = inp
    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # conv: regular, asymmetric (1xk then kx1), or dilated — exactly one
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # 1x1 expansion back up to `output` channels
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)
    other = inp
    # other branch
    if downsample:
        # pool while capturing argmax indices for later unpooling
        other, indices = MaxPoolingWithArgmax2D()(other)
        # zero-pad the channel axis: swap channels into the width position,
        # pad on the right, swap back
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)
    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    else:
        return encoder
def minst_attention(inc_noise=False, attention=True):
    """Build an MNIST classifier with a soft-attention feedback pass.

    Layers conv_2a / maxp_2a / norm_2a / zero_2a / dense_1a / dense_2a are
    instantiated once and SHARED between the first pass and the attended
    second pass, so both passes use identical weights — do not restyle this
    into fresh layer constructions.

    Args:
        inc_noise: if True, add Gaussian noise to the input first.
        attention: if True use attention_control to gate features,
            otherwise no_attention_control.

    Returns:
        Keras Model mapping the image input to the second-pass softmax.
    """
    # make layers (shared layer instances)
    inputs = Input(shape=(1, image_size, image_size), name='input')
    conv_1a = Convolution2D(32, 3, 3, activation='relu', name='conv_1')
    maxp_1a = MaxPooling2D((3, 3), strides=(2, 2), name='convmax_1')
    norm_1a = crosschannelnormalization(name="convpool_1")
    zero_1a = ZeroPadding2D((2, 2), name='convzero_1')
    conv_2a = Convolution2D(32, 3, 3, activation='relu', name='conv_2')
    maxp_2a = MaxPooling2D((3, 3), strides=(2, 2), name='convmax_2')
    norm_2a = crosschannelnormalization(name="convpool_2")
    zero_2a = ZeroPadding2D((2, 2), name='convzero_2')
    dense_1a = Lambda(global_average_pooling, output_shape=global_average_pooling_shape, name='dense_1')
    dense_2a = Dense(10, activation='softmax', init='uniform', name='dense_2')
    # make actual model
    if inc_noise:
        inputs_noise = noise.GaussianNoise(2.5)(inputs)
        input_pad = ZeroPadding2D((1, 1), input_shape=(1, image_size, image_size), name='input_pad')(inputs_noise)
    else:
        input_pad = ZeroPadding2D((1, 1), input_shape=(1, image_size, image_size), name='input_pad')(inputs)
    # first pass, stage 1
    conv_1 = conv_1a(input_pad)
    conv_1 = maxp_1a(conv_1)
    conv_1 = norm_1a(conv_1)
    conv_1 = zero_1a(conv_1)
    # first pass, stage 2 (conv_2_x kept pre-pooling for the attention map)
    conv_2_x = conv_2a(conv_1)
    conv_2 = maxp_2a(conv_2_x)
    conv_2 = norm_2a(conv_2)
    conv_2 = zero_2a(conv_2)
    conv_2 = Dropout(0.5)(conv_2)
    # first-pass prediction
    dense_1 = dense_1a(conv_2)
    dense_2 = dense_2a(dense_1)
    # derive the attention map from the pre-pool stage-2 features
    conv_shape1 = Lambda(change_shape1, output_shape=(32,), name='chg_shape')(conv_2_x)
    find_att = dense_2a(conv_shape1)
    if attention:
        find_att = Lambda(attention_control, output_shape=att_shape, name='att_con')([find_att, dense_2])
    else:
        find_att = Lambda(no_attention_control, output_shape=att_shape, name='att_con')([find_att, dense_2])
    # gate stage-1 features with the attention map (element-wise product)
    zero_3a = ZeroPadding2D((1, 1), name='convzero_3')(find_att)
    apply_attention = Merge(mode='mul', name='attend')([zero_3a, conv_1])
    # second pass reuses the stage-2 shared layers on the attended features
    conv_3 = conv_2a(apply_attention)
    conv_3 = maxp_2a(conv_3)
    conv_3 = norm_2a(conv_3)
    conv_3 = zero_2a(conv_3)
    dense_3 = dense_1a(conv_3)
    dense_4 = dense_2a(dense_3)
    model = Model(input=inputs, output=dense_4)
    return model
def get_model():
    """Build, compile and restore the deep all-conv classifier (one 96-filter
    5x5 stage plus eight identical padded 192-filter 3x3 stages)."""
    model = Sequential()
    model.add(ZeroPadding2D(padding=(3, 3),
                            input_shape=(nb_input_layers, NB_ROWS, NB_COLS)))
    model.add(Convolution2D(96, 5, 5))
    model.add(Activation('relu'))
    # eight identical padded 3x3 conv stages
    for _ in range(8):
        model.add(ZeroPadding2D(padding=(1, 1)))
        model.add(Convolution2D(192, 3, 3))
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    print("Compiling model")
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    print("Compiled model")
    # restore the checkpoint from the previous run
    model.load_weights("../run2/epoch_45_weights.h5")
    return model
def VGG_16(height, width, weights_path=None):
    """
    VGG Model Keras specification with a global-average-pooling 2-way head.

    args: height/width (input size), weights_path (str) trained weights file path
    returns model (Keras model)
    """
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, height, width)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # blocks 2-5; the last 512 block is NOT pooled before the GAP head
    for n_convs, n_filters, pooled in ((2, 128, True), (3, 256, True),
                                       (3, 512, True), (3, 512, False)):
        for _ in range(n_convs):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        if pooled:
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Lambda(global_average_pooling,
                     output_shape=global_average_pooling_shape))
    model.add(Dense(2, activation="softmax", init="uniform"))
    if weights_path:
        print("Loading weights...", end='\t')
        model.load_weights(weights_path)
        print("Finished.")
    return model
def wide_basic(incoming, nb_in_filters, nb_out_filters, dropout=None, subsample=(2, 2)):
    """Wide-ResNet pre-activation basic unit: two padded 3x3 convs summed
    with either an identity or a 1x1-projection shortcut.

    Args:
        incoming: input tensor.
        nb_in_filters: channel count of `incoming`.
        nb_out_filters: channel count of the output.
        dropout: optional dropout rate between the two convs.
        subsample: stride of the first conv (and of the projection shortcut).
    """
    nb_bottleneck_filter = nb_out_filters

    if nb_in_filters == nb_out_filters:
        # identity shortcut: pre-activation BN/ReLU, then conv3x3
        y = BatchNormalization(mode=0, axis=1)(incoming)
        y = Activation('relu')(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3, subsample=subsample, init='he_normal', border_mode='valid')(y)
        # conv3x3
        y = BatchNormalization(mode=0, axis=1)(y)
        y = Activation('relu')(y)
        if dropout is not None:
            y = Dropout(dropout)(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3, subsample=(1, 1), init='he_normal', border_mode='valid')(y)
        # element-wise sum with the untouched input
        return merge([incoming, y], mode='sum')
    else:
        # Residual Units for increasing dimensions
        # common BN, ReLU (shared by both branches)
        shortcut = BatchNormalization(mode=0, axis=1)(incoming)
        shortcut = Activation('relu')(shortcut)
        # conv3x3
        y = ZeroPadding2D((1, 1))(shortcut)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3, subsample=subsample, init='he_normal', border_mode='valid')(y)
        # conv3x3
        y = BatchNormalization(mode=0, axis=1)(y)
        y = Activation('relu')(y)
        if dropout is not None:
            y = Dropout(dropout)(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_out_filters, nb_row=3, nb_col=3, subsample=(1, 1), init='he_normal', border_mode='valid')(y)
        # shortcut: 1x1 projection matches filters and stride
        shortcut = Convolution2D(nb_out_filters, nb_row=1, nb_col=1, subsample=subsample, init='he_normal', border_mode='same')(shortcut)
        return merge([shortcut, y], mode='sum')
def VGG_16(weights_path=None):
    """VGG-16 (Keras 1 API); optionally loads pretrained weights."""
    model = Sequential()

    def add_conv_block(n_convs, n_filters):
        # one padded 3x3 relu conv per repeat, then a 2x2 max-pool
        for _ in range(n_convs):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # block 1 is spelled out because the first layer declares the input shape
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    add_conv_block(2, 128)
    add_conv_block(3, 256)
    add_conv_block(3, 512)
    add_conv_block(3, 512)

    # fully-connected classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16_graph():
    """Build VGG-16 with the deprecated Keras Graph API.

    Node names ('pad1', 'relu1', '1'..'32') are load-bearing: each node's
    `input` refers to the previous node's name, so they must not be changed.
    """
    model = Graph()
    model.add_input(name='input', input_shape=(3, 224, 224))
    # block 1: two 64-filter padded 3x3 convs + 2x2 pool
    model.add_node(ZeroPadding2D((1,1)), name='pad1', input='input')
    model.add_node(Convolution2D(64, 3, 3, activation='relu'), name='relu1', input='pad1')
    # weights=sequence_model.layers[1].W.container
    model.add_node(ZeroPadding2D((1,1)), name='pad2', input='relu1')
    model.add_node(Convolution2D(64, 3, 3, activation='relu'), name='relu2', input='pad2')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='pool1', input='relu2')
    # block 2: two 128-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='1', input='pool1')
    model.add_node(Convolution2D(128, 3, 3, activation='relu'), name='2', input='1')
    model.add_node(ZeroPadding2D((1,1)), name='3', input='2')
    model.add_node(Convolution2D(128, 3, 3, activation='relu'), name='4', input='3')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='5', input='4')
    # block 3: three 256-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='6', input='5')
    model.add_node(Convolution2D(256, 3, 3, activation='relu'), name='7', input='6')
    model.add_node(ZeroPadding2D((1,1)), name='8', input='7')
    model.add_node(Convolution2D(256, 3, 3, activation='relu'), name='9', input='8')
    model.add_node(ZeroPadding2D((1,1)), name='10', input='9')
    model.add_node(Convolution2D(256, 3, 3, activation='relu'), name='11', input='10')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='12', input='11')
    # block 4: three 512-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='13', input='12')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='14', input='13')
    model.add_node(ZeroPadding2D((1,1)), name='15', input='14')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='16', input='15')
    model.add_node(ZeroPadding2D((1,1)), name='17', input='16')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='18', input='17')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='19', input='18')
    # block 5: three 512-filter convs + pool
    model.add_node(ZeroPadding2D((1,1)), name='20', input='19')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='21', input='20')
    model.add_node(ZeroPadding2D((1,1)), name='22', input='21')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='23', input='22')
    model.add_node(ZeroPadding2D((1,1)), name='24', input='23')
    model.add_node(Convolution2D(512, 3, 3, activation='relu'), name='25', input='24')
    model.add_node(MaxPooling2D((2,2), strides=(2,2)), name='26', input='25')
    # classifier head: two 4096-dim FC layers + 1000-way softmax
    model.add_node(Flatten(), name='27', input='26')
    model.add_node(Dense(4096, activation='relu'), name='28', input='27')
    model.add_node(Dropout(0.5), name='29', input='28')
    model.add_node(Dense(4096, activation='relu'), name='30', input='29')
    model.add_node(Dropout(0.5), name='31', input='30')
    model.add_node(Dense(1000, activation='softmax'), name='32', input='31')
    model.add_output(input='32', name='output')
    return model
def get_vgg_full_graph(self, weights_path=None, with_output=True):
    """Build the full VGG-16 classifier as a Keras 1.x Graph.

    Node names ('pad1'..'pad13', 'conv1'..'conv13', 'pool1'..'pool5',
    'flat', 'dense1'..'dense3', 'drop1'/'drop2') match the original so
    saved weight files remain loadable.

    weights_path -- optional HDF5 weights file to load after building.
    with_output  -- when True, register 'dense3' as the graph output.
    """
    model = Graph()
    model.add_input(name='input', input_shape=(3, 224, 224))

    # VGG-16 trunk: ints are conv filter counts, 'M' is a 2x2 max-pool.
    trunk = (64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M')
    prev = 'input'
    n_pad = n_conv = n_pool = 0
    for spec in trunk:
        if spec == 'M':
            n_pool += 1
            node = 'pool%d' % n_pool
            model.add_node(MaxPooling2D((2, 2), strides=(2, 2)),
                           name=node, input=prev)
        else:
            n_pad += 1
            node = 'pad%d' % n_pad
            model.add_node(ZeroPadding2D((1, 1)), name=node, input=prev)
            prev = node
            n_conv += 1
            node = 'conv%d' % n_conv
            model.add_node(Convolution2D(spec, 3, 3, activation='relu'),
                           name=node, input=prev)
        prev = node

    # Fully connected classifier head.
    model.add_node(Flatten(), name='flat', input=prev)
    model.add_node(Dense(4096, activation='relu'), name='dense1', input='flat')
    model.add_node(Dropout(0.5), name='drop1', input='dense1')
    model.add_node(Dense(4096, activation='relu'), name='dense2', input='drop1')
    model.add_node(Dropout(0.5), name='drop2', input='dense2')
    model.add_node(Dense(1000, activation='softmax'), name='dense3', input='drop2')

    if with_output:
        model.add_output(input='dense3', name='output')
    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16(weights_path = None):
    """VGG-16 feature extractor (Keras 1.x Sequential API).

    Builds the full ImageNet VGG-16, optionally loads pretrained
    weights, then strips the final softmax and dropout so the model
    emits the 4096-D fc7 activations.
    """
    model = Sequential()

    # Convolutional trunk: ints are conv filter counts, 'M' is a 2x2 max-pool.
    trunk = (64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M')
    for idx, spec in enumerate(trunk):
        if spec == 'M':
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
            continue
        if idx == 0:
            # First layer declares the channels-first 224x224 RGB input.
            model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(spec, 3, 3, activation='relu'))

    # ImageNet classifier head.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    # Remove the last two layers to get the 4096D activations.
    model = pop(model)
    model = pop(model)
    return model
def VGG_16(weights_path = None):
    """Standard ImageNet VGG-16 classifier (Keras 1.x Sequential API).

    weights_path -- optional HDF5 file of pretrained weights to load.
    Returns the compiled-free Sequential model ending in a 1000-way softmax.
    """
    model = Sequential()

    # Each stage: n pad+conv(3x3, relu) pairs followed by a 2x2 max-pool.
    stages = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
    first_layer = True
    for n_convs, filters in stages:
        for _ in range(n_convs):
            if first_layer:
                # Input layer fixes the channels-first 224x224 RGB shape.
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Fully connected classifier head.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model
def __init__(self, **kwargs):
    """Build and compile a VGG-16-style classifier for single-channel input.

    Input shape comes from self.norm_shape (height, width) with one
    channel (channels-last); the output softmax is sized by
    self.max_n_label. Compiles with RMSprop + categorical crossentropy.
    """
    super(KerasVGG16, self).__init__(**kwargs)
    norm_shape = self.norm_shape

    model = Sequential()
    # First pad carries the (H, W, 1) grayscale input shape.
    model.add(ZeroPadding2D((1, 1),
                            input_shape=(norm_shape[0], norm_shape[1], 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))

    # Rest of the VGG-16 trunk: ints are filter counts, 'M' is a max-pool.
    for spec in (64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
                 512, 512, 512, 'M', 512, 512, 512, 'M'):
        if spec == 'M':
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        else:
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(spec, (3, 3), activation='relu'))

    # Classifier head sized to the label set.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.max_n_label, activation='softmax'))

    # initiate RMSprop optimizer
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    self.model = model
def get_deep_anime_model(n_outputs=1000, input_size=128):
    '''The deep neural network used for deep anime bot'''
    net = Sequential()

    # Stage 1: two 3x3 convs at 64 filters, 2x2 max-pool, batch norm.
    net.add(Convolution2D(64, 3, 3, activation='relu',
                          input_shape=(3, input_size, input_size)))
    net.add(ZeroPadding2D((1, 1)))
    net.add(Convolution2D(64, 3, 3, activation='relu'))
    net.add(MaxPooling2D((2, 2), strides=(2, 2)))
    net.add(BatchNormalization())

    # Stage 2: one 3x3 conv plus a 1x1 "network-in-network" conv at 128.
    net.add(ZeroPadding2D((1, 1)))
    net.add(Convolution2D(128, 3, 3, activation='relu'))
    net.add(Convolution2D(128, 1, 1, activation='relu'))
    net.add(MaxPooling2D((2, 2), strides=(2, 2)))
    net.add(BatchNormalization())

    # Stage 3: two 3x3 convs and a 1x1 conv at 256 filters.
    net.add(ZeroPadding2D((1, 1)))
    net.add(Convolution2D(256, 3, 3, activation='relu'))
    net.add(ZeroPadding2D((1, 1)))
    net.add(Convolution2D(256, 3, 3, activation='relu'))
    net.add(Convolution2D(256, 1, 1, activation='relu'))
    net.add(MaxPooling2D((2, 2), strides=(2, 2)))
    net.add(BatchNormalization())

    # Stage 4: 512 filters; the 8x8 average pool collapses spatial extent.
    net.add(ZeroPadding2D((1, 1)))
    net.add(Convolution2D(512, 3, 3, activation='relu'))
    net.add(ZeroPadding2D((1, 1)))
    net.add(Convolution2D(512, 3, 3, activation='relu'))
    net.add(Convolution2D(512, 1, 1, activation='relu'))
    net.add(AveragePooling2D((8, 8), strides=(2, 2)))
    net.add(BatchNormalization())

    # Classifier head: two 2048-wide dense layers with heavy dropout.
    net.add(Flatten())
    net.add(Dropout(0.5))
    net.add(Dense(2048))
    net.add(BatchNormalization())
    net.add(Dropout(0.7))
    net.add(Dense(2048))
    net.add(BatchNormalization())
    net.add(Dropout(0.7))
    net.add(Dense(n_outputs))
    net.add(Activation('softmax'))

    print(net.summary())
    return net
def model_structure_file(self):
    """Return the path of the saved Keras model-structure JSON.

    The file lives alongside whatever directory self.current_directory()
    reports, under the fixed name 'deep_drive_model_structure.json'.
    """
    return os.path.join(self.current_directory(),
                        'deep_drive_model_structure.json')

# NOTE(review): a large commented-out AlexNet-style builder
# (_create_feature_layers / _create_classification_layers) that used to
# trail this method was dead code and has been removed; recover it from
# version control if it is ever needed again.
def VGG_16(weights_path=None):
    """VGG-16 model built with Keras (Sequential API, channels-first).

    (The original header comment here was mojibake-garbled Chinese
    describing this as the Keras VGG-16 implementation.)

    weights_path -- optional HDF5 file of pretrained weights to load.
    Returns the model ending in a 1000-way ImageNet softmax.
    """
    model = Sequential()

    # Trunk spec: ints are conv filter counts, 'M' marks a 2x2 max-pool.
    trunk = (64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M')
    first = True
    for spec in trunk:
        if spec == 'M':
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
            continue
        if first:
            # Input layer fixes the channels-first 224x224 RGB shape.
            model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
            first = False
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(spec, 3, 3, activation='relu'))

    # Fully connected classifier head.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16_Terrassa(weights_path=""):
    """VGG-16 fine-tuning model for a 13-class (Terrassa) task.

    The convolutional trunk and both 4096-wide fc layers are frozen
    (trainable=False). After optionally loading pretrained weights, the
    ImageNet softmax and its dropout are popped off and replaced by a
    fresh trainable 13-way softmax.
    """
    model = Sequential()

    # Trunk spec: ints are conv filter counts, 'M' marks a 2x2 max-pool.
    trunk = (64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M')
    first = True
    for spec in trunk:
        if spec == 'M':
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
            continue
        if first:
            model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
            first = False
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(spec, 3, 3, activation='relu', trainable=False))

    # Frozen fully connected head plus the original ImageNet softmax.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu', trainable=False))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', trainable=False))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        print ("Weights Loaded")
        model.load_weights(weights_path)

    # Drop the ImageNet softmax and its dropout, then rewire the
    # Sequential container's output to the last remaining layer before
    # attaching the new 13-way classifier.
    model.layers.pop()
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    model.add(Dense(13, activation='softmax'))
    return model
def VGG_16(weights_path=None):
    """VGG-16 truncated to its fc7 layer (Keras 1.x Sequential API).

    Builds the full network, optionally loads weights, then pops the
    softmax and final dropout so callers get 4096-D activations.
    """
    model = Sequential()

    def conv_stage(filters, n_convs):
        # One VGG stage: n_convs pad+3x3-conv pairs, then a 2x2 max-pool.
        for _ in range(n_convs):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # First pad declares the channels-first 224x224 RGB input.
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    conv_stage(128, 2)
    conv_stage(256, 3)
    conv_stage(512, 3)
    conv_stage(512, 3)

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    # Remove the last two layers to get the 4096D activations.
    model = pop(model)
    model = pop(model)
    return model
def VGG_16(X_train, y_train, X_test, y_test, batch_size = 20, nb_classes = 10, nb_epoch = 100):
    """Build and train a (truncated) VGG-16 classifier.

    Fix: the original hard-coded Dense(10) for the output layer and
    ignored the nb_classes parameter; the output now honors nb_classes
    (default 10, so existing callers are unaffected).

    Trains with SGD + early stopping and returns (model, evaluation).
    """
    model = Sequential()

    def pad_conv(filters):
        # 1-px zero padding + 3x3 'valid' conv == a 'same' 3x3 conv.
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(filters, 3, 3, border_mode='valid',
                                activation='relu', init='glorot_normal'))

    # Stage 1 — first pad carries the input shape from the training data.
    model.add(ZeroPadding2D((1, 1), input_shape=X_train[0].shape))
    model.add(Convolution2D(64, 3, 3, border_mode='valid',
                            activation='relu', init='glorot_normal'))
    pad_conv(64)
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Stage 2.
    pad_conv(128)
    pad_conv(128)
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Stage 3.
    pad_conv(256)
    pad_conv(256)
    pad_conv(256)
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Stage 4.
    pad_conv(512)
    pad_conv(512)
    pad_conv(512)
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Stage 5 — the final conv and pool are deliberately disabled (as in
    # the original), so this last padding feeds straight into Flatten.
    pad_conv(512)
    pad_conv(512)
    model.add(ZeroPadding2D((1, 1)))
    #model.add(Convolution2D(512, 3, 3, border_mode='valid', activation='relu', init='glorot_normal'))
    #model.add(MaxPooling2D((2,2), strides=(2,2)))

    # Classifier head sized by nb_classes (was hard-coded to 10).
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    #sgd = SGD(lr=0.005, decay = 1e-6, momentum = 0.9, nesterov=True)
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)

    # initializes early stopping callback
    early_stopping = EarlyStopping(monitor='val_loss', patience=2,
                                   verbose=1, mode='auto')

    model.compile(loss = 'categorical_crossentropy', optimizer = sgd)
    model.fit(X_train, y_train, show_accuracy=True, verbose=1,
              callbacks = [early_stopping],
              batch_size= batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))

    return model, model.evaluate(X_test, y_test, show_accuracy=True, verbose=1)
def VGG_16(img_rows,img_cols,weights_path=None):
    """VGG-16 variant with configurable input size and a 20-way softmax.

    img_rows, img_cols -- spatial dimensions of the channels-first input.
    weights_path       -- optional HDF5 weights file to load.
    Note the unusually aggressive dropout rates (0.9 and 0.8).
    """
    model = Sequential()

    # Trunk spec: ints are conv filter counts, 'M' marks a 2x2 max-pool.
    trunk = (64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M')
    for idx, spec in enumerate(trunk):
        if spec == 'M':
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        elif idx == 0:
            model.add(ZeroPadding2D((1, 1), input_shape=(3, img_rows, img_cols)))
            model.add(Convolution2D(spec, 3, 3, activation='relu'))
        else:
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(spec, 3, 3, activation='relu'))

    # Classifier head with heavy dropout and 20 output classes.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.9))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.8))
    model.add(Dense(20, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16_test(weights_path='saved_models/best_model_VGG_16/weights_16.h5', shape = 112):
    """VGG-16 evaluation model with a 4-way softmax head.

    weights_path -- HDF5 weights loaded by default from the saved best model.
    shape        -- spatial size of the square channels-first input.
    """
    model = Sequential()

    # Each stage: n pad+3x3-conv(relu) pairs, then a 2x2 max-pool.
    stages = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
    first = True
    for n_convs, filters in stages:
        for _ in range(n_convs):
            if first:
                model.add(ZeroPadding2D((1, 1), input_shape=(3, shape, shape)))
                first = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # 4-class classifier head.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model

# testing purposes only!
def VGG_19_test(weights_path='saved_models/best_model_VGG_19/weights_19.h5', shape = 112):
    """VGG-19 evaluation model with a 4-way softmax head.

    Same layout as VGG-16 but with four convolutions in each of the
    last three stages (the VGG-19 configuration).
    """
    model = Sequential()

    # Each stage: n pad+3x3-conv(relu) pairs, then a 2x2 max-pool.
    stages = ((2, 64), (2, 128), (4, 256), (4, 512), (4, 512))
    first = True
    for n_convs, filters in stages:
        for _ in range(n_convs):
            if first:
                model.add(ZeroPadding2D((1, 1), input_shape=(3, shape, shape)))
                first = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # 4-class classifier head.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model
def VGG_16(weights_path=None):
    """ImageNet VGG-16 with a channels-last (224, 224, 3) input.

    weights_path -- optional HDF5 weights file to load after building.
    Returns the model ending in a 1000-way softmax.
    """
    model = Sequential()

    # Trunk spec: ints are conv filter counts, 'M' marks a 2x2 max-pool.
    trunk = (64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M')
    first = True
    for spec in trunk:
        if spec == 'M':
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
            continue
        if first:
            # Unlike the sibling builders, this one is channels-LAST.
            model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
            first = False
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(spec, 3, 3, activation='relu'))

    # Fully connected classifier head.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model