The following 50 code examples, extracted from open-source Python projects, demonstrate how to use keras.layers.merge().
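Before the project examples, here is a minimal orientation sketch (assuming the Keras 1.x functional API, with illustrative layer sizes not taken from any project below) showing the merge modes that recur throughout this page: 'concat', 'sum', 'ave', and 'dot'. In Keras 2 these modes were split into keras.layers.concatenate, add, average, and dot.

# Minimal sketch (Keras 1.x assumed): combining two branches with keras.layers.merge.
# Branch widths and names are illustrative only.
from keras.layers import Input, Dense, merge
from keras.models import Model

a = Input(shape=(32,))
b = Input(shape=(32,))
ha = Dense(64, activation='relu')(a)
hb = Dense(64, activation='relu')(b)

concat = merge([ha, hb], mode='concat', concat_axis=-1)  # shape (None, 128)
summed = merge([ha, hb], mode='sum')                     # element-wise sum, shape (None, 64)
avg = merge([ha, hb], mode='ave')                        # element-wise average, shape (None, 64)
dotted = merge([ha, hb], mode='dot', dot_axes=1)         # per-sample dot product, shape (None, 1)

out = Dense(1, activation='sigmoid')(concat)
model = Model(input=[a, b], output=out)
model.compile(optimizer='adam', loss='binary_crossentropy')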
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i + 1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis,
                            name='concat_' + str(stage) + '_' + str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter
def build_mod5(opt=adam()):
    n = 3 * 1024

    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)

    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)

    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)

    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
def block17(input, scale=1.0, activation_fn='relu'):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1
    shortcut = input

    tower_conv = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)

    tower_conv1_0 = conv2d_bn(input, 128, 1, 1, activ_fn=activation_fn)
    tower_conv1_1 = conv2d_bn(tower_conv1_0, 160, 1, 7, activ_fn=activation_fn)
    tower_conv1_2 = conv2d_bn(tower_conv1_1, 192, 7, 1, activ_fn=activation_fn)

    mixed = merge([tower_conv, tower_conv1_2], mode='concat', concat_axis=channel_axis)
    up = conv2d_bn(mixed, 1088, 1, 1, activ_fn=False, normalize=False)
    up = Lambda(do_scale, output_shape=K.int_shape(up)[1:], arguments={'scale': scale})(up)

    net = merge([shortcut, up], mode='sum')
    if activation_fn:
        net = Activation(activation_fn)(net)
    return net
def block8(input, scale=1.0, activation_fn='relu'):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1
    shortcut = input

    tower_conv = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)

    tower_conv1_0 = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)
    tower_conv1_1 = conv2d_bn(tower_conv1_0, 224, 1, 3, activ_fn=activation_fn)
    tower_conv1_2 = conv2d_bn(tower_conv1_1, 256, 3, 1, activ_fn=activation_fn)

    mixed = merge([tower_conv, tower_conv1_2], mode='concat', concat_axis=channel_axis)
    up = conv2d_bn(mixed, 2080, 1, 1, activ_fn=False, normalize=False)
    up = Lambda(do_scale, output_shape=K.int_shape(up)[1:], arguments={'scale': scale})(up)

    net = merge([shortcut, up], mode='sum')
    if activation_fn:
        net = Activation(activation_fn)(net)
    return net
def fire_module(x, squeeze=16, expand=64):
    x = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    x = Activation('relu')(x)

    left = Convolution2D(expand, 1, 1, border_mode='valid')(x)
    left = Activation('relu')(left)

    right = ZeroPadding2D(padding=(1, 1))(x)
    right = Convolution2D(expand, 3, 3, border_mode='valid')(right)
    right = Activation('relu')(right)

    y = merge([left, right], mode='concat', concat_axis=1)
    return y

# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
def fire_module(x, squeeze=16, expand=64):
    x = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    x = Activation('relu')(x)

    left = Convolution2D(expand, 1, 1, border_mode='valid')(x)
    left = Activation('relu')(left)

    right = ZeroPadding2D(padding=(1, 1))(x)
    right = Convolution2D(expand, 3, 3, border_mode='valid')(right)
    right = Activation('relu')(right)

    x = merge([left, right], mode='concat', concat_axis=1)
    return x

# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
def block_inception_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
def block_reduction_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)

    x = merge([branch_0, branch_1, branch_2], mode='concat', concat_axis=channel_axis)
    return x
def block_reduction_b(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)

    x = merge([branch_0, branch_1, branch_2], mode='concat', concat_axis=channel_axis)
    return x
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
    """
        Creates a model to be used to scale images of specific height and width.
    """
    init = super(ExpantionSuperResolution, self).create_model(height, width, channels, load_weights, batch_size)

    x = Convolution2D(self.n1, self.f1, self.f1, activation='relu', border_mode='same', name='level1')(init)

    x1 = Convolution2D(self.n2, self.f2_1, self.f2_1, activation='relu', border_mode='same', name='lavel1_1')(x)
    x2 = Convolution2D(self.n2, self.f2_2, self.f2_2, activation='relu', border_mode='same', name='lavel1_2')(x)
    x3 = Convolution2D(self.n2, self.f2_3, self.f2_3, activation='relu', border_mode='same', name='lavel1_3')(x)

    x = merge([x1, x2, x3], mode='ave')

    out = Convolution2D(channels, self.f3, self.f3, activation='relu', border_mode='same', name='output')(x)

    model = Model(init, out)

    adam = optimizers.Adam(lr=1e-3)
    model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
    if load_weights:
        model.load_weights(self.weight_path)

    self.model = model
    return model
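The example above fuses three parallel convolution branches with mode='ave', i.e. an element-wise average. For comparison, here is a minimal sketch of the same idea written against the Keras 2 API, where the equivalent helper is keras.layers.average; the input shape and filter counts are illustrative and not taken from the project above.

# Sketch (assumes Keras 2.x): element-wise averaging of parallel conv branches,
# the Keras 2 counterpart of merge([...], mode='ave').
from keras.layers import Input, Conv2D, average
from keras.models import Model

inp = Input(shape=(32, 32, 3))
b1 = Conv2D(16, (1, 1), activation='relu', padding='same')(inp)
b2 = Conv2D(16, (3, 3), activation='relu', padding='same')(inp)
b3 = Conv2D(16, (5, 5), activation='relu', padding='same')(inp)
out = average([b1, b2, b3])  # all inputs must share the same shape
model = Model(inp, out)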
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, padding='same')(tensor_input)

    # out = merge([out, pooling], mode='sum')
    out = add([out, pooling])
    return out
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, padding='same')(x)

    out = add([out, pooling])
    # out = merge([out, pooling])
    return out
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(out)

    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)

    # out = merge([out, pooling], mode='sum')
    out = add([out, pooling])
    return out
def repeated_2d_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1, kernel_size, padding='same', data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, padding='same', data_format='channels_last')(out)

    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(x)

    out = add([out, pooling])
    # out = merge([out, pooling])
    return out
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, strides=2, padding='same')(tensor_input)

    # out = merge([out, pooling], mode='sum')
    out = add([out, pooling])
    return out
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, strides=2, padding='same')(x)

    out = add([out, pooling])
    # out = merge([out, pooling])
    return out
def first_2d_block(tensor_input, filters, kernel_size=3, pooling_size=2, dropout=0.5):
    k1, k2 = filters

    out = Conv2D(k1, 1, padding='same', data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2, kernel_size, 2, padding='same', data_format='channels_last')(out)

    pooling = MaxPooling2D(pooling_size, padding='same', data_format='channels_last')(tensor_input)

    # out = merge([out, pooling], mode='sum')
    out = add([out, pooling])
    return out
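The residual blocks above already use the Keras 2 add helper; the commented-out merge([...], mode='sum') lines are the Keras 1.x spelling of the same element-wise sum. As a compact illustration of that skip-connection pattern, here is a minimal self-contained sketch (assumes Keras 2.x; the sequence length and filter count are illustrative).

# Sketch (assumes Keras 2.x): a 1D residual skip connection via keras.layers.add,
# equivalent to the Keras 1.x merge([x, shortcut], mode='sum') pattern.
from keras.layers import Input, Conv1D, BatchNormalization, Activation, add
from keras.models import Model

inp = Input(shape=(128, 16))
x = Conv1D(16, 3, padding='same')(inp)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv1D(16, 3, padding='same')(x)
out = add([x, inp])  # element-wise sum; both tensors must have the same shape
out = Activation('relu')(out)
model = Model(inp, out)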
def double_conv_layer(x, size, dropout, batch_norm):
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation

    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
def fire_module(x, fire_id, squeeze=16, expand=64, dim_ordering='th'):
    s_id = 'fire' + str(fire_id) + '/'

    if dim_ordering == 'tf':
        c_axis = 3
    else:
        c_axis = 1

    x = Convolution2D(squeeze, 1, 1, border_mode='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Convolution2D(expand, 1, 1, border_mode='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Convolution2D(expand, 3, 3, border_mode='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    x = merge([left, right], mode='concat', concat_axis=c_axis, name=s_id + 'concat')
    return x

# Original SqueezeNet from paper.
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor)

    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=(1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution2D(nb_filters, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
def BiDi(input_shape, vocabSize, veclen, wordWeights, nLayers, nHidden, lr):
    assert len(nHidden) == nLayers, '#Neurons for each layer does not match #Layers'
    r_flag = True

    _Input = Input(shape=(input_shape,), dtype='int32')
    E = keras.layers.embeddings.Embedding(vocabSize, veclen, weights=(wordWeights,), mask_zero=True)(_Input)

    for ind in range(nLayers):
        if ind == (nLayers - 1):
            r_flag = False
        fwd_layer = keras.layers.recurrent.GRU(nHidden[ind], init='glorot_uniform', inner_init='orthogonal',
                                               activation='tanh', inner_activation='hard_sigmoid',
                                               return_sequences=r_flag)(E)
        bkwd_layer = keras.layers.recurrent.GRU(nHidden[ind], init='glorot_uniform', inner_init='orthogonal',
                                                activation='tanh', inner_activation='hard_sigmoid',
                                                return_sequences=r_flag, go_backwards=True)(E)
        E = merge([fwd_layer, bkwd_layer], mode='ave')
        # nHidden /= 2

    Output = Dense(1, activation='sigmoid')(Dropout(0.5)(E))
    model = Model(input=_Input, output=Output)
    opt = keras.optimizers.Adam(lr)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
def addLayer(previousLayer, nChannels, nOutChannels, dropRate, blockNum):
    bn = BatchNormalization(name='denseb_BatchNorm_{}'.format(blockNum), axis=1)(previousLayer)
    relu = Activation('relu', name='denseb_relu_{}'.format(blockNum))(bn)
    conv = Convolution2D(nOutChannels, 3, 3, border_mode='same', name='denseb_conv_{}'.format(blockNum))(relu)

    if dropRate is not None:
        dp = Dropout(dropRate, name='denseb_dropout_{}'.format(blockNum))(conv)
        return merge([dp, previousLayer], mode='concat', concat_axis=1)
    else:
        return merge([conv, previousLayer], mode='concat', concat_axis=1)
def create_model(numNodes, factors):
    left_input = Input(shape=(1,))
    right_input = Input(shape=(1,))

    left_model = Sequential()
    left_model.add(Embedding(input_dim=numNodes + 1, output_dim=factors, input_length=1, mask_zero=False))
    left_model.add(Reshape((factors,)))

    right_model = Sequential()
    right_model.add(Embedding(input_dim=numNodes + 1, output_dim=factors, input_length=1, mask_zero=False))
    right_model.add(Reshape((factors,)))

    left_embed = left_model(left_input)
    right_embed = left_model(right_input)

    left_right_dot = merge([left_embed, right_embed], mode="dot", dot_axes=1, name="left_right_dot")

    model = Model(input=[left_input, right_input], output=[left_right_dot])
    embed_generator = Model(input=[left_input, right_input], output=[left_embed, right_embed])

    return model, embed_generator
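This is the only example on this page that uses mode='dot': the two embedding vectors are contracted along dot_axes to give one similarity score per input pair. In Keras 2 the corresponding helper is keras.layers.dot(..., axes=...). A minimal sketch of that correspondence follows (assumes Keras 2.x; the vocabulary size and embedding dimension are illustrative, not taken from the project above).

# Sketch (assumes Keras 2.x): dot-product similarity of two embeddings,
# the counterpart of merge([...], mode='dot', dot_axes=1).
from keras.layers import Input, Embedding, Reshape, dot
from keras.models import Model

num_nodes, factors = 1000, 64  # illustrative sizes
left_in = Input(shape=(1,))
right_in = Input(shape=(1,))
embed = Embedding(input_dim=num_nodes + 1, output_dim=factors, input_length=1)  # shared embedding
left = Reshape((factors,))(embed(left_in))
right = Reshape((factors,))(embed(right_in))
score = dot([left, right], axes=1)  # shape (None, 1): one similarity per pair
model = Model([left_in, right_in], score)
model.compile(optimizer='adam', loss='mse')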
def test_merge_mask_3d():
    from keras.layers import Input, merge, Embedding, SimpleRNN
    from keras.models import Model

    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # embeddings
    input_a = Input(shape=(3,), dtype='int32')
    input_b = Input(shape=(3,), dtype='int32')
    embedding = Embedding(3, 4, mask_zero=True)
    embedding_a = embedding(input_a)
    embedding_b = embedding(input_b)

    # rnn
    rnn = SimpleRNN(3, return_sequences=True)
    rnn_a = rnn(embedding_a)
    rnn_b = rnn(embedding_b)

    # concatenation
    merged_concat = merge([rnn_a, rnn_b], mode='concat', concat_axis=-1)
    model = Model([input_a, input_b], [merged_concat])
    model.compile(loss='mse', optimizer='sgd')
    model.fit([rand(2, 3), rand(2, 3)], [rand(2, 3, 6)])
def transform_model(weight_loss_pix=5e-4):
    inputs = Input(shape=(128, 128, 3))
    x1 = Convolution2D(64, 5, 5, border_mode='same')(inputs)
    x2 = LeakyReLU(alpha=0.3, name='wkcw')(x1)
    x3 = BatchNormalization()(x2)
    x4 = Convolution2D(128, 4, 4, border_mode='same', subsample=(2, 2))(x3)
    x5 = LeakyReLU(alpha=0.3)(x4)
    x6 = BatchNormalization()(x5)
    x7 = Convolution2D(256, 4, 4, border_mode='same', subsample=(2, 2))(x6)
    x8 = LeakyReLU(alpha=0.3)(x7)
    x9 = BatchNormalization()(x8)
    x10 = Deconvolution2D(128, 3, 3, output_shape=(None, 64, 64, 128), border_mode='same', subsample=(2, 2))(x9)
    x11 = BatchNormalization()(x10)
    x12 = Deconvolution2D(64, 3, 3, output_shape=(None, 128, 128, 64), border_mode='same', subsample=(2, 2))(x11)
    x13 = BatchNormalization()(x12)
    x14 = Deconvolution2D(3, 4, 4, output_shape=(None, 128, 128, 3), border_mode='same',
                          activity_regularizer=activity_l1(weight_loss_pix))(x13)

    output = merge([inputs, x14], mode='sum')
    model = Model(input=inputs, output=output)
    return model
def block_inception_b(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x