The following 45 code examples, extracted from open source Python projects, illustrate how to use keras.layers.merge.concatenate().
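Before the examples, here is a minimal sketch of what concatenate() does (assuming Keras 2.x with the functional API; the input widths 8 and 4 are arbitrary illustration values): it joins a list of tensors along one axis, so all other dimensions must match.

from keras.layers import Input, Dense
from keras.layers.merge import concatenate
from keras.models import Model

# Two input branches whose shapes agree everywhere except the concatenation axis.
a = Input(shape=(8,))
b = Input(shape=(4,))

# Joined along the last axis, giving a tensor of shape (None, 12).
merged = concatenate([a, b], axis=-1)
out = Dense(1, activation='sigmoid')(merged)

model = Model(inputs=[a, b], outputs=out)
model.summary()  # the Concatenate layer reports output shape (None, 12)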
def distance_layer(x1, x2):
    """Distance and angle of two inputs.

    Compute the concatenation of element-wise subtraction and
    multiplication of two inputs.
    """
    def _distance(args):
        x1 = args[0]
        x2 = args[1]
        x = K.abs(x1 - x2)
        return x

    def _multiply(args):
        x1 = args[0]
        x2 = args[1]
        return x1 * x2

    distance = Lambda(_distance, output_shape=(K.int_shape(x1)[-1],))([x1, x2])
    multiply = Lambda(_multiply, output_shape=(K.int_shape(x1)[-1],))([x1, x2])
    return concatenate([distance, multiply])
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)
    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)
    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)
    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])

    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)
    return model
def __call__(self, x1, x2):
    def _sub_ops(args):
        x1 = args[0]
        x2 = args[1]
        x = K.abs(x1 - x2)
        return x

    def _mult_ops(args):
        x1 = args[0]
        x2 = args[1]
        return x1 * x2

    output_shape = (self.sequence_length, self.input_dim,)
    sub = Lambda(_sub_ops, output_shape=output_shape)([x1, x2])
    mult = Lambda(_mult_ops, output_shape=output_shape)([x1, x2])
    sub = self.model(sub)
    mult = self.model(mult)
    return concatenate([sub, mult])
def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    L1 = Conv2D(32, (11, 11), padding='same', activation='relu', kernel_initializer='glorot_uniform')(ip)
    L2 = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L1)
    L3 = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L2)
    L4 = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L3)
    # Note: this skip connection may influence the result and could be removed.
    L4 = concatenate([L4, L1], axis=-1)
    L5 = Conv2D(64, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L4)
    L6 = Conv2D(64, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L5)
    # Note: this skip connection may influence the result and could be removed.
    L6 = concatenate([L6, L1], axis=-1)
    L7 = Conv2D(128, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L6)
    L8 = Conv2D(img_channel, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L7)

    deblocking = Model(inputs=ip, outputs=L8)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    L1 = Conv2D(32, (11, 11), padding='same', activation='relu', kernel_initializer='glorot_uniform')(ip)
    L2 = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L1)
    L3 = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L2)
    L4 = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L3)
    L4 = concatenate([L4, L1], axis=-1)
    L5 = Conv2D(64, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L4)
    L6 = Conv2D(64, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L5)
    L6 = concatenate([L6, L1], axis=-1)
    L7 = Conv2D(128, (1, 1), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L6)
    L8 = Conv2D(img_channel, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform')(L7)

    deblocking = Model(inputs=ip, outputs=L8)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error', metrics=[psnr, ssim])
    return deblocking
def block_inception_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def block_reduction_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 3, 3, strides=(2, 2), padding='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2, 2), padding='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)

    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
def block_inception_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def block_reduction_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, strides=(2, 2), padding='valid')

    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, strides=(2, 2), padding='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)

    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x)
def inception_model(input, filters_1x1, filters_3x3_reduce, filters_3x3,
                    filters_5x5_reduce, filters_5x5, filters_pool_proj):
    conv_1x1 = Conv2D(filters=filters_1x1, kernel_size=(1, 1), padding='same',
                      activation='relu', kernel_regularizer=l2(0.01))(input)

    conv_3x3_reduce = Conv2D(filters=filters_3x3_reduce, kernel_size=(1, 1), padding='same',
                             activation='relu', kernel_regularizer=l2(0.01))(input)
    conv_3x3 = Conv2D(filters=filters_3x3, kernel_size=(3, 3), padding='same',
                      activation='relu', kernel_regularizer=l2(0.01))(conv_3x3_reduce)

    conv_5x5_reduce = Conv2D(filters=filters_5x5_reduce, kernel_size=(1, 1), padding='same',
                             activation='relu', kernel_regularizer=l2(0.01))(input)
    conv_5x5 = Conv2D(filters=filters_5x5, kernel_size=(5, 5), padding='same',
                      activation='relu', kernel_regularizer=l2(0.01))(conv_5x5_reduce)

    maxpool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same')(input)
    maxpool_proj = Conv2D(filters=filters_pool_proj, kernel_size=(1, 1), strides=(1, 1), padding='same',
                          activation='relu', kernel_regularizer=l2(0.01))(maxpool)

    inception_output = concatenate([conv_1x1, conv_3x3, conv_5x5, maxpool_proj], axis=3)  # use tf as backend
    return inception_output
def base_model(input_shapes):
    from keras.layers import Input
    from keras.layers.core import Masking

    x_global = Input(shape=input_shapes[0])
    x_charged = Input(shape=input_shapes[1])
    x_neutral = Input(shape=input_shapes[2])
    x_ptreco = Input(shape=input_shapes[3])

    lstm_c = Masking()(x_charged)
    lstm_c = LSTM(100, go_backwards=True, implementation=2)(lstm_c)

    lstm_n = Masking()(x_neutral)
    lstm_n = LSTM(100, go_backwards=True, implementation=2)(lstm_n)

    x = concatenate([lstm_c, lstm_n, x_global])
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = concatenate([x, x_ptreco])

    return [x_global, x_charged, x_neutral, x_ptreco], x
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
    conv = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
    max_pool = MaxPooling2D()(inp)
    merged = concatenate([conv, max_pool], axis=3)
    return merged
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
    conv = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
    max_pool, indices = MaxPoolingWithArgmax2D()(inp)
    merged = concatenate([conv, max_pool], axis=3)
    return merged, indices
def build_model(self, x):
    pooled_tensors = []
    for filter_size in self.filter_sizes:
        x_i = Conv1D(self.num_filters, filter_size, activation='elu', **self.conv_kwargs)(x)
        x_i = GlobalMaxPooling1D()(x_i)
        pooled_tensors.append(x_i)
    x = pooled_tensors[0] if len(self.filter_sizes) == 1 else concatenate(pooled_tensors, axis=-1)
    return x
def __call__(self, inputs):
    x = self.model(inputs)
    avg_x = GlobalAveragePooling1D()(x)
    max_x = GlobalMaxPooling1D()(x)
    x = concatenate([avg_x, max_x])
    x = BatchNormalization()(x)
    return x
def simple_critic(env):
    """Build a simple critic network."""
    observation = env.state
    action = env.action

    # Concatenate the inputs for the critic
    inputs = concatenate([observation, action])
    x = Dense(1)(inputs)
    x = Activation('linear')(x)

    # Final model
    return Model(inputs=[observation, action], outputs=[x])
def configure(self, observation_space_shape, nb_actions):
    # Next, we build a simple model.
    # actor network
    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + observation_space_shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))
    print(actor.summary())

    # critic network
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + observation_space_shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = concatenate([action_input, flattened_observation])
    x = Dense(32)(x)
    x = Activation('relu')(x)
    x = Dense(32)(x)
    x = Activation('relu')(x)
    x = Dense(32)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    print(critic.summary())

    # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
    # even the metrics!
    memory = SequentialMemory(limit=100000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
    self.agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic,
                           critic_action_input=action_input,
                           memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                           random_process=random_process, gamma=.99, target_model_update=1e-3)
    self.agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])
def get_model():
    embedding_layer = Embedding(nb_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm)

    sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(sequence_1_input)
    x1 = lstm_layer(embedded_sequences_1)

    sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(sequence_2_input)
    y1 = lstm_layer(embedded_sequences_2)

    merged = concatenate([x1, y1])
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)
    merged = Dense(num_dense, activation=act)(merged)
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    preds = Dense(1, activation='sigmoid')(merged)
    model = Model(inputs=[sequence_1_input, sequence_2_input],
                  outputs=preds)
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['acc'])
    model.summary()
    return model


#######################################
# train the model
########################################
def Encoder(hidden_size, activation=None, return_sequences=True,
            bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _encoder(x):
            if bidirectional:
                branch_1 = GRU(int(hidden_size / 2), activation='linear',
                               return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = GRU(int(hidden_size / 2), activation='linear',
                               return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _encoder(x):
            if bidirectional:
                branch_1 = LSTM(int(hidden_size / 2), activation='linear',
                                return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = LSTM(int(hidden_size / 2), activation='linear',
                                return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _encoder
def AttentionDecoder(hidden_size, activation=None, return_sequences=True,
                     bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(GRU(int(hidden_size / 2), activation='linear',
                                                return_sequences=return_sequences,
                                                go_backwards=False),
                                            attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(GRU(int(hidden_size / 2), activation='linear',
                                                return_sequences=return_sequences,
                                                go_backwards=True),
                                            attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                return activation(x)
            else:
                x = AttentionWrapper(GRU(hidden_size, activation='linear',
                                         return_sequences=return_sequences),
                                     attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(LSTM(int(hidden_size / 2), activation='linear',
                                                 return_sequences=return_sequences,
                                                 go_backwards=False),
                                            attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(LSTM(int(hidden_size / 2), activation='linear',
                                                 return_sequences=return_sequences,
                                                 go_backwards=True),
                                            attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = AttentionWrapper(LSTM(hidden_size, activation='linear',
                                          return_sequences=return_sequences),
                                     attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    return _decoder
def build_model(self):
    initializer = initializers.random_normal(stddev=0.02)
    input_img = Input(shape=(self.layers, 22, 80))
    input_2 = Lambda(lambda x: x[:, 1:, :, :],
                     output_shape=lambda x: (None, self.layers - 1, 22, 80))(input_img)  # no map channel

    # whole map
    tower_1 = Conv2D(64, (3, 3), data_format="channels_first", strides=(1, 1),
                     kernel_initializer=initializer, padding="same")(input_img)
    tower_1 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1),
                     kernel_initializer=initializer, padding="same")(tower_1)
    tower_1 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_1)

    # tower2
    tower_2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(input_2)
    for _ in range(self.depth):
        tower_2 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1),
                         kernel_initializer=initializer, padding="same", activation='relu')(tower_2)
    tower_2 = MaxPooling2D(pool_size=(11, 40), data_format="channels_first")(tower_2)

    # tower3
    tower_3 = MaxPooling2D(pool_size=(3, 6), data_format="channels_first", padding='same')(input_2)
    for _ in range(self.depth):
        tower_3 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1),
                         kernel_initializer=initializer, padding="same", activation='relu')(tower_3)
    tower_3 = MaxPooling2D(pool_size=(8, 14), data_format="channels_first", padding='same')(tower_3)

    merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
    flat_layer = Flatten()(merged_layers)
    predictions = Dense(5, kernel_initializer=initializer)(flat_layer)

    model = Model(inputs=input_img, outputs=predictions)
    rmsprop = RMSprop(lr=0.00025)
    model.compile(loss='mse', optimizer=rmsprop)
    return model
def build_model(self):
    initializer = initializers.random_normal(stddev=0.02)
    input_img = Input(shape=(self.layers, 22, 80))
    input_2 = Lambda(lambda x: x[:, :2, :, :],
                     output_shape=lambda x: (None, 2, 22, 80))(input_img)  # no map channel

    # whole map 10x1
    tower_1 = ZeroPadding2D(padding=(1, 0), data_format="channels_first")(input_2)
    tower_1 = Conv2D(32, (10, 1), data_format="channels_first", strides=(7, 1),
                     kernel_initializer=initializer, padding="valid")(tower_1)
    tower_1 = Flatten()(tower_1)

    # whole map 1x10
    tower_2 = Conv2D(32, (1, 10), data_format="channels_first", strides=(1, 7),
                     kernel_initializer=initializer, padding="valid")(input_2)
    tower_2 = Flatten()(tower_2)

    # whole map 3x3 then maxpool 22x80
    tower_3 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1),
                     kernel_initializer=initializer, padding="same")(input_2)
    tower_3 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_3)
    tower_3 = Flatten()(tower_3)

    merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
    predictions = Dense(4, kernel_initializer=initializer)(merged_layers)

    model = Model(inputs=input_img, outputs=predictions)
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    return model
def default_imu(num_outputs, num_imu_inputs):
    '''
    Notes: this model depends on concatenate which failed on keras < 2.0.8
    '''
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
    from keras.layers.merge import concatenate

    img_in = Input(shape=(120, 160, 3), name='img_in')
    imu_in = Input(shape=(num_imu_inputs,), name="imu_in")

    x = img_in
    x = Cropping2D(cropping=((60, 0), (0, 0)))(x)  # trim 60 pixels off top
    # x = Lambda(lambda x: x/127.5 - 1.)(x)  # normalize and re-center
    x = Convolution2D(24, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(32, (5, 5), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(2, 2), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    x = Convolution2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)

    y = imu_in
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)

    z = concatenate([x, y])
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)

    outputs = []
    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='out_' + str(i))(z))

    model = Model(inputs=[img_in, imu_in], outputs=outputs)
    model.compile(optimizer='adam', loss='mse')
    return model
def _build(self, models, layers=[]):
    for layer in layers:
        layer.name = '%s/%s' % (self.scope, layer.name)
    inputs, outputs = self._get_inputs_outputs(models)
    x = concatenate(outputs)
    for layer in layers:
        x = layer(x)
    model = km.Model(inputs, x, name=self.name)
    return model
def _merge_inputs(self, inputs):
    return concatenate(inputs, axis=2)
def test_multi_directory_iterator_race_condition(sample_dataset_dir):
    n_models = 2
    batch_size = 4
    train_path = os.path.join(sample_dataset_dir, 'Training')
    val_path = os.path.join(sample_dataset_dir, 'Validation')

    # set up training and validation generators
    train_gen = MultiDirectoryIterator([make_dir_iterator(train_path, batch_size)
                                        for _ in range(n_models)])
    val_gen = MultiDirectoryIterator([make_dir_iterator(val_path, batch_size)
                                      for _ in range(n_models)])

    # join some MobileNets
    base_models = []
    for i in range(n_models):
        model = MobileNet(weights=None)
        for layer in model.layers:
            layer.name += str(i)
        base_models.append(model)
    x = concatenate([m.output for m in base_models])
    x = Dense(create_class_histogram(train_path).shape[0], name='dense')(x)
    x = Activation('softmax', name='act_softmax')(x)
    joined_model = Model([m.input for m in base_models], x)

    # run a few epochs
    joined_model.compile(optimizer=optimizers.SGD(), loss='categorical_crossentropy')
    joined_model.fit_generator(train_gen,
                               validation_data=val_gen,
                               epochs=4,
                               workers=16,
                               steps_per_epoch=int(np.ceil(train_gen.samples / batch_size)),
                               validation_steps=int(np.ceil(val_gen.samples / batch_size)))

    # intentionally no assert, test passes if nothing throws
def denseblock(x, nb_layers, nb_filter, growth_rate):
    for i in range(nb_layers):
        if i <= 2:
            kernel_size = 3
        elif i == 3 or i == 5:
            kernel_size = 1
        else:
            kernel_size = 5
        merge_tensor = conv_factory(x, growth_rate, kernel_size)
        # x = merge([merge_tensor, x], mode='concat', concat_axis=-1)
        x = concatenate([merge_tensor, x], axis=-1)
    return x
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False,
                  dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True,
                  return_concat_list=False):
    '''
    Build a dense_block where the output of each conv_block is fed to subsequent ones

    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output

    Returns:
        keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]
    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)
        x = concatenate([x, cb], axis=concat_axis)
        if grow_nb_filters:
            nb_filter += growth_rate

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter
def block_inception_c(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)

    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    '''
    Adds a grouped convolution block. It is an equivalent block from the paper

    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term

    Returns:
        a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # slice out this group's channels along the channel axis
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last'
                   else z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)
        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)
    return x
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],   # y_min
        box_mins[..., 0:1],   # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]   # x_max
    ])
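Unlike the other examples in this collection, the snippet above calls keras.backend.concatenate, the backend tensor op, rather than the keras.layers.merge.concatenate layer: the backend function is meant for raw tensor math inside custom computations (loss functions, Lambda bodies), while the layer version creates a graph node for wiring a Model. A minimal sketch of the distinction, assuming the TensorFlow backend (the shapes here are arbitrary):

from keras import backend as K
from keras.layers import Input, Lambda
from keras.layers.merge import concatenate

x = Input(shape=(3,))
y = Input(shape=(3,))

# Layer version: creates a Concatenate layer node, directly usable in a Model.
as_layer = concatenate([x, y], axis=-1)

# Backend version: a raw tensor op, so it must be wrapped in a Lambda
# before it can participate in a functional-API graph.
as_backend = Lambda(lambda t: K.concatenate(t, axis=-1))([x, y])

Both as_layer and as_backend have shape (None, 6).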
def baseline():
    embedding_layer = Embedding(nb_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm)

    sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(sequence_1_input)
    x1 = lstm_layer(embedded_sequences_1)

    sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(sequence_2_input)
    y1 = lstm_layer(embedded_sequences_2)

    merged = concatenate([x1, y1])
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)
    merged = Dense(num_dense, activation=act)(merged)
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    preds = Dense(1, activation='sigmoid')(merged)
    model = Model(inputs=[sequence_1_input, sequence_2_input],
                  outputs=preds)
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['acc'])
    return model
def baseline():
    embedding_layer = Embedding(nb_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm,
                      recurrent_dropout=rate_drop_lstm, return_sequences=True)

    sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(sequence_1_input)
    x1 = lstm_layer(embedded_sequences_1)
    x1 = Attention(MAX_SEQUENCE_LENGTH)(x1)

    sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(sequence_2_input)
    y1 = lstm_layer(embedded_sequences_2)
    y1 = Attention(MAX_SEQUENCE_LENGTH)(y1)

    merged = concatenate([x1, y1])
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)
    merged = Dense(num_dense, activation=act)(merged)
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    preds = Dense(1, activation='sigmoid')(merged)
    model = Model(inputs=[sequence_1_input, sequence_2_input],
                  outputs=preds)
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['acc'])
    return model
def step(self, inputs, states):
    h, c = self._get_hc(inputs, states)
    if self.output_cells:
        output = concatenate([h, c])
    else:
        output = h
    if 0 < self.dropout + self.recurrent_dropout:
        output._uses_learning_phase = True
    return output, [h, c]
def preprocess_input(self, inputs, training=None):
    if self.implementation == 0:
        cell_mask = inputs[:, :, -self.units:]
        inputs = inputs[:, :, :-self.units]
        inputs_prep = super(CellMaskedLSTM, self).preprocess_input(
            inputs, training
        )
        return K.concatenate([inputs_prep, cell_mask], axis=2)
    else:
        return inputs
def __call__(self, inputs):
    lstm_inputs, time = inputs
    cell_mask = self.cell_mask(time)
    outputs = self.cell_masked_lstm(
        concatenate([lstm_inputs, cell_mask], axis=2)
    )
    return outputs
def cnn_melspect_1D(input_shape):
    kernel_size = 3
    # activation_func = LeakyReLU()
    activation_func = Activation('relu')
    inputs = Input(input_shape)

    # Convolutional block_1
    conv1 = Conv1D(32, kernel_size)(inputs)
    act1 = activation_func(conv1)
    bn1 = BatchNormalization()(act1)
    pool1 = MaxPooling1D(pool_size=2, strides=2)(bn1)

    # Convolutional block_2
    conv2 = Conv1D(64, kernel_size)(pool1)
    act2 = activation_func(conv2)
    bn2 = BatchNormalization()(act2)
    pool2 = MaxPooling1D(pool_size=2, strides=2)(bn2)

    # Convolutional block_3
    conv3 = Conv1D(128, kernel_size)(pool2)
    act3 = activation_func(conv3)
    bn3 = BatchNormalization()(act3)

    # Global Layers
    gmaxpl = GlobalMaxPooling1D()(bn3)
    gmeanpl = GlobalAveragePooling1D()(bn3)
    mergedlayer = concatenate([gmaxpl, gmeanpl], axis=1)

    # Regular MLP
    dense1 = Dense(512,
                   kernel_initializer='glorot_normal',
                   bias_initializer='glorot_normal')(mergedlayer)
    actmlp = activation_func(dense1)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(512,
                   kernel_initializer='glorot_normal',
                   bias_initializer='glorot_normal')(reg)
    actmlp = activation_func(dense2)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(10, activation='softmax')(reg)

    model = Model(inputs=[inputs], outputs=[dense2])
    return model
def build_multi_input_main_residual_network(batch_size,
                                            a2_time_step,
                                            d2_time_step,
                                            d1_time_step,
                                            input_dim,
                                            output_dim,
                                            loop_depth=15,
                                            dropout=0.3):
    '''
    a multiple residual network for wavelet transformation
    :param batch_size: as you might see
    :param a2_time_step: a2_size
    :param d2_time_step: d2_size
    :param d1_time_step: d1_size
    :param input_dim: input_dim
    :param output_dim: output_dim
    :param loop_depth: depth of residual network
    :param dropout: rate of dropout
    :return:
    '''
    a2_inp = Input(shape=(a2_time_step, input_dim), name='a2')
    d2_inp = Input(shape=(d2_time_step, input_dim), name='d2')
    d1_inp = Input(shape=(d1_time_step, input_dim), name='a1')

    out = concatenate([a2_inp, d2_inp, d1_inp], axis=1)

    out = Conv1D(128, 5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out, (64, 128), dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out, (64, 128), dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inputs=[a2_inp, d2_inp, d1_inp], outputs=[out])
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        # Adapted from:
        # https://github.com/fchollet/keras/issues/2436#issuecomment-291874528
        sh = K.shape(data)
        L = sh[0] // parts  # integer division keeps the slice bound an integer tensor under Python 3
        if idx == parts - 1:
            return data[idx * L:]
        return data[idx * L:(idx + 1) * L]

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(concatenate(outputs, axis=0))

        # From https://github.com/kuza55/keras-extras/issues/3#issuecomment-264408864
        new_model = Model(inputs=model.inputs, outputs=merged)
        func_type = type(model.save)

        # monkeypatch the save to save just the underlying model
        def new_save(_, *args, **kwargs):
            model.save(*args, **kwargs)
        new_model.save = func_type(new_save, new_model)
    return new_model
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        total_size = shape[:1]
        slice_size = total_size // parts
        slice_offset = slice_size * idx
        if idx == parts - 1:
            # give the last slice any surplus data, to avoid chopping it off
            slice_size += total_size % parts
        size = tf.concat([slice_size, shape[1:]], axis=0)
        start = tf.concat([slice_offset, shape[1:] * 0], axis=0)
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i):
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # Merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(concatenate(outputs, axis=0))

    return Model(inputs=model.inputs, outputs=merged)
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(concatenate(inputs=outputs, axis=0))

    return Model(inputs=model.inputs, outputs=merged)
def model_EED():
    _input = Input(shape=(None, None, 1), name='input')

    Feature = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                     padding='same', activation='relu')(_input)
    Feature_out = Res_block()(Feature)

    # Upsampling
    Upsampling1 = Conv2D(filters=4, kernel_size=(1, 1), strides=(1, 1),
                         padding='same', activation='relu')(Feature_out)
    Upsampling2 = Conv2DTranspose(filters=4, kernel_size=(14, 14), strides=(2, 2),
                                  padding='same', activation='relu')(Upsampling1)
    Upsampling3 = Conv2D(filters=64, kernel_size=(1, 1), strides=(1, 1),
                         padding='same', activation='relu')(Upsampling2)

    # Multi-scale Reconstruction
    Reslayer1 = Res_block()(Upsampling3)
    Reslayer2 = Res_block()(Reslayer1)

    # ***************//
    Multi_scale1 = Conv2D(filters=16, kernel_size=(1, 1), strides=(1, 1),
                          padding='same', activation='relu')(Reslayer2)

    Multi_scale2a = Conv2D(filters=16, kernel_size=(1, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)

    Multi_scale2b = Conv2D(filters=16, kernel_size=(1, 3), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)
    Multi_scale2b = Conv2D(filters=16, kernel_size=(3, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale2b)

    Multi_scale2c = Conv2D(filters=16, kernel_size=(1, 5), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)
    Multi_scale2c = Conv2D(filters=16, kernel_size=(5, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale2c)

    Multi_scale2d = Conv2D(filters=16, kernel_size=(1, 7), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)
    Multi_scale2d = Conv2D(filters=16, kernel_size=(7, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale2d)

    Multi_scale2 = concatenate(inputs=[Multi_scale2a, Multi_scale2b,
                                       Multi_scale2c, Multi_scale2d])

    out = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1),
                 padding='same', activation='relu')(Multi_scale2)

    model = Model(inputs=_input, outputs=out)
    return model
def ZF_UNET_224(dropout_val=0.0, batch_norm=True):
    if K.image_dim_ordering() == 'th':
        inputs = Input((INPUT_CHANNELS, 224, 224))
        axis = 1
    else:
        inputs = Input((224, 224, INPUT_CHANNELS))
        axis = 3
    filters = 32

    conv_224 = double_conv_layer(inputs, filters, dropout_val, batch_norm)
    pool_112 = MaxPooling2D(pool_size=(2, 2))(conv_224)

    conv_112 = double_conv_layer(pool_112, 2 * filters, dropout_val, batch_norm)
    pool_56 = MaxPooling2D(pool_size=(2, 2))(conv_112)

    conv_56 = double_conv_layer(pool_56, 4 * filters, dropout_val, batch_norm)
    pool_28 = MaxPooling2D(pool_size=(2, 2))(conv_56)

    conv_28 = double_conv_layer(pool_28, 8 * filters, dropout_val, batch_norm)
    pool_14 = MaxPooling2D(pool_size=(2, 2))(conv_28)

    conv_14 = double_conv_layer(pool_14, 16 * filters, dropout_val, batch_norm)
    pool_7 = MaxPooling2D(pool_size=(2, 2))(conv_14)

    conv_7 = double_conv_layer(pool_7, 32 * filters, dropout_val, batch_norm)

    up_14 = concatenate([UpSampling2D(size=(2, 2))(conv_7), conv_14], axis=axis)
    up_conv_14 = double_conv_layer(up_14, 16 * filters, dropout_val, batch_norm)

    up_28 = concatenate([UpSampling2D(size=(2, 2))(up_conv_14), conv_28], axis=axis)
    up_conv_28 = double_conv_layer(up_28, 8 * filters, dropout_val, batch_norm)

    up_56 = concatenate([UpSampling2D(size=(2, 2))(up_conv_28), conv_56], axis=axis)
    up_conv_56 = double_conv_layer(up_56, 4 * filters, dropout_val, batch_norm)

    up_112 = concatenate([UpSampling2D(size=(2, 2))(up_conv_56), conv_112], axis=axis)
    up_conv_112 = double_conv_layer(up_112, 2 * filters, dropout_val, batch_norm)

    up_224 = concatenate([UpSampling2D(size=(2, 2))(up_conv_112), conv_224], axis=axis)
    up_conv_224 = double_conv_layer(up_224, filters, 0, batch_norm)

    conv_final = Conv2D(OUTPUT_MASK_CHANNELS, (1, 1))(up_conv_224)
    conv_final = BatchNormalization(axis=axis)(conv_final)
    conv_final = Activation('sigmoid')(conv_final)

    model = Model(inputs, conv_final, name="ZF_UNET_224")
    return model