The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.Flatten().
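Before the extracted examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what Flatten does: it collapses every dimension except the batch axis into one, so a (None, 28, 28, 1) tensor becomes (None, 784), which a Dense layer can then consume.

from keras.models import Sequential
from keras.layers import Flatten, Dense

# Flatten turns (None, 28, 28, 1) into (None, 784); Dense then maps to 10 classes.
model = Sequential()
model.add(Flatten(input_shape=(28, 28, 1)))
model.add(Dense(10, activation='softmax'))
print(model.output_shape)  # (None, 10)
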
def tsinalis(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1) """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=(200), filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=(20), strides=(10)))
    print(model.output_shape)
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model

def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape=[48, 48, 3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)
    classifier = Dense(2, activation='softmax', name='conv6-1')(x)
    bbox_regress = Dense(4, name='conv6-2')(x)
    landmark_regress = Dense(10, name='conv6-3')(x)
    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)
    return model

def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape, border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history

def build_network(num_actions, agent_history_length, resized_width, resized_height):
    state = tf.placeholder("float", [None, agent_history_length, resized_width, resized_height])
    inputs_v = Input(shape=(agent_history_length, resized_width, resized_height,))
    # model_v = Permute((2, 3, 1))(inputs_v)
    model_v = Convolution2D(nb_filter=16, nb_row=8, nb_col=8, subsample=(4, 4),
                            activation='relu', border_mode='same')(inputs_v)
    model_v = Convolution2D(nb_filter=32, nb_row=4, nb_col=4, subsample=(2, 2),
                            activation='relu', border_mode='same')(model_v)
    model_v = Flatten()(model_v)
    model_v = Dense(output_dim=512)(model_v)
    model_v = PReLU()(model_v)
    action_probs = Dense(name="p", output_dim=num_actions, activation='softmax')(model_v)
    state_value = Dense(name="v", output_dim=1, activation='linear')(model_v)
    value_network = Model(input=inputs_v, output=[state_value, action_probs])
    return state, value_network

def build_simpleCNN(input_shape=(32, 32, 3), num_output=10):
    h, w, nch = input_shape
    assert h == w, 'expect input shape (h, w, nch), h == w'

    images = Input(shape=(h, h, nch))
    x = Conv2D(64, (4, 4), strides=(1, 1),
               kernel_initializer=init, padding='same')(images)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(128, (4, 4), strides=(1, 1),
               kernel_initializer=init, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    outputs = Dense(num_output, kernel_initializer=init, activation='softmax')(x)

    model = Model(inputs=images, outputs=outputs)
    return model

def model_discriminator(input_shape=(1, 28, 28), dropout_rate=0.5):
    d_input = dim_ordering_input(input_shape, name="input_x")
    nch = 512
    # nch = 128
    H = Convolution2D(int(nch / 2), 5, 5, subsample=(2, 2),
                      border_mode='same', activation='relu')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Convolution2D(nch, 5, 5, subsample=(2, 2),
                      border_mode='same', activation='relu')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(int(nch / 2))(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(1, activation='sigmoid')(H)
    return Model(d_input, d_V)

def create_Kao_Rnet(weight_path='model24.h5'):
    # change this shape to [None, None, 3] to enable arbitrary shape input
    input = Input(shape=[24, 24, 3])
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(128, name='conv4')(x)
    x = PReLU(name='prelu4')(x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model

def fGRU_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS + 1, MAX_WORDS), name="wordInputs", dtype='float32')
    wordInp = Flatten()(wordInputs)
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix],
                              mask_zero=False, trainable=True, name='wordEmbedding')(wordInp)
    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)
    head = GlobalAveragePooling1D()(hij)
    v6 = Dense(1, activation="sigmoid", name="dense")(head)
    model = Model(inputs=[wordInputs], outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model

def fGlove_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS + 1, MAX_WORDS), name="wordInputs", dtype='float32')
    wordInp = Flatten()(wordInputs)
    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix],
                              mask_zero=False, trainable=True, name='wordEmbedding')(wordInp)
    head = GlobalAveragePooling1D()(wordEmbedding)
    v6 = Dense(1, activation="sigmoid", name="dense")(head)
    model = Model(inputs=[wordInputs], outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model

def classifier(base_layers, input_rois, batch_size, nb_classes=3, trainable=False):
    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size, 14, 14, 2048)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size, 2048, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    # out = TimeDistributed(Dropout(0.4))(out)
    # out = TimeDistributed(Dense(2048, activation='relu'))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]

def classifier(base_layers, input_rois, batch_size, nb_classes=3, trainable=False):
    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size, 14, 14, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size, 512, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]

def classifier(base_layers, input_rois, batch_size, nb_classes=3, trainable=False):
    # compile times tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (batch_size, 14, 14, 1024)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (batch_size, 1024, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, batch_size)([base_layers, input_rois])
    out = TimeDistributed(Flatten())(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * nb_classes, activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]

def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    # compile times on theano tend to be very high, so we use smaller ROI pooling regions as a workaround
    if K.backend() == 'tensorflow':
        pooling_regions = 7
        input_shape = (num_rois, 7, 7, 512)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 512, 7, 7)
    out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])
    out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)
    out = TimeDistributed(Dropout(0.5))(out)
    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'),
                               name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]

def create_actor_network(self, state_size, action_dim):
    """Create actor network."""
    print("[MESSAGE] Build actor network.")
    S = Input(shape=state_size)
    h_0 = Conv2D(32, (3, 3), padding="same", kernel_regularizer=l2(0.0001),
                 activation="relu")(S)
    h_1 = Conv2D(32, (3, 3), padding="same", kernel_regularizer=l2(0.0001),
                 activation="relu")(h_0)
    h_1 = AveragePooling2D(2, 2)(h_1)
    h_1 = Flatten()(h_1)
    h_1 = Dense(600, activation="relu")(h_1)
    A = Dense(action_dim, activation="softmax")(h_1)
    model = Model(inputs=S, outputs=A)
    return model, model.trainable_weights, S

def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a single
    value, representing whether the input is real or generated. Unlike normal GANs, the
    output is not sigmoid and does not represent a probability! Instead, the output
    should be as large and negative as possible for generated inputs and as large and
    positive as possible for real inputs.

    Note that the improved WGAN paper suggests that BatchNormalization should not be
    used in the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model

def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates a model comprised of 2 convolutional layers followed by dense layers

    dense_layer_sizes: List of layer sizes. This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''
    model = Sequential()
    model.add(Conv2D(filters, kernel_size, padding='valid', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    return model

def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape, padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    train_x, train_y = train_data
    history = model.fit(train_x, train_y, epochs=epochs,
                        validation_data=validation_data)
    return model, history

def load_model(input_shape, num_classes):
    model = Sequential()
    model.add(Convolution2D(6, kernel_size=(3, 3), activation='relu',
                            input_shape=input_shape, padding="same"))
    model.add(Convolution2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # padding, rather than the legacy border_mode, to match the Keras 2 API used above
    model.add(Convolution2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Convolution2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    return model

def predict(model, img):
    # Flatten it
    image = np.array(img).flatten()
    # float32
    image = image.astype('float32')
    # normalize it
    image = image / 255
    # reshape for NN
    rimage = image.reshape(1, img_rows, img_colms, img_channels)
    # Now feed it to the NN, to fetch the predictions
    clas = model.predict(rimage)
    # prob_array = model.predict_proba(rimage)
    return clas

def test_valid_workflow(self):
    # Create image URI dataframe
    label_cardinality = 10
    image_uri_df = self._create_train_image_uris_and_labels(
        repeat_factor=3, cardinality=label_cardinality)

    # We need a small model so that machines with limited resources can run it
    model = Sequential()
    model.add(Flatten(input_shape=(299, 299, 3)))
    model.add(Dense(label_cardinality))
    model.add(Activation("softmax"))

    estimator = self._get_estimator(model)
    self.assertTrue(estimator._validateParams())
    transformers = estimator.fit(image_uri_df)
    self.assertEqual(1, len(transformers))
    self.assertIsInstance(transformers[0]['transformer'], KerasImageFileTransformer)

def train(img_shape):
    classes = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']

    # Model
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=img_shape,
                            activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(32, 3, 3, activation='relu', W_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.5))
    model.add(Dense(len(classes), activation='softmax'))

    features, labels = get_featurs_labels(img_shape)

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(features, labels, nb_epoch=10, batch_size=32, validation_split=0.2, verbose=1)
    return model

def test_highway_layers():
    n_highway_layers = 5
    x = Input(shape=(8,), dtype="int32")
    v = Embedding(input_dim=2, output_dim=10)(x)
    v = Flatten()(v)
    assert hasattr(v, "_keras_shape")
    v = highway_layers(v, n_layers=n_highway_layers)
    output = Dense(1)(v)
    model = Model(inputs=[x], outputs=[output])
    assert len(model.layers) > n_highway_layers * 3
    x = np.array([
        [1] + [0] * 7,
        [0] * 8,
        [0] * 7 + [1]])
    y = np.array([0, 1, 0])
    model.compile("rmsprop", "mse")
    model.fit(x, y, epochs=10)
    pred = model.predict(x)
    mean_diff = np.abs(pred - y).mean()
    assert mean_diff < 0.5, pred

def create_network():
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D

    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return KerasNetwork(model, 'cnn_weights.hd5')

def create_conv_model(self):
    # This is where the neural network model is initialized
    init = 'glorot_uniform'
    self.state_in = Input(self.state_dim)
    self.l1 = Convolution2D(32, 8, 8, activation='elu', init=init,
                            subsample=(4, 4), border_mode='same')(self.state_in)
    self.l2 = Convolution2D(64, 4, 4, activation='elu', init=init,
                            subsample=(2, 2), border_mode='same')(self.l1)
    # self.l3 = Convolution2D(64, 3, 3, activation='relu', init=init,
    #                         subsample=(1, 1), border_mode='same')(self.l2)
    self.l3 = self.l2
    self.h = Flatten()(self.l3)
    self.hidden = Dense(256, init=init, activation='elu')(self.h)
    self.value = Dense(1, init=init)(self.hidden)
    self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)
    self.q_values = self.entropy_coef * (
        Theano.log(self.policy + 1e-18) -
        Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                               axis=[1], keepdims=True),
                    (1, self.action_dim)))
    self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
    self.model = Model(self.state_in, output=[self.policy, self.value])

def make_model(state_shape, n_actions):
    in_t = Input(shape=(HISTORY_STEPS,) + state_shape, name='input')
    action_t = Input(shape=(1,), dtype='int32', name='action')
    advantage_t = Input(shape=(1,), name='advantage')

    fl_t = Flatten(name='flat')(in_t)
    l1_t = Dense(SIMPLE_L1_SIZE, activation='relu', name='l1')(fl_t)
    l2_t = Dense(SIMPLE_L2_SIZE, activation='relu', name='l2')(l1_t)
    policy_t = Dense(n_actions, name='policy', activation='softmax')(l2_t)

    def loss_func(args):
        p_t, act_t, adv_t = args
        oh_t = K.one_hot(act_t, n_actions)
        oh_t = K.squeeze(oh_t, 1)
        p_oh_t = K.log(1e-6 + K.sum(oh_t * p_t, axis=-1, keepdims=True))
        res_t = adv_t * p_oh_t
        return -res_t

    loss_t = Lambda(loss_func, output_shape=(1,), name='loss')([policy_t, action_t, advantage_t])
    return Model(input=[in_t, action_t, advantage_t], output=[policy_t, loss_t])

def net_input(env):
    """
    Create input part of the network with optional prescaling.
    :return: input_tensor, output_tensor
    """
    in_t = Input(shape=env.observation_space.shape, name='input')
    out_t = Conv2D(32, 5, 5, activation='relu', border_mode='same')(in_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(32, 5, 5, activation='relu', border_mode='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, 4, 4, activation='relu', border_mode='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, 3, 3, activation='relu', border_mode='same')(out_t)
    out_t = Flatten(name='flat')(out_t)
    out_t = Dense(512, name='l1', activation='relu')(out_t)
    return in_t, out_t

def build_model(self):
    initializer = initializers.random_normal(stddev=0.02)
    model = Sequential()
    if self.padding:
        model.add(ZeroPadding2D(padding=(1, 0), data_format="channels_first",
                                input_shape=(self.layers, self.rows, self.columns)))
    model.add(Conv2D(32, (8, 8), activation="relu", data_format="channels_first",
                     strides=(4, 4), kernel_initializer=initializer, padding='same',
                     input_shape=(self.layers, self.rows, self.columns)))
    model.add(Conv2D(64, (4, 4), activation="relu", data_format="channels_first",
                     strides=(2, 2), kernel_initializer=initializer, padding='same'))
    model.add(Conv2D(64, (3, 3), activation="relu", data_format="channels_first",
                     strides=(1, 1), kernel_initializer=initializer, padding='same'))
    model.add(Flatten())
    model.add(Dense(512, activation="relu", kernel_initializer=initializer))
    model.add(Dense(self.actions_num, kernel_initializer=initializer))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    return model

def test_model_pipe_keras(self):
    model = Sequential()
    model.add(Flatten(input_shape=(1, 28, 28)))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    p = model_util.ModelPipe()
    input_data = [np.random.random((1, 1, 28, 28)) for _ in range(2)]
    p.add(model.predict, batch_size=64, batcher=np.vstack)
    expected_output = [model.predict(x.reshape((1, 1, 28, 28))) for x in input_data]
    output = p.apply_ordered(input_data)
    self.assertTrue(np.isclose(np.array(output).flatten(),
                               np.array(expected_output).flatten()).all())

def build_discriminator(self):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=3, strides=2,
                     input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.missing_shape)
    validity = model(img)

    return Model(img, validity)

def build_discriminator(self):
    img_shape = (self.img_rows, self.img_cols, self.channels)

    model = Sequential()
    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=img_shape)
    validity = model(img)

    return Model(img, validity)

def build_encoder(self):
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.latent_dim))
    model.summary()

    img = Input(shape=self.img_shape)
    z = model(img)

    return Model(img, z)

def build_discriminator(self):
    z = Input(shape=(self.latent_dim,))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])

    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)

    return Model([z, img], validity)

def build_model(self, dataset, nb_classes):
    self.model = Sequential()
    self.model.add(Convolution2D(32, (3, 3), padding='same',
                                 input_shape=dataset.x_train.shape[1:]))
    self.model.add(Activation('relu'))
    self.model.add(Convolution2D(32, (3, 3)))
    self.model.add(Activation('relu'))
    self.model.add(MaxPooling2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    self.model.add(Convolution2D(64, (3, 3), padding='same'))
    self.model.add(Activation('relu'))
    self.model.add(Convolution2D(64, (3, 3)))
    self.model.add(Activation('relu'))
    self.model.add(MaxPooling2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    self.model.add(Flatten())
    self.model.add(Dense(512))
    self.model.add(Activation('relu'))
    self.model.add(Dropout(0.5))
    self.model.add(Dense(nb_classes))
    self.model.add(Activation('softmax'))
    self.model.summary()

def get_model(img_channels, img_width, img_height, dropout=0.5):
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=(img_channels, img_width, img_height)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

def get_model(shape, dropout=0.5, path=None):
    print('building neural network')
    model = Sequential()
    model.add(Convolution2D(512, 3, 3, border_mode='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(dropout))
    model.add(Flatten())  # input_shape=shape))
    # model.add(Dense(4096))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    # model.add(Activation('linear'))
    return model

def _build_model(self):
    # Neural Net for Deep-Q learning Model
    model = Sequential()
    # model.add(Conv2D(256, kernel_size=(2, 2), activation='relu',
    #                  input_shape=(self.state_size.shape[0], self.state_size.shape[1], 1), padding="same"))
    # model.add(Conv2D(712, kernel_size=(2, 2), activation='relu', padding="same"))
    # model.add(Conv2D(128, kernel_size=(2, 2), activation='relu', padding="same"))
    model.add(Dense(2048, input_dim=5, activation='relu'))  # self.state_size.shape[0] * self.state_size.shape[1]
    # model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(4, activation='linear'))
    model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
    return model

def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
    '''Set up the 3 conv layer keras model.
    nbClasses: Number of outputs.
    inputShape: The input shape without the batch size.
    includeTop: Whether to return the whole net or only the convolutional part.
    '''
    inp = Input(shape=inputShape)
    x = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu',
               border_mode='same', name='conv1')(inp)
    x = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu',
               border_mode='same', name='conv2')(x)
    x = Conv2D(64, 3, 3, activation='relu', border_mode='same', name='conv3')(x)
    if includeTop:
        x = Flatten(name='flatten')(x)
        x = Dense(512, activation='relu', name='dense1')(x)
        out = Dense(nbClasses, activation='softmax', name='output')(x)
    else:
        out = x
    model = Model(inp, out)
    return model

def test_find_activation_layer():
    conv1_filters = 1
    conv2_filters = 1
    dense_units = 1
    model = Sequential()
    model.add(Conv2D(conv1_filters, [3, 3], input_shape=(28, 28, 1),
                     data_format="channels_last", name='conv_1'))
    model.add(Activation('relu', name='act_1'))
    model.add(MaxPool2D((2, 2), name='pool_1'))
    model.add(Conv2D(conv2_filters, [3, 3], data_format="channels_last", name='conv_2'))
    model.add(Activation('relu', name='act_2'))
    model.add(MaxPool2D((2, 2), name='pool_2'))
    model.add(Flatten(name='flat_1'))
    model.add(Dense(dense_units, name='dense_1'))
    model.add(Activation('relu', name='act_3'))
    model.add(Dense(10, name='dense_2'))
    model.add(Activation('softmax', name='act_4'))

    assert find_activation_layer(model.get_layer('conv_1'), 0) == (model.get_layer('act_1'), 0)
    assert find_activation_layer(model.get_layer('conv_2'), 0) == (model.get_layer('act_2'), 0)
    assert find_activation_layer(model.get_layer('dense_1'), 0) == (model.get_layer('act_3'), 0)
    assert find_activation_layer(model.get_layer('dense_2'), 0) == (model.get_layer('act_4'), 0)

def model_cnn(net_layers, input_shape):
    inp = Input(shape=input_shape)
    model = inp

    for cl in net_layers['conv_layers']:
        model = Conv2D(filters=cl[0], kernel_size=cl[1], activation='relu')(model)
        if cl[4]:
            model = MaxPooling2D()(model)
        if cl[2]:
            model = BatchNormalization()(model)
        if cl[3]:
            model = Dropout(0.2)(model)

    model = Flatten()(model)

    for dl in net_layers['dense_layers']:
        model = Dense(dl[0])(model)
        model = Activation('relu')(model)
        if dl[1]:
            model = BatchNormalization()(model)
        if dl[2]:
            model = Dropout(0.2)(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)
    model = Model(inp, model)
    return model

# %%
# LSTM architecture
# conv_layers -> [(filters, kernel_size, BatchNormalization, Dropout, MaxPooling)]
# dense_layers -> [(num_neurons, BatchNormalization, Dropout)]

def build(input_shape, num_outputs, block_fn, repetitions):
    inputs = Input(shape=input_shape)
    conv1 = Conv2D(64, (7, 7), strides=(2, 2), padding='same')(inputs)
    conv1 = BatchNormalization()(conv1)
    conv1 = Activation('relu')(conv1)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(conv1)

    x = pool1
    filters = 64
    first_layer = True
    for i, r in enumerate(repetitions):
        x = _residual_block(block_fn, filters=filters, repetitions=r,
                            is_first_layer=first_layer)(x)
        filters *= 2
        if first_layer:
            first_layer = False

    # last activation <- unnecessary???
    # x = BatchNormalization()(x)
    # x = Activation('relu')(x)

    _, w, h, ch = K.int_shape(x)
    pool2 = AveragePooling2D(pool_size=(w, h), strides=(1, 1))(x)
    flat1 = Flatten()(pool2)
    outputs = Dense(num_outputs, kernel_initializer=init, activation='softmax')(flat1)

    model = Model(inputs=inputs, outputs=outputs)
    return model

def model_discriminator():
    nch = 256
    h = 5
    reg = lambda: l1l2(l1=1e-7, l2=1e-7)

    c1 = Convolution2D(nch / 4, h, h, border_mode='same', W_regularizer=reg(),
                       input_shape=dim_ordering_shape((3, 32, 32)))
    c2 = Convolution2D(nch / 2, h, h, border_mode='same', W_regularizer=reg())
    c3 = Convolution2D(nch, h, h, border_mode='same', W_regularizer=reg())
    c4 = Convolution2D(1, h, h, border_mode='same', W_regularizer=reg())

    model = Sequential()
    model.add(c1)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c2)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c3)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c4)
    model.add(AveragePooling2D(pool_size=(4, 4), border_mode='valid'))
    model.add(Flatten())
    model.add(Activation('sigmoid'))
    return model

def cnn3adam_slim(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D(kernel_size=(50), filters=32, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(5), filters=64, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=(5), filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model

def cnn3adam_filter(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter')
    model.add(Conv1D(kernel_size=(50), filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(5), filters=256, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=(5), filters=300, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc2'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model

def cnn3adam_filter_l2(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use more L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter_l2')
    model.add(Conv1D(kernel_size=(50), filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(5), filters=256, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=(5), filters=300, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model

def cnn3adam_filter_morel2_slim(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='cnn3adam_filter_morel2_slim')
    model.add(Conv1D(kernel_size=(50), filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.05)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(5), filters=128, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=(5), filters=256, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model

def cnn1d(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 1) """
    model = Sequential(name='1D CNN')
    model.add(Conv1D(kernel_size=(50), filters=150, strides=5,
                     input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=(8), filters=200, strides=2,
                     input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=(10), strides=(2)))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=(8), filters=400, strides=2,
                     input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Flatten())
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model

def cnn1(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='no_MP_small_filters')
    model.add(Conv1D(kernel_size=(10), filters=64, strides=2, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(10), filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(10), filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(10), filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=(10), filters=150, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model

def cnn2(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='MP_small_filters')
    model.add(Conv1D(kernel_size=(10), filters=64, strides=2, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=(10), filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=(10), filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model