The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.Conv2D().
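Before the examples, here is a minimal self-contained sketch of the Conv2D call signature they all build on (the 64x64 RGB input shape here is illustrative, not from any of the projects below):

from keras.models import Sequential
from keras.layers import Conv2D

# 32 filters, 3x3 kernel, stride 1, zero-padded so the spatial size is preserved
model = Sequential()
model.add(Conv2D(32, (3, 3), strides=(1, 1), padding='same',
                 activation='relu', input_shape=(64, 64, 3)))
model.summary()  # output shape: (None, 64, 64, 32)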
def tsinalis(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1) """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=(200), filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=(20), strides=(10)))
    print(model.output_shape)
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape=[48, 48, 3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)
    classifier = Dense(2, activation='softmax', name='conv6-1')(x)
    bbox_regress = Dense(4, name='conv6-2')(x)
    landmark_regress = Dense(10, name='conv6-3')(x)
    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)
    return model
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    # Keras 1-style arguments (Conv2D(64, 3, 3), border_mode, nb_epoch);
    # Keras 2 converts these via its legacy interface with a deprecation warning.
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
def test_keras_import(self):
    # Pad 1D
    model = Sequential()
    model.add(ZeroPadding1D(2, input_shape=(224, 3)))
    model.add(Conv1D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)
    # Pad 2D
    model = Sequential()
    model.add(ZeroPadding2D(2, input_shape=(224, 224, 3)))
    model.add(Conv2D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)
    # Pad 3D
    model = Sequential()
    model.add(ZeroPadding3D(2, input_shape=(224, 224, 224, 3)))
    model.add(Conv3D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)

# ********** Export json tests **********
# ********** Data Layers Test **********
def get_model():
    inputs = Input(shape=(64, 64, 3))
    conv_1 = Conv2D(1, (3, 3), strides=(1, 1), padding='same')(inputs)
    act_1 = Activation('relu')(conv_1)
    conv_2 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(act_1)
    act_2 = Activation('relu')(conv_2)
    deconv_1 = Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='same')(act_2)
    act_3 = Activation('relu')(deconv_1)
    merge_1 = concatenate([act_3, act_1], axis=3)
    deconv_2 = Conv2DTranspose(1, (3, 3), strides=(1, 1), padding='same')(merge_1)
    act_4 = Activation('relu')(deconv_2)
    model = Model(inputs=[inputs], outputs=[act_4])
    model.compile(optimizer='adadelta', loss=dice_coef_loss, metrics=[dice_coef])
    return model
def create_Kao_Rnet(weight_path='model24.h5'):
    # change this shape to [None, None, 3] to enable arbitrary shape input
    input = Input(shape=[24, 24, 3])
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(128, name='conv4')(x)
    x = PReLU(name='prelu4')(x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
def create_actor_network(self, state_size, action_dim):
    """Create actor network."""
    print("[MESSAGE] Build actor network.")
    S = Input(shape=state_size)
    h_0 = Conv2D(32, (3, 3), padding="same",
                 kernel_regularizer=l2(0.0001),
                 activation="relu")(S)
    h_1 = Conv2D(32, (3, 3), padding="same",
                 kernel_regularizer=l2(0.0001),
                 activation="relu")(h_0)
    h_1 = AveragePooling2D(2, 2)(h_1)
    h_1 = Flatten()(h_1)
    h_1 = Dense(600, activation="relu")(h_1)
    A = Dense(action_dim, activation="softmax")(h_1)
    model = Model(inputs=S, outputs=A)
    return model, model.trainable_weights, S
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu residual unit activation function.
    This is the original ResNet v1 scheme in https://arxiv.org/abs/1512.03385
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
    conv_name = conv_params.setdefault("conv_name", None)
    bn_name = conv_params.setdefault("bn_name", None)
    relu_name = conv_params.setdefault("relu_name", None)
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(x):
        x = Conv2D(filters=filters, kernel_size=kernel_size,
                   strides=strides, padding=padding,
                   dilation_rate=dilation_rate,
                   kernel_initializer=kernel_initializer,
                   kernel_regularizer=kernel_regularizer,
                   name=conv_name)(x)
        return _bn_relu(x, bn_name=bn_name, relu_name=relu_name)

    return f
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv residual unit with full pre-activation
    function. This is the ResNet v2 scheme proposed in
    http://arxiv.org/pdf/1603.05027v2.pdf
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
    conv_name = conv_params.setdefault("conv_name", None)
    bn_name = conv_params.setdefault("bn_name", None)
    relu_name = conv_params.setdefault("relu_name", None)
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(x):
        activation = _bn_relu(x, bn_name=bn_name, relu_name=relu_name)
        return Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      dilation_rate=dilation_rate,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer,
                      name=conv_name)(activation)

    return f
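Both helpers return a closure, so blocks are assembled by applying the returned function to a tensor. A hypothetical usage sketch (it assumes the `_bn_relu` helper and an `input_tensor` from the surrounding module):

# ResNet v1 ordering: conv -> BN -> relu
x = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(input_tensor)
# ResNet v2 full pre-activation ordering: BN -> relu -> conv
x = _bn_relu_conv(filters=64, kernel_size=(3, 3))(x)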
def __initial_conv_block(input, k=1, dropout=0.0, initial=False):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    # Check if input number of filters is same as 16 * k, else create convolution2d for this input
    if initial:
        if K.image_dim_ordering() == 'th':
            init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)
        else:
            init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)

    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    m = add([init, x])
    return m
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates model comprised of 2 convolutional layers followed by dense layers

    dense_layer_sizes: List of layer sizes. This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''
    model = Sequential()
    model.add(Conv2D(filters, kernel_size,
                     padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    return model
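A hypothetical call, following the docstring above (the module-level input_shape and num_classes globals used inside make_model are assumed, and the argument values are illustrative):

# two dense layers of 64 units; 32 filters with 3x3 kernels; 2x2 max pooling
model = make_model(dense_layer_sizes=[64, 64], filters=32,
                   kernel_size=(3, 3), pool_size=(2, 2))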
def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])
    train_x, train_y = train_data
    history = model.fit(train_x, train_y, epochs=epochs,
                        validation_data=validation_data)
    return model, history
def get_model():
    input_shape = (image_size, image_size, 3)
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, kernel_size=(3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(n_classes, kernel_size=(3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(GlobalAveragePooling2D())
    print(model.summary())
    # sys.exit(0)
    # model.compile(loss=keras.losses.mean_squared_error, optimizer=keras.optimizers.Adadelta())
    return model
def plot_network(image, model, label=None):
    layer_names = [l.name for l in model.layers if isinstance(l, Conv2D)]
    n_conv = len(layer_names)
    n_axes = n_conv
    prediction = model.predict(np.expand_dims(image, 0))
    mng = plt.get_current_fig_manager()
    mng.full_screen_toggle()
    fig, [axlist1, axlist2] = plt.subplots(2, n_conv)
    diagnosis = ["negative", "positive"]
    for j in range(n_conv):
        plot_heatmap(image, model, layer_names[j], "abnormal", axlist1[j])
        # axlist1[j].set_xlabel(layer_names[j] + "ab")
    for j in range(n_conv):
        plot_heatmap(image, model, layer_names[j], "normal", axlist2[j], cmap=plt.cm.inferno)
    fig.suptitle("Prediction: {}, {}".format(prediction, label))
    fig.show()
def _conv_block(layer, num_conv_layers, num_filters):
    """Build a conv block on top of inputs

    :param layer: Keras Layer object representing the VGG net up to this point
    :param num_conv_layers: int for the number of convolutional layers to
        include in this block
    :param num_filters: int for the number of filters per convolutional layer
    """
    for _ in range(num_conv_layers - 1):
        layer = Conv2D(
            filters=num_filters, kernel_size=(3, 3), padding='same',
            activation='relu'
        )(layer)
    layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(layer)
    return layer
def cnn(height, width):
    question_input = Input(shape=(height, width, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 320), activation='sigmoid', padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.25)(F1_Q)
    predictQ = Dense(32, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(Drop1_Q)

    prediction2 = Dropout(0.25)(predictQ)
    predictions = Dense(1, activation='relu')(prediction2)

    model = Model(inputs=[question_input], outputs=predictions)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0))
    # model.compile(loss='mean_squared_error',
    #               optimizer='nadam')
    return model
def block(self, num_filters, num_layers, kernel_size, strides, input_tensor):
    x = Conv2D(num_layers, (1, 1), strides=strides)(input_tensor)
    x = Activation(selu)(x)
    x = Conv2D(num_filters, kernel_size, padding='same')(x)
    x = Activation(selu)(x)
    x = Conv2D(num_filters * 4, (1, 1))(x)
    shortcut = Conv2D(num_filters * 4, (1, 1), strides=strides)(input_tensor)
    x = layers.add([x, shortcut])
    x = Activation(selu)(x)
    return x
def keepsize_256(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """

    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])
        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid',
               kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)
    for i in range(depth - 1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid',
               kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(4 * n_filters, (3, 3), padding='valid',
               kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)

    return Model(inputs=inputs, outputs=final)
def create_network():
    input_img = Input(shape=INPUT_SHAPE)
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    # at this point the representation is (4, 4, 8) i.e. 128-dimensional

    x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    model = Model(input_img, decoded)
    model.compile(optimizer='adadelta', loss='binary_crossentropy')
    return KerasNetwork(model, 'weights_conv_autoencoder.hd5')
def create_network():
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D

    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return KerasNetwork(model, 'cnn_weights.hd5')
def net_input(env):
    """
    Create input part of the network with optional prescaling.
    :return: input_tensor, output_tensor
    """
    in_t = Input(shape=env.observation_space.shape, name='input')
    # Keras 1-style arguments (Conv2D(32, 5, 5), border_mode); Keras 2 converts
    # these via its legacy interface with a deprecation warning.
    out_t = Conv2D(32, 5, 5, activation='relu', border_mode='same')(in_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(32, 5, 5, activation='relu', border_mode='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, 4, 4, activation='relu', border_mode='same')(out_t)
    out_t = MaxPooling2D((2, 2))(out_t)
    out_t = Conv2D(64, 3, 3, activation='relu', border_mode='same')(out_t)
    out_t = Flatten(name='flat')(out_t)
    out_t = Dense(512, name='l1', activation='relu')(out_t)
    return in_t, out_t
def make_init_model():
    input_data = Input(shape=(32, 32, 3))

    init_model_index = random.randint(1, 4)
    init_model_index = 2  # fixed to variant 2, overriding the random choice above

    if init_model_index == 1:  # one conv layer with kernel num = 64
        stem_conv_1 = Conv2D(64, (1, 1), padding='same')(input_data)
    elif init_model_index == 2:  # two conv layers with kernel num = 64
        stem_conv_1 = Conv2D(64, (1, 1), padding='same')(input_data)
        stem_conv_2 = Conv2D(64, (1, 1), padding='same')(stem_conv_1)
    elif init_model_index == 3:  # one conv layer with a wider kernel num = 128
        stem_conv_1 = Conv2D(128, (1, 1), padding='same')(input_data)
    elif init_model_index == 4:  # two conv layers with a wider kernel_num = 128
        stem_conv_1 = Conv2D(128, (1, 1), padding='same')(input_data)
        stem_conv_2 = Conv2D(128, (1, 1), padding='same')(stem_conv_1)

    stem_global_pooling_1 = GlobalMaxPooling2D()(stem_conv_1)
    stem_softmax_1 = Activation('softmax')(stem_global_pooling_1)

    model = Model(inputs=input_data, outputs=stem_softmax_1)
    return model
def Build(model_list):
    print(model_list)
    for idx, layer in enumerate(model_list):
        type = layer[0]
        if type == 'InputLayer':
            input = Input(shape=layer[1])
            x = input
        elif type == 'Conv2D':
            x = Conv2D(filters=layer[2], kernel_size=layer[1], padding='same')(x)
        elif type == 'InceptionBlock':
            x = inception_block(x, idx)
        elif type == 'ResidualBlock':
            x = residual_block(x, layer[1], idx)
        elif type == "GlobalMaxPooling2D":
            x = GlobalMaxPooling2D()(x)
        elif type == "Activation":
            x = Activation('softmax')(x)
    model = Model(inputs=input, outputs=x)
    return model
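For reference, a hypothetical model_list matching the layer-spec layout Build expects (for Conv2D entries, layer[1] is the kernel size and layer[2] the filter count; the values here are illustrative):

model_list = [
    ['InputLayer', (32, 32, 3)],
    ['Conv2D', (3, 3), 64],     # kernel_size, filters
    ['GlobalMaxPooling2D'],
    ['Activation'],
]
model = Build(model_list)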
def conv_wider(model):
    model_list = get_model_list(model)
    for idx, layer in enumerate(model_list):
        if layer[0] == 'Conv2D':
            wider_layer = layer
            insert_idx = idx + 1
    # wider operation: filters * 2
    wider_layer[2] *= 2
    # if next layer is residual layer, we need to change residual layer's input shape
    while model_list[insert_idx][0] == 'ResidualBlock':
        model_list[insert_idx][1] = wider_layer[2]
        insert_idx = insert_idx + 1
    new_model = Build(model_list)
    return new_model
def add_skipping(model):
    model_list = get_model_list(model)
    insert_idx = -1
    # TODO: need to get the output shape from the last layer, use it as a parameter
    for idx, layer in enumerate(model_list):
        if layer[0] == 'Conv2D' or layer[0] == 'InceptionBlock' or layer[0] == 'ResidualBlock':
            insert_idx = idx + 1
            if layer[0] == 'Conv2D':
                pre_output_shape = layer[2]
            else:
                pre_output_shape = layer[1]
    if insert_idx != -1:
        model_list.insert(insert_idx, ['ResidualBlock', pre_output_shape])
    new_model = Build(model_list)
    return new_model
def update(self):
    import re
    self.type2ind = {}
    for node in self.nodes():
        ind = int(re.findall(r'^\w+?(\d+)$', node.name)[0])
        self.type2ind[node.type] = self.type2ind.get(node.type, []) + [ind]

    for node in nx.topological_sort(self):
        if node.type in ['Conv2D', 'Group', 'Conv2D_Pooling']:
            plus = 1
        else:
            plus = 0
        if len(self.predecessors(node)) == 0:
            node.depth = 0
        else:
            pre_depth = [_node.depth for _node in self.predecessors(node)]
            pre_depth = max(pre_depth)
            node.depth = self.max_depth = pre_depth + plus
def get_model_list(self, model):
    model_list = []
    model_dict = json.loads(model.to_json())
    model_layer = model_dict['config']['layers']
    for layer in model_layer:
        layer_name = layer['config']['name']
        layer_output_shape = model.get_layer(layer_name).output_shape
        if layer['class_name'] == 'Conv2D' and layer['config']['name'].lower().startswith('conv'):
            model_list.append([layer['class_name'],
                               layer['config']['name'],
                               {'kernel_size': layer['config']['kernel_size'],
                                'filters': layer['config']['filters']}])
        elif layer['class_name'] == 'GlobalMaxPooling2D':
            model_list.append([layer['class_name'],
                               layer['config']['name'],
                               {}])
        elif layer['class_name'] == 'Activation':
            model_list.append([layer['class_name'],
                               layer['config']['name'],
                               {'activation_type': 'softmax'}])
    return model_list
def build_model(self):
    initializer = initializers.random_normal(stddev=0.02)
    model = Sequential()
    if self.padding:
        model.add(ZeroPadding2D(padding=(1, 0), data_format="channels_first",
                                input_shape=(self.layers, self.rows, self.columns)))
    model.add(Conv2D(32, (8, 8), activation="relu", data_format="channels_first",
                     strides=(4, 4), kernel_initializer=initializer, padding='same',
                     input_shape=(self.layers, self.rows, self.columns)))
    model.add(Conv2D(64, (4, 4), activation="relu", data_format="channels_first",
                     strides=(2, 2), kernel_initializer=initializer, padding='same'))
    model.add(Conv2D(64, (3, 3), activation="relu", data_format="channels_first",
                     strides=(1, 1), kernel_initializer=initializer, padding='same'))
    model.add(Flatten())
    model.add(Dense(512, activation="relu", kernel_initializer=initializer))
    model.add(Dense(self.actions_num, kernel_initializer=initializer))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    return model
def deflating_convolution(inputs, n_deflation_layers, n_filters_init=32, noise=None, name_prefix=None):
    def add_linear_noise(x, eps, ind):
        flattened_deflated = Reshape((-1,), name=name_prefix + '_conv_flatten_{}'.format(ind))(x)
        deflated_shape = ker.int_shape(x)
        deflated_size = deflated_shape[1] * deflated_shape[2] * deflated_shape[3]
        noise_transformed = Dense(deflated_size, activation=None,
                                  name=name_prefix + '_conv_noise_dense_{}'.format(ind))(eps)
        added_noise = Add(name=name_prefix + '_conv_add_noise_{}'.format(ind))([noise_transformed, flattened_deflated])
        x = Reshape((deflated_shape[1], deflated_shape[2], deflated_shape[3]),
                    name=name_prefix + '_conv_backreshape_{}'.format(ind))(added_noise)
        return x

    deflated = Conv2D(filters=n_filters_init, kernel_size=(5, 5),
                      strides=(2, 2), padding='same', activation='relu',
                      name=name_prefix + '_conv_0')(inputs)
    if noise is not None:
        deflated = add_linear_noise(deflated, noise, 0)
    for i in range(1, n_deflation_layers):
        deflated = Conv2D(filters=n_filters_init * (2 ** i), kernel_size=(5, 5),
                          strides=(2, 2), padding='same', activation='relu',
                          name=name_prefix + '_conv_{}'.format(i))(deflated)
        # if noise is not None:
        #     deflated = add_linear_noise(deflated, noise, i)
    return deflated
def _build_model(self):
    # Neural Net for Deep-Q learning Model
    model = Sequential()
    # model.add(Conv2D(256, kernel_size=(2, 2), activation='relu',
    #                  input_shape=(self.state_size.shape[0], self.state_size.shape[1], 1), padding="same"))
    # model.add(Conv2D(712, kernel_size=(2, 2), activation='relu', padding="same"))
    # model.add(Conv2D(128, kernel_size=(2, 2), activation='relu', padding="same"))
    model.add(Dense(2048, input_dim=5, activation='relu'))  # self.state_size.shape[0] * self.state_size.shape[1]
    # model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(4, activation='linear'))
    model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
    return model
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
    '''Set up the 3 conv layer keras model.
    classes: Number of outputs.
    inputShape: The input shape without the batch size.
    includeTop: If you only want the whole net, or just the convolutions.
    '''
    inp = Input(shape=inputShape)
    # Keras 1-style arguments (subsample, border_mode); Keras 2 converts
    # these via its legacy interface with a deprecation warning.
    x = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu',
               border_mode='same', name='conv1')(inp)
    x = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu',
               border_mode='same', name='conv2')(x)
    x = Conv2D(64, 3, 3, activation='relu', border_mode='same', name='conv3')(x)
    if includeTop:
        x = Flatten(name='flatten')(x)
        x = Dense(512, activation='relu', name='dense1')(x)
        out = Dense(nbClasses, activation='softmax', name='output')(x)
    else:
        out = x
    model = Model(inp, out)
    return model
def model(flags):
    inputs = Input(shape=(flags['image_size'] + (3,)))
    x = inputs
    x, sources = downsamples(x, [40, 40, 80, 100, 100, 100, 80, 80])
    x = mix(x, 100)
    x = Dropout(0.1)(x)
    x = upsamples(x, sources, [100] * 8)
    x = Conv2D(30, (1, 1), padding='valid', activation='relu')(x)
    x = Conv2D(30, (1, 1), padding='valid', activation='relu')(x)
    x = Conv2D(len(flags['example']['y']), (1, 1), padding='valid', activation='sigmoid')(x)
    mod = Model(inputs=inputs, outputs=x)
    return mod
def test_find_activation_layer():
    conv1_filters = 1
    conv2_filters = 1
    dense_units = 1
    model = Sequential()
    model.add(Conv2D(conv1_filters, [3, 3], input_shape=(28, 28, 1),
                     data_format="channels_last", name='conv_1'))
    model.add(Activation('relu', name='act_1'))
    model.add(MaxPool2D((2, 2), name='pool_1'))
    model.add(Conv2D(conv2_filters, [3, 3], data_format="channels_last", name='conv_2'))
    model.add(Activation('relu', name='act_2'))
    model.add(MaxPool2D((2, 2), name='pool_2'))
    model.add(Flatten(name='flat_1'))
    model.add(Dense(dense_units, name='dense_1'))
    model.add(Activation('relu', name='act_3'))
    model.add(Dense(10, name='dense_2'))
    model.add(Activation('softmax', name='act_4'))

    assert find_activation_layer(model.get_layer('conv_1'), 0) == (model.get_layer('act_1'), 0)
    assert find_activation_layer(model.get_layer('conv_2'), 0) == (model.get_layer('act_2'), 0)
    assert find_activation_layer(model.get_layer('dense_1'), 0) == (model.get_layer('act_3'), 0)
    assert find_activation_layer(model.get_layer('dense_2'), 0) == (model.get_layer('act_4'), 0)
def regionProposalNetwork(base_layers, noOfAnchors):
    """
    Region Proposal Network
    """
    x = Conv2D(512, (1, 300), padding='same', activation='relu',
               kernel_initializer='normal', name='rpn_conv1')(base_layers)
    print('INFO: rpn_conv1: ', x)
    # x = Conv2D(512, (1, 302), padding='same', activation='relu', kernel_initializer='normal', name='rpn_conv2')(base_layers)
    # x = MaxPooling2D((1, 2), strides=(1, 2))(x)
    x_class = Conv2D(noOfAnchors, (1, 103), activation='sigmoid',
                     kernel_initializer='uniform', name='rpn_out_class')(x)
    print('INFO: rpn_out_class: ', x_class)
    x_regr = Conv2D(noOfAnchors * 4, (1, 103), activation='linear',
                    kernel_initializer='zero', name='rpn_out_regress')(x)
    print('INFO: rpn_out_regress: ', x_regr)
    return [x_class, x_regr, base_layers]
def test_tiny_conv_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels, kernel_height, kernel_width = 3, 5, 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model, model_precision=model_precision)
def test_tiny_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels, kernel_height, kernel_width = 3, 5, 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     dilation_rate=(2, 2),
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model, model_precision=model_precision)
def test_tiny_conv_dilated_rect_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_shape = (32, 20, 3)
    num_kernels = 2
    kernel_height = 3
    kernel_width = 3

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     dilation_rate=(2, 2),
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model, model_precision=model_precision)
def test_tiny_conv_rect_kernel_x(self):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels = 3
    kernel_height = 1
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width),
                     padding='same'))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model)
def test_tiny_conv_rect_kernel_xy(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 3

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width),
                     padding='valid'))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model, model_precision=model_precision)
def test_conv_batchnorm_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))
    model.add(BatchNormalization(epsilon=1e-5))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_keras_model(model, model_precision=model_precision)
def test_conv_batchnorm_no_gamma_no_beta(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))
    model.add(BatchNormalization(center=False, scale=False, epsilon=1e-5))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_keras_model(model, model_precision=model_precision)
def test_tiny_conv_upsample_random(self):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))
    model.add(UpSampling2D(size=2))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model)
def test_tiny_conv_dense_random(self):
    np.random.seed(1988)
    num_samples = 1
    input_dim = 8
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 2
    kernel_height = 5
    kernel_width = 5
    hidden_dim = 4

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(hidden_dim))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_keras_model(model)
def test_tiny_conv_dropout_random(self):
    np.random.seed(1988)
    num_samples = 1
    input_dim = 8
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 2
    kernel_height = 5
    kernel_width = 5
    hidden_dim = 4

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape,
                     filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))
    model.add(SpatialDropout2D(0.5))
    model.add(Flatten())
    model.add(Dense(hidden_dim))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_keras_model(model)
def test_shared_vision(self):
    digit_input = Input(shape=(27, 27, 1))
    x = Conv2D(64, (3, 3))(digit_input)
    x = Conv2D(64, (3, 3))(x)
    out = Flatten()(x)
    vision_model = Model(inputs=[digit_input], outputs=[out])

    # then define the tell-digits-apart model
    digit_a = Input(shape=(27, 27, 1))
    digit_b = Input(shape=(27, 27, 1))

    # the vision model will be shared, weights and all
    out_a = vision_model(digit_a)
    out_b = vision_model(digit_b)
    concatenated = concatenate([out_a, out_b])
    out = Dense(1, activation='sigmoid')(concatenated)
    model = Model(inputs=[digit_a, digit_b], outputs=out)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_keras_model(model)
def test_conv_layer_params(self, model_precision=_MLMODEL_FULL_PRECISION):
    options = dict(
        activation=['relu', 'tanh', 'sigmoid'],  # keras does not support softmax on 4-D
        use_bias=[True, False],
        padding=['same', 'valid'],
        filters=[1, 3, 5],
        kernel_size=[[5, 5]],  # fails when sizes are different
    )

    # Define a function that tests a model
    input_shape = (10, 10, 1)

    def build_model(x):
        kwargs = dict(zip(options.keys(), x))
        model = Sequential()
        model.add(Conv2D(input_shape=input_shape, **kwargs))
        return x, model

    # Iterate through all combinations
    product = itertools.product(*options.values())
    args = [build_model(p) for p in product]

    # Test the cases
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param, model_precision=model_precision)
def test_tiny_mcrnn_music_tagger(self):
    x_in = Input(shape=(4, 6, 1))
    x = ZeroPadding2D(padding=(0, 1))(x_in)
    x = BatchNormalization(axis=2, name='bn_0_freq')(x)
    # Conv block 1
    x = Conv2D(2, (3, 3), padding='same', name='conv1')(x)
    x = BatchNormalization(axis=3, name='bn1')(x)
    x = Activation('elu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
    # Conv block 2
    x = Conv2D(4, (3, 3), padding='same', name='conv2')(x)
    x = BatchNormalization(axis=3, name='bn2')(x)
    x = Activation('elu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)

    # Should get you (1, 1, 2, 4)
    x = Reshape((2, 4))(x)
    x = GRU(32, return_sequences=True, name='gru1')(x)
    x = GRU(32, return_sequences=False, name='gru2')(x)

    # Create model.
    model = Model(x_in, x)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)
def model_cnn(net_layers, input_shape):
    inp = Input(shape=input_shape)
    model = inp
    for cl in net_layers['conv_layers']:
        model = Conv2D(filters=cl[0], kernel_size=cl[1], activation='relu')(model)
        if cl[4]:
            model = MaxPooling2D()(model)
        if cl[2]:
            model = BatchNormalization()(model)
        if cl[3]:
            model = Dropout(0.2)(model)

    model = Flatten()(model)

    for dl in net_layers['dense_layers']:
        model = Dense(dl[0])(model)
        model = Activation('relu')(model)
        if dl[1]:
            model = BatchNormalization()(model)
        if dl[2]:
            model = Dropout(0.2)(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)
    model = Model(inp, model)
    return model

# %%
# LSTM architecture
# conv_layers -> [(filters, kernel_size, BatchNormalization, Dropout, MaxPooling)]
# dense_layers -> [(num_neurons, BatchNormalization, Dropout)]
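Given the tuple layout documented in the comments above, a hypothetical call to model_cnn might look like this (the shapes and layer counts are illustrative, not from the original project):

net_layers = {
    'conv_layers': [(32, (3, 3), True, False, True),   # filters, kernel_size, BN, Dropout, MaxPooling
                    (64, (3, 3), True, True, True)],
    'dense_layers': [(128, True, True)],               # num_neurons, BN, Dropout
}
model = model_cnn(net_layers, input_shape=(64, 64, 3))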