The following 10 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.GlobalAveragePooling1D().
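Before the project examples, a minimal self-contained sketch (not taken from any of them) of what the layer computes: GlobalAveragePooling1D averages a (batch, steps, features) tensor over the steps axis and returns (batch, features). The shapes below are illustrative only.

import numpy as np
from keras.models import Sequential
from keras.layers import GlobalAveragePooling1D

# (batch, steps, features) -> (batch, features): mean over the steps axis
model = Sequential()
model.add(GlobalAveragePooling1D(input_shape=(10, 4)))

x = np.random.rand(2, 10, 4)            # 2 samples, 10 timesteps, 4 features
y = model.predict(x)
print(y.shape)                          # (2, 4)
print(np.allclose(y, x.mean(axis=1)))   # True: the output is the per-feature mean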
def test_delete_channels_globalaveragepooling1d(channel_index):
    layer = GlobalAveragePooling1D()
    layer_test_helper_1d_global(layer, channel_index)
def test_global_average_pooling_1d(self):
    np.random.seed(1988)
    input_dim = 2
    input_length = 10
    filter_length = 3
    nb_filters = 4

    model = Sequential()
    model.add(Conv1D(nb_filters, kernel_size=filter_length, padding='same',
                     input_shape=(input_length, input_dim)))
    model.add(GlobalAveragePooling1D())
    self._test_keras_model(model)
def __call__(self, inputs):
    # kl and kr are module aliases, presumably keras.layers and
    # keras.regularizers in the source project.
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.BatchNormalization(name='bn1')(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
    x = self._res_unit(x, [32, 32, 128], stage=1, block=2)

    # 64
    x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
    x = self._res_unit(x, [64, 64, 256], stage=2, block=2)

    # 32
    x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
    x = self._res_unit(x, [128, 128, 512], stage=3, block=2)

    # 16
    x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.BatchNormalization(name='bn1')(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
    x = self._res_unit(x, [32, 32, 128], stage=1, block=2)
    x = self._res_unit(x, [32, 32, 128], stage=1, block=3)

    # 64
    x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
    x = self._res_unit(x, [64, 64, 256], stage=2, block=2)
    x = self._res_unit(x, [64, 64, 256], stage=2, block=3)

    # 32
    x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
    x = self._res_unit(x, [128, 128, 512], stage=3, block=2)
    x = self._res_unit(x, [128, 128, 512], stage=3, block=3)

    # 16
    x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.BatchNormalization(name='bn1')(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, 128, stage=1, block=1, stride=2)
    x = self._res_unit(x, 128, stage=1, block=2)

    # 64
    x = self._res_unit(x, 256, stage=2, block=1, stride=2)

    # 32
    x = self._res_unit(x, 256, stage=3, block=1, stride=2)

    # 32
    x = self._res_unit(x, 512, stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
    x = self._res_unit(x, [32, 32, 128], atrous=2, stage=1, block=2)
    x = self._res_unit(x, [32, 32, 128], atrous=4, stage=1, block=3)

    # 64
    x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
    x = self._res_unit(x, [64, 64, 256], atrous=2, stage=2, block=2)
    x = self._res_unit(x, [64, 64, 256], atrous=4, stage=2, block=3)

    # 32
    x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
    x = self._res_unit(x, [128, 128, 512], atrous=2, stage=3, block=2)
    x = self._res_unit(x, [128, 128, 512], atrous=4, stage=3, block=3)

    # 16
    x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
def __call__(self, inputs):
    x = self._merge_inputs(inputs)
    # _keras_shape is the static shape Keras attaches to its tensors;
    # shape[2:] drops the batch and time dimensions.
    shape = getattr(x, '_keras_shape')
    replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
    x = kl.TimeDistributed(replicate_model)(x)
    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)
    return self._build(inputs, x)
def build_model(cat, loss):
    print('Build model...')
    model = Sequential()

    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
    model.add(Dropout(0.5))

    # we add a GlobalAveragePooling1D, which will average the embeddings
    # of all words in the document
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(0.5))

    # we project onto `cat` output units and normalize with a softmax
    model.add(Dense(cat, activation='softmax'))

    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    return model
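This build_model relies on module-level max_features, embedding_dims, and maxlen defined elsewhere in its project. A hypothetical sketch of calling it, assuming everything lives in one module; the values are placeholders, not taken from the source:

max_features = 20000    # assumed vocabulary size
embedding_dims = 50     # assumed embedding width
maxlen = 400            # assumed padded document length

model = build_model(cat=5, loss='categorical_crossentropy')
model.summary()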
def build_model(self, x):
    x = GlobalAveragePooling1D()(x)
    return x
def build_model(max_length=1000, nb_filters=64, kernel_size=3, pool_size=2,
                regularization=0.01, weight_constraint=2., dropout_prob=0.4,
                clear_session=True):
    if clear_session:
        K.clear_session()

    model = Sequential()
    model.add(Embedding(embeddings.shape[0],
                        embeddings.shape[1],
                        input_length=max_length,
                        trainable=False,
                        weights=[embeddings]))
    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))
    model.add(Dropout(dropout_prob))
    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))
    model.add(Dropout(dropout_prob))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(1,
                    kernel_regularizer=l2(regularization),
                    kernel_constraint=maxnorm(weight_constraint),
                    activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
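This last build_model closes over a module-level embeddings matrix (rows = vocabulary size, columns = embedding width) and assumes the Keras imports it uses (K, l2, maxnorm, and the layer classes) are in scope. A hedged usage sketch, with a random placeholder matrix standing in for real pre-trained vectors; the 20000 x 100 shape is assumed, not from the source:

import numpy as np

# Placeholder for a pre-trained embedding matrix; in practice it would be
# loaded from e.g. GloVe or word2vec vectors.
embeddings = np.random.rand(20000, 100).astype('float32')

model = build_model(max_length=1000, nb_filters=64)
model.summary()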