The following code examples, extracted from open-source Python projects, demonstrate how to use keras.layers.MaxPooling1D().
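Before the project examples, here is a minimal, self-contained sketch of what the layer does (assumptions: standalone Keras 2 with the default channels-last layout; the shapes are invented for illustration). MaxPooling1D slides a window of pool_size steps along the time axis of a (batch, steps, channels) tensor and keeps the per-channel maximum in each window, so strides=2 roughly halves the steps axis:

import numpy as np
from keras.models import Sequential
from keras.layers import MaxPooling1D

# Pool pairs of consecutive time steps down to their per-channel maximum.
model = Sequential()
model.add(MaxPooling1D(pool_size=2, strides=2, input_shape=(8, 3)))

x = np.arange(24, dtype='float32').reshape(1, 8, 3)  # (batch, steps, channels)
print(model.predict(x).shape)  # (1, 4, 3): 8 steps pooled down to 4
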
def tsinalis(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1) """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=200, filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model

def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, padding='same')(tensor_input)

    # out = merge([out, pooling], mode='sum')
    out = add([out, pooling])
    return out

def rcnn(input_shape, n_classes):
    """ Input size should be [batch, 1d, ch] = (XXX, 3000, 1) """
    model = Sequential(name='RCNN test')
    model.add(Conv1D(kernel_size=200, filters=20, batch_input_shape=input_shape, activation='elu'))
    model.add(MaxPooling1D(pool_size=20, strides=10))
    model.add(Conv1D(kernel_size=20, filters=200, activation='elu'))
    model.add(MaxPooling1D(pool_size=10, strides=3))
    model.add(Conv1D(kernel_size=20, filters=200, activation='elu'))
    model.add(MaxPooling1D(pool_size=10, strides=3))
    model.add(Dense(512, activation='elu'))
    model.add(Dense(512, activation='elu'))
    model.add(Reshape((1, model.output_shape[1])))
    model.add(LSTM(256, stateful=True, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(n_classes, activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model

def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, strides=2, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, strides=4, padding='same')(x)
    out = add([out, pooling])
    # out = merge([out, pooling])
    return out

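The two residual blocks above use MaxPooling1D on the shortcut path so its length matches the convolutional path (stride 4 against two stride-2 convolutions). They only add cleanly when the shortcut carries the same number of channels as the second convolution, so the block input must already have k2 channels. A hedged composition sketch, with invented shapes and filter counts, assuming the layer imports used by the block definitions are in scope:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(3000, 64))    # channel count must equal k2 for add() to work
x = first_block(inp, (64, 64))   # length preserved; shortcut is a pool_size=1 MaxPooling1D
x = repeated_block(x, (64, 64))  # length / 4; stride-4 pooled shortcut matches two stride-2 convs
model = Model(inp, x)
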
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(self.l1_decay, self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    x = kl.Flatten()(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Dense(self.nb_hidden,
                 kernel_initializer=self.init,
                 kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)

def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(256, 7,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    gru = kl.recurrent.GRU(256, kernel_regularizer=kernel_regularizer)
    x = kl.Bidirectional(gru)(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)

def createBaseNetworkSmall(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkLarge(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def netSigmoid(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(128, activation='relu'))
    baseNetwork.add(Dropout(0.2))
    baseNetwork.add(Dense(128, activation='relu'))
    baseNetwork.add(Dropout(0.2))
    return baseNetwork

def createBaseNetworkSmall(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def build_lstm(input_shape):
    model = Sequential()
    # model.add(Masking(input_shape=input_shape, mask_value=-1.))
    model.add(Embedding(input_shape[0], 128, input_length=input_shape[1]))
    # Keras 1-style arguments (nb_filter, filter_length, border_mode,
    # subsample_length, pool_length), unlike the Keras 2 names used elsewhere.
    model.add(Convolution1D(nb_filter=64,
                            filter_length=5,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=4))
    model.add(GRU(128))
    # model.add(GRU(128, return_sequences=False))
    # Add dropout if overfitting
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def build_lstm(input_shape):
    model = Sequential()
    # model.add(Masking(input_shape=input_shape, mask_value=-1.))
    model.add(Embedding(input_shape[0], 128, input_length=input_shape[1]))
    model.add(Convolution1D(nb_filter=64,
                            filter_length=5,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=model.output_shape[1]))
    model.add(Flatten())
    model.add(Dense(128))
    # model.add(GRU(128, return_sequences=False))
    # Add dropout if overfitting
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def _build_model(self):
    # Deep Conv Neural Net for Deep-Q learning Model
    model = Sequential()
    model.add(Conv1D(128, 3, input_shape=(19, 48)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(64, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.action_size))
    model.add(Activation('sigmoid'))
    model.compile(loss=self._huber_loss, optimizer=Adam(lr=self.learning_rate))
    # model.compile(loss='binary_crossentropy',
    #               optimizer='rmsprop',
    #               metrics=['accuracy'])
    return model

def char_block(in_layer, nb_filter=(64, 100), filter_length=(3, 3), subsample=(2, 1), pool_length=(2, 2)):
    block = in_layer
    for i in range(len(nb_filter)):
        block = Conv1D(filters=nb_filter[i],
                       kernel_size=filter_length[i],
                       padding='valid',
                       activation='tanh',
                       strides=subsample[i])(block)
        # block = BatchNormalization()(block)
        # block = Dropout(0.1)(block)
        if pool_length[i]:
            block = MaxPooling1D(pool_size=pool_length[i])(block)
    # block = Lambda(max_1d, output_shape=(nb_filter[-1],))(block)
    block = GlobalMaxPool1D()(block)
    block = Dense(128, activation='relu')(block)
    return block

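A hedged usage sketch for char_block (the 128-step, 32-dimensional character input is an invented shape, and the Conv1D/MaxPooling1D/GlobalMaxPool1D/Dense imports are assumed to be in scope as in the function above):

from keras.layers import Input
from keras.models import Model

chars = Input(shape=(128, 32))    # e.g. an embedded character sequence
features = char_block(chars)      # two conv/pool stages, global max pool, Dense(128)
encoder = Model(chars, features)  # yields a 128-d encoding per sequence
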
def cnn3adam_slim(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D(kernel_size=50, filters=32, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=64, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model

def cnn3adam_filter(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc2'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model

def cnn3adam_filter_l2(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use more L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter_l2')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model

def cnn3adam_filter_morel2_slim(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='cnn3adam_filter_morel2_slim')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.05)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=128, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=256, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model

def cnn1d(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 1) """
    model = Sequential(name='1D CNN')
    model.add(Conv1D(kernel_size=50, filters=150, strides=5, input_shape=input_shape,
                     activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=8, filters=200, strides=2, input_shape=input_shape,
                     activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=10, strides=2))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=8, filters=400, strides=2, input_shape=input_shape,
                     activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Flatten())
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adadelta(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model

def cnn2(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='MP_small_filters')
    model.add(Conv1D(kernel_size=10, filters=64, strides=2, input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=10, filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=10, filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model

def __init__(self, training, sequence_length=None, vocabulary_size=None,
             train_embeddings=SequentialTextEmbeddingClassifier.TRAIN_EMBEDDINGS,
             dropout=DROPOUT, filters=FILTERS, kernel_size=KERNEL_SIZE,
             pool_factor=POOL_FACTOR, learning_rate=LEARNING_RATE,
             language_model=LANGUAGE_MODEL):
    from keras.layers import Dropout, Conv1D, Flatten, MaxPooling1D, Dense
    from keras.models import Sequential
    from keras.optimizers import Adam

    label_names, sequence_length, vocabulary_size = self.parameters_from_training(
        sequence_length, vocabulary_size, training, language_model)
    embedder = TextSequenceEmbedder(vocabulary_size, sequence_length, language_model)

    model = Sequential()
    model.add(self.embedding_layer(embedder, sequence_length, train_embeddings,
                                   name="embedding"))
    model.add(Conv1D(filters, kernel_size, padding="valid", activation="relu",
                     strides=1, name="convolution"))
    model.add(MaxPooling1D(pool_size=pool_factor, name="pooling"))
    model.add(Flatten(name="flatten"))
    model.add(Dropout(dropout, name="dropout"))
    model.add(Dense(len(label_names), activation="softmax", name="softmax"))
    optimizer = Adam(lr=learning_rate)
    model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])

    self.filters = filters
    self.kernel_size = kernel_size
    self.pool_factor = pool_factor
    self.dropout = dropout
    super().__init__(model, embedder, label_names)

def test_keras_import(self):
    # Global Pooling 1D
    model = Sequential()
    model.add(GlobalMaxPooling1D(input_shape=(1, 16)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Global Pooling 2D
    model = Sequential()
    model.add(GlobalMaxPooling2D(input_shape=(1, 16, 16)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 1D
    model = Sequential()
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(1, 16)))
    model.build()
    self.keras_param_test(model, 0, 5)
    # Pooling 2D
    model = Sequential()
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
                           input_shape=(1, 16, 16)))
    model.build()
    self.keras_param_test(model, 0, 8)
    # Pooling 3D
    model = Sequential()
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                           input_shape=(1, 16, 16, 16)))
    model.build()
    self.keras_param_test(model, 0, 11)

# ********** Locally-connected Layers **********

def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'],
           'l3': net['Pooling']}
    # Pool 1D
    net['l1']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l1']
    net['l3']['params']['layer_type'] = '1D'
    net['l3']['shape']['input'] = net['l1']['shape']['output']
    net['l3']['shape']['output'] = [12, 12]
    inp = data(net['l1'], '', 'l1')['l1']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling1D')
    # Pool 2D
    net['l0']['connection']['output'].append('l3')  # append 'l3', matching the 1D/3D cases
    net['l3']['connection']['input'] = ['l0']
    net['l3']['params']['layer_type'] = '2D'
    net['l3']['shape']['input'] = net['l0']['shape']['output']
    net['l3']['shape']['output'] = [3, 226, 226]
    inp = data(net['l0'], '', 'l0')['l0']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling2D')
    # Pool 3D
    net['l2']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l2']
    net['l3']['params']['layer_type'] = '3D'
    net['l3']['shape']['input'] = net['l2']['shape']['output']
    net['l3']['shape']['output'] = [3, 226, 226, 18]
    inp = data(net['l2'], '', 'l2')['l2']
    temp = pooling(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling3D')

# ********** Locally-connected Layers **********

def Discriminator(y_dash, dropout=0.4, lr=0.00001, PATH="Dis.h5"):
    """Creates a discriminator model that takes an image as input and outputs a single
    value, representing whether the input is real or generated. Unlike normal GANs, the
    output is not sigmoid and does not represent a probability! Instead, the output
    should be as large and negative as possible for generated inputs and as large and
    positive as possible for real inputs."""
    model = Sequential()
    model.add(Conv1D(input_shape=(y_dash.shape[1], y_dash.shape[2]),
                     nb_filter=25, filter_length=4, border_mode='same'))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(MaxPooling1D())
    model.add(Conv1D(nb_filter=10, filter_length=4, border_mode='same'))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.add(Activation('linear'))
    opt = Adam(lr, beta_1=0.5, beta_2=0.9)
    # reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=30,
    #                               min_lr=0.000001, verbose=1)
    checkpoint_D = ModelCheckpoint(filepath=PATH, verbose=1, save_best_only=True)
    model.compile(optimizer=opt, loss=wasserstein_loss, metrics=['accuracy'])
    return model, checkpoint_D

def build_text_model(word_index):
    text_input = Input(shape=(MAX_SEQUENCE_LENGTH,))

    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector[:EMBEDDING_DIM]

    embedding_layer = Embedding(embedding_matrix.shape[0],
                                embedding_matrix.shape[1],
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH)

    x = embedding_layer(text_input)
    # Note: setting .trainable on a tensor has no effect; to actually freeze
    # the embeddings, pass trainable=False to the Embedding layer instead.
    x.trainable = False
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    return x, text_input

##
## Image model
##

def test_max_pooling_1d(self):
    model = Sequential()
    model.add(MaxPooling1D(input_shape=(16, 3), pool_size=4))
    self._test_keras_model(model)

def test_conv1d_flatten(self):
    model = Sequential()
    model.add(AveragePooling1D(2, input_shape=(64, 9)))
    model.add(Conv1D(16, 1, padding='same', activation='relu', use_bias=False))
    model.add(MaxPooling1D(2))
    model.add(Flatten())
    model.add(Dense(units=7, activation='softmax', use_bias=False))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_keras_model(model)

def test_conv_batch_1d(self):
    vocabulary_size = 4
    embedding_dimension = 6
    input_length = 10

    model = Sequential()
    model.add(Embedding(vocabulary_size, embedding_dimension,
                        input_length=input_length, trainable=True))
    model.add(Conv1D(5, 2))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling1D(2))
    self._test_keras_model(model, one_dim_seq_flags=[True])

def test_intermediate_rcnn_1d(self):
    x_in = Input(shape=(10, 2))
    # Conv block 1
    x = Conv1D(3, 3, padding='same', name='interm_rcnn_conv1')(x_in)
    x = BatchNormalization(axis=-1, name='interm_rcnn_bn1')(x)
    x = Activation('elu')(x)
    x = MaxPooling1D(pool_size=2, name='interm_rcnn_pool1')(x)
    out1 = x  # out1.shape = (5, 3)
    x = GRU(6, name='gru1')(x)
    out2 = x
    model = Model(x_in, [out1, out2])
    # model = Model(x_in, [out2])
    self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)

def test_clickbait_cnn(self, model_precision=_MLMODEL_FULL_PRECISION):
    # from: https://github.com/saurabhmathur96/clickbait-detector
    vocabulary_size = 500
    embedding_dimension = 30
    input_length = 20

    model = Sequential()
    model.add(Embedding(vocabulary_size, embedding_dimension,
                        input_length=input_length, trainable=True))
    model.add(Conv1D(32, 2))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Conv1D(32, 2))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Conv1D(32, 2))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(MaxPooling1D(17))
    model.add(Flatten())
    model.add(Dense(1, use_bias=True))
    model.add(BatchNormalization())
    model.add(Activation("sigmoid"))
    self._test_keras_model(model, one_dim_seq_flags=[True],
                           model_precision=model_precision)

def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(256, 3,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(2)(x)

    x = kl.Flatten()(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Dense(self.nb_hidden,
                 kernel_initializer=self.init,
                 kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)

def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(256, 3,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(2)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(512, 3,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(2)(x)

    x = kl.Flatten()(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Dense(self.nb_hidden,
                 kernel_initializer=self.init,
                 kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)

def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11, name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.BatchNormalization(name='bn1')(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
    x = self._res_unit(x, [32, 32, 128], stage=1, block=2)

    # 64
    x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
    x = self._res_unit(x, [64, 64, 256], stage=2, block=2)

    # 32
    x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
    x = self._res_unit(x, [128, 128, 512], stage=3, block=2)

    # 16
    x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)

def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11, name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.BatchNormalization(name='bn1')(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, 128, stage=1, block=1, stride=2)
    x = self._res_unit(x, 128, stage=1, block=2)

    # 64
    x = self._res_unit(x, 256, stage=2, block=1, stride=2)

    # 32
    x = self._res_unit(x, 256, stage=3, block=1, stride=2)

    # 32
    x = self._res_unit(x, 512, stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)

def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11, name='conv1',
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu', name='act1')(x)
    x = kl.MaxPooling1D(2, name='pool1')(x)

    # 124
    x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
    x = self._res_unit(x, [32, 32, 128], atrous=2, stage=1, block=2)
    x = self._res_unit(x, [32, 32, 128], atrous=4, stage=1, block=3)

    # 64
    x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
    x = self._res_unit(x, [64, 64, 256], atrous=2, stage=2, block=2)
    x = self._res_unit(x, [64, 64, 256], atrous=4, stage=2, block=3)

    # 32
    x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
    x = self._res_unit(x, [128, 128, 512], atrous=2, stage=3, block=2)
    x = self._res_unit(x, [128, 128, 512], atrous=4, stage=3, block=3)

    # 16
    x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)

def createmodel(self):
    """
    create cnn model structure
    :return: model structure
    """
    max_features = max(self.words.values()) + 1  # input dims
    model = Sequential()
    if self.W is None:
        model.add(Embedding(max_features, self.embedding_length,
                            input_length=self.maxlen, dropout=0.2))
    else:
        model.add(Embedding(max_features, self.layer1_size, weights=[self.W],
                            input_length=self.maxlen, dropout=0.2))
    model.add(Convolution1D(nb_filter=self.nb_filter,
                            filter_length=self.filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=model.output_shape[1]))
    model.add(Flatten())
    model.add(Dense(self.hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Dense(self.nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta',
                  metrics=["accuracy"])
    return model

def createSplitBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    return baseNetwork

def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(64, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(64, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork