The following code examples, extracted from open-source Python projects, illustrate how to use keras.layers.Conv1D().
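Before the project examples, a minimal end-to-end sketch may help orient readers. The layer sizes and shapes below are arbitrary illustrative choices, not drawn from any of the projects that follow.

# Minimal Conv1D usage sketch: 1D convolution slides along the time axis
# of a (timesteps, channels) input. All sizes here are arbitrary examples.
import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D, GlobalMaxPooling1D, Dense

model = Sequential()
# 32 filters, kernel size 3, over sequences of 100 timesteps x 8 channels
model.add(Conv1D(32, 3, padding='same', activation='relu',
                 input_shape=(100, 8)))
model.add(GlobalMaxPooling1D())  # collapse the time axis
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')

# Shape check: (batch, 100, 8) -> (batch, 1)
print(model.predict(np.zeros((2, 100, 8))).shape)  # (2, 1)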
def tsinalis(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1) """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=200, filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, padding='same')(tensor_input)

    # out = merge([out, pooling], mode='sum')
    out = add([out, pooling])
    return out
def test_keras_import(self):
    # Pad 1D
    model = Sequential()
    model.add(ZeroPadding1D(2, input_shape=(224, 3)))
    model.add(Conv1D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)
    # Pad 2D
    model = Sequential()
    model.add(ZeroPadding2D(2, input_shape=(224, 224, 3)))
    model.add(Conv2D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)
    # Pad 3D
    model = Sequential()
    model.add(ZeroPadding3D(2, input_shape=(224, 224, 224, 3)))
    model.add(Conv3D(32, 7, strides=2))
    model.build()
    self.pad_test(model, 'pad_w', 2)


# ********** Export json tests **********
# ********** Data Layers Test **********
def rcnn(input_shape, n_classes):
    """ Input size should be [batch, 1d, ch] = (XXX, 3000, 1) """
    model = Sequential(name='RCNN test')
    model.add(Conv1D(kernel_size=200, filters=20, batch_input_shape=input_shape, activation='elu'))
    model.add(MaxPooling1D(pool_size=20, strides=10))
    model.add(Conv1D(kernel_size=20, filters=200, activation='elu'))
    model.add(MaxPooling1D(pool_size=10, strides=3))
    model.add(Conv1D(kernel_size=20, filters=200, activation='elu'))
    model.add(MaxPooling1D(pool_size=10, strides=3))
    model.add(Dense(512, activation='elu'))
    model.add(Dense(512, activation='elu'))
    model.add(Reshape((1, model.output_shape[1])))
    model.add(LSTM(256, stateful=True, return_sequences=False))
    model.add(Dropout(0.3))
    model.add(Dense(n_classes, activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters
    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, strides=2, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, strides=4, padding='same')(x)
    out = add([out, pooling])
    # out = merge([out, pooling])
    return out
def ResidualBlock1D_helper(layers, kernel_size, filters, final_stride=1):
    def f(_input):
        basic = _input
        for ln in range(layers):
            # basic = BatchNormalization()(basic)  # triggers known keras bug w/ TimeDistributed:
            # https://github.com/fchollet/keras/issues/5221
            basic = ELU()(basic)
            basic = Conv1D(filters, kernel_size, kernel_initializer='he_normal',
                           kernel_regularizer=l2(1.e-4), padding='same')(basic)
        # note that this strides without averaging
        return AveragePooling1D(pool_size=1, strides=final_stride)(Add()([_input, basic]))
    return f
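A hedged usage sketch of the helper above (the input shape and hyperparameters are arbitrary choices, and the Keras imports from the snippet are assumed): because pool_size=1, the trailing AveragePooling1D merely subsamples every final_stride-th timestep rather than averaging windows, which is what the "strides without averaging" comment refers to.

# Hypothetical usage of ResidualBlock1D_helper; assumes ELU, Conv1D, Add,
# AveragePooling1D, and l2 are imported as in the snippet above.
from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64, 16))  # 64 timesteps, 16 channels
# filters must match the input channel count so the residual Add() works
block = ResidualBlock1D_helper(layers=2, kernel_size=3, filters=16, final_stride=2)
out = block(inp)             # residual add, then subsample the time axis by 2
model = Model(inp, out)
print(model.output_shape)    # (None, 32, 16)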
def layer_test_helper_1d_global(layer, channel_index):
    # This should test that the output is the correct shape so it should pass
    # into a Dense layer rather than a Conv layer.
    # The weighted layer is the previous layer.
    # Create model
    main_input = Input(shape=list(random.randint(10, 20, size=2)))
    x = Conv1D(3, 3)(main_input)
    x = layer(x)
    main_output = Dense(5)(x)
    model = Model(inputs=main_input, outputs=main_output)

    # Delete channels
    del_layer_index = 1
    next_layer_index = 3
    del_layer = model.layers[del_layer_index]
    new_model = operations.delete_channels(model, del_layer, channel_index)
    new_w = new_model.layers[next_layer_index].get_weights()

    # Calculate next layer's correct weights
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    correct_w = model.layers[next_layer_index].get_weights()
    correct_w[0] = np.delete(correct_w[0], channel_index, axis=0)

    assert weights_equal(correct_w, new_w)
def test_conv1d_lstm(self):
    from keras.layers import Conv1D, LSTM, Dense

    model = Sequential()
    # input_shape = (time_step, dimensions)
    model.add(Conv1D(32, 3, padding='same', input_shape=(10, 8)))
    # conv1d output shape = (None, 10, 32)
    model.add(LSTM(24))
    model.add(Dense(1, activation='sigmoid'))

    input_names = ['input']
    output_names = ['output']
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)
    self.assertTrue(spec.HasField('neuralNetwork'))

    # Test the inputs and outputs
    self.assertEquals(len(spec.description.input), len(input_names) + 2)
    self.assertEquals(len(spec.description.output), len(output_names) + 2)

    # Test the layer parameters.
    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].convolution)
    self.assertIsNotNone(layers[1].simpleRecurrent)
    self.assertIsNotNone(layers[2].innerProduct)
def test_tiny_conv1d_dilated_random(self):
    np.random.seed(1988)
    input_shape = (20, 1)
    num_kernels = 2
    filter_length = 3

    # Define a model
    model = Sequential()
    model.add(Conv1D(num_kernels, kernel_size=filter_length, padding='valid',
                     input_shape=input_shape, dilation_rate=3))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model)
def test_tiny_conv_upsample_1d_random(self):
    np.random.seed(1988)
    input_dim = 2
    input_length = 10
    filter_length = 3
    nb_filters = 4

    model = Sequential()
    model.add(Conv1D(nb_filters, kernel_size=filter_length, padding='same',
                     input_shape=(input_length, input_dim)))
    model.add(UpSampling1D(size=2))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model)
def test_tiny_conv_crop_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 2
    input_length = 10
    filter_length = 3
    nb_filters = 4

    model = Sequential()
    model.add(Conv1D(nb_filters, kernel_size=filter_length, padding='same',
                     input_shape=(input_length, input_dim)))
    model.add(Cropping1D(cropping=2))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model, model_precision=model_precision)
def test_tiny_conv_pad_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 2
    input_length = 10
    filter_length = 3
    nb_filters = 4

    model = Sequential()
    model.add(Conv1D(nb_filters, kernel_size=filter_length, padding='same',
                     input_shape=(input_length, input_dim)))
    model.add(ZeroPadding1D(padding=2))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_keras_model(model, model_precision=model_precision)
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(self.l1_decay, self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    x = kl.Flatten()(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Dense(self.nb_hidden,
                 kernel_initializer=self.init,
                 kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Conv1D(256, 7,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    gru = kl.recurrent.GRU(256, kernel_regularizer=kernel_regularizer)
    x = kl.Bidirectional(gru)(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
def createBaseNetworkSmall(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def createBaseNetworkLarge(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def netSigmoid(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(128, activation='relu'))
    baseNetwork.add(Dropout(0.2))
    baseNetwork.add(Dense(128, activation='relu'))
    baseNetwork.add(Dropout(0.2))
    return baseNetwork
def createBaseNetworkSmall(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork
def _build_model(self):
    # Deep Conv Neural Net for Deep-Q learning Model
    model = Sequential()
    model.add(Conv1D(128, 3, input_shape=(19, 48)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(64, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.action_size))
    model.add(Activation('sigmoid'))
    model.compile(loss=self._huber_loss,
                  optimizer=Adam(lr=self.learning_rate))
    # model.compile(loss='binary_crossentropy',
    #               optimizer='rmsprop',
    #               metrics=['accuracy'])
    return model
def char_block(in_layer, nb_filter=(64, 100), filter_length=(3, 3), subsample=(2, 1), pool_length=(2, 2)):
    block = in_layer
    for i in range(len(nb_filter)):
        block = Conv1D(filters=nb_filter[i],
                       kernel_size=filter_length[i],
                       padding='valid',
                       activation='tanh',
                       strides=subsample[i])(block)
        # block = BatchNormalization()(block)
        # block = Dropout(0.1)(block)
        if pool_length[i]:
            block = MaxPooling1D(pool_size=pool_length[i])(block)

    # block = Lambda(max_1d, output_shape=(nb_filter[-1],))(block)
    block = GlobalMaxPool1D()(block)
    block = Dense(128, activation='relu')(block)
    return block
def model_lstm(input_shape):
    inp = Input(shape=input_shape)
    model = inp

    if input_shape[0] > 2:
        model = Conv1D(filters=24, kernel_size=3, activation='relu')(model)
    # if input_shape[0] > 0:
    model = TimeDistributed(Conv1D(filters=24, kernel_size=3, activation='relu'))(model)
    model = LSTM(16)(model)
    model = Activation('relu')(model)
    model = Dropout(0.2)(model)
    model = Dense(16)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)
    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model


# %%
# Conv-1D architecture. Just one sample as input
def cnn3adam_slim(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D(kernel_size=50, filters=32, strides=5,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=64, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model
def cnn3adam_filter(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense(1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc2'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model
def cnn3adam_filter_l2(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use more L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter_l2')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=256, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=300, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.005)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(1500, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model
def cnn3adam_filter_morel2_slim(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='cnn3adam_filter_morel2_slim')
    model.add(Conv1D(kernel_size=50, filters=128, strides=5,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.05)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=128, strides=1,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=256, strides=2,
                     kernel_initializer='he_normal', activation='relu',
                     kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense(512, activation='relu', kernel_initializer='he_normal', name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation='softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    # print('reset learning rate')
    return model
def cnn1d(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 1) """
    model = Sequential(name='1D CNN')
    model.add(Conv1D(kernel_size=50, filters=150, strides=5,
                     input_shape=input_shape, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=8, filters=200, strides=2, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=10, strides=2))
    print(model.output_shape)
    model.add(Conv1D(kernel_size=8, filters=400, strides=2, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    print(model.output_shape)
    model.add(Flatten())
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(700, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def cnn1(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='no_MP_small_filters')
    model.add(Conv1D(kernel_size=10, filters=64, strides=2,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=10, filters=64, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=10, filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=10, filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=10, filters=150, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(1024, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
def cnn3(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='mixture')
    model.add(Conv1D(kernel_size=50, filters=64, strides=5,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=5, filters=128, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D(kernel_size=5, filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(500, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
def cnn4(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='large_kernel')
    model.add(Conv1D(kernel_size=100, filters=128, strides=10,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=100, filters=128, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=100, filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
def cnn5(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3) """
    model = Sequential(name='very_large_kernel')
    model.add(Conv1D(kernel_size=200, filters=128, strides=3,
                     input_shape=input_shape,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=200, filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=200, filters=128, strides=1,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Conv1D(kernel_size=10, filters=128, strides=2,
                     kernel_initializer='he_normal', activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(768, activation='elu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adadelta())
    return model
def input_to_prediction_length_ratio(self):
    """Returns the factor by which the output is shorter than the input, as caused by striding."""
    return reduce(lambda x, y: x * y,
                  [layer.strides[0] for layer in self.predictive_net.layers
                   if isinstance(layer, Conv1D)],
                  1)
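To make the stride arithmetic concrete, here is a hedged check (the two-layer net is hypothetical, not taken from the project above): stacking Conv1D layers with strides 5 and 2 shortens the time axis by a factor of 10, matching the product the method computes.

# Hypothetical check of the stride-product rule used above.
from functools import reduce
from keras.models import Sequential
from keras.layers import Conv1D

net = Sequential([
    Conv1D(8, 3, strides=5, padding='same', input_shape=(1000, 1)),
    Conv1D(8, 3, strides=2, padding='same'),
])
ratio = reduce(lambda x, y: x * y,
               [l.strides[0] for l in net.layers if isinstance(l, Conv1D)], 1)
print(ratio)             # 10
print(net.output_shape)  # (None, 100, 8): 1000 timesteps / 10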
def __init__(self, num_filters=64, filter_sizes=[3, 4, 5], dropout_rate=0.5, **conv_kwargs):
    """Yoon Kim's shallow cnn model: https://arxiv.org/pdf/1408.5882.pdf

    Args:
        num_filters: The number of filters to use per `filter_size`. (Default value = 64)
        filter_sizes: The filter sizes for each convolutional layer. (Default value = [3, 4, 5])
        dropout_rate: The dropout rate, passed to the base class. (Default value = 0.5)
        **conv_kwargs: Additional args for building the `Conv1D` layer.
    """
    super(YoonKimCNN, self).__init__(dropout_rate)
    self.num_filters = num_filters
    self.filter_sizes = filter_sizes
    self.conv_kwargs = conv_kwargs
def build_model(self, x):
    pooled_tensors = []
    for filter_size in self.filter_sizes:
        x_i = Conv1D(self.num_filters, filter_size, activation='elu', **self.conv_kwargs)(x)
        x_i = GlobalMaxPooling1D()(x_i)
        pooled_tensors.append(x_i)

    x = pooled_tensors[0] if len(self.filter_sizes) == 1 else concatenate(pooled_tensors, axis=-1)
    return x
def __init__(self, training, sequence_length=None, vocabulary_size=None,
             train_embeddings=SequentialTextEmbeddingClassifier.TRAIN_EMBEDDINGS,
             dropout=DROPOUT, filters=FILTERS, kernel_size=KERNEL_SIZE,
             pool_factor=POOL_FACTOR, learning_rate=LEARNING_RATE,
             language_model=LANGUAGE_MODEL):
    from keras.layers import Dropout, Conv1D, Flatten, MaxPooling1D, Dense
    from keras.models import Sequential
    from keras.optimizers import Adam

    label_names, sequence_length, vocabulary_size = self.parameters_from_training(
        sequence_length, vocabulary_size, training, language_model)
    embedder = TextSequenceEmbedder(vocabulary_size, sequence_length, language_model)

    model = Sequential()
    model.add(self.embedding_layer(embedder, sequence_length, train_embeddings, name="embedding"))
    model.add(Conv1D(filters, kernel_size, padding="valid", activation="relu", strides=1,
                     name="convolution"))
    model.add(MaxPooling1D(pool_size=pool_factor, name="pooling"))
    model.add(Flatten(name="flatten"))
    model.add(Dropout(dropout, name="dropout"))
    model.add(Dense(len(label_names), activation="softmax", name="softmax"))
    optimizer = Adam(lr=learning_rate)
    model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])

    self.filters = filters
    self.kernel_size = kernel_size
    self.pool_factor = pool_factor
    self.dropout = dropout
    super().__init__(model, embedder, label_names)
def build_model(timestep, input_dim, output_dim, dropout=0.5, recurrent_layers_num=4,
                cnn_layers_num=6, lr=0.001):
    inp = Input(shape=(timestep, input_dim))
    output = TimeDistributed(Masking(mask_value=0))(inp)
    # output = inp

    output = Conv1D(128, 1)(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = first_block(output, (64, 128), dropout=dropout)
    output = Dropout(dropout)(output)

    for _ in range(cnn_layers_num):
        output = repeated_block(output, (64, 128), dropout=dropout)

    output = Flatten()(output)
    # output = LSTM(128, return_sequences=False)(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = Dense(output_dim)(output)

    model = Model(inp, output)
    optimizer = Adam(lr=lr)
    model.compile(optimizer, 'mse', ['mae'])
    return model
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'],
           'l3': net['Convolution']}
    # Conv 1D
    net['l1']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l1']
    net['l3']['params']['layer_type'] = '1D'
    net['l3']['shape']['input'] = net['l1']['shape']['output']
    net['l3']['shape']['output'] = [128, 12]
    inp = data(net['l1'], '', 'l1')['l1']
    temp = convolution(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'Conv1D')
    # Conv 2D
    net['l0']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l0']
    net['l3']['params']['layer_type'] = '2D'
    net['l3']['shape']['input'] = net['l0']['shape']['output']
    net['l3']['shape']['output'] = [128, 226, 226]
    inp = data(net['l0'], '', 'l0')['l0']
    temp = convolution(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'Conv2D')
    # Conv 3D
    net['l2']['connection']['output'].append('l3')
    net['l3']['connection']['input'] = ['l2']
    net['l3']['params']['layer_type'] = '3D'
    net['l3']['shape']['input'] = net['l2']['shape']['output']
    net['l3']['shape']['output'] = [128, 226, 226, 18]
    inp = data(net['l2'], '', 'l2')['l2']
    temp = convolution(net['l3'], [inp], 'l3')
    model = Model(inp, temp['l3'])
    self.assertEqual(model.layers[2].__class__.__name__, 'Conv3D')
def build_words2color_model(max_tokens, dim):
    """Build a model that learns to generate colors from words.

    :param max_tokens: maximum number of tokens in an input sequence
    :param dim: dimensionality of each token's embedding vector
    :return: a compiled Keras model
    """
    model = Sequential()
    model.add(Conv1D(128, 1, input_shape=(max_tokens, dim), activation="tanh"))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.5))
    model.add(Dense(3))
    model.compile(loss="mse", optimizer="sgd")
    return model
def Discriminator(y_dash, dropout=0.4, lr=0.00001, PATH="Dis.h5"):
    """Creates a discriminator model that takes an image as input and outputs a single
    value, representing whether the input is real or generated. Unlike normal GANs, the
    output is not sigmoid and does not represent a probability! Instead, the output
    should be as large and negative as possible for generated inputs and as large and
    positive as possible for real inputs."""
    model = Sequential()
    # Keras 2 argument names (filters/kernel_size/padding) replace the original
    # deprecated Keras 1 names (nb_filter/filter_length/border_mode).
    model.add(Conv1D(input_shape=(y_dash.shape[1], y_dash.shape[2]),
                     filters=25, kernel_size=4, padding='same'))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(MaxPooling1D())
    model.add(Conv1D(filters=10, kernel_size=4, padding='same'))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(Dropout(dropout))
    model.add(Dense(1))
    model.add(Activation('linear'))
    opt = Adam(lr, beta_1=0.5, beta_2=0.9)
    # reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=30,
    #                               min_lr=0.000001, verbose=1)
    checkpoint_D = ModelCheckpoint(filepath=PATH, verbose=1, save_best_only=True)
    model.compile(optimizer=opt, loss=wasserstein_loss, metrics=['accuracy'])
    return model, checkpoint_D
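The snippet above references a wasserstein_loss function defined elsewhere in that project. A common Keras definition of the WGAN critic loss is shown below; this is an assumption for completeness, not taken from the original source.

# Assumed definition of the wasserstein_loss referenced above: with labels
# of +1 (real) and -1 (generated), minimizing this drives the critic output
# positive for real inputs and negative for generated ones.
import keras.backend as K

def wasserstein_loss(y_true, y_pred):
    return K.mean(y_true * y_pred)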