Python keras.layers.convolutional module: MaxPooling1D() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use keras.layers.convolutional.MaxPooling1D().
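
MaxPooling1D downsamples the temporal axis of a 3D tensor shaped (batch, steps, channels) by taking the maximum over each window. As a minimal sketch of the modern Keras 2 call (the toy shapes here are illustrative, not taken from any of the projects below):

import numpy as np
from keras.models import Sequential
from keras.layers import MaxPooling1D

# pool_size=2 with 'valid' padding maps (batch, 8, 3) -> (batch, 4, 3)
model = Sequential()
model.add(MaxPooling1D(pool_size=2, strides=2, padding='valid', input_shape=(8, 3)))
x = np.random.rand(1, 8, 3)
print(model.predict(x).shape)  # (1, 4, 3)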

Project: keras_detect_tool_wear | Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear | Author: kidozh
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
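
Note that add() requires both branches to agree in shape: the convolutional branch ends with k2 channels while the pooling branch keeps the input's channel count, so k2 must equal the number of input channels. A minimal sketch of wiring these blocks together (the imports and toy shape are assumptions, not part of the original file):

from keras.models import Model
from keras.layers import Input, Conv1D, BatchNormalization, Activation, Dropout, MaxPooling1D
from keras.layers import add

inp = Input(shape=(128, 64))        # 64 input channels, so k2 below must be 64
x = first_block(inp, (32, 64))
x = repeated_block(x, (32, 64))
model = Model(inputs=inp, outputs=x)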
Project: keras_detect_tool_wear | Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
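
With strides=2 and padding='same', both branches map an input of length L to ceil(L / 2), so the element-wise add still sees matching temporal dimensions; pooling_size=1 with strides=2 simply keeps every other time step.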
Project: rna_protein_binding | Author: wentaozhu
def set_cnn_model(ninstance=4, input_dim = 4, input_length = 107):
    nbfilter = 16
    model = Sequential() # #seqs * seqlen * 4
    #model.add(brnn)
    model.add(Conv2D(input_shape=(ninstance, input_length, input_dim),
                            filters=nbfilter,
                            kernel_size=(1,10),
                            padding="valid",
                            #activation="relu",
                            strides=1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1,3))) # 32 16
    # model.add(Dropout(0.25)) # will be better
    model.add(Conv2D(filters=nbfilter*2, kernel_size=(1,32), padding='valid', activation='relu', strides=1))
    # model.add(Flatten())
    #model.add(Softmax4D(axis=1))

    #model.add(MaxPooling1D(pool_length=3))
    #model.add(Flatten())
    #model.add(Recalc(axis=1))
    # model.add(Flatten())
    # model.add(Dense(nbfilter*2, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(filters=1, kernel_size=(1,1), padding='valid', activation='sigmoid', strides=1))
    return model
Project: jamespy_py3 | Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution1D(
        12, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[0]))

    model.add(Convolution1D(12, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[1]))

    #model.add(Reshape((128*7,)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
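
This snippet uses the Keras 1 API (Convolution1D, border_mode, pool_length). A sketch of the same discriminator under the Keras 2 argument names, assuming INPUT_LN and N_GEN_l are defined as in the surrounding file:

from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Activation, Flatten, Dense

def discriminator_model_v2():  # hypothetical Keras 2 port
    model = Sequential()
    model.add(Conv1D(12, 5, padding='same', input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=N_GEN_l[0]))
    model.add(Conv1D(12, 5, padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=N_GEN_l[1]))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model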
Project: jamespy_py3 | Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution1D(
        12, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=4))

    model.add(Convolution1D(12, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=4))

    #model.add(Reshape((128*7,)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: visual_turing_test-tutorial | Author: mateuszmalinowski
def create(self):
        self.textual_embedding(self, mask_zero=False)
        self.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=self._config.language_cnn_filter_length,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        #self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=False))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Project: keras | Author: GeekLiB
def test_maxpooling_1d():
    for stride in [1, 2]:
        layer_test(convolutional.MaxPooling1D,
                   kwargs={'stride': stride,
                           'border_mode': 'valid'},
                   input_shape=(3, 5, 4))
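
In Keras 2 the pooling arguments were renamed (stride became strides, border_mode became padding), so the equivalent test would read, as a sketch:

layer_test(convolutional.MaxPooling1D,
           kwargs={'strides': stride,
                   'padding': 'valid'},
           input_shape=(3, 5, 4))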
Project: hyperas | Author: maxpumperla
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
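
The double-braced expressions such as {{uniform(0, 1)}} and {{choice([64, 128])}} are hyperas template placeholders that get substituted during hyperparameter search; the function is not runnable as plain Python.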
Project: keras-customized | Author: ambrite
def test_maxpooling_1d():
    for border_mode in ['valid', 'same']:
        for stride in [1, 2]:
            layer_test(convolutional.MaxPooling1D,
                       kwargs={'stride': stride,
                               'border_mode': border_mode},
                       input_shape=(3, 5, 4))
Project: dsde-deep-learning | Author: broadinstitute
def build_small_chrom_label(args):
    model = Sequential()
    model.add(Convolution1D(input_dim=len(args.inputs), 
        input_length=args.window_size, 
        nb_filter=40,
        filter_length=16,
        border_mode='valid',
        activation="relu",
        init='normal'))

    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Convolution1D(nb_filter=64, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2)) 
    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Flatten())

    model.add(Dense(output_dim=32, init='normal'))
    model.add(Activation('relu'))

    model.add( Dense(output_dim=len(args.labels), init='normal') )
    model.add( Activation('softmax'))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=0.5)
    adamo = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    classes = args.labels.keys()
    my_metrics = [metrics.categorical_accuracy, precision, recall ]

    model.compile(loss='categorical_crossentropy', optimizer=adamo, metrics=my_metrics)
    print('model summary:')
    model.summary()

    return model
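
Under Keras 2 names, MaxPooling1D(pool_length=3, stride=3) becomes MaxPooling1D(pool_size=3, strides=3), and the Keras 1 convolution arguments map to filters / kernel_size / padding / kernel_initializer. A sketch of the first two layers ('random_normal' stands in for the old 'normal' initializer):

model.add(Conv1D(filters=40, kernel_size=16, padding='valid', activation='relu',
                 kernel_initializer='random_normal',
                 input_shape=(args.window_size, len(args.inputs))))
model.add(MaxPooling1D(pool_size=3, strides=3))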
Project: dsde-deep-learning | Author: broadinstitute
def build_sequential_chrom_label(args):
    model = Sequential()
    model.add(Convolution1D(input_dim=len(args.inputs), 
        input_length=args.window_size, 
        nb_filter=128,
        filter_length=16,
        border_mode='valid',
        activation="relu",
        init='normal'))

    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=192, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=192, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=256, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2)) 
    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Flatten())

    model.add(Dense(output_dim=50, init='normal'))
    model.add(Activation('relu'))

    model.add( Dense(output_dim=len(args.labels), init='normal') )
    model.add( Activation('softmax'))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=0.5)
    adamo = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    classes = args.labels.keys()
    my_metrics = [metrics.categorical_accuracy, precision, recall]

    model.compile(loss='categorical_crossentropy', optimizer=adamo, metrics=my_metrics)
    print('model summary:')
    model.summary()

    return model
Project: sentiment_comments_zh | Author: zhouhoo
def baseModel(self, nb_filter=250, filter_length=3, hidden_dims=125):
        model = Sequential()

        # we start off with an efficient embedding layer which maps
        # our vocab indices into embedding_dims dimensions
        model.add(Embedding(self.max_words + self.index_from,self.embedding_dims,
                            input_length=self.max_length))
        model.add(Dropout(0.25))

        # we add a Convolution1D, which will learn nb_filter
        # word group filters of size filter_length:

        # filter_length sets the window size and strides the step, as in a 2D CNN.
        model.add(Convolution1D(filters=nb_filter,
                                kernel_size=filter_length,
                                padding='valid',
                                activation='relu',
                                strides=1))
        # we use standard max pooling (halving the output of the previous layer):
        model.add(MaxPooling1D(pool_size=2))

        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())

        # We add a vanilla hidden layer:
        model.add(Dense(hidden_dims))
        model.add(Dropout(0.25))
        model.add(Activation('relu'))

        # We project onto a single unit output layer, and squash it with a sigmoid:
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='rmsprop')

        return model
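
For reference, the 'valid' convolution maps an input of length L to L - kernel_size + 1 steps, and MaxPooling1D(pool_size=2) then yields floor((L - kernel_size + 1) / 2) steps before Flatten; e.g. a max_length of 400 with filter_length = 3 would give 398 convolved steps and 199 pooled steps.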
Project: SarcasmDetection | Author: AniSkywalker
def _build_network(self, vocab_size, maxlen, emb_weights=[], hidden_units=256, trainable=False):
        print('Build model...')
        model = Sequential()

        model.add(Embedding(vocab_size, emb_weights.shape[1], input_length=maxlen, weights=[emb_weights],
                            trainable=trainable))

        # model.add(Reshape((maxlen, emb_weights.shape[1], 1)))

        model.add(Convolution1D(emb_weights.shape[1], 3, kernel_initializer='he_normal', padding='valid',
                                activation='sigmoid',
                                input_shape=(1, maxlen)))
        # model.add(MaxPooling1D(pool_size=3))

        model.add(Convolution1D(emb_weights.shape[1], 3, kernel_initializer='he_normal', padding='valid',
                                activation='sigmoid',
                                input_shape=(1, maxlen - 2)))
        # model.add(MaxPooling1D(pool_size=3))

        model.add(Dropout(0.25))

        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5,
                       return_sequences=True))
        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5))

        model.add(Dense(hidden_units, kernel_initializer='he_normal', activation='sigmoid'))
        model.add(Dense(2, activation='softmax'))
        adam = Adam(lr=0.0001)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        print('No of parameter:', model.count_params())

        print(model.summary())
        return model
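
One caveat: the input_shape=(1, maxlen) and input_shape=(1, maxlen - 2) arguments on the Convolution1D layers have no effect here, because in a Sequential model only the first layer's declared input shape is used; the convolutions actually consume the Embedding output of shape (maxlen, emb_weights.shape[1]).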
Project: SarcasmDetection | Author: AniSkywalker
def _build_network(self, vocab_size, maxlen, embedding_dimension=256, hidden_units=256, trainable=False):
        print('Build model...')
        model = Sequential()

        model.add(
            Embedding(vocab_size, embedding_dimension, input_length=maxlen, embeddings_initializer='glorot_normal'))

        model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                                input_shape=(1, maxlen)))
        # model.add(MaxPooling1D(pool_size=3))
        model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                                input_shape=(1, maxlen - 2)))
        # model.add(MaxPooling1D(pool_size=3))

        # model.add(Dropout(0.25))

        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5,
                       return_sequences=True))
        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5))

        model.add(Dense(hidden_units, kernel_initializer='he_normal', activation='sigmoid'))
        model.add(Dense(2))
        model.add(Activation('softmax'))
        adam = Adam(lr=0.0001)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        print('No of parameter:', model.count_params())

        print(model.summary())
        return model
Project: cervantes | Author: textclf
def _generate_model(self, lembedding, num_classes=2, num_features=128, train_vectors=True):

        model = Sequential()
        if lembedding.vector_box.W is None:
            emb = Embedding(lembedding.vector_box.size,
                            lembedding.vector_box.vector_dim,
                            W_constraint=None,
                            input_length=lembedding.size)
        else:
            emb = Embedding(lembedding.vector_box.size,
                            lembedding.vector_box.vector_dim,
                            weights=[lembedding.vector_box.W], W_constraint=None,
                            input_length=lembedding.size)
        emb.trainable = train_vectors
        model.add(emb)

        model.add(Convolution1D(num_features, 3, init='uniform'))
        model.add(Activation('relu'))
        model.add(MaxPooling1D(2))
        model.add(Dropout(0.25))

        model.add(Convolution1D(num_features, 3, init='uniform'))
        model.add(Activation('relu'))
        model.add(MaxPooling1D(2))
        model.add(Dropout(0.25))

        model.add(Flatten())

        if num_classes == 2:
            model.add(Dense(1, activation='sigmoid'))
            if self.optimizer is None:
                self.optimizer = 'rmsprop'
            model.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])
        else:
            if self.optimizer is None:
                self.optimizer = 'adam'
            model.add(Dense(num_classes, activation='softmax'))
            model.compile(loss='categorical_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])

        return model
Project: keras | Author: NVIDIA
def test_maxpooling_1d():
    for border_mode in ['valid']:
        for stride in [1, 2]:
            layer_test(convolutional.MaxPooling1D,
                       kwargs={'stride': stride,
                               'border_mode': border_mode},
                       input_shape=(3, 5, 4))
Project: event_chain | Author: wangzq870305
def cnn_train(X_train,y_train,vocab_size):

    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(vocab_size, EMBED_SIZE, input_length=MAX_LEN))

    model.add(Dropout(0.25))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_length=2))

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())

    # We add a vanilla hidden layer:
    model.add(Dense(HIDDEN_SIZE))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS, show_accuracy=True)

    return model
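
show_accuracy was a Keras 1 flag on fit(); in Keras 2 accuracy is requested at compile time via metrics=['accuracy'], and nb_epoch was renamed to epochs.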
Project: deep-coref | Author: clarkkev
def test_maxpooling_1d(self):
        nb_samples = 9
        nb_steps = 7
        input_dim = 10

        input = np.ones((nb_samples, nb_steps, input_dim))
        for ignore_border in [True, False]:
            for stride in [1, 2]:
                layer = convolutional.MaxPooling1D(stride=stride, ignore_border=ignore_border)
                layer.input = theano.shared(value=input)
                for train in [True, False]:
                    layer.get_output(train).eval()

                config = layer.get_config()
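
This test exercises the very early, Theano-backed Keras API (a settable layer.input and a get_output(train) method); it predates both the Keras 1 functional API and the Keras 2 names used elsewhere on this page and will not run against modern Keras.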
Project: TextClassification | Author: AlgorTroy
def build(self):
        print('\nBuilding model...')
        # create the model
        embedding_vector_length = settings['EMBEDDING_VECTOR_LENGTH']
        self.model = Sequential()
        self.model.add(Embedding(self.top_words, embedding_vector_length, input_length=self.max_words_limit))
        self.model.add(Convolution1D(nb_filter=settings['CNN_NO_OF_FILTER'], filter_length=settings['CNN_FILTER_LENGTH'], border_mode='same', activation='relu'))
        self.model.add(MaxPooling1D(pool_length=settings['CNN_POOL_LENGTH']))
        self.model.add(LSTM(settings['LSTM_CELLS_COUNT']))
        self.model.add(Dropout(settings['DROPOUT']))
        self.model.add(Dense(self.num_classes, activation='softmax'))
        print(self.model.summary())
Project: visual_turing_test-tutorial | Author: mateuszmalinowski
def create(self):
        self.textual_embedding_fixed_length(self, mask_zero=False)
        self.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=self._config.language_cnn_filter_length,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
        self.add(Flatten())
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Project: dnn_page_vectors | Author: ankit-cliqz
def model(sequence_length=None):
    graph = Graph()
    graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             input_dim=embedding_dim,
                             input_length=sequence_length)
        pool = MaxPooling1D(pool_length=sequence_length - fsz + 1)
        graph.add_node(conv, name='conv-%s' % fsz, input='input')
        graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
        graph.add_node(
            Flatten(),
            name='flatten-%s' %
            fsz,
            input='maxpool-%s' %
            fsz)

    if len(filter_sizes) > 1:
        graph.add_output(name='output',
                         inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                         merge_mode='concat')
    else:
        graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

    # main sequential model
    model = Sequential()
    model.add(
        Embedding(
            vocab_size,
            embedding_dim,
            input_length=sequence_length,
            weights=[embedding_weights]))
    model.add(
        Dropout(
            dropout_prob[0],
            input_shape=(
                sequence_length,
                embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dims))
    model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    return model
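
Because the 'valid' convolution output has length sequence_length - fsz + 1, pooling with exactly that pool_length keeps a single maximum per filter, i.e. global max pooling over time. In Keras 2 the same effect is obtained more directly (and without the Flatten node), as a sketch:

from keras.layers import GlobalMaxPooling1D

pool = GlobalMaxPooling1D()  # (batch, steps, filters) -> (batch, filters)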

# Input Layer with all the query, similar and non similar documents.
Project: dnn_page_vectors | Author: ankit-cliqz
def model(sequence_length=None):
    graph = Graph()
    graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             input_dim=embedding_dim,
                             input_length=sequence_length)
        pool = MaxPooling1D(pool_length=sequence_length - fsz + 1)
        graph.add_node(conv, name='conv-%s' % fsz, input='input')
        graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
        graph.add_node(
            Flatten(),
            name='flatten-%s' %
            fsz,
            input='maxpool-%s' %
            fsz)

    if len(filter_sizes) > 1:
        graph.add_output(name='output',
                         inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                         merge_mode='concat')
    else:
        graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

    # main sequential model
    model = Sequential()

    if conf.feature_level == "word":
        model.add(
            Embedding(
                vocab_size,
                embedding_dim,
                input_length=sequence_length,
                weights=[embedding_weights]))
    elif conf.feature_level == "char" or conf.feature_level == "ngram":
        model.add(
            Embedding(
                vocab_size,
                embedding_dim,
                input_length=sequence_length))


    model.add(
        Dropout(
            dropout_prob[0],
            input_shape=(
                sequence_length,
                embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dims))
    # model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    return model

# Input Layer with all the query, similar and non similar documents.
Project: dnn_page_vectors | Author: ankit-cliqz
def model(sequence_length=None):
    graph = Graph()
    graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             input_dim=embedding_dim,
                             input_length=sequence_length)
        pool = MaxPooling1D(pool_length=sequence_length - fsz + 1)
        graph.add_node(conv, name='conv-%s' % fsz, input='input')
        graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
        graph.add_node(
            Flatten(),
            name='flatten-%s' %
            fsz,
            input='maxpool-%s' %
            fsz)

    if len(filter_sizes) > 1:
        graph.add_output(name='output',
                         inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                         merge_mode='concat')
    else:
        graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

    # main sequential model
    model = Sequential()
    model.add(
        Embedding(
            vocab_size,
            embedding_dim,
            input_length=sequence_length,
            weights=[embedding_weights]))
    model.add(
        Dropout(
            dropout_prob[0],
            input_shape=(
                sequence_length,
                embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dims))
    # model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    return model

# Input Layer with all the query, similar and non similar documents.
Project: Sub-word-LSTM | Author: DrImpossible
def RNN(X_train,y_train,args):
    """
    Purpose -> Define and train the proposed LSTM network
    Input   -> Data, Labels and model hyperparameters
    Output  -> Trained LSTM network
    """
    #Sets the model hyperparameters
    #Embedding hyperparameters
    max_features = args[0]
    maxlen = args[1]
    embedding_size = args[2]
    # Convolution hyperparameters
    filter_length = args[3]
    nb_filter = args[4]
    pool_length = args[5]
    # LSTM hyperparameters
    lstm_output_size = args[6]
    # Training hyperparameters
    batch_size = args[7]
    nb_epoch = args[8]
    numclasses = args[9]
    test_size = args[10] 

    #Format conversion for y_train for compatibility with Keras
    y_train = np_utils.to_categorical(y_train, numclasses) 
    #Train & Validation data splitting
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=test_size, random_state=42)

    #Build the sequential model
    # Model Architecture is:
    # Input -> Embedding -> Conv1D+Maxpool1D -> LSTM -> LSTM -> FC-1 -> Softmaxloss
    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size, dropout_W=0.2, dropout_U=0.2, return_sequences=True))
    model.add(LSTM(lstm_output_size, dropout_W=0.2, dropout_U=0.2, return_sequences=False))
    model.add(Dense(numclasses))
    model.add(Activation('softmax'))

    # Optimizer is Adamax along with categorical crossentropy loss
    model.compile(loss='categorical_crossentropy',
                optimizer='adamax',
                metrics=['accuracy'])


    print('Train...')
    # Train for nb_epoch epochs, shuffling the training data after every epoch and validating on the held-out split
    model.fit(X_train, y_train, 
              batch_size=batch_size, 
              shuffle=True, 
              nb_epoch=nb_epoch,
              validation_data=(X_valid, y_valid))
    return model
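
dropout_W and dropout_U are Keras 1 LSTM arguments; in Keras 2 they became dropout and recurrent_dropout, so the recurrent layers would read, as a sketch:

model.add(LSTM(lstm_output_size, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model.add(LSTM(lstm_output_size, dropout=0.2, recurrent_dropout=0.2))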
Project: event_chain | Author: wangzq870305
def cnn_combine_train(X_train_list,y_train,vocab_size):
    N=len(X_train_list)

    X_train_list = [sequence.pad_sequences(x_train, maxlen=MAX_LEN) for x_train in X_train_list]

    input_list=[]
    out_list=[]
    for i in range(N):
        input,out=get_embedding_input_output('f%d' %i,vocab_size)
        input_list.append(input)
        out_list.append(out)

    x = merge(out_list,mode='concat')

    x = Dropout(0.25)(x)

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    x = Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1)(x)

    # we use standard max pooling (halving the output of the previous layer):
    x = MaxPooling1D(pool_length=2)(x)

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    x = Flatten()(x)

    # We add a vanilla hidden layer:
    x = Dense(HIDDEN_SIZE)(x)
    x = Dropout(0.25)(x)
    x = Activation('relu')(x)

    # We project onto a single unit output layer, and squash it with a sigmoid:
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)

    model = Model(input=input_list, output=x)

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train_list, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)

    return model
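
merge(..., mode='concat') is the Keras 1 functional merge; the Keras 2 replacement is the concatenate() function (and Model(input=..., output=...) became Model(inputs=..., outputs=...)). A sketch:

from keras.layers import concatenate

x = concatenate(out_list)              # replaces merge(out_list, mode='concat')
model = Model(inputs=input_list, outputs=x)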
Project: rf_helicopter | Author: dandxy89
def create_neural_network_rnn(self):
        """
        Create the Neural Network Model

        :return: Keras Model
        """

        model = Sequential()

        # we start off with an efficient embedding layer which maps
        # our vocab indices into embedding_dims dimensions
        model.add(Embedding(12,  # Number of Features from State Space
                            300,  # Vector Size
                            input_length=self.input_dim))

        # we add a Convolution1D, which will learn nb_filter
        # word group filters of size filter_length:
        model.add(Convolution1D(nb_filter=self.nb_filter,
                                filter_length=self.filter_length,
                                border_mode='valid',
                                activation='relu',
                                subsample_length=1))

        # we use standard max pooling (downsampling the previous layer's
        # output by a factor of pool_length):
        model.add(MaxPooling1D(pool_length=self.pool_length))
        model.add(Dropout(self.dropout))

        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())

        # We add a vanilla hidden layer:
        model.add(Dense(self.neurons))
        model.add(Dropout(self.dropout))
        model.add(Activation('relu'))

        # Output layer: one linear unit per action (Q-value estimates),
        # so no sigmoid squashing here:
        model.add(Dense(len(self.actions)))
        model.add(Activation('linear'))

        model.compile(loss='mse',
                      optimizer=Adadelta(lr=0.00025))

        print(model.summary())

        return model