Python keras.layers.convolutional module: Convolution1D() example source code

We extracted the following 44 code examples from open-source Python projects to illustrate how to use keras.layers.convolutional.Convolution1D().
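As a quick baseline before the project snippets, here is a minimal, self-contained sketch of the layer, written against the Keras 1.x argument names that most of the examples below use:

# Convolution1D expects input of shape (batch, steps, input_dim)
# and produces output of shape (batch, new_steps, nb_filter).
from keras.models import Sequential
from keras.layers.convolutional import Convolution1D

model = Sequential()
model.add(Convolution1D(nb_filter=64,        # number of output filters
                        filter_length=3,     # window size along the step axis
                        border_mode='same',  # keep the number of steps unchanged
                        activation='relu',
                        input_shape=(100, 16)))  # 100 steps, 16 features each
model.compile(loss='mse', optimizer='sgd')
print(model.output_shape)  # (None, 100, 64)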

Project: keras-molecules    Author: maxhodak
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
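
The sampling and vae_loss closures above implement the standard VAE reparameterization trick and loss. In math, the code computes

$$ z = z_{\mathrm{mean}} + \exp(z_{\mathrm{logvar}}/2)\,\epsilon, \qquad \epsilon \sim \mathcal{N}(0, I) $$

$$ \mathcal{L} = \mathrm{max\_length} \cdot \mathrm{BCE}(x, \hat{x}) \;-\; \tfrac{1}{2}\,\mathrm{mean}\big(1 + z_{\mathrm{logvar}} - z_{\mathrm{mean}}^2 - e^{z_{\mathrm{logvar}}}\big), $$

where the second term is the KL divergence between the approximate posterior and the standard normal prior.
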
Project: eXposeDeepNeuralNetwork    Author: joshsaxe
def getconvmodel(filter_length,nb_filter):
    model = Sequential()
    model.add(Convolution1D(nb_filter=nb_filter,
                            input_shape=(100,32),
                            filter_length=filter_length,
                            border_mode='same',
                            activation='relu',
                            subsample_length=1))
    model.add(Lambda(sum_1d, output_shape=(nb_filter,)))
    #model.add(BatchNormalization(mode=0))
    model.add(Dropout(0.5))
    return model
Project: jamespy_py3    Author: jskDr
def generator_model_bpsk(no_bits_in_a_frame):
    """
    BPSK outputs will be generated by a CNN.
    The CNN computes 1 - 2x, because x is binary and the output should be
    bipolar. It is also 1-tap processing; for 16-QAM it will be more
    complicated. I should consider how to optimize the stride or the
    oversampling/max pooling in a network. In GANs, hyperparameters can be
    optimized better than in conventional feedforward networks.
    While studying RNN-LSTM, I realized that many hyperparameters, such as
    gating variables, are optimized by the network itself. Those values used
    to be tuned by grid search or other external techniques, but an RNN can
    do this by itself, online. This capability may be part of what makes
    RNNs so powerful. Similarly, many hyperparameters can be easily
    optimized in GANs.
    """
    model = Sequential()
    model.add(Convolution1D(
        1, 1,
        input_shape=(no_bits_in_a_frame, 1)))
    return model
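
To make the docstring's "1-2x" concrete: the single 1-tap filter above realizes the mapping exactly once its weight is fixed to -2 and its bias to 1, sending bit 0 to +1 and bit 1 to -1. A minimal usage sketch (illustrative only, not part of the project):

import numpy as np

bpsk_gen = generator_model_bpsk(no_bits_in_a_frame=8)
W, b = bpsk_gen.layers[0].get_weights()   # W has shape (1, 1, 1, 1) in Keras 1.x
bpsk_gen.layers[0].set_weights([np.full_like(W, -2.0), np.ones_like(b)])

bits = np.random.randint(0, 2, size=(1, 8, 1)).astype('float32')
symbols = bpsk_gen.predict(bits)          # elementwise 1 - 2 * bits
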
Project: jamespy_py3    Author: jskDr
def generator_model():  # CDNN Model
    print(INPUT_LN, N_GEN_l, CODE_LN) 

    model = Sequential()

    model.add(Convolution1D(16, 5, border_mode='same', input_shape=(CODE_LN, 1)))
    model.add(Activation('relu'))

    model.add(UpSampling1D(length=N_GEN_l[0]))
    model.add(Convolution1D(32, 5, border_mode='same'))
    model.add(Activation('relu'))

    model.add(UpSampling1D(length=N_GEN_l[1]))
    model.add(Convolution1D(1, 5, border_mode='same'))
    model.add(Activation('tanh'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution1D(
        12, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[0]))

    model.add(Convolution1D(12, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[1]))

    #model.add(Reshape((128*7,)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution1D(
        12, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=4))

    model.add(Convolution1D(12, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=4))

    #model.add(Reshape((128*7,)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie
def BuildModel(dim):
    '''
    :param dim: input shape.

    The Separable_SimpleRNN can be added after a linear convolution.
    Make sure that nb_filter in the convolution layer equals output_dim in the RNN.
    '''
    inp = Input(shape=dim)
    #
    x_conv = Convolution1D(nb_filter=16, filter_length=5, border_mode='same')(inp)
    x_rnn = Separable_SimpleRNN(output_dim=16, activation='relu')(x_conv)
    #
    model = Model(input=inp, output=x_rnn)
    return model

# Load an example image.
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie
def BuildModel(dim):
    '''
    :param dim: input shape.

    The Separable_SimpleRNN can be added after a linear convolution.
    Make sure that nb_filter in the convolution layer equals output_dim in the RNN.
    '''
    inp = Input(shape=dim)
    #
    x_conv = Convolution1D(nb_filter=16, filter_length=5, border_mode='same')(inp)
    x_rnn = LN_SimpleRNN(output_dim=16, activation='tanh')(x_conv)
    #
    model = Model(input=inp, output=x_rnn)
    return model


# Load an example image.
Project: visual_turing_test-tutorial    Author: mateuszmalinowski
def create(self):
        self.textual_embedding(self, mask_zero=False)
        self.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=self._config.language_cnn_filter_length,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        #self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=False))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Project: visual_turing_test-tutorial    Author: mateuszmalinowski
def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        model_list = [None] * self._config.language_cnn_views
        for j in xrange(1,self._config.language_cnn_views+1):
            current_view = Sequential()
            self.textual_embedding(current_view, mask_zero=True)
            current_view.add(Convolution1D(
                nb_filter=self._config.language_cnn_filters,
                filter_length=j,
                border_mode='valid',
                activation=self._config.language_cnn_activation,
                subsample_length=1))
            self.temporal_pooling(current_view)
            model_list[j-1] = current_view

        self.add(Merge(model_list, mode='concat'))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Project: keras    Author: GeekLiB
def test_convolution_1d():
    nb_samples = 2
    nb_steps = 8
    input_dim = 2
    filter_length = 3
    nb_filter = 3

    for border_mode in _convolution_border_modes:
        for subsample_length in [1, 2]:
            if border_mode == 'same' and subsample_length != 1:
                continue

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))
Project: aes    Author: feidong1991
def build_hcnn_model(opts, vocab_size=0, maxnum=50, maxlen=50, embedd_dim=50, embedding_weights=None, verbose=False):

    N = maxnum
    L = maxlen

    logger.info("Model parameters: max_sentnum = %d, max_sentlen = %d, embedding dim = %s, nbfilters = %s, filter1_len = %s, filter2_len = %s, drop rate = %s, l2 = %s" % (N, L, embedd_dim,
        opts.nbfilters, opts.filter1_len, opts.filter2_len, opts.dropout, opts.l2_value))

    word_input = Input(shape=(N*L,), dtype='int32', name='word_input')
    x = Embedding(output_dim=embedd_dim, input_dim=vocab_size, input_length=N*L, weights=embedding_weights, name='x')(word_input)
    drop_x = Dropout(opts.dropout, name='drop_x')(x)

    resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)

    z = TimeDistributed(Convolution1D(opts.nbfilters, opts.filter1_len, border_mode='valid'), name='z')(resh_W)

    avg_z = TimeDistributed(AveragePooling1D(pool_length=L-opts.filter1_len+1), name='avg_z')(z)    # shape= (N, 1, nbfilters)

    resh_z = Reshape((N, opts.nbfilters), name='resh_z')(avg_z)     # shape(N, nbfilters)

    hz = Convolution1D(opts.nbfilters, opts.filter2_len, border_mode='valid', name='hz')(resh_z)
    # avg_h = MeanOverTime(mask_zero=True, name='avg_h')(hz)

    avg_hz = GlobalAveragePooling1D(name='avg_hz')(hz)
    y = Dense(output_dim=1, activation='sigmoid', name='output')(avg_hz)

    model = Model(input=word_input, output=y)

    if verbose:
        model.summary()

    start_time = time.time()
    model.compile(loss='mse', optimizer='rmsprop')
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f s" % total_time)

    return model
Project: hyperas    Author: maxpumperla
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed (positional) parameters here, which is bad style,
    # but it is done to demonstrate that this also works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
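
For context (usage outside the snippet, following hyperas' documented API): the model function above is not called directly. It is handed to optim.minimize, which substitutes the {{uniform(...)}} and {{choice(...)}} templates on each trial. A minimal sketch, assuming a data() function that returns the six values model() expects:

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model, data=data,
                                      algo=tpe.suggest, max_evals=5,
                                      trials=Trials())
print('Best hyperparameter choices:', best_run)
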
Project: knowledgeflow    Author: 3rduncle
def buildConvolution(self, name):
        filters = self.params.get('filters')
        nb_filter = self.params.get('nb_filter')
        assert filters
        assert nb_filter
        convs = []
        for fsz in filters:
            layer_name = '%s-conv-%d' % (name, fsz)
            conv = Convolution1D(
                nb_filter=nb_filter,
                filter_length=fsz,
                border_mode='valid',
                #activation='relu',
                subsample_length=1,
                init='glorot_uniform',
                #init=init,
                #init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
                W_constraint=maxnorm(self.params.get('w_maxnorm')),
                b_constraint=maxnorm(self.params.get('b_maxnorm')),
                #W_regularizer=regularizers.l2(self.params.get('w_l2')),
                #b_regularizer=regularizers.l2(self.params.get('b_l2')),
                #input_shape=(self.q_length, self.wdim),
                name=layer_name
            )
            convs.append(conv)
        self.layers['%s-convolution' % name] = convs
Project: knowledgeflow    Author: 3rduncle
def buildConvolution(self, name):
        filters = self.params.get('filters')
        nb_filter = self.params.get('nb_filter')
        assert filters
        assert nb_filter
        convs = []
        for fsz in filters:
            layer_name = '%s-conv-%d' % (name, fsz)
            conv = Convolution1D(
                nb_filter=nb_filter,
                filter_length=fsz,
                border_mode='valid',
                #activation='relu',
                subsample_length=1,
                init='glorot_uniform',
                #init=init,
                #init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
                W_constraint=maxnorm(self.params.get('w_maxnorm')),
                b_constraint=maxnorm(self.params.get('b_maxnorm')),
                #W_regularizer=regularizers.l2(self.params.get('w_l2')),
                #b_regularizer=regularizers.l2(self.params.get('b_l2')),
                #input_shape=(self.q_length, self.wdim),
                name=layer_name
            )
            convs.append(conv)
        self.layers['%s-convolution' % name] = convs
Project: keras-customized    Author: ambrite
def test_convolution_1d():
    nb_samples = 2
    nb_steps = 8
    input_dim = 2
    filter_length = 3
    nb_filter = 3

    for border_mode in _convolution_border_modes:
        for subsample_length in [1, 2]:
            if border_mode == 'same' and subsample_length != 1:
                continue

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))
Project: dsde-deep-learning    Author: broadinstitute
def build_small_chrom_label(args):
    model = Sequential()
    model.add(Convolution1D(input_dim=len(args.inputs), 
        input_length=args.window_size, 
        nb_filter=40,
        filter_length=16,
        border_mode='valid',
        activation="relu",
        init='normal'))

    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Convolution1D(nb_filter=64, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2)) 
    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Flatten())

    model.add(Dense(output_dim=32, init='normal'))
    model.add(Activation('relu'))

    model.add( Dense(output_dim=len(args.labels), init='normal') )
    model.add( Activation('softmax'))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=0.5)
    adamo = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    classes = args.labels.keys()
    my_metrics = [metrics.categorical_accuracy, precision, recall ]

    model.compile(loss='categorical_crossentropy', optimizer=adamo, metrics=my_metrics)
    print('model summary:')
    model.summary()

    return model
Project: dsde-deep-learning    Author: broadinstitute
def build_sequential_chrom_label(args):
    model = Sequential()
    model.add(Convolution1D(input_dim=len(args.inputs), 
        input_length=args.window_size, 
        nb_filter=128,
        filter_length=16,
        border_mode='valid',
        activation="relu",
        init='normal'))

    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=192, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=192, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2))
    model.add(Convolution1D(nb_filter=256, filter_length=16, activation="relu", init='normal', border_mode='valid'))
    model.add(Dropout(0.2)) 
    model.add(MaxPooling1D(pool_length=3, stride=3))
    model.add(Flatten())

    model.add(Dense(output_dim=50, init='normal'))
    model.add(Activation('relu'))

    model.add( Dense(output_dim=len(args.labels), init='normal') )
    model.add( Activation('softmax'))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=0.5)
    adamo = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)
    classes = args.labels.keys()
    my_metrics = [metrics.categorical_accuracy, precision, recall]

    model.compile(loss='categorical_crossentropy', optimizer=adamo, metrics=my_metrics)
    print('model summary:')
    model.summary()

    return model
Project: sentiment_comments_zh    Author: zhouhoo
def baseModel(self, nb_filter=250, filter_length=3, hidden_dims=125):
        model = Sequential()

        # we start off with an efficient embedding layer which maps
        # our vocab indices into embedding_dims dimensions
        model.add(Embedding(self.max_words + self.index_from,self.embedding_dims,
                            input_length=self.max_length))
        model.add(Dropout(0.25))

        # we add a Convolution1D, which will learn nb_filter
        # word group filters of size filter_length:

        # filter_length is the filter (window) size; subsample_length is the stride,
        # analogous to the step of a 2D CNN.
        model.add(Convolution1D(filters=nb_filter,
                                kernel_size=filter_length,
                                padding='valid',
                                activation='relu',
                                strides=1))
        # we use standard max pooling (halving the output of the previous layer):
        model.add(MaxPooling1D(pool_size=2))

        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())

        # We add a vanilla hidden layer:
        model.add(Dense(hidden_dims))
        model.add(Dropout(0.25))
        model.add(Activation('relu'))

        # We project onto a single unit output layer, and squash it with a sigmoid:
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        model.compile(loss='binary_crossentropy',
                      optimizer='rmsprop')

        return model
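
Note that this snippet, unlike most others on this page, is already written against the Keras 2 argument names. The renamings to keep in mind when reading the two styles side by side:

    nb_filter         -> filters
    filter_length     -> kernel_size
    border_mode       -> padding
    subsample_length  -> strides
    init              -> kernel_initializer
    pool_length       -> pool_size
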
Project: SarcasmDetection    Author: AniSkywalker
def _build_network(self, vocab_size, maxlen, emb_weights=[], hidden_units=256, trainable=False):
        print('Build model...')
        model = Sequential()

        model.add(Embedding(vocab_size, emb_weights.shape[1], input_length=maxlen, weights=[emb_weights],
                            trainable=trainable))

        # model.add(Reshape((maxlen, emb_weights.shape[1], 1)))

        model.add(Convolution1D(emb_weights.shape[1], 3, kernel_initializer='he_normal', padding='valid',
                                activation='sigmoid',
                                input_shape=(1, maxlen)))
        # model.add(MaxPooling1D(pool_size=3))

        model.add(Convolution1D(emb_weights.shape[1], 3, kernel_initializer='he_normal', padding='valid',
                                activation='sigmoid',
                                input_shape=(1, maxlen - 2)))
        # model.add(MaxPooling1D(pool_size=3))

        model.add(Dropout(0.25))

        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5,
                       return_sequences=True))
        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5))

        model.add(Dense(hidden_units, kernel_initializer='he_normal', activation='sigmoid'))
        model.add(Dense(2, activation='softmax'))
        adam = Adam(lr=0.0001)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        print('Number of parameters:', model.count_params())

        print(model.summary())
        return model
Project: SarcasmDetection    Author: AniSkywalker
def _build_network(self, vocab_size, maxlen, embedding_dimension=256, hidden_units=256, trainable=False):
        print('Build model...')
        model = Sequential()

        model.add(
            Embedding(vocab_size, embedding_dimension, input_length=maxlen, embeddings_initializer='glorot_normal'))

        model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                                input_shape=(1, maxlen)))
        # model.add(MaxPooling1D(pool_size=3))
        model.add(Convolution1D(hidden_units, 3, kernel_initializer='he_normal', padding='valid', activation='sigmoid',
                                input_shape=(1, maxlen - 2)))
        # model.add(MaxPooling1D(pool_size=3))

        # model.add(Dropout(0.25))

        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5,
                       return_sequences=True))
        model.add(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5))

        model.add(Dense(hidden_units, kernel_initializer='he_normal', activation='sigmoid'))
        model.add(Dense(2))
        model.add(Activation('softmax'))
        adam = Adam(lr=0.0001)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        print('Number of parameters:', model.count_params())

        print(model.summary())
        return model
Project: cervantes    Author: textclf
def _generate_model(self, lembedding, num_classes=2, num_features=128, train_vectors=True):

        model = Sequential()
        if lembedding.vector_box.W is None:
            emb = Embedding(lembedding.vector_box.size,
                            lembedding.vector_box.vector_dim,
                            W_constraint=None,
                            input_length=lembedding.size)
        else:
            emb = Embedding(lembedding.vector_box.size,
                            lembedding.vector_box.vector_dim,
                            weights=[lembedding.vector_box.W], W_constraint=None,
                            input_length=lembedding.size)
        emb.trainable = train_vectors
        model.add(emb)

        model.add(Convolution1D(num_features, 3, init='uniform'))
        model.add(Activation('relu'))
        model.add(MaxPooling1D(2))
        model.add(Dropout(0.25))

        model.add(Convolution1D(num_features, 3, init='uniform'))
        model.add(Activation('relu'))
        model.add(MaxPooling1D(2))
        model.add(Dropout(0.25))

        model.add(Flatten())

        if num_classes == 2:
            model.add(Dense(1, activation='sigmoid'))
            if self.optimizer is None:
                self.optimizer = 'rmsprop'
            model.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])
        else:
            if self.optimizer is None:
                self.optimizer = 'adam'
            model.add(Dense(num_classes, activation='softmax'))
            model.compile(loss='categorical_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])

        return model
Project: keras    Author: NVIDIA
def test_convolution_1d():
    nb_samples = 2
    nb_steps = 8
    input_dim = 2
    filter_length = 3
    nb_filter = 3

    for border_mode in _convolution_border_modes:
        for subsample_length in [1, 2]:
            if border_mode == 'same' and subsample_length != 1:
                continue

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))

            layer_test(convolutional.Convolution1D,
                       kwargs={'nb_filter': nb_filter,
                               'filter_length': filter_length,
                               'border_mode': border_mode,
                               'W_regularizer': 'l2',
                               'b_regularizer': 'l2',
                               'activity_regularizer': 'activity_l2',
                               'subsample_length': subsample_length},
                       input_shape=(nb_samples, nb_steps, input_dim))
Project: event_chain    Author: wangzq870305
def cnn_train(X_train,y_train,vocab_size):

    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(vocab_size, EMBED_SIZE, input_length=MAX_LEN))

    model.add(Dropout(0.25))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_length=2))

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())

    # We add a vanilla hidden layer:
    model.add(Dense(HIDDEN_SIZE))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS, show_accuracy=True)

    return model
Project: Keras-CNN-QA    Author: shashankg7
def Model1(dim, max_ques_len, max_ans_len, vocab_lim, embedding):
    inp_q = Input(shape=(max_ques_len,))
    embedding_q  = Embedding(vocab_lim, dim, input_length=max_ques_len, weights=[embedding], trainable=False)(inp_q)
    conv_q= Convolution1D(100, 5, border_mode='same', activation='relu')(embedding_q)
    conv_q = Dropout(0.25)(conv_q)
    pool_q = GlobalMaxPooling1D()(conv_q)

    inp_a = Input(shape=(max_ans_len,))
    embedding_a  = Embedding(vocab_lim, dim, input_length=max_ans_len, weights=[embedding], trainable=False)(inp_a)
    conv_a = Convolution1D(100, 5, border_mode='same', activation='relu')(embedding_a)
    conv_a = Dropout(0.25)(conv_a)
    pool_a = GlobalMaxPooling1D()(conv_a)

    #sim = SimLayer(1)([pool_q, pool_a])
    sim = merge([Dense(100, bias=False)(pool_q), pool_a], mode='dot')
    # print pool_a, pool_q

    # model1 = merge([pool_q, pool_a, sim], mode='concat')
    # model = Model(input=[inp_q, inp_a], output=[model1])
    # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # print model.summary()
    # return model


    model_sim = merge([pool_q, pool_a, sim], mode='concat')
    print model_sim

    # #model_final = Flatten()(model_sim)
    model_final = Dropout(0.5)(model_sim)
    model_final = Dense(201)(model_final)
    model_final = Dropout(0.5)(model_final)
    model_final = Dense(1, activation='sigmoid')(model_final)

    model = Model(input=[inp_q, inp_a], output=[model_final])
    print(model.output_shape)
    model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
    print model.summary()
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model_r0():
    model = Sequential()
    model.add(Convolution1D(
        2, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    #model.add(Reshape((2*INPUT_LN,)))
    model.add(Flatten())
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def generator_model_44():  # CDNN Model
    model = Sequential()

    model.add(Convolution1D(16, 5, border_mode='same', input_shape=(CODE_LN, 1)))
    model.add(Activation('relu'))

    model.add(UpSampling1D(length=4))
    model.add(Convolution1D(32, 5, border_mode='same'))
    model.add(Activation('relu'))

    model.add(UpSampling1D(length=4))
    model.add(Convolution1D(1, 5, border_mode='same'))
    # model.add(Activation('relu'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model_r0():
    model = Sequential()
    model.add(Convolution1D(
        2, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    #model.add(Reshape((2*INPUT_LN,)))
    model.add(Flatten())
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def generator_model_44():  # CDNN Model
    model = Sequential()

    model.add(Convolution1D(16, 5, border_mode='same', input_shape=(CODE_LN, 1)))
    model.add(Activation('relu'))

    model.add(UpSampling1D(length=4))
    model.add(Convolution1D(32, 5, border_mode='same'))
    model.add(Activation('relu'))

    model.add(UpSampling1D(length=4))
    model.add(Convolution1D(1, 5, border_mode='same'))
    # model.add(Activation('relu'))
    return model
Project: deep-coref    Author: clarkkev
def test_convolution_1d(self):
        nb_samples = 9
        nb_steps = 7
        input_dim = 10
        filter_length = 6
        nb_filter = 5

        weights_in = [np.ones((nb_filter, input_dim, filter_length, 1)), np.ones(nb_filter)]

        input = np.ones((nb_samples, nb_steps, input_dim))
        for weight in [None, weights_in]:
            for border_mode in ['valid', 'full', 'same']:
                for subsample_length in [1, 3]:
                    if border_mode == 'same' and subsample_length != 1:
                        continue
                    for W_regularizer in [None, 'l2']:
                        for b_regularizer in [None, 'l2']:
                            for act_regularizer in [None, 'l2']:
                                layer = convolutional.Convolution1D(
                                    nb_filter, filter_length, weights=weight,
                                    border_mode=border_mode, W_regularizer=W_regularizer,
                                    b_regularizer=b_regularizer, activity_regularizer=act_regularizer,
                                    subsample_length=subsample_length, input_shape=(None, input_dim))

                                layer.input = theano.shared(value=input)
                                for train in [True, False]:
                                    out = layer.get_output(train).eval()
                                    assert input.shape[0] == out.shape[0]
                                    if border_mode == 'same' and subsample_length == 1:
                                        assert input.shape[1] == out.shape[1]

                                config = layer.get_config()
Project: TextClassification    Author: AlgorTroy
def build(self):
        print('\nBuilding model...')
        # create the model
        embedding_vector_length = settings['EMBEDDING_VECTOR_LENGTH']
        self.model = Sequential()
        self.model.add(Embedding(self.top_words, embedding_vector_length, input_length=self.max_words_limit))
        self.model.add(Convolution1D(nb_filter=settings['CNN_NO_OF_FILTER'], filter_length=settings['CNN_FILTER_LENGTH'], border_mode='same', activation='relu'))
        self.model.add(MaxPooling1D(pool_length=settings['CNN_POOL_LENGTH']))
        self.model.add(LSTM(settings['LSTM_CELLS_COUNT']))
        self.model.add(Dropout(settings['DROPOUT']))
        self.model.add(Dense(self.num_classes, activation='softmax'))
        print(self.model.summary())
Project: visual_turing_test-tutorial    Author: mateuszmalinowski
def create(self):
        self.textual_embedding_fixed_length(self, mask_zero=False)
        self.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=self._config.language_cnn_filter_length,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
        self.add(Flatten())
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Project: visual_turing_test-tutorial    Author: mateuszmalinowski
def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        unigram = Sequential() 
        self.textual_embedding(unigram, mask_zero=True)
        unigram.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=1,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(unigram)

        bigram = Sequential()
        self.textual_embedding(bigram, mask_zero=True)
        bigram.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=2,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(bigram)

        trigram = Sequential()
        self.textual_embedding(trigram, mask_zero=True)
        trigram.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=3,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(trigram)

        self.add(Merge([unigram, bigram, trigram], mode='concat'))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Project: GlottGAN    Author: bajibabu
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = merge([gen_input, aux_input], mode="concat", concat_axis=-1)

    # Dense Layer 1
    x = Dense(10 * 100)(x) 
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 10*100

    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x) # shape is 100 x 10

    # Conv Layer 1
    x = Convolution1D(nb_filter=250,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 100 x 250
    x = UpSampling1D(length=2)(x) # output shape is 200 x 250

    # Conv Layer 2
    x = Convolution1D(nb_filter=100,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 200 x 100
    x = UpSampling1D(length=2)(x) # output shape is 400 x 100


    # Conv Layer 3
    x = Convolution1D(nb_filter=1,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x) # final output shape is 400 x 1

    generator_model = Model(
        input=[gen_input, aux_input], output=[x], name=model_name)

    return generator_model
Project: GlottGAN    Author: bajibabu
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxilary_input")

    # Conv Layer 1
    x = Convolution1D(nb_filter=100,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(disc_input)
    x = LeakyReLU(0.2)(x) # output shape is 400 x 100
    x = AveragePooling1D(pool_length=20)(x) # output shape is 20 x 100

    # Conv Layer 2
    x = Convolution1D(nb_filter=250,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = LeakyReLU(0.2)(x) # output shape is 20 x 250
    x = AveragePooling1D(pool_length=5)(x) # output shape is 4 x 250

    # Conv Layer 3
    x = Convolution1D(nb_filter=300,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = LeakyReLU(0.2)(x) # output shape is 4 x 300
    x = Flatten()(x) # output shape is 1200

    x = merge([x, aux_input], mode="concat", concat_axis=-1) # shape is 1247

    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x) # output shape is 200

    # Dense Layer 2
    x = Dense(1)(x)
    #x = Activation('sigmoid')(x)
    x = Activation('linear')(x) # output shape is 1

    discriminator_model = Model(
        input=[disc_input, aux_input], output=[x], name=model_name)

    return discriminator_model
Project: DeepLearn    Author: GauravBh1010tt
def trainCNN(obj, dataset_headLines, dataset_body):
    embedding_dim = 300
    LSTM_neurons = 50
    dense_neuron = 16
    dimx = 100
    dimy = 200
    lamda = 0.0
    nb_filter = 100
    filter_length = 4
    vocab_size = 10000
    batch_size = 50
    epochs = 5
    ntn_out = 16
    ntn_in = nb_filter 
    state = False


    train_head,train_body,embedding_matrix = obj.process_data(sent_Q=dataset_headLines,
                                                     sent_A=dataset_body,dimx=dimx,dimy=dimy,
                                                     wordVec_model = wordVec_model)    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')
    #x = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimx)(inpx)
    x = word2vec_embedding_layer(embedding_matrix)(inpx)  
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    #y = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimy)(inpy)
    y = word2vec_embedding_layer(embedding_matrix)(inpy)
    ques = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                         border_mode='valid', activation='relu',
                         subsample_length=1)(x)

    ans = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                        border_mode='valid', activation='relu',
                        subsample_length=1)(y)

    #hx = Lambda(max_1d, output_shape=(nb_filter,))(ques)
    #hy = Lambda(max_1d, output_shape=(nb_filter,))(ans)
    hx = GlobalMaxPooling1D()(ques)
    hy = GlobalMaxPooling1D()(ans)
    #wordVec_model = []
    #h =  Merge(mode="concat",name='h')([hx,hy])

    h1 = Multiply()([hx,hy])
    h2 = Abs()([hx,hy])

    h =  Merge(mode="concat",name='h')([h1,h2])
    #h = NeuralTensorLayer(output_dim=1,input_dim=ntn_in)([hx,hy])
    #h = ntn_layer(ntn_in,ntn_out,activation=None)([hx,hy])
    #score = h
    wrap = Dense(dense_neuron, activation='relu',name='wrap')(h)
    #score = Dense(1,activation='sigmoid',name='score')(h)
    #wrap = Dense(dense_neuron,activation='relu',name='wrap')(h)
    score = Dense(4,activation='softmax',name='score')(wrap)

    #score=K.clip(score,1e-7,1.0-1e-7)
    #corr = CorrelationRegularization(-lamda)([hx,hy])
    #model = Model( [inpx,inpy],[score,corr])
    model = Model( [inpx,inpy],score)
    model.compile( loss='categorical_crossentropy',optimizer="adadelta",metrics=['accuracy'])    
    return model,train_head,train_body
Project: cervantes    Author: textclf
def _generate_model(self, lembedding, num_classes=2, ngrams=[1,2,3,4,5],
                        nfilters=64, train_vectors=True):

        def sub_ngram(n):
            return Sequential([
                Convolution1D(nfilters, n,
                      activation='relu',
                      input_shape=(lembedding.size, lembedding.vector_box.vector_dim)),
                Lambda(
                    lambda x: K.max(x, axis=1),
                    output_shape=(nfilters,)
                )
            ])

        doc = Input(shape=(lembedding.size, ), dtype='int32')
        emb_layer = Embedding(input_dim=lembedding.vector_box.size,
                              output_dim=lembedding.vector_box.vector_dim,
                              weights=[lembedding.vector_box.W])
        emb_layer.trainable = train_vectors  # set on the layer; the output tensor has no 'trainable'
        embedded = emb_layer(doc)

        rep = Dropout(0.5)(
            merge(
                [sub_ngram(n)(embedded) for n in ngrams],
                mode='concat',
                concat_axis=-1
            )
        )

        if num_classes == 2:
            out = Dense(1, activation='sigmoid')(rep)
            model = Model(input=doc, output=out)
            if self.optimizer is None:
                self.optimizer = 'rmsprop'
            model.compile(loss='binary_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])
        else:
            out = Dense(num_classes, activation='softmax')(rep)
            model = Model(input=doc, output=out)
            if self.optimizer is None:
                self.optimizer = 'adam'
            model.compile(loss='categorical_crossentropy', optimizer=self.optimizer, metrics=["accuracy"])

        return model
Project: dnn_page_vectors    Author: ankit-cliqz
def model(sequence_length=None):
    graph = Graph()
    graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             input_dim=embedding_dim,
                             input_length=sequence_length)
        pool = MaxPooling1D(pool_length=sequence_length - fsz + 1)
        graph.add_node(conv, name='conv-%s' % fsz, input='input')
        graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
        graph.add_node(
            Flatten(),
            name='flatten-%s' %
            fsz,
            input='maxpool-%s' %
            fsz)

    if len(filter_sizes) > 1:
        graph.add_output(name='output',
                         inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                         merge_mode='concat')
    else:
        graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

    # main sequential model
    model = Sequential()
    model.add(
        Embedding(
            vocab_size,
            embedding_dim,
            input_length=sequence_length,
            weights=[embedding_weights]))
    model.add(
        Dropout(
            dropout_prob[0],
            input_shape=(
                sequence_length,
                embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dims))
    model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    return model

# Input Layer with all the query, similar and non similar documents.
Project: dnn_page_vectors    Author: ankit-cliqz
def model(sequence_length=None):
    graph = Graph()
    graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             input_dim=embedding_dim,
                             input_length=sequence_length)
        pool = MaxPooling1D(pool_length=sequence_length - fsz + 1)
        graph.add_node(conv, name='conv-%s' % fsz, input='input')
        graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
        graph.add_node(
            Flatten(),
            name='flatten-%s' %
            fsz,
            input='maxpool-%s' %
            fsz)

    if len(filter_sizes) > 1:
        graph.add_output(name='output',
                         inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                         merge_mode='concat')
    else:
        graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

    # main sequential model
    model = Sequential()

    if conf.feature_level == "word":
        model.add(
            Embedding(
                vocab_size,
                embedding_dim,
                input_length=sequence_length,
                weights=[embedding_weights]))
    elif conf.feature_level == "char" or conf.feature_level == "ngram":
        model.add(
            Embedding(
                vocab_size,
                embedding_dim,
                input_length=sequence_length))


    model.add(
        Dropout(
            dropout_prob[0],
            input_shape=(
                sequence_length,
                embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dims))
    # model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    return model

# Input Layer with all the query, similar and non similar documents.
Project: dnn_page_vectors    Author: ankit-cliqz
def model(sequence_length=None):
    graph = Graph()
    graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             input_dim=embedding_dim,
                             input_length=sequence_length)
        pool = MaxPooling1D(pool_length=sequence_length - fsz + 1)
        graph.add_node(conv, name='conv-%s' % fsz, input='input')
        graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
        graph.add_node(
            Flatten(),
            name='flatten-%s' %
            fsz,
            input='maxpool-%s' %
            fsz)

    if len(filter_sizes) > 1:
        graph.add_output(name='output',
                         inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                         merge_mode='concat')
    else:
        graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

    # main sequential model
    model = Sequential()
    model.add(
        Embedding(
            vocab_size,
            embedding_dim,
            input_length=sequence_length,
            weights=[embedding_weights]))
    model.add(
        Dropout(
            dropout_prob[0],
            input_shape=(
                sequence_length,
                embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dims))
    # model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    return model

# Input Layer with all the query, similar and non similar documents.
Project: Sub-word-LSTM    Author: DrImpossible
def RNN(X_train,y_train,args):
    """
    Purpose -> Define and train the proposed LSTM network
    Input   -> Data, Labels and model hyperparameters
    Output  -> Trained LSTM network
    """
    #Sets the model hyperparameters
    #Embedding hyperparameters
    max_features = args[0]
    maxlen = args[1]
    embedding_size = args[2]
    # Convolution hyperparameters
    filter_length = args[3]
    nb_filter = args[4]
    pool_length = args[5]
    # LSTM hyperparameters
    lstm_output_size = args[6]
    # Training hyperparameters
    batch_size = args[7]
    nb_epoch = args[8]
    numclasses = args[9]
    test_size = args[10] 

    #Format conversion for y_train for compatibility with Keras
    y_train = np_utils.to_categorical(y_train, numclasses) 
    #Train & Validation data splitting
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=test_size, random_state=42)

    #Build the sequential model
    # Model Architecture is:
    # Input -> Embedding -> Conv1D+Maxpool1D -> LSTM -> LSTM -> FC-1 -> Softmaxloss
    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size, dropout_W=0.2, dropout_U=0.2, return_sequences=True))
    model.add(LSTM(lstm_output_size, dropout_W=0.2, dropout_U=0.2, return_sequences=False))
    model.add(Dense(numclasses))
    model.add(Activation('softmax'))

    # Optimizer is Adamax along with categorical crossentropy loss
    model.compile(loss='categorical_crossentropy',
                optimizer='adamax',
                metrics=['accuracy'])


    print('Train...')
    #Trains model for 50 epochs with shuffling after every epoch for training data and validates on validation data
    model.fit(X_train, y_train, 
              batch_size=batch_size, 
              shuffle=True, 
              nb_epoch=nb_epoch,
              validation_data=(X_valid, y_valid))
    return model
Project: Sub-word-LSTM    Author: DrImpossible
def RNN(X_train,y_train,args):
    """
    Purpose -> Define and train the proposed LSTM network
    Input   -> Data, Labels and model hyperparameters
    Output  -> Trained LSTM network
    """
    #Sets the model hyperparameters
    #Embedding hyperparameters
    max_features = args[0]
    maxlen = args[1]
    embedding_size = args[2]
    # Convolution hyperparameters
    filter_length = args[3]
    nb_filter = args[4]
    pool_length = args[5]
    # LSTM hyperparameters
    lstm_output_size = args[6]
    # Training hyperparameters
    batch_size = args[7]
    nb_epoch = args[8]
    numclasses = args[9]
    test_size = args[10] 

    #Format conversion for y_train for compatibility with Keras
    y_train = np_utils.to_categorical(y_train, numclasses) 
    #Train & Validation data splitting
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=test_size, random_state=42)

    #Build the sequential model
    # Model Architecture is:
    # Input -> Embedding -> Conv1D+Maxpool1D -> LSTM -> LSTM -> FC-1 -> Softmaxloss
    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size, dropout_W=0.2, dropout_U=0.2, return_sequences=True))
    model.add(LSTM(lstm_output_size, dropout_W=0.2, dropout_U=0.2, return_sequences=False))
    model.add(Dense(numclasses))
    model.add(Activation('softmax'))

    # Optimizer is Adamax along with categorical crossentropy loss
    model.compile(loss='categorical_crossentropy',
                optimizer='adamax',
                metrics=['accuracy'])


    print('Train...')
    #Trains model for 50 epochs with shuffling after every epoch for training data and validates on validation data
    model.fit(X_train, y_train, 
              batch_size=batch_size, 
              shuffle=True, 
              nb_epoch=nb_epoch,
              validation_data=(X_valid, y_valid))
    return model
Project: event_chain    Author: wangzq870305
def cnn_combine_train(X_train_list,y_train,vocab_size):
    N=len(X_train_list)

    X_train_list = [sequence.pad_sequences(x_train, maxlen=MAX_LEN) for x_train in X_train_list]

    input_list=[]
    out_list=[]
    for i in range(N):
        input,out=get_embedding_input_output('f%d' %i,vocab_size)
        input_list.append(input)
        out_list.append(out)

    x = merge(out_list,mode='concat')

    x = Dropout(0.25)(x)

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    x = Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1)(x)

    # we use standard max pooling (halving the output of the previous layer):
    x = MaxPooling1D(pool_length=2)(x)

    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    x = Flatten()(x)

    # We add a vanilla hidden layer:
    x = Dense(HIDDEN_SIZE)(x)
    x = Dropout(0.25)(x)
    x = Activation('relu')(x)

    # We project onto a single unit output layer, and squash it with a sigmoid:
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)

    model = Model(input=input_list, output=x)

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train_list, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)

    return model
Project: rf_helicopter    Author: dandxy89
def create_neural_network_rnn(self):
        """
        Create the Neural Network Model

        :return: Keras Modelh
        """

        model = Sequential()

        # we start off with an efficient embedding layer which maps
        # our vocab indices into embedding_dims dimensions
        model.add(Embedding(12,  # Number of Features from State Space
                            300,  # Vector Size
                            input_length=self.input_dim))

        # we add a Convolution1D, which will learn nb_filter
        # word group filters of size filter_length:
        model.add(Convolution1D(nb_filter=self.nb_filter,
                                filter_length=self.filter_length,
                                border_mode='valid',
                                activation='relu',
                                subsample_length=1))

        # we use standard max pooling (halving the output of the previous
        # layer):
        model.add(MaxPooling1D(pool_length=self.pool_length))
        model.add(Dropout(self.dropout))

        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())

        # We add a vanilla hidden layer:
        model.add(Dense(self.neurons))
        model.add(Dropout(self.dropout))
        model.add(Activation('relu'))

        # We project onto a single unit output layer, and squash it with a
        # sigmoid:
        model.add(Dense(len(self.actions)))
        model.add(Activation('linear'))

        model.compile(loss='mse',
                      optimizer=Adadelta(lr=0.00025))

        print(model.summary())

        return model