Python keras.layers.convolutional module: Conv1D() example source code

We extracted the following 17 code examples from open-source Python projects to illustrate how to use keras.layers.convolutional.Conv1D().
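
Before the project examples, here is a minimal self-contained sketch (ours, not taken from any project below) of the Keras 2 Conv1D signature applied to a (timesteps, channels) input:

from keras.layers import Input
from keras.layers.convolutional import Conv1D
from keras.models import Model

# a toy sequence input: 100 timesteps, 8 channels per step
inp = Input(shape=(100, 8))
# 16 filters over windows of 3 steps; padding='same' keeps all 100 timesteps
out = Conv1D(filters=16, kernel_size=3, padding='same', activation='relu')(inp)
model = Model(inputs=inp, outputs=out)
model.summary()  # output shape: (None, 100, 16)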

Project: deeppavlov    Author: deepmipt    | Project source | File source
def cnn_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        outputs = []
        for i in range(len(self.kernel_sizes)):
            output_i = Conv1D(self.opt['filters_cnn'], kernel_size=self.kernel_sizes[i], activation=None,
                              kernel_regularizer=l2(self.opt['regul_coef_conv']), padding='same')(embed_input)
            output_i = BatchNormalization()(output_i)
            output_i = Activation('relu')(output_i)
            output_i = GlobalMaxPooling1D()(output_i)
            outputs.append(output_i)

        output = concatenate(outputs, axis=1)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None, kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
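
The method reads its hyperparameters from self.opt. One way to exercise it outside DeepPavlov is to attach it to a small holder class; the values below are hypothetical stand-ins, not the project's real config, and the Keras imports used by the method are assumed to be in scope:

class Holder(object):
    def __init__(self):
        # hypothetical hyperparameters; DeepPavlov's actual config differs
        self.opt = {'max_sequence_length': 100, 'embedding_dim': 300,
                    'filters_cnn': 128, 'regul_coef_conv': 1e-4,
                    'dense_dim': 100, 'regul_coef_dense': 1e-4,
                    'dropout_rate': 0.5}
        self.kernel_sizes = [1, 2, 3]

Holder.cnn_word_model = cnn_word_model  # reuse the method defined above
model = Holder().cnn_word_model()
model.compile(optimizer='adam', loss='binary_crossentropy')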
Project: GlottGAN    Author: bajibabu    | Project source | File source
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = concatenate([gen_input, aux_input], axis=-1)

    # Dense Layer 1
    x = Dense(10 * 100)(x) 
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 10*100

    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x) # shape is 100 x 10

    # Conv Layer 1
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 100 x 250
    x = UpSampling1D(size=2)(x) # output shape is 200 x 250

    # Conv Layer 2
    x = Conv1D(filters=100, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 200 x 100
    x = UpSampling1D(size=2)(x) # output shape is 400 x 100

    # Conv Layer 3
    x = Conv1D(filters=1, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x) # final output shape is 400 x 1

    generator_model = Model(
        outputs=[x], inputs=[gen_input, aux_input], name=model_name)

    return generator_model
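
A quick shape check for the generator (assuming numpy and the Keras imports used by the function are in scope):

import numpy as np

g = generator_model()
noise = np.random.normal(size=(4, 100)).astype('float32')
aux = np.zeros((4, 47), dtype='float32')
print(g.predict([noise, aux]).shape)  # (4, 400, 1): two UpSampling1D(2) steps stretch 100 steps to 400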
Project: LeaguePredictor    Author: dgarwin    | Project source | File source
def conv_net():
    model = Sequential()
    model.add(Conv1D(filters=20, kernel_size=1, input_shape=(10, 55)))  # Keras 2 names for nb_filter/filter_length
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Reshape((200,)))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    #model.add(Dense(128))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.5))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return nn(lambda: model)  # nn() is defined elsewhere in the project (not shown in this excerpt)
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
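
Note that add() requires both branches to agree in shape: the conv branch ends with k2 channels and padding='same', and MaxPooling1D(pooling_size=1, padding='same') leaves the shortcut unchanged, so the block only type-checks when tensor_input already has k2 channels, as in the build_main_residual_network functions below. A sketch of that constraint:

from keras import backend as K
from keras.layers import Input

x = Input(shape=(200, 128))   # channel count already equals k2
y = first_block(x, (64, 128))
print(K.int_shape(y))         # (None, 200, 128)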
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: GlottGAN    Author: bajibabu    | Project source | File source
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxilary_input")

    # Conv Layer 1
    x = Conv1D(filters=100, kernel_size=13, padding='same')(disc_input)
    x = LeakyReLU(0.2)(x) # output shape is 400 x 100
    x = AveragePooling1D(pool_size=20)(x) # output shape is 20 x 100

    # Conv Layer 2
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x) # output shape is 20 x 250
    x = AveragePooling1D(pool_size=5)(x) # output shape is 4 x 250

    # Conv Layer 3
    x = Conv1D(filters=300, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x) # output shape is 4 x 300
    x = Flatten()(x) # output shape is 1200

    x = concatenate([x, aux_input], axis=-1) # shape is 1247

    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x) # output shape is 200

    # Dense Layer 2
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)

    discriminator_model = Model(
        outputs=[x], inputs=[disc_input, aux_input], name=model_name)

    return discriminator_model
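
To train the generator through a frozen discriminator, the two models are typically stacked; a hedged wiring sketch of that standard GAN pattern (not copied from GlottGAN's training code):

from keras.layers import Input
from keras.models import Model

g = generator_model()
d = discriminator_model()
d.trainable = False                      # freeze d for generator updates
noise = Input(shape=(100,))
aux = Input(shape=(47,))
gan = Model([noise, aux], d([g([noise, aux]), aux]))
gan.compile(optimizer='adam', loss='binary_crossentropy')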
Project: quora_duplicate    Author: ijinmao    | Project source | File source
def __call__(self, inputs):
        x = Conv1D(self.filters, 3, activation='relu')(inputs)
        return GlobalMaxPooling1D()(x)
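
This is the __call__ of a small reusable block; a self-contained version might look like the following (the class name and constructor are our assumption, not the project's actual code):

from keras.layers import Conv1D, GlobalMaxPooling1D, Input
from keras.models import Model

class ConvPoolBlock(object):  # hypothetical name
    def __init__(self, filters):
        self.filters = filters

    def __call__(self, inputs):
        x = Conv1D(self.filters, 3, activation='relu')(inputs)
        return GlobalMaxPooling1D()(x)

inp = Input(shape=(50, 300))
feat = ConvPoolBlock(64)(inp)  # (None, 64): the max over the 48 conv steps
model = Model(inp, feat)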
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def build_main_residual_network(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    inp = Input(shape=(time_step,input_dim))

    # add mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)


    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def build_main_residual_network_with_lstm(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                rnn_layer_num = 2,
                                dropout=0.3):
    inp = Input(shape=(time_step,input_dim))

    # add mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)

    # add LSTM module
    for _ in range(rnn_layer_num):
        out = LSTM(128,return_sequences=True)(out)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: VDCNN    Author: yuhsinliu1993    | Project source | File source
def build_model(num_filters, num_classes, sequence_max_length=512, num_quantized_chars=71, embedding_size=16, learning_rate=0.001, top_k=3, model_path=None):

    inputs = Input(shape=(sequence_max_length, ), dtype='int32', name='inputs')

    embedded_sent = Embedding(num_quantized_chars, embedding_size, input_length=sequence_max_length)(inputs)

    # First conv layer
    conv = Conv1D(filters=64, kernel_size=3, strides=2, padding="same")(embedded_sent)

    # Each ConvBlock with one MaxPooling Layer
    for i in range(len(num_filters)):
        conv = ConvBlockLayer(get_conv_shape(conv), num_filters[i])(conv)
        conv = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv)

    # k-max pooling (Finds values and indices of the k largest entries for the last dimension)
    def _top_k(x):
        x = tf.transpose(x, [0, 2, 1])
        k_max = tf.nn.top_k(x, k=top_k)
        return tf.reshape(k_max[0], (-1, num_filters[-1] * top_k))
    k_max = Lambda(_top_k, output_shape=(num_filters[-1] * top_k,))(conv)

    # 3 fully-connected layer with dropout regularization
    fc1 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(k_max))
    fc2 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(fc1))
    fc3 = Dense(num_classes, activation='softmax')(fc2)

    # define optimizer
    sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=False)
    model = Model(inputs=inputs, outputs=fc3)
    model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy'])  # MSE loss with a softmax output; categorical_crossentropy would be more typical

    if model_path is not None:
        model.load_weights(model_path)

    return model
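
The _top_k Lambda implements k-max pooling: for each channel it keeps the k largest activations over time. A standalone version of the same idea (TensorFlow 1.x backend assumed, matching this code's era):

import numpy as np
import tensorflow as tf

def k_max_pooling(x, k, num_channels):
    x = tf.transpose(x, [0, 2, 1])     # (batch, channels, steps)
    values = tf.nn.top_k(x, k=k)[0]    # k largest per channel, sorted descending
    return tf.reshape(values, (-1, num_channels * k))

x = tf.constant(np.arange(12, dtype=np.float32).reshape(1, 4, 3))  # 4 steps, 3 channels
with tf.Session() as sess:             # TF1-style session
    print(sess.run(k_max_pooling(x, k=2, num_channels=3)))  # [[ 9.  6. 10.  7. 11.  8.]]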
Project: VDCNN    Author: yuhsinliu1993    | Project source | File source
def __init__(self, input_shape, num_filters):
        self.model = Sequential()
        # first conv layer
        self.model.add(Conv1D(filters=num_filters, kernel_size=3, strides=1, padding="same", input_shape=input_shape))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))

        # second conv layer
        self.model.add(Conv1D(filters=num_filters, kernel_size=3, strides=1, padding="same"))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
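
Only __init__ of ConvBlockLayer is shown here; given how build_model above invokes ConvBlockLayer(...)(conv), the class presumably also defines a __call__ that applies self.model, for example (our assumption, not the project's code):

def __call__(self, inputs):
        # apply the stacked conv layers to the incoming tensor
        return self.model(inputs)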
Project: jamespy_py3    Author: jskDr    | Project source | File source
def generator_model_qpsk(no_bits_in_a_frame):
    """
    The output is 2-D, consisting of real and imaginary parts;
    the input should have the same frame layout as the output.
    """
    model = Sequential()
    model.add(Conv1D(
        2, 2,
        strides=2,  # Keras 2 name for subsample_length
        input_shape=(no_bits_in_a_frame, 1)))
    return model
Project: jamespy_py3    Author: jskDr    | Project source | File source
def generator_model_16qam(no_bits_in_a_frame):
    """
    The output is 2-D, consisting of real and imaginary parts;
    the input should have the same frame layout as the output.
    """
    model = Sequential()
    model.add(Conv1D(
        2, 4,
        strides=4,  # Keras 2 name for subsample_length
        input_shape=(no_bits_in_a_frame, 1)))
    return model
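
With strides equal to kernel_size, each Conv1D window sees a disjoint group of input bits, so one group of bits maps to one 2-D (real, imaginary) symbol. A shape check with a hypothetical frame length:

import numpy as np

model = generator_model_16qam(64)                   # 64 bits per frame (hypothetical)
bits = np.random.randint(0, 2, size=(1, 64, 1)).astype('float32')
print(model.predict(bits).shape)                    # (1, 16, 2): 4 bits -> one symbol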
Project: gtzan.keras    Author: Hguimaraes    | Project source | File source
def cnn_melspect_1D(input_shape):
    kernel_size = 3
    #activation_func = LeakyReLU()
    activation_func = Activation('relu')
    inputs = Input(input_shape)

    # Convolutional block_1
    conv1 = Conv1D(32, kernel_size)(inputs)
    act1 = activation_func(conv1)
    bn1 = BatchNormalization()(act1)
    pool1 = MaxPooling1D(pool_size=2, strides=2)(bn1)

    # Convolutional block_2
    conv2 = Conv1D(64, kernel_size)(pool1)
    act2 = activation_func(conv2)
    bn2 = BatchNormalization()(act2)
    pool2 = MaxPooling1D(pool_size=2, strides=2)(bn2)

    # Convolutional block_3
    conv3 = Conv1D(128, kernel_size)(pool2)
    act3 = activation_func(conv3)
    bn3 = BatchNormalization()(act3)

    # Global Layers
    gmaxpl = GlobalMaxPooling1D()(bn3)
    gmeanpl = GlobalAveragePooling1D()(bn3)
    mergedlayer = concatenate([gmaxpl, gmeanpl], axis=1)

    # Regular MLP
    dense1 = Dense(512,
        kernel_initializer='glorot_normal',
        bias_initializer='glorot_normal')(mergedlayer)
    actmlp = activation_func(dense1)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(512,
        kernel_initializer='glorot_normal',
        bias_initializer='glorot_normal')(reg)
    actmlp = activation_func(dense2)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(10, activation='softmax')(reg)

    model = Model(inputs=[inputs], outputs=[dense2])
    return model
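
A usage sketch with a hypothetical mel-spectrogram input shape (e.g. 128 frames, 1 channel):

model = cnn_melspect_1D((128, 1))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()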
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def build_multi_input_main_residual_network(batch_size,
                                a2_time_step,
                                d2_time_step,
                                d1_time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    '''
    A multi-input residual network for wavelet-transformed signals.
    :param batch_size: batch size
    :param a2_time_step: a2 size
    :param d2_time_step: d2 size
    :param d1_time_step: d1 size
    :param input_dim: input dimension
    :param output_dim: output dimension
    :param loop_depth: depth of the residual network
    :param dropout: dropout rate
    :return: a compiled Keras model
    '''
    a2_inp = Input(shape=(a2_time_step,input_dim),name='a2')
    d2_inp = Input(shape=(d2_time_step,input_dim),name='d2')
    d1_inp = Input(shape=(d1_time_step,input_dim),name='d1')

    out = concatenate([a2_inp,d2_inp,d1_inp],axis=1)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inputs=[a2_inp,d2_inp,d1_inp],outputs=[out])

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
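
A smoke test with hypothetical sizes (assuming numpy and the first_block/repeated_block definitions above are in scope):

import numpy as np

model = build_multi_input_main_residual_network(
    batch_size=16, a2_time_step=64, d2_time_step=64, d1_time_step=128,
    input_dim=3, output_dim=1, loop_depth=3)
a2 = np.zeros((16, 64, 3), dtype='float32')
d2 = np.zeros((16, 64, 3), dtype='float32')
d1 = np.zeros((16, 128, 3), dtype='float32')
print(model.predict([a2, d2, d1]).shape)  # (16, 1)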