The following code examples, extracted from open-source Python projects, illustrate how to use keras.layers.Convolution1D().
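All of the snippets on this page exercise the same layer: Convolution1D slides a bank of learned filters along the time axis of a (batch, steps, channels) input. Most examples below use the Keras 1.x argument names (nb_filter, filter_length, border_mode, subsample_length); a few later ones already use the Keras 2 names (filters, kernel_size, padding, strides). As a minimal orientation, here is a hedged, self-contained sketch written against the Keras 1.x API; the toy shapes (100 timesteps, 16 channels) are illustrative only.

import numpy as np
from keras.models import Sequential
from keras.layers import Convolution1D, GlobalMaxPooling1D, Dense

model = Sequential()
# 64 filters, each spanning 5 consecutive timesteps of 16-dim vectors
model.add(Convolution1D(nb_filter=64, filter_length=5,
                        border_mode='valid', activation='relu',
                        subsample_length=1, input_shape=(100, 16)))
model.add(GlobalMaxPooling1D())  # max over the time axis -> (None, 64)
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')

x = np.random.rand(8, 100, 16).astype('float32')  # (batch, steps, channels)
y = np.random.randint(0, 2, size=(8, 1))
model.train_on_batch(x, y)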
def __call__(self, model):
    original = model
    tanh_out = CausalAtrousConvolution1D(self.filters, 2, atrous_rate=self.rate, border_mode='valid')(model)
    tanh_out = Activation('tanh')(tanh_out)
    sigm_out = CausalAtrousConvolution1D(self.filters, 2, atrous_rate=self.rate, border_mode='valid')(model)
    sigm_out = Activation('sigmoid')(sigm_out)
    model = Merge(mode='mul')([tanh_out, sigm_out])

    skip_x = Convolution1D(self.filters, 1, border_mode='same')(model)
    res_x = Convolution1D(self.filters, 1, border_mode='same')(model)
    res_x = Merge(mode='sum')([original, res_x])
    return res_x, skip_x

def build_lstm(input_shape):
    model = Sequential()
    # model.add(Masking(input_shape=input_shape, mask_value=-1.))
    model.add(Embedding(input_shape[0], 128, input_length=input_shape[1]))
    model.add(Convolution1D(nb_filter=64,
                            filter_length=5,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=4))
    model.add(GRU(128))
    # model.add(GRU(128, return_sequences=False))
    # Add dropout if overfitting
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def build_lstm(input_shape):
    model = Sequential()
    # model.add(Masking(input_shape=input_shape, mask_value=-1.))
    model.add(Embedding(input_shape[0], 128, input_length=input_shape[1]))
    model.add(Convolution1D(nb_filter=64,
                            filter_length=5,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=model.output_shape[1]))
    model.add(Flatten())
    model.add(Dense(128))
    # model.add(GRU(128, return_sequences=False))
    # Add dropout if overfitting
    # model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def build_cnn(input_shape, output_dim, nb_filter):
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1,
                          input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

# just one filter
def build_cnn_char(input_dim, output_dim, nb_filter):
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32,  # character embedding size
                      input_length=maxlen,
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

# just one filter
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
    # TODO: Soft targets? A float to make targets a gaussian with stdev.
    # TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
    # TODO: Global conditioning?
    # TODO: Local conditioning?
    _, nb_bins = input_shape
    input_audio = Input(input_shape, name='audio_input')
    model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)
    out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)
    out = Merge(mode='sum', name='merging_skips')(skip_connections)
    out = PReLU()(out)
    out = Convolution1D(nb_bins, 1, border_mode='same')(out)
    out = PReLU()(out)
    out = Convolution1D(nb_bins, 1, border_mode='same')(out)
    # https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
    if last > 0:
        out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)
    out = Activation('softmax')(out)
    if build:
        model = Model(input_audio, out)
        model.compile(Nadam(), 'sparse_categorical_crossentropy')
    return model

def test_conv1d_lstm(self):
    from keras.layers import Convolution1D, LSTM, Dense
    model = Sequential()
    # input_shape = (time_step, dimensions)
    model.add(Convolution1D(32, 3, border_mode='same', input_shape=(10, 8)))
    # conv1d output shape = (None, 10, 32)
    model.add(LSTM(24))
    model.add(Dense(1, activation='sigmoid'))
    print('model.layers[1].output_shape=', model.layers[1].output_shape)

    input_names = ['input']
    output_names = ['output']
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)
    self.assertTrue(spec.HasField('neuralNetwork'))

    # Test the inputs and outputs
    self.assertEquals(len(spec.description.input), len(input_names))
    self.assertItemsEqual(input_names, map(lambda x: x.name, spec.description.input))
    self.assertEquals(len(spec.description.output), len(output_names))
    self.assertItemsEqual(output_names, map(lambda x: x.name, spec.description.output))

    # Test the layer parameters.
    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].convolution)
    self.assertIsNotNone(layers[1].simpleRecurrent)
    self.assertIsNotNone(layers[2].innerProduct)

def createmodel(self):
    """
    create cnn model structure
    :return: model structure
    """
    max_features = max(self.words.values()) + 1  # input dims
    model = Sequential()
    if self.W is None:
        model.add(Embedding(max_features, self.embedding_length, input_length=self.maxlen, dropout=0.2))
    else:
        model.add(Embedding(max_features, self.layer1_size, weights=[self.W],
                            input_length=self.maxlen, dropout=0.2))
    model.add(Convolution1D(nb_filter=self.nb_filter,
                            filter_length=self.filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=model.output_shape[1]))
    model.add(Flatten())
    model.add(Dense(self.hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Dense(self.nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=["accuracy"])
    return model

def build_model():
    model = Sequential()
    # model.add(Convolution1D(16, 2, border_mode='valid', input_shape=(20, 1)))
    # model.add(Activation('relu'))
    # model.add(Convolution1D(32, 3, border_mode='valid'))
    # model.add(Activation('relu'))
    # model.add(Convolution1D(32, 2, border_mode='valid'))
    # model.add(Activation('relu'))
    # model.add(MaxPooling1D(pool_length=2))
    # model.add(Flatten())
    # model.add(Dense(32))
    # model.add(Activation('relu'))
    # model.add(Reshape((32, 1)))
    model.add(LSTM(input_dim=1, output_dim=16, activation='relu', return_sequences=True))
    model.add(Dropout(0.2))  # Dropout overfitting
    model.add(LSTM(32, activation='relu', return_sequences=False))
    model.add(Dropout(0.2))  # Dropout overfitting
    # model.add(Dense(64))
    # model.add(Activation("relu"))
    # model.add(Dropout(0.2))  # Dropout overfitting
    model.add(Dense(64))
    model.add(Activation("softmax"))

    start = time.time()
    # sgd = SGD(lr=0.5, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])  # Nadam RMSprop()
    print "Compilation Time : ", time.time() - start
    return model

def get_cnn(self):
    """
    Build a keras' convolutional neural network model.

    :returns: A tuple of 2 models, for encoding and encoding+decoding model.
    :rtype: tuple(Model)
    """
    n_vocab = self.abstracts_preprocessor.get_num_vocab()
    n1 = 64
    input_layer = Input(shape=(n_vocab,))
    model = Reshape((1, n_vocab,))(input_layer)
    model = Convolution1D(n1, 3, border_mode='same', activation='sigmoid', W_regularizer=l2(.01))(model)
    model = Reshape((n1,))(model)
    model = Dense(n1, activation='sigmoid', W_regularizer=l2(.01))(model)
    model = Reshape((1, n1))(model)
    model = Convolution1D(self.n_factors, 3, border_mode='same', activation='softmax', W_regularizer=l2(.01))(model)
    encoding = Reshape((self.n_factors,), name='encoding')(model)

    model = Reshape((1, self.n_factors))(encoding)
    model = Convolution1D(n1, 3, border_mode='same', activation='sigmoid', W_regularizer=l2(.01))(model)
    model = Reshape((n1,))(model)
    model = Dense(n1, activation='relu', W_regularizer=l2(.01))(model)
    model = Reshape((1, n1))(model)
    model = Convolution1D(n_vocab, 3, border_mode='same', W_regularizer=l2(.01))(model)
    decoding = Reshape((n_vocab,))(model)

    model = concatenate([encoding, decoding])
    self.model = Model(inputs=input_layer, outputs=model)
    self.model.compile(loss='mean_squared_error', optimizer='sgd')

def modeling(self, l=[49, 30, 10, 3]):
    """
    generate model
    """
    n_cv_flt, n_cv_ln = self.n_cv_flt, self.n_cv_ln
    cv_activation = self.cv_activation

    model = Sequential()
    # Direct: input_shape should be (l,0) not (l)
    # if l, it assumes a scalar for an input feature.
    # model.add(Dense(l[1], input_shape=(l[0],)))

    # Convolution
    print("n_cv_flt, n_cv_ln, cv_activation", n_cv_flt, n_cv_ln, cv_activation)
    model.add(Convolution1D(n_cv_flt, n_cv_ln, activation=cv_activation,
                            border_mode='same', input_shape=(l[0], 1)))
    model.add(Flatten())
    model.add(Dense(l[1]))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(l[2]))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(l[3]))
    model.add(Activation('softmax'))

    return model

def modeling(self, l=[49, 30, 10, 3]):
    """
    generate model
    """
    self.c_name = 'conv'
    n_cv_flt, n_cv_ln = self.n_cv_flt, self.n_cv_ln
    cv_activation = self.cv_activation

    model = Sequential()
    # Direct: input_shape should be (l,0) not (l)
    # if l, it assumes a scalar for an input feature.
    # model.add(Dense(l[1], input_shape=(l[0],)))

    # Convolution
    print("n_cv_flt, n_cv_ln, cv_activation", n_cv_flt, n_cv_ln, cv_activation)
    # model.add(Convolution1D(n_cv_flt, n_cv_ln, activation=cv_activation,
    #                         border_mode='same', input_shape=(1, l[0]), name='conv'))
    model.add(Convolution1D(n_cv_flt, n_cv_ln, activation=cv_activation,
                            border_mode='same', input_shape=(l[0], 1), name=self.c_name))
    model.add(Flatten())
    model.add(Dense(l[1]))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(l[2]))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(l[3]))
    model.add(Activation('softmax'))

    self.layer_dict = dict([(layer.name, layer) for layer in model.layers])
    return model

def build_cnn_char_complex(input_dim, output_dim, nb_filter):
    randomEmbeddingLayer = Embedding(input_dim, 32, input_length=maxlen, dropout=0.1)
    poolingLayer = Lambda(max_1d, output_shape=(nb_filter,))
    conv_filters = []
    for n_gram in range(2, 4):
        ngramModel = Sequential()
        ngramModel.add(randomEmbeddingLayer)
        ngramModel.add(Convolution1D(nb_filter=nb_filter,
                                     filter_length=n_gram,
                                     border_mode="valid",
                                     activation="relu",
                                     subsample_length=1))
        ngramModel.add(poolingLayer)
        conv_filters.append(ngramModel)

    clf = Sequential()
    clf.add(Merge(conv_filters, mode="concat"))
    clf.add(Activation("relu"))
    clf.add(Dense(100))
    clf.add(Dropout(0.1))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

def build_lstm(output_dim, embeddings):
    loss_function = "categorical_crossentropy"

    # this is the placeholder tensor for the input sequences
    sequence = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype="int32")

    # this embedding layer will transform the sequences of integers
    embedded = Embedding(embeddings.shape[0], embeddings.shape[1],
                         input_length=MAX_SEQUENCE_LENGTH,
                         weights=[embeddings], trainable=True)(sequence)

    # 4 convolution layers (each 1000 filters)
    cnn = [Convolution1D(filter_length=filters, nb_filter=1000, border_mode="same")
           for filters in [2, 3, 5, 7]]
    # concatenate
    merged_cnn = merge([cnn(embedded) for cnn in cnn], mode="concat")

    # create attention vector from max-pooled convoluted
    maxpool = Lambda(lambda x: keras_backend.max(x, axis=1, keepdims=False),
                     output_shape=lambda x: (x[0], x[2]))
    attention_vector = maxpool(merged_cnn)

    forwards = AttentionLSTM(64, attention_vector)(embedded)
    backwards = AttentionLSTM(64, attention_vector, go_backwards=True)(embedded)

    # concatenate the outputs of the 2 LSTM layers
    bi_lstm = merge([forwards, backwards], mode="concat", concat_axis=-1)
    after_dropout = Dropout(0.5)(bi_lstm)

    # softmax output layer
    output = Dense(output_dim=output_dim, activation="softmax")(after_dropout)

    # the complete model
    model = Model(input=sequence, output=output)

    # try using different optimizers and different optimizer configs
    model.compile("adagrad", loss_function, metrics=["accuracy"])
    return model

def block_deepFlavourBTVConvolutions(charged, vertices, dropoutRate, active=True, batchnorm=False, batchmomentum=0.6):
    '''
    deep Flavour convolution part.
    '''
    cpf = charged
    if active:
        cpf = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv0')(cpf)
        if batchnorm:
            cpf = BatchNormalization(momentum=batchmomentum, name='cpf_batchnorm0')(cpf)
        cpf = Dropout(dropoutRate, name='cpf_dropout0')(cpf)
        cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv1')(cpf)
        if batchnorm:
            cpf = BatchNormalization(momentum=batchmomentum, name='cpf_batchnorm1')(cpf)
        cpf = Dropout(dropoutRate, name='cpf_dropout1')(cpf)
        cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv2')(cpf)
        if batchnorm:
            cpf = BatchNormalization(momentum=batchmomentum, name='cpf_batchnorm2')(cpf)
        cpf = Dropout(dropoutRate, name='cpf_dropout2')(cpf)
        cpf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv3')(cpf)
    else:
        cpf = Convolution1D(1, 1, kernel_initializer='zeros', trainable=False)(cpf)

    vtx = vertices
    if active:
        vtx = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv0')(vtx)
        if batchnorm:
            vtx = BatchNormalization(momentum=batchmomentum, name='vtx_batchnorm0')(vtx)
        vtx = Dropout(dropoutRate, name='vtx_dropout0')(vtx)
        vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv1')(vtx)
        if batchnorm:
            vtx = BatchNormalization(momentum=batchmomentum, name='vtx_batchnorm1')(vtx)
        vtx = Dropout(dropoutRate, name='vtx_dropout1')(vtx)
        vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv2')(vtx)
        if batchnorm:
            vtx = BatchNormalization(momentum=batchmomentum, name='vtx_batchnorm2')(vtx)
        vtx = Dropout(dropoutRate, name='vtx_dropout2')(vtx)
        vtx = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv3')(vtx)
    else:
        vtx = Convolution1D(1, 1, kernel_initializer='zeros', trainable=False)(vtx)

    return cpf, vtx

def convolutional_model_deepcsv(Inputs, nclasses, nregclasses, dropoutRate=-1):
    cpf = Inputs[1]
    vtx = Inputs[2]

    cpf = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv0')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv1')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv2')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv3')(cpf)

    vtx = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv0')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv1')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv2')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv3')(vtx)

    cpf = Flatten()(cpf)
    vtx = Flatten()(vtx)

    x = Concatenate()([Inputs[0], cpf, vtx])
    x = block_deepFlavourDense(x, dropoutRate)

    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='ID_pred')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model

def convolutional_model_ConvCSV(Inputs, nclasses, nregclasses, dropoutRate=0.25):
    """
    Inputs similar to 2016 training, but with convolutional layers on each track and sv
    """
    a = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    a = Dropout(dropoutRate)(a)
    a = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(a)
    a = Dropout(dropoutRate)(a)
    a = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(a)
    a = Dropout(dropoutRate)(a)
    a = Flatten()(a)

    c = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[2])
    c = Dropout(dropoutRate)(c)
    c = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(c)
    c = Dropout(dropoutRate)(c)
    c = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(c)
    c = Dropout(dropoutRate)(c)
    c = Flatten()(c)

    x = Concatenate()([Inputs[0], a, c])

    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model

def Dense_model_microPF(Inputs, nclasses, Inputshapes, dropoutRate=-1):
    from keras.layers.local import LocallyConnected1D

    # npf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    # npf = Dropout(dropoutRate)(npf)
    # npf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    # npf = Dropout(dropoutRate)(npf)
    # npf = Convolution1D(4, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    # npf = Dropout(dropoutRate)(npf)
    npf = Flatten()(Inputs[1])

    x = merge([Inputs[0], npf], mode='concat')

    x = Dense(250, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model

def Dense_model_ConvCSV(Inputs, nclasses, Inputshape, dropoutRate=0.25):
    """
    Inputs similar to 2016 training, but with convolutional layers on each track and sv
    """
    a = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    a = Dropout(dropoutRate)(a)
    a = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(a)
    a = Dropout(dropoutRate)(a)
    a = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(a)
    a = Dropout(dropoutRate)(a)
    a = Flatten()(a)

    c = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[2])
    c = Dropout(dropoutRate)(c)
    c = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(c)
    c = Dropout(dropoutRate)(c)
    c = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(c)
    c = Dropout(dropoutRate)(c)
    c = Flatten()(c)

    x = merge([Inputs[0], a, c], mode='concat')

    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model

def TextCNN(sequence_length, embedding_dim, filter_sizes, num_filters):
    '''
    Convolutional Neural Network, including conv + pooling

    Args:
        sequence_length: length of the input sequences
        embedding_dim: dimensionality of the word embeddings
        filter_sizes: sizes of the convolution filters
        num_filters: number of filters per size

    Returns:
        features extracted by CNN
    '''
    graph_in = Input(shape=(sequence_length, embedding_dim))
    convs = []
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)(graph_in)
        pool = MaxPooling1D()(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    if len(filter_sizes) > 1:
        out = Merge(mode='concat')(convs)
    else:
        out = convs[0]

    graph = Model(input=graph_in, output=out)
    return graph

def build_model(cat, hidden_dim):
    graph_in = Input(shape=(sequence_length, embedding_dim))
    convs = []
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)(graph_in)
        pool = MaxPooling1D(pool_length=2)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    if len(filter_sizes) > 1:
        out = Merge(mode='concat')(convs)
    else:
        out = convs[0]

    graph = Model(input=graph_in, output=out)

    # main sequential model
    model = Sequential()
    if not model_variation == 'CNN-static':
        model.add(Embedding(len(vocabulary), embedding_dim, input_length=sequence_length,
                            weights=embedding_weights))
    model.add(Dropout(dropout_prob[0], input_shape=(sequence_length, embedding_dim)))
    model.add(graph)
    model.add(Dense(hidden_dim))
    model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    model.add(Dense(cat))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def create_cnn(W, max_length, dim=300, dropout=.5, output_dim=8):
    # Convolutional model
    filter_sizes = (2, 3, 4)
    num_filters = 3

    graph_in = Input(shape=(max_length, len(W[0])))
    convs = []
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=num_filters,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)(graph_in)
        pool = MaxPooling1D(pool_length=2)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    out = Merge(mode='concat')(convs)
    graph = Model(input=graph_in, output=out)

    # Full model
    model = Sequential()
    model.add(Embedding(output_dim=W.shape[1], input_dim=W.shape[0],
                        input_length=max_length, weights=[W], trainable=True))
    model.add(Dropout(dropout))
    model.add(graph)
    model.add(Dense(dim, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
    return model

def __get_base_model(maxlen, max_features, word_idx, use_pretrained_embeddings=False):
    """
    :param maxlen: sentence size. Longer sentences will be truncated.
    :param max_features: vocab size.
    :param word_idx: {word1: index1, word2: index2}
    :return:
    """
    print >> sys.stderr, 'Build model...'
    model = Sequential()

    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    if use_pretrained_embeddings:
        print >> sys.stderr, 'Reading embeddings...'
        embedding_weights = get_embedding_weights(word_idx)
        model.add(Embedding(max_features, embedding_dims, input_length=maxlen, dropout=0.0,
                            weights=[embedding_weights]))
    else:
        model.add(Embedding(max_features, embedding_dims, input_length=maxlen, dropout=0.0))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))

    # we use max over time pooling by defining a python function to use
    # in a Lambda layer
    model.add(Lambda(max_1d, output_shape=(nb_filter,)))

    # We add a vanilla hidden layer:
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Dense(1))
    return model

def build_model(fragment_length, nb_filters, nb_output_bins, dilation_depth, nb_stacks, use_skip_connections,
                learn_all_outputs, _log, desired_sample_rate, use_bias, res_l2, final_l2):
    def residual_block(x):
        original_x = x
        # TODO: initialization, regularization?
        # Note: The AtrousConvolution1D with the 'causal' flag is implemented in github.com/basveeling/keras#@wavenet.
        tanh_out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=2 ** i, border_mode='valid', causal=True,
                                             bias=use_bias,
                                             name='dilated_conv_%d_tanh_s%d' % (2 ** i, s), activation='tanh',
                                             W_regularizer=l2(res_l2))(x)
        sigm_out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=2 ** i, border_mode='valid', causal=True,
                                             bias=use_bias,
                                             name='dilated_conv_%d_sigm_s%d' % (2 ** i, s), activation='sigmoid',
                                             W_regularizer=l2(res_l2))(x)
        x = layers.Merge(mode='mul', name='gated_activation_%d_s%d' % (i, s))([tanh_out, sigm_out])

        res_x = layers.Convolution1D(nb_filters, 1, border_mode='same', bias=use_bias,
                                     W_regularizer=l2(res_l2))(x)
        skip_x = layers.Convolution1D(nb_filters, 1, border_mode='same', bias=use_bias,
                                      W_regularizer=l2(res_l2))(x)
        res_x = layers.Merge(mode='sum')([original_x, res_x])
        return res_x, skip_x

    input = Input(shape=(fragment_length, nb_output_bins), name='input_part')
    out = input
    skip_connections = []
    out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=1, border_mode='valid', causal=True,
                                    name='initial_causal_conv')(out)
    for s in range(nb_stacks):
        for i in range(0, dilation_depth + 1):
            out, skip_out = residual_block(out)
            skip_connections.append(skip_out)

    if use_skip_connections:
        out = layers.Merge(mode='sum')(skip_connections)
    out = layers.Activation('relu')(out)
    out = layers.Convolution1D(nb_output_bins, 1, border_mode='same',
                               W_regularizer=l2(final_l2))(out)
    out = layers.Activation('relu')(out)
    out = layers.Convolution1D(nb_output_bins, 1, border_mode='same')(out)

    if not learn_all_outputs:
        raise DeprecationWarning('Learning on just all outputs is wasteful, now learning only inside receptive field.')
        out = layers.Lambda(lambda x: x[:, -1, :], output_shape=(out._keras_shape[-1],))(
            out)  # Based on gif in deepmind blog: take last output?

    out = layers.Activation('softmax', name="output_softmax")(out)
    model = Model(input, out)

    receptive_field, receptive_field_ms = compute_receptive_field()
    _log.info('Receptive Field: %d (%dms)' % (receptive_field, int(receptive_field_ms)))
    return model

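Note that CausalAtrousConvolution1D above is not a mainline Keras layer; as the in-code comment says, it comes from the github.com/basveeling/keras fork. As a hedged sketch only: in Keras 2 the stock Conv1D layer accepts padding='causal' and a dilation_rate, so one gated residual block of the kind built above can be approximated with standard layers (Multiply and Add replace the deprecated Merge modes).

from keras.layers import Conv1D, Multiply, Add

def residual_block_keras2(x, nb_filters, dilation):
    # causal dilated convolutions stand in for CausalAtrousConvolution1D(..., causal=True)
    tanh_out = Conv1D(nb_filters, 2, dilation_rate=dilation,
                      padding='causal', activation='tanh')(x)
    sigm_out = Conv1D(nb_filters, 2, dilation_rate=dilation,
                      padding='causal', activation='sigmoid')(x)
    gated = Multiply()([tanh_out, sigm_out])       # WaveNet gated activation unit
    res = Conv1D(nb_filters, 1, padding='same')(gated)
    skip = Conv1D(nb_filters, 1, padding='same')(gated)
    return Add()([x, res]), skip                   # residual output, skip output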
def __init__(self, samples=1000):
    # question model
    input_1 = Input(shape=(20, 200))
    x = Convolution1D(400, 3, border_mode='same', input_shape=(20, 200))(input_1)
    # x = Dropout(0.5)(x)
    x = AveragePooling1D(20)(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    output_1 = Dense(128, activation="tanh")(x)
    self.model_1 = Model(input=input_1, output=output_1)
    self.model_1.compile(optimizer="sgd", loss="mse", metrics=['accuracy'])

    # predicate model
    input_2 = Input(shape=(1000,))
    x = Dense(400, activation="tanh")(input_2)
    x = Dropout(0.25)(x)
    output_2 = Dense(128, activation="tanh")(x)
    # output_2 = Lambda(lambda x: x * (-1))(output_2)
    self.model_2 = Model(input=input_2, output=output_2)
    self.model_2.compile(optimizer="sgd", loss="mse", metrics=['accuracy'])

    # the candidate predicate inputs all share model_2
    input_2_a = Input(shape=(1000,))
    input_2_b = Input(shape=(1000,))
    input_2_c = Input(shape=(1000,))
    input_2_d = Input(shape=(1000,))
    input_2_e = Input(shape=(1000,))
    input_2_f = Input(shape=(1000,))
    output_2_a = self.model_2(input_2_a)
    output_2_b = self.model_2(input_2_b)
    output_2_c = self.model_2(input_2_c)
    output_2_d = self.model_2(input_2_d)
    output_2_e = self.model_2(input_2_e)
    output_2_f = self.model_2(input_2_f)

    # merge and score the question against each predicate
    output = merge(inputs=[output_1, output_2_a, output_2_b, output_2_c, output_2_d, output_2_e, output_2_f],
                   mode=cosine_error, output_shape=(None, 1))

    # combined model
    self.model = Model([input_1, input_2_a, input_2_b, input_2_c, input_2_d, input_2_e, input_2_f], output=output)
    self.model.compile(optimizer="sgd", loss='mse', metrics=['accuracy'])

    # training data
    # rand = np.random
    self.X_train_1 = load_data(samples=samples)  # question
    self.X_train_2_a = load_predicate_data(samples=samples)  # positive predicate
    # self.X_train_2_b = np.array([self.X_train_2_a[rand.randint(999)] for i in range(1000)])  # negative sample
    # self.X_train_2_c = np.array([self.X_train_2_a[rand.randint(999)] for i in range(1000)])  # negative sample

def get_model_4(params):
    embedding_weights = pickle.load(open(common.TRAINDATA_DIR+"/embedding_weights_w2v_%s.pk" % params['embeddings_suffix'], "rb"))
    graph_in = Input(shape=(params['sequence_length'], params['embedding_dim']))
    convs = []
    for fsz in params['filter_sizes']:
        conv = Convolution1D(nb_filter=params['num_filters'],
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)
        x = conv(graph_in)
        logging.debug("Filter size: %s" % fsz)
        logging.debug("Output CNN: %s" % str(conv.output_shape))

        pool = GlobalMaxPooling1D()
        x = pool(x)
        logging.debug("Output Pooling: %s" % str(pool.output_shape))
        convs.append(x)

    if len(params['filter_sizes']) > 1:
        merge = Merge(mode='concat')
        out = merge(convs)
        logging.debug("Merge: %s" % str(merge.output_shape))
    else:
        out = convs[0]

    graph = Model(input=graph_in, output=out)

    # main sequential model
    model = Sequential()
    if not params['model_variation'] == 'CNN-static':
        model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                            input_length=params['sequence_length'], weights=embedding_weights))
    model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(graph)
    model.add(Dense(params['n_dense']))
    model.add(Dropout(params['dropout_prob'][1]))
    model.add(Activation('relu'))

    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))

    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))

    return model

# word2vec ARCH with LSTM
def create_default_model(config_data):
    nb_filter = 200
    filter_length = 6
    hidden_dims = nb_filter

    embedding_matrix = load_embedding_matrix(config_data)
    max_features = embedding_matrix.shape[0]
    embedding_dims = embedding_matrix.shape[1]
    max_len = config_data['max_sentence_length']

    logging.info('Build Model...')
    logging.info('Embedding Dimensions: ({},{})'.format(max_features, embedding_dims))

    main_input = Input(batch_shape=(None, max_len), dtype='int32', name='main_input')

    if not config_data.get('random_embedding', None):
        logging.info('Pretrained Word Embeddings')
        embeddings = Embedding(
            max_features,
            embedding_dims,
            input_length=max_len,
            weights=[embedding_matrix],
            trainable=False
        )(main_input)
    else:
        logging.info('Random Word Embeddings')
        embeddings = Embedding(max_features, embedding_dims, init='lecun_uniform',
                               input_length=max_len)(main_input)

    zeropadding = ZeroPadding1D(filter_length - 1)(embeddings)

    conv1 = Convolution1D(
        nb_filter=nb_filter,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1)(zeropadding)
    max_pooling1 = MaxPooling1D(pool_length=4, stride=2)(conv1)

    conv2 = Convolution1D(
        nb_filter=nb_filter,
        filter_length=filter_length,
        border_mode='valid',
        activation='relu',
        subsample_length=1)(max_pooling1)
    max_pooling2 = MaxPooling1D(pool_length=conv2._keras_shape[1])(conv2)
    flatten = Flatten()(max_pooling2)

    hidden = Dense(hidden_dims)(flatten)
    softmax_layer1 = Dense(3, activation='softmax', name='sentiment_softmax', init='lecun_uniform')(hidden)

    model = Model(input=[main_input], output=softmax_layer1)
    test_model = Model(input=[main_input], output=[softmax_layer1, hidden])
    return model, test_model

def generate_model(self):
    k_inp = Input(shape=(self.max_len,), dtype='int32', name='input')
    k_emb = Embedding(input_dim=self.max_features+3,
                      output_dim=self.embedding_dim,
                      input_length=self.max_len,
                      weights=self.embedding_weights)(k_inp)
    k_conv_list = []
    for n in self.filter_size:
        k_conv = Convolution1D(nb_filter=self.num_filters,
                               filter_length=n,
                               border_mode='valid',
                               activation='relu',
                               subsample_length=1)(k_emb)
        k_maxpool1d = MaxPooling1D(pool_length=self.max_len - n + 1)(k_conv)
        k_flatten = Flatten()(k_maxpool1d)
        k_conv_list.append(k_flatten)
    if len(k_conv_list) == 1:
        k_merge = k_conv_list[0]
    else:
        k_merge = merge(k_conv_list, mode='concat', concat_axis=1)

    # add hidden layers if wanted
    last_dims = len(self.filter_size) * self.num_filters
    last_layer = k_merge
    if self.num_hidden_layers == 0:
        # put dropout after merge if no hidden layers
        last_layer = Dropout(self.dropout)(last_layer)
    for n in range(self.num_hidden_layers):
        k_dn = Dense(self.dim_hidden_layers, input_dim=last_dims, W_regularizer=l2(3))(last_layer)
        k_dp = Dropout(self.dropout)(k_dn)
        last_layer = Activation('relu')(k_dp)
        last_dims = self.dim_hidden_layers

    k_dn = Dense(1, input_dim=last_dims)(last_layer)
    k_dp = Dropout(self.dropout)(k_dn)
    k_out = Activation('sigmoid', name="output")(k_dp)

    model = Model(input=[k_inp], output=[k_out])
    model.compile(loss='binary_crossentropy',
                  optimizer=self.optimizer,
                  # metrics=['accuracy', num_true, target_tp_t, f1_score, precision, recall, specificity, spec_at_sens2, y_sum, y_ones, y_zeros, y_element,
                  #          yp_sum, yp_mean, yp_element])
                  # metrics=['accuracy', f1_score, precision, recall, specificity, specificity_at_recall, discriminance])
                  metrics=['accuracy'])
    return model

def __init__(self, word_index, embedding_matrix):
    embedding_layer_q = Embedding(len(word_index) + 1,
                                  EMBEDDING_DIM,
                                  weights=[embedding_matrix],
                                  input_length=MAX_SEQUENCE_LENGTH_Q,
                                  trainable=False)
    embedding_layer_a = Embedding(len(word_index) + 1,
                                  EMBEDDING_DIM,
                                  weights=[embedding_matrix],
                                  input_length=MAX_SEQUENCE_LENGTH_A,
                                  trainable=False)
    question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
    answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
    embedded_question = embedding_layer_q(question)
    embedded_answer = embedding_layer_a(answer)

    conv_blocksA = []
    conv_blocksQ = []
    for sz in [3, 5]:
        conv = Convolution1D(filters=20, kernel_size=sz,
                             padding="valid", activation="relu", strides=1)(embedded_answer)
        conv = MaxPooling1D(pool_size=2)(conv)
        conv = Flatten()(conv)
        conv_blocksA.append(conv)
    for sz in [5, 7, 9]:
        conv = Convolution1D(filters=20, kernel_size=sz,
                             padding="valid", activation="relu", strides=1)(embedded_question)
        conv = MaxPooling1D(pool_size=3)(conv)
        conv = Flatten()(conv)
        conv_blocksQ.append(conv)

    z = Concatenate()(conv_blocksA + conv_blocksQ)
    z = Dropout(0.5)(z)
    z = Dense(100, activation="relu")(z)
    softmax_c_q = Dense(2, activation='softmax')(z)

    self.model = Model([question, answer], softmax_c_q)
    opt = Nadam()
    self.model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])

def __init__(self, word_index, embedding_matrix):
    embedding_layer_c = Embedding(len(word_index) + 1,
                                  EMBEDDING_DIM,
                                  weights=[embedding_matrix],
                                  input_length=MAX_SEQUENCE_LENGTH_C,
                                  trainable=False)
    embedding_layer_q = Embedding(len(word_index) + 1,
                                  EMBEDDING_DIM,
                                  weights=[embedding_matrix],
                                  input_length=MAX_SEQUENCE_LENGTH_Q,
                                  trainable=False)
    embedding_layer_a = Embedding(len(word_index) + 1,
                                  EMBEDDING_DIM,
                                  weights=[embedding_matrix],
                                  input_length=MAX_SEQUENCE_LENGTH_A,
                                  trainable=False)
    context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
    question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
    answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
    embedded_context = embedding_layer_c(context)
    embedded_question = embedding_layer_q(question)
    embedded_answer = embedding_layer_a(answer)

    l_lstm_c = Bidirectional(LSTM(60, return_sequences=True))(embedded_context)
    conv_blocksC = []
    for sz in [5, 7]:
        conv = Convolution1D(filters=20, kernel_size=sz,
                             padding="valid", activation="relu", strides=1)(l_lstm_c)
        conv = MaxPooling1D(pool_size=2)(conv)
        conv = Flatten()(conv)
        conv_blocksC.append(conv)

    l_lstm_q = Bidirectional(LSTM(60, return_sequences=True))(embedded_question)
    conv_blocksQ = []
    for sz in [3, 5]:
        conv = Convolution1D(filters=20, kernel_size=sz,
                             padding="valid", activation="relu", strides=1)(l_lstm_q)
        conv = MaxPooling1D(pool_size=2)(conv)
        conv = Flatten()(conv)
        conv_blocksQ.append(conv)

    l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

    concat_c_q = concatenate([l_lstm_a] + conv_blocksQ + conv_blocksC, axis=1)
    relu_c_q_a = Dense(100, activation='relu')(concat_c_q)
    relu_c_q_a = Dropout(0.25)(relu_c_q_a)
    softmax_c_q_a = Dense(2, activation='softmax')(relu_c_q_a)

    self.model = Model([question, answer, context], softmax_c_q_a)
    opt = Nadam()
    self.model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])

def get_model(self, num_features):
    embedding_dims = 128
    nb_filter = 250
    filter_length = 8
    drop = 0.2

    model = Sequential()

    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(self.vocab_size, embedding_dims, input_length=num_features[0], dropout=0.2))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))

    # we use max over time pooling by defining a python function to use
    # in a Lambda layer
    def max_1d(X):
        return K.max(X, axis=1)

    model.add(Lambda(max_1d, output_shape=(nb_filter,)))
    model.add(Dropout(drop))
    model.add(Dense(1024, init='glorot_uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(drop))
    model.add(Dense(1024, init='glorot_uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

def create_network(**kwargs):
    defaults = {
        'timesteps': 128,
        'data_dim': 14,
        'nb_filter': 64,
        'filter_length': 3,
        'pool_length': 2
    }
    params = defaults
    params.update(**kwargs)

    network = Sequential()
    network.add(Convolution1D(nb_filter=params['nb_filter'],
                              filter_length=params['filter_length'],
                              border_mode='valid',
                              activation='relu',
                              subsample_length=1,
                              input_shape=(params['timesteps'], params['data_dim'])))
    network.add(MaxPooling1D(pool_length=params['pool_length']))
    network.add(Dropout(0.5))
    # network.add(Convolution1D(nb_filter=params['nb_filter'],
    #                           filter_length=params['filter_length'],
    #                           border_mode='valid',
    #                           activation='relu',
    #                           subsample_length=1))
    # network.add(MaxPooling1D(pool_length=params['pool_length']))
    # network.add(Dropout(0.5))
    # network.add(Flatten())
    # # Note: Keras does automatic shape inference.
    # network.add(Dense(params['nb_filter'] * 4))
    # network.add(Activation('relu'))
    # network.add(Dropout(0.25))
    network.add(LSTM(64))
    network.add(Dropout(0.15))
    network.add(Dense(1))
    network.add(Activation('sigmoid'))
    network.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    return network

def build_cnn_char_threeModels(input_dim, output_dim, nb_filter, filter_size=3):
    left = Sequential()
    left.add(Embedding(input_dim,
                       32,  # character embedding size
                       input_length=L,
                       dropout=0.2))
    left.add(Convolution1D(nb_filter=nb_filter,
                           filter_length=filter_size,
                           border_mode="valid",
                           activation="relu",
                           subsample_length=1))
    left.add(GlobalMaxPooling1D())
    left.add(Dense(100))
    left.add(Dropout(0.2))
    left.add(Activation("tanh"))

    center = Sequential()
    center.add(Embedding(input_dim,
                         32,  # character embedding size
                         input_length=M,
                         dropout=0.2))
    center.add(Convolution1D(nb_filter=nb_filter,
                             filter_length=filter_size,
                             border_mode="valid",
                             activation="relu",
                             subsample_length=1))
    center.add(GlobalMaxPooling1D())
    center.add(Dense(100))
    center.add(Dropout(0.2))
    center.add(Activation("tanh"))

    right = Sequential()
    right.add(Embedding(input_dim,
                        32,  # character embedding size
                        input_length=R,
                        dropout=0.2))
    right.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_size,
                            border_mode="valid",
                            activation="relu",
                            subsample_length=1))
    right.add(GlobalMaxPooling1D())
    right.add(Dense(100))
    right.add(Dropout(0.2))
    right.add(Activation("tanh"))

    clf = Sequential()
    clf.add(Merge([left, center, right], mode="concat"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

def block_deepFlavourConvolutions(charged, neutrals, vertices, dropoutRate, active=True, batchnorm=False, batchmomentum=0.6):
    '''
    deep Flavour convolution part.
    '''
    cpf = charged
    if active:
        cpf = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv0')(cpf)
        if batchnorm:
            cpf = BatchNormalization(momentum=batchmomentum, name='cpf_batchnorm0')(cpf)
        cpf = Dropout(dropoutRate, name='cpf_dropout0')(cpf)
        cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv1')(cpf)
        if batchnorm:
            cpf = BatchNormalization(momentum=batchmomentum, name='cpf_batchnorm1')(cpf)
        cpf = Dropout(dropoutRate, name='cpf_dropout1')(cpf)
        cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv2')(cpf)
        if batchnorm:
            cpf = BatchNormalization(momentum=batchmomentum, name='cpf_batchnorm2')(cpf)
        cpf = Dropout(dropoutRate, name='cpf_dropout2')(cpf)
        cpf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu', name='cpf_conv3')(cpf)
    else:
        cpf = Convolution1D(1, 1, kernel_initializer='zeros', trainable=False)(cpf)

    npf = neutrals
    if active:
        npf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='npf_conv0')(npf)
        if batchnorm:
            npf = BatchNormalization(momentum=batchmomentum, name='npf_batchnorm0')(npf)
        npf = Dropout(dropoutRate, name='npf_dropout0')(npf)
        npf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu', name='npf_conv1')(npf)
        if batchnorm:
            npf = BatchNormalization(momentum=batchmomentum, name='npf_batchnorm1')(npf)
        npf = Dropout(dropoutRate, name='npf_dropout1')(npf)
        npf = Convolution1D(4, 1, kernel_initializer='lecun_uniform', activation='relu', name='npf_conv2')(npf)
    else:
        npf = Convolution1D(1, 1, kernel_initializer='zeros', trainable=False)(npf)

    vtx = vertices
    if active:
        vtx = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv0')(vtx)
        if batchnorm:
            vtx = BatchNormalization(momentum=batchmomentum, name='vtx_batchnorm0')(vtx)
        vtx = Dropout(dropoutRate, name='vtx_dropout0')(vtx)
        vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv1')(vtx)
        if batchnorm:
            vtx = BatchNormalization(momentum=batchmomentum, name='vtx_batchnorm1')(vtx)
        vtx = Dropout(dropoutRate, name='vtx_dropout1')(vtx)
        vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv2')(vtx)
        if batchnorm:
            vtx = BatchNormalization(momentum=batchmomentum, name='vtx_batchnorm2')(vtx)
        vtx = Dropout(dropoutRate, name='vtx_dropout2')(vtx)
        vtx = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu', name='vtx_conv3')(vtx)
    else:
        vtx = Convolution1D(1, 1, kernel_initializer='zeros', trainable=False)(vtx)

    return cpf, npf, vtx

def convolutional_model_lessbroad(Inputs, nclasses, nregclasses, dropoutRate=-1):
    """
    the inputs are really not working as they are. need a reshaping well before
    """
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform', input_shape=Inputshapes[0])(Inputs[0])
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform')(gl)
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform')(gl)

    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Flatten()(cpf)

    npf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[2])
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    npf = Dropout(dropoutRate)(npf)
    npf = Flatten()(npf)

    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[3])
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Flatten()(vtx)

    x = Concatenate()([Inputs[0], cpf, npf, vtx])
    x = Dropout(dropoutRate)(x)

    x = Dense(600, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model

def Dense_model_broad_reg(Inputs, nclasses, Inputshapes, dropoutRate=-1, npred=1):
    """
    the inputs are really not working as they are. need a reshaping well before
    """
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform', input_shape=Inputshapes[0])(Inputs[0])
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform')(gl)
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform')(gl)

    cpf = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Flatten()(cpf)

    npf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', input_shape=Inputshapes[2])(Inputs[2])
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(4, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    npf = Flatten()(npf)

    vtx = Convolution1D(64, 1, kernel_initializer='lecun_uniform', activation='relu', input_shape=Inputshapes[3])(Inputs[3])
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Flatten()(vtx)

    x = merge([Inputs[0], cpf, npf, vtx], mode='concat')

    x = Dense(350, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = merge([Inputs[4], x], mode='concat')
    predictions = Dense(npred, activation='linear', kernel_initializer='he_normal')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model

def Dense_model_lessbroad(Inputs, nclasses, Inputshapes, dropoutRate=-1):
    """
    the inputs are really not working as they are. need a reshaping well before
    """
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform', input_shape=Inputshapes[0])(Inputs[0])
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform')(gl)
    # gl = Dense(8, activation='relu', kernel_initializer='lecun_uniform')(gl)

    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(Inputs[1])
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(cpf)
    cpf = Dropout(dropoutRate)(cpf)
    cpf = Flatten()(cpf)

    npf = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu', input_shape=Inputshapes[2])(Inputs[2])
    npf = Dropout(dropoutRate)(npf)
    npf = Convolution1D(8, 1, kernel_initializer='lecun_uniform', activation='relu')(npf)
    npf = Dropout(dropoutRate)(npf)
    npf = Flatten()(npf)

    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu', input_shape=Inputshapes[3])(Inputs[3])
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(32, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Convolution1D(16, 1, kernel_initializer='lecun_uniform', activation='relu')(vtx)
    vtx = Dropout(dropoutRate)(vtx)
    vtx = Flatten()(vtx)

    x = merge([Inputs[0], cpf, npf, vtx], mode='concat')
    x = Dropout(dropoutRate)(x)

    x = Dense(600, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model