We extracted the following 50 code examples from open source Python projects to illustrate how to use keras.layers().
def tsinalis(input_shape, n_classes):
    """ Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1) """
    model = Sequential(name='Tsinalis')

    model.add(Conv1D(kernel_size=200, filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)

    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)

    model.add(keras.layers.core.Reshape([20, -1, 1]))
    print(model.output_shape)

    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)

    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)

    model.add(Flatten())
    print(model.output_shape)

    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))

    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35):
    """ for working with extracted features """
    # gpu = switch_gpu()
    # with K.tf.device('/gpu:{}'.format(gpu)):
    #     K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
    # model.gpu = gpu
    for l in range(layers):
        model.add(Dense(neurons, input_shape=input_shape, activation='elu',
                        kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model

#%% everything recurrent for ANN
def set_params(mo, bparams):
    i = 0
    for la in mo.layers:
        we = bparams[i:i+2]
        print(len(we))
        la.set_weights(we)
        i += 2
    return mo

#with open("best_model_keras.pkl", 'r') as f:
#    b_params = pkl.load(f)
#
#model = set_params(model, b_params)
#out = model.predict(xvl, batch_size=xvl.shape[0], verbose=0)
#error = np.mean(np.mean(np.power(out - yvl, 2), axis=1))
#print("Error vl", error)
#sys.exit()

#init_p = get_params(model)
#with open("init_keras_param.pkl", 'w') as f:
#    pkl.dump(init_p, f)
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates model comprised of 2 convolutional layers followed by dense layers

    dense_layer_sizes: List of layer sizes.
        This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''
    model = Sequential()
    model.add(Conv2D(filters, kernel_size,
                     padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    return model
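A builder like this is written to be driven by a hyper-parameter search: every argument is a tunable. A minimal sketch of how it might be wired into scikit-learn's grid search via the Keras wrapper (the search space and training data below are hypothetical, and input_shape/num_classes are assumed to be defined at module scope as in the snippet above):

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

# hypothetical search space; keys must match make_model's argument names
param_grid = {'dense_layer_sizes': [[32], [64, 64]],
              'filters': [8],
              'kernel_size': [3],
              'pool_size': [2],
              'epochs': [3]}
validator = GridSearchCV(KerasClassifier(build_fn=make_model), param_grid=param_grid)
# validator.fit(x_train, y_train)  # x_train / y_train are assumed to exist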
def block(self, num_filters, num_layers, kernel_size, strides, input_tensor):
    # 1x1 -> kxk -> 1x1 bottleneck with SELU activations and a projection shortcut
    x = Conv2D(num_layers, (1, 1), strides=strides)(input_tensor)
    x = Activation(selu)(x)
    x = Conv2D(num_filters, kernel_size, padding='same')(x)
    x = Activation(selu)(x)
    x = Conv2D(num_filters * 4, (1, 1))(x)

    shortcut = Conv2D(num_filters * 4, (1, 1), strides=strides)(input_tensor)

    x = layers.add([x, shortcut])
    x = Activation(selu)(x)
    return x
def BiDi(input_shape, vocabSize, veclen, wordWeights, nLayers, nHidden, lr):
    assert len(nHidden) == nLayers, '#Neurons for each layer does not match #Layers'
    r_flag = True
    _Input = Input(shape=(input_shape,), dtype='int32')
    E = keras.layers.embeddings.Embedding(vocabSize, veclen,
                                          weights=(wordWeights,),
                                          mask_zero=True)(_Input)
    for ind in range(nLayers):
        if ind == (nLayers - 1):
            r_flag = False
        fwd_layer = keras.layers.recurrent.GRU(nHidden[ind], init='glorot_uniform',
                                               inner_init='orthogonal', activation='tanh',
                                               inner_activation='hard_sigmoid',
                                               return_sequences=r_flag)(E)
        bkwd_layer = keras.layers.recurrent.GRU(nHidden[ind], init='glorot_uniform',
                                                inner_init='orthogonal', activation='tanh',
                                                inner_activation='hard_sigmoid',
                                                return_sequences=r_flag,
                                                go_backwards=True)(E)
        E = merge([fwd_layer, bkwd_layer], mode='ave')
        # nHidden /= 2
    Output = Dense(1, activation='sigmoid')(Dropout(0.5)(E))
    model = Model(input=_Input, output=Output)
    opt = keras.optimizers.Adam(lr)
    # pass the Adam instance so the supplied lr actually takes effect
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
def get_layers(self, name, next_layer=False, last_layer=False, type=None):
    if type is None:
        name2layer = {layer.name: layer for layer in self.model.layers}
    else:
        name2layer = {}
        for layer in self.model.layers:
            for t in type:
                if t.lower() in layer.name.lower():
                    name2layer[layer.name] = layer
                    break
        # name2layer = {layer.name: layer for layer in self.model.layers if type.lower() in layer.name.lower()}

    def _get_layer(name):
        return name2layer[name]

    nodes = self.graph.get_nodes(name, next_layer, last_layer, type=type)
    if not isinstance(nodes, list):
        nodes = [nodes]
    '''
    for node in nodes:
        if node.name not in name2layer:
            embed()
    '''
    return map(_get_layer, [node.name for node in nodes])
def get_model_list(self, model):
    model_list = []
    model_dict = json.loads(model.to_json())
    model_layer = model_dict['config']['layers']
    for layer in model_layer:
        layer_name = layer['config']['name']
        layer_output_shape = model.get_layer(layer_name).output_shape
        if layer['class_name'] == 'Conv2D' and layer['config']['name'].lower().startswith('conv'):
            model_list.append([layer['class_name'], layer['config']['name'],
                               {'kernel_size': layer['config']['kernel_size'],
                                'filters': layer['config']['filters']}])
        elif layer['class_name'] == 'GlobalMaxPooling2D':
            model_list.append([layer['class_name'], layer['config']['name'], {}])
        elif layer['class_name'] == 'Activation':
            model_list.append([layer['class_name'], layer['config']['name'],
                               {'activation_type': 'softmax'}])
    return model_list
def buildModelLSTM_3(self):
    model = Sequential()

    layers = [self.inOutVecDim, 57, 57 * 2, 32, self.inOutVecDim]
    model.add(LSTM(input_dim=layers[0], output_dim=layers[1],
                   return_sequences=False))

    model.add(Dense(output_dim=layers[4]))
    model.add(Activation(self.activation))

    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.compile(loss="mae", optimizer=optimizer)
    return model
def buildModelLSTM_4(self):
    model = Sequential()

    layers = [self.inOutVecDim, 57, 57 * 2, 57, self.inOutVecDim]
    model.add(LSTM(input_dim=layers[0], output_dim=layers[1],
                   return_sequences=True))

    model.add(LSTM(layers[2], return_sequences=False))

    model.add(Dense(output_dim=layers[4]))
    model.add(Activation(self.activation))

    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.compile(loss="mae", optimizer=optimizer)
    return model
def test_initial_state_GRU(self):
    data = np.random.rand(1, 1, 2)

    model = keras.models.Sequential()
    model.add(keras.layers.GRU(5, input_shape=(1, 2),
                               batch_input_shape=[1, 1, 2], stateful=True))
    model.get_layer(index=1).reset_states()

    coreml_model = keras_converter.convert(model=model, input_names='data',
                                           output_names='output')

    keras_output_1 = model.predict(data)
    coreml_full_output_1 = coreml_model.predict({'data': data})
    coreml_output_1 = coreml_full_output_1['output']
    coreml_output_1 = np.expand_dims(coreml_output_1, 1)

    np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

    hidden_state = np.random.rand(1, 5)
    model.get_layer(index=1).reset_states(states=hidden_state)

    coreml_model = keras_converter.convert(model=model, input_names='data',
                                           output_names='output')
    spec = coreml_model.get_spec()

    keras_output_2 = model.predict(data)
    coreml_full_output_2 = coreml_model.predict(
        {'data': data, spec.description.input[1].name: hidden_state[0]})
    coreml_output_2 = coreml_full_output_2['output']
    coreml_output_2 = np.expand_dims(coreml_output_2, 1)

    np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
def test_initial_state_SimpleRNN(self):
    data = np.random.rand(1, 1, 2)

    model = keras.models.Sequential()
    model.add(keras.layers.SimpleRNN(5, input_shape=(1, 2),
                                     batch_input_shape=[1, 1, 2], stateful=True))
    model.get_layer(index=1).reset_states()

    coreml_model = keras_converter.convert(model=model, input_names='data',
                                           output_names='output')

    keras_output_1 = model.predict(data)
    coreml_full_output_1 = coreml_model.predict({'data': data})
    coreml_output_1 = coreml_full_output_1['output']
    coreml_output_1 = np.expand_dims(coreml_output_1, 1)

    np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

    hidden_state = np.random.rand(1, 5)
    model.get_layer(index=1).reset_states(states=hidden_state)

    coreml_model = keras_converter.convert(model=model, input_names='data',
                                           output_names='output')
    spec = coreml_model.get_spec()

    keras_output_2 = model.predict(data)
    coreml_full_output_2 = coreml_model.predict(
        {'data': data, spec.description.input[1].name: hidden_state[0]})
    coreml_output_2 = coreml_full_output_2['output']
    coreml_output_2 = np.expand_dims(coreml_output_2, 1)

    np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
def buildConvolution(self, name):
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    assert filters
    assert nb_filter
    convs = []
    for fsz in filters:
        layer_name = '%s-conv-%d' % (name, fsz)
        conv = Convolution2D(
            nb_filter=nb_filter,
            nb_row=fsz,
            nb_col=self.wdim,
            border_mode='valid',
            init='glorot_uniform',
            W_constraint=maxnorm(self.params.get('w_maxnorm')),
            b_constraint=maxnorm(self.params.get('b_maxnorm')),
            name=layer_name
        )
        convs.append(conv)
    self.layers['%s-convolution' % name] = convs
def build_model():
    input_tensor = Input(shape=(150, 150, 3))
    vgg16_model = VGG16(include_top=False, weights='imagenet',
                        input_tensor=input_tensor)

    dense = Flatten()(
        Dense(2048, activation='relu')(
            BN()(vgg16_model.layers[-1].output)))

    result = Activation('sigmoid')(
        Activation('linear')(
            Dense(4096)(dense)))

    model = Model(input=vgg16_model.input, output=result)
    for i in range(len(model.layers)):
        print(i, model.layers[i])
    for layer in model.layers[:12]:  # default 15
        layer.trainable = False

    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
# build_model()
def create_resnet50(input_img):
    net = ResNet50(weights='imagenet', include_top=False, input_tensor=input_img)
    for layer in net.layers[1:]:
        layer.trainable = False
    net = Reshape((-1,))(net.outputs[0])
    return net
def on_epoch_begin(self, epoch, logs=None):
    super(MyLearningRateScheduler, self).on_epoch_begin(epoch, logs=logs)
    if epoch > self.epoch_unfreeze:
        for i, layer in enumerate(self.model.layers[1:]):
            layer.trainable = i >= self.num_layers_to_freeze
    else:
        for layer in self.model.layers[1:-1]:
            layer.trainable = False
        self.model.layers[-1].trainable = True

    if not self.recompiled_first or (not self.recompiled and epoch > self.epoch_unfreeze):
        adam = keras.optimizers.Adam(lr=self.step_decay(epoch))
        self.model.compile(optimizer=adam, loss='binary_crossentropy',
                           metrics=['accuracy', custom_metrics.f2score_samples])
        self.model.summary()
        if not self.recompiled_first:
            self.recompiled_first = True
        else:
            self.recompiled = True
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
                           strides=init_strides,
                           padding="same",
                           kernel_initializer="he_normal",
                           kernel_regularizer=l2(1e-4))(input)
        else:
            conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
                                  strides=init_strides)(input)

        residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
        return _shortcut(input, residual)

    return f
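basic_block depends on two helpers from the same codebase, _bn_relu_conv and _shortcut. As a rough sketch of the pre-activation ordering the docstring cites, _bn_relu_conv typically looks like the following (an assumption based on the referenced paper and the standard keras.layers imports, not the repository's exact code):

def _bn_relu_conv(filters, kernel_size, strides=(1, 1)):
    # pre-activation ordering from He et al. 2016: BN -> ReLU -> Conv
    def f(input):
        x = BatchNormalization()(input)
        x = Activation("relu")(x)
        return Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding="same",
                      kernel_initializer="he_normal",
                      kernel_regularizer=l2(1e-4))(x)
    return f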
def resnet(repetition=2, k=1):
    '''Wide Residual Network (with a slight modification)
    depth == repetition*6 + 2
    '''
    from keras.models import Model
    from keras.layers import Input, Dense, Flatten, AveragePooling2D
    from keras.regularizers import l2

    input_shape = (1, _img_len, _img_len)
    output_dim = len(_columns)

    x = Input(shape=input_shape)
    z = conv2d(nb_filter=8, k_size=5, downsample=True)(x)          # out_shape ==    8, _img_len/ 2, _img_len/ 2
    z = bn_lrelu(0.01)(z)
    z = residual_block(nb_filter=k*16, repetition=repetition)(z)   # out_shape == k*16, _img_len/ 4, _img_len/ 4
    z = residual_block(nb_filter=k*32, repetition=repetition)(z)   # out_shape == k*32, _img_len/ 8, _img_len/ 8
    z = residual_block(nb_filter=k*64, repetition=repetition)(z)   # out_shape == k*64, _img_len/16, _img_len/16
    z = AveragePooling2D((_img_len // 16, _img_len // 16))(z)      # integer division keeps the pool size an int
    z = Flatten()(z)
    z = Dense(output_dim=output_dim, activation='sigmoid',
              W_regularizer=l2(_Wreg_l2), init='zero')(z)

    return Model(input=x, output=z)
def residual_block(nb_filter, repetition):
    '''(downsample ->) residual blocks ... -> BatchNormalization -> LeakyReLU'''
    from keras.layers import merge

    def f(x):
        for i in range(repetition):
            if i == 0:
                y = conv2d(nb_filter, downsample=True, k_size=1)(x)
                z = conv2d(nb_filter, downsample=True)(x)
            else:
                y = x
                z = bn_lrelu(0.01)(x)
                z = conv2d(nb_filter)(z)
            z = bn_lrelu(0.01)(z)
            z = conv2d(nb_filter)(z)
            x = merge([y, z], mode='sum')
        return bn_lrelu(0.01)(x)
    return f
def build_model():
    input_tensor = Input(shape=(224, 224, 3))
    resnet_model = ResNet50(include_top=False, weights='imagenet',
                            input_tensor=input_tensor)

    dense = Flatten()(
        Dense(2048, activation='relu')(
            BN()(resnet_model.layers[-1].output)))

    result = Activation('sigmoid')(
        Dense(2048, activation="linear")(dense))

    model = Model(inputs=resnet_model.input, outputs=result)
    for layer in model.layers[:139]:  # default 179
        # print(layer)
        if 'BatchNormalization' in str(layer):
            ...  # keep BatchNormalization layers trainable
        else:
            layer.trainable = False

    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
def define_model(weights_path):
    '''
    Define model structure with weights.
    '''
    from resnet50 import ResNet50
    from keras.models import Model
    from keras.layers import Dense, GlobalAveragePooling2D

    resnet50_model = ResNet50()
    fc1000 = resnet50_model.get_layer('fc1000').output
    final_softmax = Dense(output_dim=2, activation='softmax')(fc1000)
    resnet50_finetune_1skip = Model(input=resnet50_model.input,
                                    output=final_softmax)
    resnet50_finetune_1skip.load_weights(weights_path)

    resnet50_finetune_1skip.compile(loss="categorical_crossentropy",
                                    optimizer='nadam',
                                    metrics=['accuracy'])
    return resnet50_finetune_1skip
def make_trainable(net, val):
    net.trainable = val
    for l in net.layers:
        l.trainable = val
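A toggle like this is the standard trick for alternating GAN phases: train the discriminator, then freeze it while the stacked model updates the generator. A minimal sketch, assuming hypothetical models discriminator and gan, where discriminator was compiled with its layers trainable and the stacked gan model was compiled while they were frozen:

# hypothetical alternating update; X, y, noise, fool_targets are assumed to exist
make_trainable(discriminator, True)
d_loss = discriminator.train_on_batch(X, y)

make_trainable(discriminator, False)
g_loss = gan.train_on_batch(noise, fool_targets)  # only the generator learns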
def ann(input_shape, n_classes, layers=2, neurons=80, dropout=0.35):
    """ for working with extracted features """
    model = Sequential(name='ann')
    for l in range(layers):
        model.add(Dense(neurons, input_shape=input_shape, activation='elu',
                        kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def pure_rnn_do(input_shape, n_classes, layers=2, neurons=80, dropout=0.3):
    """ just replace ANN by RNNs """
    model = Sequential(name='pure_rnn')
    model.add(LSTM(neurons, return_sequences=False if layers == 1 else True,
                   input_shape=input_shape,
                   dropout=dropout, recurrent_dropout=dropout))
    for i in range(layers - 1):
        model.add(LSTM(neurons, return_sequences=False if i == layers - 2 else True,
                       dropout=dropout, recurrent_dropout=dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def bi_lstm(input_shape, n_classes, layers=2, neurons=80, dropout=0.3):
    """ just replace ANN by RNNs """
    model = Sequential(name='pure_rnn')
    model.add(Bidirectional(LSTM(neurons,
                                 return_sequences=False if layers == 1 else True,
                                 dropout=dropout, recurrent_dropout=dropout),
                            input_shape=input_shape))
    for i in range(layers - 1):
        model.add(Bidirectional(LSTM(neurons,
                                     return_sequences=False if i == layers - 2 else True,
                                     dropout=dropout, recurrent_dropout=dropout)))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
def cnn_model():
    '''
    Construct the CNN model:
    2*(Conv1D + relu + MaxPooling1D) + Flatten + 2*(Dense + relu) + Dense + softmax
    Tissue inputs will be inserted after the first Dense/relu layer, if activated
    '''
    print('Construct CNN model')
    main_inputs = Input(shape=X_DATA[0].shape, name='sequence_inputs')
    hidden = Conv1D(128, kernel_size=2, padding='same', activation='relu')(main_inputs)
    hidden = MaxPooling1D(pool_size=10)(hidden)
    hidden = Conv1D(128, kernel_size=2, padding='same', activation='relu')(hidden)
    hidden = MaxPooling1D(pool_size=10)(hidden)
    if ARGS.d:
        hidden = Dropout(ARGS.d / 100)(hidden)
    hidden = Flatten()(hidden)
    hidden = Dense(625)(hidden)
    hidden = Activation('relu')(hidden)
    if ARGS.T:
        auxiliary_inputs = Input(shape=TISSUE_DATA[0].shape, name='tissue_inputs')
        hidden = keras.layers.concatenate([hidden, auxiliary_inputs])
    hidden = Dense(125)(hidden)
    hidden = Activation('relu')(hidden)
    outputs = Dense(CLASSES, activation='softmax')(hidden)
    if ARGS.T:
        model = Model(inputs=[main_inputs, auxiliary_inputs], outputs=outputs)
    else:
        model = Model(inputs=main_inputs, outputs=outputs)
    model.summary()
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    model.save_weights('{}model.h5~'.format(ARGS.o))
    return model
def dnn_model():
    '''
    Construct the DNN model:
    Flatten + 2*(Dense + relu) + Dense + softmax
    Tissue inputs will be inserted after the first Dense/relu layer, if activated
    '''
    print('Construct DNN model')
    main_inputs = Input(shape=X_DATA[0].shape, name='sequence_inputs')
    hidden = Flatten()(main_inputs)
    hidden = Dense(128)(hidden)
    hidden = Activation('relu')(hidden)
    if ARGS.T:
        auxiliary_inputs = Input(shape=TISSUE_DATA[0].shape, name='tissue_inputs')
        hidden = keras.layers.concatenate([hidden, auxiliary_inputs])
    hidden = Dense(128)(hidden)
    hidden = Activation('relu')(hidden)
    outputs = Dense(CLASSES, activation='softmax')(hidden)
    if ARGS.T:
        model = Model(inputs=[main_inputs, auxiliary_inputs], outputs=outputs)
    else:
        model = Model(inputs=main_inputs, outputs=outputs)
    model.summary()
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    model.save_weights('{}model.h5~'.format(ARGS.o))
    return model
def get_params(mo):
    para = []
    for la in mo.layers:
        par = la.get_weights()
        para += par
    return para
def _get_softmax_name(self):
    """
    Looks for the name of the softmax layer.
    :return: Softmax layer name
    """
    for i, layer in enumerate(self.model.layers):
        cfg = layer.get_config()
        if 'activation' in cfg and cfg['activation'] == 'softmax':
            return layer.name

    raise Exception("No softmax layers found")
def get_layer_names(self):
    """
    :return: Names of all the layers kept by Keras
    """
    layer_names = [x.name for x in self.model.layers]
    return layer_names
def fprop(self, x):
    """
    Exposes all the layers of the model returned by get_layer_names.
    :param x: A symbolic representation of the network input
    :return: A dictionary mapping layer names to the symbolic
             representation of their output.
    """
    from keras.models import Model as KerasModel

    if self.keras_model is None:
        # Get the input layer
        new_input = self.model.get_input_at(0)

        # Make a new model that returns each of the layers as output
        out_layers = [x_layer.output for x_layer in self.model.layers]
        self.keras_model = KerasModel(new_input, out_layers)

    # and get the outputs for that model on the input x
    outputs = self.keras_model(x)

    # Keras only returns a list for outputs of length > 1; if the model
    # has only one layer, wrap the single output in a list
    if len(self.model.layers) == 1:
        outputs = [outputs]

    # compute the dict to return
    fprop_dict = dict(zip(self.get_layer_names(), outputs))

    return fprop_dict
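Together with get_layer_names and _get_softmax_name above, fprop gives symbolic access to every intermediate activation, which is what gradient-based attacks need. A rough usage sketch, where wrapper is a hypothetical instance of this class and x an input placeholder:

# hypothetical: map every layer name to its symbolic output for input x
layer_outputs = wrapper.fprop(x)
probs = layer_outputs[wrapper._get_softmax_name()]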
def _build_block_model(inputs, block):
    if isinstance(inputs, list) and len(inputs) == 1:
        inputs = inputs[0]
    if block.input_layers and len(block.input_layers) > 0:
        for layer in block.input_layers:
            inputs = _build_layer_model(inputs, layer)
    for layer in block.layers:
        inputs = _build_layer_model(inputs, layer)
    return inputs
def _get_layer_model(layer_type):
    if is_custom_layer(layer_type):
        return get_custom_layer(layer_type)[0]

    modules = [keras.layers, keras.layers.normalization]
    for module in modules:
        # default to None so a module without this layer type falls through
        model = getattr(module, layer_type, None)
        if model:
            return model
    return None
def copy_weights(teacher_model, student_model, layer_names):
    '''Copy weights from teacher_model to student_model,
    for layers with names listed in layer_names
    '''
    for name in layer_names:
        weights = teacher_model.get_layer(name=name).get_weights()
        student_model.get_layer(name=name).set_weights(weights)

# methods to construct teacher_model and student_models
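The only requirement is that the named layers exist in both models with identical weight shapes. A minimal sketch, with hypothetical builder functions standing in for the teacher/student constructors mentioned in the comment above:

# hypothetical models; shared layers carry the same names in both
teacher_model = make_teacher_model()           # e.g. conv1 -> fc1 -> softmax
student_model = make_deeper_student_model()    # extra layers, same conv1/fc1 names

copy_weights(teacher_model, student_model, layer_names=['conv1', 'fc1'])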
def __init__(self, cp):
    print("Building network ...")
    # First, we build the network, starting with an input layer
    # Recurrent layers expect input of shape
    # (batch size, SEQ_LENGTH, num_features)

    # this is the placeholder tensor for the input sequences
    sequence = Input(shape=(maxlen,), dtype='int32')
    # this embedding layer will transform the sequences of integers
    # into vectors of size 128
    embedded = Embedding(max_features, 128, input_length=maxlen)(sequence)

    # apply forwards LSTM
    forwards = LSTM(64)(embedded)
    # apply backwards LSTM
    backwards = LSTM(64, go_backwards=True)(embedded)

    # concatenate the outputs of the 2 LSTMs
    merged = merge([forwards, backwards], mode='concat', concat_axis=-1)
    after_dp = Dropout(0.5)(merged)
    output = Dense(1, activation='sigmoid')(after_dp)

    self.model = Model(input=sequence, output=output)

    # try using different optimizers and different optimizer configs
    self.model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
def TimeDistributedResNet18(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a time distributed `keras.models.Model` according to the ResNet18 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: Time distributed ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> y = keras_resnet.models.TimeDistributedResNet18(x)
        >>> y = keras.layers.TimeDistributed(keras.layers.Flatten())(y.output)
        >>> y = keras.layers.TimeDistributed(keras.layers.Dense(classes, activation="softmax"))(y)
        >>> model = keras.models.Model(x, y)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [2, 2, 2, 2]

    return TimeDistributedResNet(inputs, blocks, block=keras_resnet.blocks.time_distributed_basic_2d, include_top=include_top, classes=classes, *args, **kwargs)
def TimeDistributedResNet34(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a time distributed `keras.models.Model` according to the ResNet34 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: Time distributed ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> y = keras_resnet.models.TimeDistributedResNet34(x)
        >>> y = keras.layers.TimeDistributed(keras.layers.Flatten())(y.output)
        >>> y = keras.layers.TimeDistributed(keras.layers.Dense(classes, activation="softmax"))(y)
        >>> model = keras.models.Model(x, y)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 4, 6, 3]

    return TimeDistributedResNet(inputs, blocks, block=keras_resnet.blocks.time_distributed_basic_2d, include_top=include_top, classes=classes, *args, **kwargs)
def TimeDistributedResNet50(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a time distributed `keras.models.Model` according to the ResNet50 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: Time distributed ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> y = keras_resnet.models.TimeDistributedResNet50(x)
        >>> y = keras.layers.TimeDistributed(keras.layers.Flatten())(y.output)
        >>> y = keras.layers.TimeDistributed(keras.layers.Dense(classes, activation="softmax"))(y)
        >>> model = keras.models.Model(x, y)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 4, 6, 3]

    return TimeDistributedResNet(inputs, blocks, block=keras_resnet.blocks.time_distributed_bottleneck_2d, include_top=include_top, classes=classes, *args, **kwargs)
def TimeDistributedResNet101(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a time distributed `keras.models.Model` according to the ResNet101 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: Time distributed ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> y = keras_resnet.models.TimeDistributedResNet101(x)
        >>> y = keras.layers.TimeDistributed(keras.layers.Flatten())(y.output)
        >>> y = keras.layers.TimeDistributed(keras.layers.Dense(classes, activation="softmax"))(y)
        >>> model = keras.models.Model(x, y)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 4, 23, 3]

    return TimeDistributedResNet(inputs, blocks, block=keras_resnet.blocks.time_distributed_bottleneck_2d, include_top=include_top, classes=classes, *args, **kwargs)
def TimeDistributedResNet152(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a time distributed `keras.models.Model` according to the ResNet152 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: Time distributed ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> y = keras_resnet.models.TimeDistributedResNet152(x)
        >>> y = keras.layers.TimeDistributed(keras.layers.Flatten())(y.output)
        >>> y = keras.layers.TimeDistributed(keras.layers.Dense(classes, activation="softmax"))(y)
        >>> model = keras.models.Model(x, y)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 8, 36, 3]

    return TimeDistributedResNet(inputs, blocks, block=keras_resnet.blocks.time_distributed_bottleneck_2d, include_top=include_top, classes=classes, *args, **kwargs)
def ResNet18(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet18 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> model = keras_resnet.models.ResNet18(x, classes=classes)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [2, 2, 2, 2]

    return ResNet(inputs, blocks, block=keras_resnet.blocks.basic_2d, include_top=include_top, classes=classes, *args, **kwargs)
def ResNet34(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet34 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> model = keras_resnet.models.ResNet34(x, classes=classes)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 4, 6, 3]

    return ResNet(inputs, blocks, block=keras_resnet.blocks.basic_2d, include_top=include_top, classes=classes, *args, **kwargs)
def ResNet50(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet50 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> model = keras_resnet.models.ResNet50(x)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 4, 6, 3]
    numerical_names = [False, False, False, False]

    return ResNet(inputs, blocks, numerical_names=numerical_names, block=keras_resnet.blocks.bottleneck_2d, include_top=include_top, classes=classes, *args, **kwargs)
def ResNet101(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet101 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> model = keras_resnet.models.ResNet101(x, classes=classes)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 4, 23, 3]
    numerical_names = [False, True, True, False]

    return ResNet(inputs, blocks, numerical_names=numerical_names, block=keras_resnet.blocks.bottleneck_2d, include_top=include_top, classes=classes, *args, **kwargs)
def ResNet152(inputs, blocks=None, include_top=True, classes=1000, *args, **kwargs):
    """
    Constructs a `keras.models.Model` according to the ResNet152 specifications.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param include_top: if true, includes classification layers
    :param classes: number of classes to classify (include_top must be true)

    :return model: ResNet model with encoding output (if `include_top=False`) or classification output (if `include_top=True`)

    Usage:

        >>> import keras_resnet.models
        >>> shape, classes = (224, 224, 3), 1000
        >>> x = keras.layers.Input(shape)
        >>> model = keras_resnet.models.ResNet152(x, classes=classes)
        >>> model.compile("adam", "categorical_crossentropy", ["accuracy"])
    """
    if blocks is None:
        blocks = [3, 8, 36, 3]
    numerical_names = [False, True, True, False]

    return ResNet(inputs, blocks, numerical_names=numerical_names, block=keras_resnet.blocks.bottleneck_2d, include_top=include_top, classes=classes, *args, **kwargs)
def __init__(self, n_in, hidden_layer_size, n_out, hidden_layer_type,
             output_type='linear', dropout_rate=0.0, loss_function='mse',
             optimizer='adam'):
    """
    This function initialises a neural network

    :param n_in: Dimensionality of input features
    :param hidden_layer_size: The layer size for each hidden layer
    :param n_out: Dimensionality of output features
    :param hidden_layer_type: the activation type of each hidden layer, e.g., TANH, LSTM, GRU, BLSTM
    :param output_type: the activation type of the output layer, by default 'LINEAR' (linear regression)
    :param dropout_rate: probability of dropout, a float number between 0 and 1
    :type n_in: Integer
    :type hidden_layer_size: A list of integers
    :type n_out: Integer
    """
    self.n_in = int(n_in)
    self.n_out = int(n_out)

    self.n_layers = len(hidden_layer_size)

    self.hidden_layer_size = hidden_layer_size
    self.hidden_layer_type = hidden_layer_type

    assert len(self.hidden_layer_size) == len(self.hidden_layer_type)

    self.output_type = output_type
    self.dropout_rate = dropout_rate
    self.loss_function = loss_function
    self.optimizer = optimizer

    # create model
    self.model = Sequential()
def define_feedforward_model(self):
    seed = 12345
    np.random.seed(seed)

    # add hidden layers
    for i in range(self.n_layers):
        if i == 0:
            input_size = self.n_in
        else:
            input_size = self.hidden_layer_size[i - 1]

        self.model.add(Dense(
            units=self.hidden_layer_size[i],
            activation=self.hidden_layer_type[i],
            kernel_initializer="normal",
            input_dim=input_size))

        self.model.add(Dropout(self.dropout_rate))

    # add output layer
    self.final_layer = self.model.add(Dense(
        units=self.n_out,
        activation=self.output_type.lower(),
        kernel_initializer="normal",
        input_dim=self.hidden_layer_size[-1]))

    # Compile the model
    self.compile_model()
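End to end, the class might be used like this. A minimal sketch, assuming the class above is called KerasModels (a hypothetical name) and that compile_model() applies loss_function and optimizer:

# hypothetical instantiation: four tanh hidden layers, linear regression output
net = KerasModels(n_in=425, hidden_layer_size=[512, 512, 512, 512], n_out=187,
                  hidden_layer_type=['tanh', 'tanh', 'tanh', 'tanh'],
                  output_type='linear', dropout_rate=0.0,
                  loss_function='mse', optimizer='adam')
net.define_feedforward_model()
# net.model.fit(x_train, y_train, batch_size=256, epochs=25)  # data assumed to exist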
def concatenate(x):
    if hasattr(keras.layers, 'Concatenate'):
        return keras.layers.Concatenate()(x)
    else:
        return keras.layers.merge(x, mode='concat')
def add(x):
    if hasattr(keras.layers, 'Add'):
        return keras.layers.Add()(x)
    else:
        return keras.layers.merge(x, mode='sum')
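These last two helpers hide the Keras 1 vs. Keras 2 merge-API split (keras.layers.merge(mode=...) was replaced by the Add and Concatenate layers), so calling code stays version-agnostic. A small sketch of building a residual connection with them (the shapes and layer sizes are illustrative, and keras is assumed to be imported at module level as the helpers require):

from keras.layers import Input, Dense

# illustrative residual connection built with the helpers above
x = Input(shape=(64,))
h = Dense(64, activation='relu')(x)
h = Dense(64)(h)
summed = add([x, h])               # elementwise sum on either Keras version
merged = concatenate([x, summed])  # feature concatenation likewise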