The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.BatchNormalization().
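Before the project examples, here is a minimal, self-contained sketch of the layer's basic usage; the toy model, layer sizes, and random data are purely illustrative and are not taken from any of the projects below:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Activation

# Toy binary classifier: Dense -> BatchNormalization -> ReLU on 20-dimensional input.
model = Sequential()
model.add(Dense(64, input_shape=(20,)))
model.add(BatchNormalization())   # normalizes the Dense outputs over each mini-batch
model.add(Activation('relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')

# Train for one epoch on random data just to exercise the layer.
x = np.random.rand(128, 20)
y = np.random.randint(0, 2, size=(128, 1))
model.fit(x, y, epochs=1, batch_size=32, verbose=0)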
def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2,
             loss='risk_estimation'):
    print("initializing..., learning rate %s, n_layers %s, n_hidden %s, dropout rate %s."
          % (lr, n_layers, n_hidden, rate_dropout))
    self.model = Sequential()
    self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
    for i in range(0, n_layers - 1):
        self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                            recurrent_activation='hard_sigmoid',
                            kernel_initializer='glorot_uniform',
                            recurrent_initializer='orthogonal', bias_initializer='zeros',
                            dropout=rate_dropout, recurrent_dropout=rate_dropout))
    self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                        recurrent_activation='hard_sigmoid',
                        kernel_initializer='glorot_uniform',
                        recurrent_initializer='orthogonal', bias_initializer='zeros',
                        dropout=rate_dropout, recurrent_dropout=rate_dropout))
    self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
    # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
    #                                   moving_variance_initializer=Constant(value=0.25)))
    self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
    self.model.add(Activation('relu_limited'))
    opt = RMSprop(lr=lr)
    self.model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])
def first_block(tensor_input, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = Conv1D(k1, 1, padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, padding='same')(tensor_input)

    # out = merge([out, pooling], mode='sum')
    out = add([out, pooling])
    return out
def build_mlp(n_con, n_emb, vocabs_size, n_dis, emb_size, cluster_size):
    hidden_size = 800
    con = Sequential()
    con.add(Dense(input_dim=n_con, output_dim=emb_size))

    emb_list = []
    for i in range(n_emb):
        emb = Sequential()
        emb.add(Embedding(input_dim=vocabs_size[i], output_dim=emb_size, input_length=n_dis))
        emb.add(Flatten())
        emb_list.append(emb)

    model = Sequential()
    model.add(Merge([con] + emb_list, mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size, activation='softmax'))
    model.add(Lambda(caluate_point, output_shape=[2]))
    return model
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35):
    """
    for working with extracted features
    """
    # gpu = switch_gpu()
    # with K.tf.device('/gpu:{}'.format(gpu)):
    #     K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True,
    #                                                        log_device_placement=False)))
    model = Sequential(name='ann')
    # model.gpu = gpu
    for l in range(layers):
        model.add(Dense(neurons, input_shape=input_shape, activation='elu',
                        kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model

#%% everything recurrent for ANN
def create_actor_network(self, state_size, action_dim):
    print("Now we build the model")
    # Batch norm version
    S = Input(shape=[state_size])
    s1 = BatchNormalization()(S)
    s1 = Dense(HIDDEN1_UNITS)(s1)
    s1 = BatchNormalization()(s1)
    s1 = Activation('relu')(s1)
    s1 = Dense(HIDDEN2_UNITS)(s1)
    s1 = BatchNormalization()(s1)
    h1 = Activation('relu')(s1)

    Steering = Dense(1, activation='tanh')(h1)
    Acceleration = Dense(1, activation='sigmoid')(h1)
    Brake = Dense(1, activation='sigmoid')(h1)

    # V = merge([Steering, Acceleration, Brake], mode='concat')
    V = layers.concatenate([Steering, Acceleration, Brake])
    model = Model(inputs=S, outputs=V)
    return model, model.trainable_weights, S
def make_model(batch_size, image_dim):
    model = Sequential()
    model.add(BatchNormalization(batch_input_shape=(batch_size, image_dim[1], image_dim[2], 1)))
    model.add(Conv2D(16, [3, 3], activation='relu', padding='same'))
    # model.add(Dropout(0.2))
    model.add(Conv2D(32, [3, 3], activation='relu', padding='same'))
    # model.add(Dropout(0.2))
    model.add(Conv2D(64, [3, 3], activation='relu', padding='same'))
    model.add(Dropout(0.2))
    # model.add(Conv2D(16, [3, 3], activation='relu', padding='same'))
    # model.add(Dropout(0.2))
    # model.add(Conv2D(16, [3, 3], activation='relu', padding='same'))
    # model.add(Dropout(0.2))
    # model.add(Conv2D(16, [3, 3], activation='relu', padding='same'))
    # model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    # model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    # model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(1, kernel_size=1, padding='same', activation='sigmoid'))
    return model
def repeated_block(x, filters, kernel_size=3, pooling_size=1, dropout=0.5):
    k1, k2 = filters

    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1, kernel_size, strides=2, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2, kernel_size, strides=2, padding='same')(out)

    pooling = MaxPooling1D(pooling_size, strides=4, padding='same')(x)

    out = add([out, pooling])
    # out = merge([out, pooling])
    return out
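A hedged sketch of how first_block and repeated_block above might be chained into a small 1-D residual stack; the sequence length, channel count, and classification head are assumptions for illustration (the input is given 64 channels so the residual add in first_block lines up without broadcasting):

from keras.layers import Input, Dense, GlobalAveragePooling1D
from keras.models import Model

# Assumed input: sequences of length 256 with 64 channels.
inp = Input(shape=(256, 64))
x = first_block(inp, (64, 64), kernel_size=3, pooling_size=1, dropout=0.5)
x = repeated_block(x, (64, 64), kernel_size=3, pooling_size=1, dropout=0.5)  # reduces length by 4
x = GlobalAveragePooling1D()(x)
out = Dense(10, activation='softmax')(x)  # illustrative 10-class head
model = Model(inp, out)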
def test_keras_import(self):
    model = Sequential()
    model.add(BatchNormalization(center=True, scale=True,
                                 beta_regularizer=regularizers.l2(0.01),
                                 gamma_regularizer=regularizers.l2(0.01),
                                 beta_constraint='max_norm', gamma_constraint='max_norm',
                                 input_shape=(10, 16)))
    model.build()
    json_string = Model.to_json(model)
    with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
        json.dump(json.loads(json_string), out, indent=4)
    sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
    response = self.client.post(reverse('keras-import'), {'file': sample_file})
    response = json.loads(response.content)
    layerId = sorted(response['net'].keys())
    self.assertEqual(response['result'], 'success')
    self.assertEqual(response['net'][layerId[0]]['info']['type'], 'Scale')
    self.assertEqual(response['net'][layerId[1]]['info']['type'], 'BatchNorm')


# ********** Noise Layers **********
def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['BatchNorm'], 'l2': net['Scale']}
    net['l0']['connection']['output'].append('l1')
    # Test 1
    inp = data(net['l0'], '', 'l0')['l0']
    temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
    model = Model(inp, temp['l2'])
    self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
    # Test 2
    net['l2']['params']['filler'] = 'VarianceScaling'
    net['l2']['params']['bias_filler'] = 'VarianceScaling'
    inp = data(net['l0'], '', 'l0')['l0']
    temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
    model = Model(inp, temp['l2'])
    self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
    # Test 3
    inp = data(net['l0'], '', 'l0')['l0']
    temp = batch_norm(net['l1'], [inp], 'l1', 'l0', net['l0'])
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
def conv2d_bn(x, nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1), name=None):
    '''Utility function to apply conv + BN.
    '''
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    if K.image_dim_ordering() == 'th':
        bn_axis = 1
    else:
        bn_axis = 3
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      activation='relu',
                      border_mode=border_mode,
                      name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
    return x
def keepsize_256(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """

    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])
        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)
    for i in range(depth - 1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(4 * n_filters, (3, 3), padding='valid', kernel_initializer='he_normal',
               kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal',
                   kernel_regularizer=l2(l2_reg))(x)

    return Model(inputs=inputs, outputs=final)
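A possible call of keepsize_256, just to show the expected arguments; the spatial size, noise level, and depth here are made-up values, and ReflectionPadding2D is assumed to be defined in the same module as the function above:

model = keepsize_256(nx=128, ny=128, noise=1e-3, depth=5)
model.summary()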
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample,
                                 border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor)

    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=(1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution2D(nb_filters, 1, 1, subsample=subsample,
                                 border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
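A short usage sketch for the 2-D res_block above, following the Keras 1-style API it relies on; the input size and channel counts are assumptions chosen so the identity shortcut matches (16 input channels for the first block):

from keras.layers import Input

# Identity block, then a downsampling block that doubles the filters.
inp = Input(shape=(64, 64, 16))
x = res_block(inp, nb_filters=16, block=0, subsample_factor=1)
x = res_block(x, nb_filters=32, block=1, subsample_factor=2)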
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])

    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)

    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
def addLayer(previousLayer, nChannels, nOutChannels, dropRate, blockNum):
    bn = BatchNormalization(name='denseb_BatchNorm_{}'.format(blockNum), axis=1)(previousLayer)
    relu = Activation('relu', name='denseb_relu_{}'.format(blockNum))(bn)
    conv = Convolution2D(nOutChannels, 3, 3, border_mode='same',
                         name='denseb_conv_{}'.format(blockNum))(relu)
    if dropRate is not None:
        dp = Dropout(dropRate, name='denseb_dropout_{}'.format(blockNum))(conv)
        return merge([dp, previousLayer], mode='concat', concat_axis=1)
    else:
        return merge([conv, previousLayer], mode='concat', concat_axis=1)
def addTransition(previousLayer, nChannels, nOutChannels, dropRate, blockNum):
    bn = BatchNormalization(name='tr_BatchNorm_{}'.format(blockNum), axis=1)(previousLayer)
    relu = Activation('relu', name='tr_relu_{}'.format(blockNum))(bn)
    conv = Convolution2D(nOutChannels, 1, 1, border_mode='same',
                         name='tr_conv_{}'.format(blockNum))(relu)
    if dropRate is not None:
        dp = Dropout(dropRate, name='tr_dropout_{}'.format(blockNum))(conv)
        avgPool = AveragePooling2D(pool_size=(2, 2))(dp)
    else:
        avgPool = AveragePooling2D(pool_size=(2, 2))(conv)
    return avgPool
def ResidualBlock1D_helper(layers, kernel_size, filters, final_stride=1):
    def f(_input):
        basic = _input
        for ln in range(layers):
            # basic = BatchNormalization()(basic)  # triggers known keras bug w/ TimeDistributed:
            # https://github.com/fchollet/keras/issues/5221
            basic = ELU()(basic)
            basic = Conv1D(filters, kernel_size, kernel_initializer='he_normal',
                           kernel_regularizer=l2(1.e-4), padding='same')(basic)

        # note that this strides without averaging
        return AveragePooling1D(pool_size=1, strides=final_stride)(Add()([_input, basic]))
    return f
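A hedged usage sketch for ResidualBlock1D_helper; the sequence length is illustrative, and the input is given the same number of channels as filters so the Add() shortcut matches:

from keras.layers import Input
from keras.models import Model

# Three ELU/Conv1D layers with 32 filters of width 5, plus the residual shortcut.
inp = Input(shape=(100, 32))
out = ResidualBlock1D_helper(layers=3, kernel_size=5, filters=32, final_stride=1)(inp)
model = Model(inp, out)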
def build_generator(self):
    model = Sequential()

    model.add(Dense(1024, activation='relu', input_dim=self.latent_dim))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(128 * 7 * 7, activation="relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(self.channels, kernel_size=4, padding='same'))
    model.add(Activation("tanh"))

    model.summary()

    gen_input = Input(shape=(self.latent_dim,))
    img = model(gen_input)

    return Model(gen_input, img)
def build_generator(self):
    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=100))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(100,))
    img = model(noise)

    return Model(noise, img)
def build_generator(self):
    noise_shape = (100,)

    model = Sequential()

    model.add(Dense(256, input_shape=noise_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=noise_shape)
    img = model(noise)

    return Model(noise, img)
def build_discriminator(self):
    model = Sequential()

    model.add(Dense(512, input_dim=self.encoded_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1, activation="sigmoid"))

    model.summary()

    encoded_repr = Input(shape=(self.encoded_dim, ))
    validity = model(encoded_repr)

    return Model(encoded_repr, validity)
def build_encoder(self):
    model = Sequential()

    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.latent_dim))

    model.summary()

    img = Input(shape=self.img_shape)
    z = model(img)

    return Model(img, z)
def build_generator(self):
    model = Sequential()

    model.add(Dense(512, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    z = Input(shape=(self.latent_dim,))
    gen_img = model(z)

    return Model(z, gen_img)
def _model(self, input_shape):
    self.model.add(Dense(self.hidden[0], input_shape=(input_shape[1],),
                         kernel_regularizer=l2(self.wd), kernel_initializer=self.ki))
    if self.bn:
        self.model.add(BatchNormalization(axis=1))
    self.model.add(Activation(self.activation))
    for i in self.hidden[1:]:
        self.model.add(Dense(i, kernel_regularizer=l2(self.wd), kernel_initializer=self.ki))
        if self.bn:
            self.model.add(BatchNormalization(axis=1))
        self.model.add(Activation(self.activation))
    self.model.add(Dense(self.N, activation='softmax', kernel_regularizer=l2(self.wd),
                         kernel_initializer=self.ki))
def _adversary():
    model = Sequential()
    model.add(Convolution2D(64, 5, 5, border_mode='same',
                            input_shape=(3, 32, 32), subsample=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(Convolution2D(128, 5, 5, subsample=(2, 2)))
    model.add(BatchNormalization(mode=2))
    model.add(LeakyReLU(0.2))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(LeakyReLU())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
def test_conv_batchnorm_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape, filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))
    model.add(BatchNormalization(epsilon=1e-5))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_keras_model(model, model_precision=model_precision)
def test_conv_batchnorm_no_gamma_no_beta(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 3
    kernel_height = 5
    kernel_width = 5

    # Define a model
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape, filters=num_kernels,
                     kernel_size=(kernel_height, kernel_width)))
    model.add(BatchNormalization(center=False, scale=False, epsilon=1e-5))
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_keras_model(model, model_precision=model_precision)
def test_tiny_mcrnn_music_tagger(self):
    x_in = Input(shape=(4, 6, 1))
    x = ZeroPadding2D(padding=(0, 1))(x_in)
    x = BatchNormalization(axis=2, name='bn_0_freq')(x)
    # Conv block 1
    x = Conv2D(2, (3, 3), padding='same', name='conv1')(x)
    x = BatchNormalization(axis=3, name='bn1')(x)
    x = Activation('elu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
    # Conv block 2
    x = Conv2D(4, (3, 3), padding='same', name='conv2')(x)
    x = BatchNormalization(axis=3, name='bn2')(x)
    x = Activation('elu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)

    # Should get you (1, 1, 2, 4)
    x = Reshape((2, 4))(x)
    x = GRU(32, return_sequences=True, name='gru1')(x)
    x = GRU(32, return_sequences=False, name='gru2')(x)

    # Create model.
    model = Model(x_in, x)
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), name=None):
    '''Utility function to apply conv + BN.
    '''
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    if K.image_data_format() == 'channels_first':
        bn_axis = 1
    else:
        bn_axis = 3
    x = Conv2D(filters, (num_row, num_col),
               strides=strides,
               padding=padding,
               use_bias=False,
               name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    x = Activation('relu', name=name)(x)
    return x
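A brief, assumed usage of this conv2d_bn variant on a channels_last input; the image size and filter counts are illustrative:

from keras.layers import Input

img = Input(shape=(32, 32, 3))
x = conv2d_bn(img, 64, 3, 3)                                   # 3x3 conv + BN + ReLU, stride 1
x = conv2d_bn(x, 96, 3, 3, strides=(2, 2), padding='valid')    # downsampling variant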
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    ''' Adds a Batchnorm-Relu-Conv block for DPN
    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return x
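As an illustration only, the two DPN helpers above could be composed like this on a channels_last 224x224 RGB input; the filter counts are assumptions, not values from the original project:

from keras.layers import Input

img = Input(shape=(224, 224, 3))
x = _initial_conv_block_inception(img, initial_conv_filters=64)
x = _bn_relu_conv_block(x, filters=128, kernel=(3, 3))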