The following code examples, extracted from open-source Python projects, illustrate how to use keras.regularizers.WeightRegularizer().
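Before the project examples, here is a minimal sketch of the basic pattern (Keras 1.x API; the layer sizes and penalty strength below are illustrative and not taken from any of the projects):

from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import WeightRegularizer

# Attach an L2 weight penalty to a Dense layer via W_regularizer (Keras 1.x).
model = Sequential()
model.add(Dense(32, input_dim=64, activation='relu',
                W_regularizer=WeightRegularizer(l2=0.01)))
model.compile(optimizer='sgd', loss='mse')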
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    # Each regularized weight contributes one loss term: SimpleRNN has a single
    # W/U/b set (3), GRU has three gates (9), LSTM has four gates (12).
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
def construct_model(model_spec, input_dim, output_dim):
    """
    Helper to construct a Keras model based on a dict of specs and input size.

    Parameters
    ----------
    model_spec: dict
        Dict containing keys: arch, activation, dropout, optimizer, loss,
        w_reg, metrics
    input_dim: int
        Size of the input dimension
    output_dim: int
        Size of the output dimension

    Returns
    -------
    model: Compiled keras.models.Sequential
    """
    model = Sequential()
    for li, layer_size in enumerate(model_spec['arch']):
        # Set output size for the last layer
        if layer_size == 'None':
            layer_size = output_dim
        # For the input layer, add the input dimension
        if li == 0:
            temp_input_dim = input_dim
            model.add(Dense(layer_size, input_dim=input_dim,
                            activation=model_spec['activation'],
                            W_regularizer=weight_reg(model_spec['w_reg'][0],
                                                     model_spec['w_reg'][1]),
                            name='Input'))
        else:
            model.add(Dense(layer_size,
                            activation=model_spec['activation'],
                            W_regularizer=weight_reg(model_spec['w_reg'][0],
                                                     model_spec['w_reg'][1]),
                            name='Layer_%i' % li))
        if model_spec['dropout'] > 0.:
            model.add(Dropout(model_spec['dropout'], name='Dropout_%i' % li))
    model.compile(optimizer=model_spec['optimizer'],
                  loss=model_spec['loss'],
                  metrics=model_spec['metrics'])
    return model
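The weight_reg helper called inside construct_model is not shown in this snippet. A minimal sketch of what it might look like, assuming the first element of model_spec['w_reg'] names the penalty type and the second gives its strength (the argument names and branches here are assumptions, not code from the original project):

from keras.regularizers import WeightRegularizer

def weight_reg(reg_type, strength):
    # Hypothetical mapping from a (type, strength) spec to a WeightRegularizer.
    if reg_type == 'l1':
        return WeightRegularizer(l1=strength)
    if reg_type == 'l2':
        return WeightRegularizer(l2=strength)
    if reg_type == 'l1l2':
        return WeightRegularizer(l1=strength, l2=strength)
    # No weight regularization requested.
    return None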