The following code examples, extracted from open-source Python projects, illustrate how to use the keras.regularizers module.
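Before the extracted examples, here is a minimal sketch of the basic pattern under the Keras 2 API (under Keras 1 the corresponding arguments were W_regularizer and b_regularizer):

from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import l1, l2

# kernel_regularizer penalizes the layer's weights and bias_regularizer its
# biases; the penalty terms are added to the model's loss during training.
model = Sequential([
    Dense(64, input_shape=(20,), activation='relu',
          kernel_regularizer=l2(1e-4),   # L2 weight decay
          bias_regularizer=l1(1e-5)),    # L1 penalty on the biases
    Dense(1, activation='sigmoid'),
])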
from copy import deepcopy

def _build_layer_parameters(layer):
    parameters = deepcopy(layer.parameters)
    regularizers = [
        'activity_regularizer', 'b_regularizer', 'W_regularizer',
        'gamma_regularizer', 'beta_regularizer']
    # Replace serialized regularizer configs with live regularizer objects.
    for regularizer in regularizers:
        if regularizer in parameters:
            parameters[regularizer] = _get_regularizer(parameters[regularizer])
    # Swap a custom activation referenced by name for its callable.
    activation = parameters.get('activation', None)
    if activation:
        if is_custom_activation(activation):
            parameters['activation'] = get_custom_activation(activation)
    return parameters
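_get_regularizer is not shown in this example. A plausible sketch, assuming the stored value is something keras.regularizers.get can resolve (a config dict, a name such as 'l2', or None):

from keras import regularizers

def _get_regularizer(config):
    # Hypothetical helper: resolve a serialized regularizer to an object.
    # keras.regularizers.get accepts dicts, string names, callables and None.
    return regularizers.get(config)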
def resnet(repetition=2, k=1):
    '''Wide Residual Network (with a slight modification)

    depth == repetition*6 + 2
    '''
    from keras.models import Model
    from keras.layers import Input, Dense, Flatten, AveragePooling2D
    from keras.regularizers import l2

    input_shape = (1, _img_len, _img_len)
    output_dim = len(_columns)

    x = Input(shape=input_shape)
    z = conv2d(nb_filter=8, k_size=5, downsample=True)(x)          # out_shape ==    8, _img_len/ 2, _img_len/ 2
    z = bn_lrelu(0.01)(z)
    z = residual_block(nb_filter=k*16, repetition=repetition)(z)   # out_shape == k*16, _img_len/ 4, _img_len/ 4
    z = residual_block(nb_filter=k*32, repetition=repetition)(z)   # out_shape == k*32, _img_len/ 8, _img_len/ 8
    z = residual_block(nb_filter=k*64, repetition=repetition)(z)   # out_shape == k*64, _img_len/16, _img_len/16
    z = AveragePooling2D((_img_len/16, _img_len/16))(z)
    z = Flatten()(z)
    z = Dense(output_dim=output_dim, activation='sigmoid',
              W_regularizer=l2(_Wreg_l2), init='zero')(z)
    return Model(input=x, output=z)
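This builder relies on module-level globals (_img_len, _columns, _Wreg_l2) and on the conv2d, bn_lrelu and residual_block helpers (conv2d appears as the last example below). A hedged usage sketch, assuming those are all defined (Keras 1 API):

# Hypothetical usage: a wider network, depth == 2*6 + 2 == 14.
model = resnet(repetition=2, k=2)
model.compile(optimizer='adam', loss='binary_crossentropy')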
def __init__(self, h, output_dim, init='glorot_uniform', **kwargs):
    # Assumes Keras 1 imports: from keras import initializations
    # and from keras.engine import InputSpec.
    self.init = initializations.get(init)
    self.h = h
    self.output_dim = output_dim
    # removing the regularizers and the dropout
    super(AttenLayer, self).__init__(**kwargs)
    # this seems necessary in order to accept 3 input dimensions
    # (samples, timesteps, features)
    self.input_spec = [InputSpec(ndim=3)]
def add_activity_regularizer(layer):
    # Keras 1 only: bind the regularizer to the layer and collect it in
    # layer.regularizers; Keras 2 applies activity regularizers itself.
    if layer.activity_regularizer and not keras_2:
        layer.activity_regularizer.set_layer(layer)
        if not hasattr(layer, 'regularizers'):
            layer.regularizers = []
        layer.regularizers.append(layer.activity_regularizer)
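The keras_2 flag used by this and the following compatibility helpers is not shown. A plausible definition, assuming it marks the installed major version:

import keras

# Hypothetical module-level flag: True when Keras 2.x (or later) is installed.
keras_2 = int(keras.__version__.split('.')[0]) >= 2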
def l1l2(l1_weight=0, l2_weight=0):
    if keras_2:
        from keras.regularizers import L1L2
        return L1L2(l1_weight, l2_weight)
    else:
        from keras.regularizers import l1l2
        return l1l2(l1_weight, l2_weight)
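A usage sketch of this version-agnostic wrapper (the weights below are illustrative):

# Returns a keras.regularizers.L1L2 instance under Keras 2 and the
# equivalent l1l2 regularizer under Keras 1.
reg = l1l2(l1_weight=1e-5, l2_weight=1e-4)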
def add_weight(layer, shape, name, initializer='random_uniform',
               regularizer=None, constraint=None):
    initializer = get_initializer(initializer)
    if keras_2:
        return layer.add_weight(initializer=initializer,
                                shape=shape,
                                name=name,
                                regularizer=regularizer,
                                constraint=constraint)
    else:
        # create weight
        w = initializer(shape, name=name)
        # add to trainable_weights
        if not hasattr(layer, 'trainable_weights'):
            layer.trainable_weights = []
        layer.trainable_weights.append(w)
        # add to regularizers
        if regularizer:
            if not hasattr(layer, 'regularizers'):
                layer.regularizers = []
            regularizer.set_param(w)
            layer.regularizers.append(regularizer)
        return w
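A hedged sketch of a typical call site inside a custom layer's build() method; get_initializer is not shown above, and the names below are illustrative:

# Hypothetical custom-layer build() using the helpers above:
def build(self, input_shape):
    self.W = add_weight(self, shape=(input_shape[-1], self.output_dim),
                        name='W',
                        initializer='random_uniform',
                        regularizer=l1l2(l2_weight=1e-4))
    self.built = True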
def buildConvolution(self, name):
    # Keras 1 API; assumes: from keras.layers import Convolution1D
    # and from keras.constraints import maxnorm.
    filters = self.params.get('filters')
    nb_filter = self.params.get('nb_filter')
    assert filters
    assert nb_filter
    convs = []
    for fsz in filters:
        layer_name = '%s-conv-%d' % (name, fsz)
        conv = Convolution1D(
            nb_filter=nb_filter,
            filter_length=fsz,
            border_mode='valid',
            # activation='relu',
            subsample_length=1,
            init='glorot_uniform',
            # init=init,
            # init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
            W_constraint=maxnorm(self.params.get('w_maxnorm')),
            b_constraint=maxnorm(self.params.get('b_maxnorm')),
            # W_regularizer=regularizers.l2(self.params.get('w_l2')),
            # b_regularizer=regularizers.l2(self.params.get('b_l2')),
            # input_shape=(self.q_length, self.wdim),
            name=layer_name
        )
        convs.append(conv)
    self.layers['%s-convolution' % name] = convs
import keras
from keras.layers import Dense, Dropout
from keras.constraints import maxnorm

def feed_forward_net(input, output, hidden_layers=[64, 64], activations='relu',
                     dropout_rate=0., l2=0., constrain_norm=False):
    '''Helper function for building a Keras feed-forward network.

    input: Keras Input object appropriate for the data,
           e.g. input=Input(shape=(20,)).
    output: function representing the final layer of the network, mapping from
            the last hidden layer to the output,
            e.g. output = Dense(10, activation='softmax') for 10-class
            classification, or output = Dense(1, activation='linear') for
            regression.
    '''
    state = input
    if isinstance(activations, str):
        activations = [activations] * len(hidden_layers)

    for h, a in zip(hidden_layers, activations):
        if l2 > 0.:
            w_reg = keras.regularizers.l2(l2)
        else:
            w_reg = None
        const = maxnorm(2) if constrain_norm else None
        state = Dense(h, activation=a, kernel_regularizer=w_reg,
                      kernel_constraint=const)(state)
        if dropout_rate > 0.:
            state = Dropout(dropout_rate)(state)
    return output(state)
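A hedged usage sketch (Keras 2 functional API):

from keras.layers import Input
from keras.models import Model

# Hypothetical usage: a two-hidden-layer regression network with L2 weight
# decay and dropout.
x = Input(shape=(20,))
y = feed_forward_net(x, Dense(1, activation='linear'),
                     hidden_layers=[64, 64], dropout_rate=0.1, l2=1e-4)
model = Model(inputs=x, outputs=y)
model.compile(optimizer='adam', loss='mse')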
def conv2d(nb_filter, k_size=3, downsample=False):
    from keras.layers import Convolution2D
    from keras.regularizers import l2

    def f(x):
        subsample = (2, 2) if downsample else (1, 1)
        border_mode = 'valid' if k_size == 1 else 'same'
        return Convolution2D(nb_filter=nb_filter,
                             nb_row=k_size,
                             nb_col=k_size,
                             subsample=subsample,
                             init='glorot_normal',
                             W_regularizer=l2(_Wreg_l2),
                             border_mode=border_mode)(x)
    return f
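conv2d returns a closure that applies an L2-regularized convolution to its input. A hedged usage sketch, assuming the _Wreg_l2 global is set (Keras 1 API):

from keras.layers import Input

_Wreg_l2 = 1e-4   # hypothetical value for the module-level weight-decay global
x = Input(shape=(1, 64, 64))
z = conv2d(nb_filter=8, k_size=5, downsample=True)(x)   # strided 5x5 convolution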