The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.constraints.get().
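Before the examples, here is a minimal sketch (not taken from any of the projects below) of the three kinds of identifier that keras.constraints.get() accepts under the public Keras API: None passes through, a string is resolved to a constraint instance, and an existing instance is returned unchanged.

from keras import constraints

# None passes through, which is why the examples below can default
# their *_constraint arguments to None.
assert constraints.get(None) is None

# A string identifier is resolved to the corresponding constraint object.
non_neg = constraints.get('non_neg')   # a NonNeg instance

# An instance is returned as-is, so callers may pass either form.
instance = constraints.NonNeg()
assert constraints.get(instance) is instance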
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    self.supports_masking = True
    self.init = initializations.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
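The constraint resolved in __init__ is typically attached to a weight in the layer's build() method. The sketch below shows that wiring for the layer above; the weight names and shapes are illustrative assumptions, not part of the original project.

def build(self, input_shape):
    # Keras applies the constraint returned by constraints.get()
    # to the weight after each gradient update.
    self.W = self.add_weight(name='attention_W',
                             shape=(input_shape[-1], input_shape[-1]),
                             initializer=self.init,
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    if self.bias:
        self.b = self.add_weight(name='attention_b',
                                 shape=(input_shape[-1],),
                                 initializer='zeros',
                                 regularizer=self.b_regularizer,
                                 constraint=self.b_constraint)
    super(AttentionWithContext, self).build(input_shape)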
def __init__(self, output_dim, init='glorot_uniform', activation='relu',
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             input_dim=None, **kwargs):
    self.W_initializer = initializers.get(init)
    self.b_initializer = initializers.get('zeros')
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(SparseFullyConnectedLayer, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform',
             U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None, b_end_constraint=None,
             weights=None, **kwargs):
    self.supports_masking = True
    self.uses_learning_phase = True
    self.input_spec = [InputSpec(ndim=3)]
    self.init = initializations.get(init)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    super(ChainCRF, self).__init__(**kwargs)
def __init__(self, alpha_initializer=0.2, beta_initializer=5.0,
             alpha_regularizer=None, alpha_constraint=None,
             beta_regularizer=None, beta_constraint=None,
             shared_axes=None, **kwargs):
    super(ParametricSoftplus, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.beta_initializer = initializers.get(beta_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)
def __init__(self, units,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=constraints.NonNeg(),
             k_initializer='zeros', k_regularizer=None, k_constraint=None,
             tied_k=False, activity_regularizer=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(SoftMinMax, self).__init__(**kwargs)
    self.units = units
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.tied_k = tied_k
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, units,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=constraints.NonNeg(),
             activity_regularizer=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(WeightedMean, self).__init__(**kwargs)
    self.units = units
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, output_dim, init='glorot_uniform', activation=None,
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DenseNonNeg, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Feedback, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DivisiveNormalization, self).__init__(**kwargs)
def __init__(self, nb_kernels, kernel_dim, init='glorot_uniform', weights=None,
             W_regularizer=None, activity_regularizer=None,
             W_constraint=None, input_dim=None, **kwargs):
    self.init = initializers.get(init)
    self.nb_kernels = nb_kernels
    self.kernel_dim = kernel_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(MinibatchDiscrimination, self).__init__(**kwargs)
def __init__(self, input_dim, output_dim, init='uniform', input_length=None,
             W_regularizer=None, activity_regularizer=None, W_constraint=None,
             mask_zero=False, weights=None, **kwargs):
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.input_length = input_length
    self.mask_zero = mask_zero
    self.W_constraint = constraints.get(W_constraint)
    self.constraints = [self.W_constraint]
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    kwargs['input_shape'] = (self.input_dim,)
    super(Embedding2D, self).__init__(**kwargs)
def __init__(self, input_dim, output_dim, init='uniform', input_length=None,
             W_regularizer=None, activity_regularizer=None, W_constraint=None,
             mask_zero=False, weights=None, **kwargs):
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.input_length = input_length
    self.mask_zero = mask_zero
    self.W_constraint = constraints.get(W_constraint)
    self.constraints = [self.W_constraint]
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    kwargs['input_shape'] = (self.input_dim,)
    super(Embedding, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform',
             U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None, b_end_constraint=None,
             weights=None, **kwargs):
    super(ChainCRF, self).__init__(**kwargs)
    self.init = initializers.get(init)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    self.supports_masking = True
    self.uses_learning_phase = True
    self.input_spec = [InputSpec(ndim=3)]
def __init__(self, downsampling_factor=10, init='glorot_uniform',
             activation='linear', weights=None,
             W_regularizer=None, activity_regularizer=None,
             W_constraint=None, input_dim=None, **kwargs):
    self.downsampling_factor = downsampling_factor
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.initial_weights = weights
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    self.input_spec = [InputSpec(ndim=4)]
    super(EltWiseProduct, self).__init__(**kwargs)
def __init__(self, nb_classes, frequency_table=None, mode=0,
             init='glorot_uniform', weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, verbose=False, **kwargs):
    '''
    # Arguments:
    nb_classes: Number of classes.
    frequency_table: list. Frequency of each class. More frequent classes
        will have shorter Huffman codes.
    mode: integer. One of [0, 1].
    verbose: boolean. Set to True to see the progress of building the
        Huffman tree.
    '''
    self.nb_classes = nb_classes
    if frequency_table is None:
        frequency_table = [1] * nb_classes
    self.frequency_table = frequency_table
    self.mode = mode
    self.init = initializations.get(init)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.verbose = verbose
    super(Huffmax, self).__init__(**kwargs)
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             W_dropout=0., u_dropout=0., bias=True, **kwargs):
    self.supports_masking = True
    self.W_init = initializers.get('orthogonal')
    self.u_init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.W_dropout = min(1., max(0., W_dropout))
    self.u_dropout = min(1., max(0., u_dropout))
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    :param kwargs:
    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with
    return_sequences=True. The dimensions are inferred based on the output
    shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
    """
    self.supports_masking = True
    self.init = initializations.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention, self).__init__(**kwargs)
def __init__(self, output_dim, window_size=3, stride=1,
             kernel_initializer='uniform', bias_initializer='zero',
             activation='linear', activity_regularizer=None,
             kernel_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             use_bias=True, input_dim=None, input_length=None, **kwargs):
    self.output_dim = output_dim
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(GCNN, self).__init__(**kwargs)
def __init__(self, units, window_size=2, stride=1,
             return_sequences=False, go_backwards=False,
             stateful=False, unroll=False, activation='tanh',
             kernel_initializer='uniform', bias_initializer='zero',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             dropout=0, use_bias=True, input_dim=None, input_length=None,
             **kwargs):
    self.return_sequences = return_sequences
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    self.units = units
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.dropout = dropout
    self.supports_masking = True
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(QRNN, self).__init__(**kwargs)
def __init__(self, ratio, data_format=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    super(SE, self).__init__(**kwargs)
    self.ratio = ratio
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = True
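The counterpart of constraints.get() is constraints.serialize(), which layers like the one above commonly call in get_config() so that a constraint round-trips through model saving. Below is a hedged sketch of such a get_config() for the SE layer; the config keys are assumed to mirror its __init__ arguments and are not taken from the original project.

def get_config(self):
    # serialize() converts each object back into the identifier form
    # that constraints.get()/regularizers.get() accept on reload.
    config = {
        'ratio': self.ratio,
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer': regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
    }
    base_config = super(SE, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))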
def __init__(self, kernel_size, strides=(1, 1), padding='valid',
             depth_multiplier=1, data_format=None, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             bias_initializer='zeros', depthwise_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, bias_constraint=None, **kwargs):
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
def __init__(self, filters, kernel_size,
             kernel_initializer='glorot_uniform', activation=None, weights=None,
             padding='valid', strides=(1, 1), data_format=None,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             use_bias=True, **kwargs):
    if data_format is None:
        data_format = K.image_data_format()
    if padding not in {'valid', 'same', 'full'}:
        raise ValueError('Invalid border mode for CosineConvolution2D:', padding)
    self.filters = filters
    self.kernel_size = kernel_size
    self.nb_row, self.nb_col = self.kernel_size
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    self.padding = padding
    self.strides = tuple(strides)
    self.data_format = normalize_data_format(data_format)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(CosineConvolution2D, self).__init__(**kwargs)
def __init__(self, kernel_size, strides=(1, 1), padding='valid',
             depth_multiplier=1, data_format=None, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             bias_initializer='zeros', depthwise_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, bias_constraint=None, **kwargs):
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
    self._padding = _preprocess_padding(self.padding)
    self._strides = (1,) + self.strides + (1,)
    self._data_format = "NHWC"
def __init__(self, units, activation='linear', weights=None,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, bias_initializer='uniform',
             bias_regularizer=None, bias_constraint=None,
             activity_regularizer=None, bias=True, input_dim=None,
             factorization=simple_tensor_factorization(), **kwargs):
    self.activation = activations.get(activation)
    self.units = units
    self.input_dim = input_dim
    self.factorization = factorization
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_initializer = get_initializer(kernel_initializer)
    self.bias_initializer = get_initializer(bias_initializer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DenseTensor, self).__init__(**kwargs)
def __init__(self, init='one', power_init=1, weights=None, axis=-1,
             fit=True, **kwargs):
    self.supports_masking = True
    self.init = initializations.get(init)
    self.initial_weights = weights
    self.axis = axis
    self.power_init = power_init
    self.fit = fit
    super(PowerReLU, self).__init__(**kwargs)
def __init__(self, degree=2, init='zero', init1='one', weights=None, **kwargs):
    self.supports_masking = True
    self.init1 = initializations.get(init1)
    self.init = initializations.get(init)
    self.initial_weights = weights
    self.degree = degree
    super(Polynomial, self).__init__(**kwargs)
def __init__(self, filters, sum_axes, filter_axes, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_activation=None,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(FilterDims, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    self.kernel_activation = activations.get(kernel_activation)
    self.filters = filters
    self.sum_axes = list(sum_axes)
    self.sum_axes.sort()
    self.filter_axes = list(filter_axes)
    self.filter_axes.sort()
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, output_dim, init='glorot_uniform', activation='linear',
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             input_dim=None, input_length1=None, input_length2=None, **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    self.initial_weights = weights
    self.input_dim = input_dim
    self.input_length1 = input_length1
    self.input_length2 = input_length2
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length1, self.input_length2,
                                 self.input_dim)
    self.input = K.placeholder(ndim=4)
    super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
def get_initial_state(self, inputs):
    print('inputs shape:', inputs.get_shape())
    # apply the matrix on the first time step to get the initial s0
    s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))
    # following keras.layers.recurrent, initialize a vector of
    # shape (batchsize, output_dim)
    y0 = K.zeros_like(inputs)     # (samples, timesteps, input_dims)
    y0 = K.sum(y0, axis=(1, 2))   # (samples,)
    y0 = K.expand_dims(y0)        # (samples, 1)
    y0 = K.tile(y0, [1, self.output_dim])
    return [y0, s0]
def __init__(self, nb_filter, nb_row, nb_col, transform_bias=-1,
             init='glorot_uniform', activation='relu', weights=None,
             border_mode='same', subsample=(1, 1), dim_ordering='th',
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Conv2DHighway:', border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.transform_bias = transform_bias
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(Conv2DHighway, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', W_regularizer=None, U_regularizer=None,
             b_regularizer=None, dropout_W=0.0, dropout_U=0.0,
             tau=100, dt=20, noise=.1, dale_ratio=None, **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.tau = tau
    self.dt = dt
    self.noise = noise
    self.dale_ratio = dale_ratio
    if dale_ratio:
        # make Dale's law matrix
        dale_vec = np.ones(output_dim)
        dale_vec[int(dale_ratio * output_dim):] = -1
        dale = np.diag(dale_vec)
        self.Dale = K.variable(dale)
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(leak_recurrent, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform', activation='linear',
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             bias=False, input_dim=None, dale_ratio=.8, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]

    # OUR CHANGE
    self.dale_ratio = dale_ratio
    if dale_ratio:
        dale_vec = np.ones((input_dim, 1))
        dale_vec[int(dale_ratio * input_dim):, 0] = 0
        self.Dale = K.variable(dale_vec)

    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializers.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Highway, self).__init__(**kwargs)
def __init__(self, nb_filter, nb_row, nb_col, rate=2,
             init='glorot_uniform', activation='linear', weights=None,
             border_mode='valid', dim_ordering=K.image_dim_ordering(),
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    if K._BACKEND != 'tensorflow':
        raise Exception('ATrousConvolution2D only works '
                        'with the TensorFlow backend.')
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for ATrousConvolution2D:',
                        border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.rate = rate
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(ATrousConvolution2D, self).__init__(**kwargs)
def __init__(self, nb_filter, nb_row, nb_col,
             init='glorot_uniform', activation='linear', weights=None,
             border_mode='valid', subsample=(1, 1),
             dim_ordering=K.image_dim_ordering(),
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for ConvolutionTranspose2D:',
                        border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.dim_ordering = dim_ordering
    self.init = initializations.get(init, dim_ordering=self.dim_ordering)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(ConvolutionTranspose2D, self).__init__(**kwargs)
def __init__(self, kernel_initializer=initializers.Constant(1.0),
             kernel_regularizer=None, kernel_constraint=None,
             bias_initializer='zeros', bias_regularizer=None,
             bias_constraint=None, **kwargs):
    super(Scale, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    :param kwargs:
    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with
    return_sequences=True. The dimensions are inferred based on the output
    shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention(step_dim))
    """
    self.supports_masking = True
    # self.init = initializations.get('glorot_uniform')
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)
def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    :param kwargs:
    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with
    return_sequences=True. The dimensions are inferred based on the output
    shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention(step_dim))
    """
    self.supports_masking = True
    # self.init = initializations.get('glorot_uniform')
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)
def __init__(self, units=None, activation='tanh',
             recurrent_activation='hard_sigmoid', use_bias=True,
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal', bias_initializer='zeros',
             kernel_regularizer=None, recurrent_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, recurrent_constraint=None,
             bias_constraint=None, **kwargs):
    if units is None:
        assert 'output_dim' in kwargs, 'Missing argument: units'
    else:
        kwargs['output_dim'] = units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super(ExtendedRNNCell, self).__init__(**kwargs)
def __init__(self, units, activation='tanh',
             recurrent_activation='hard_sigmoid', use_bias=True,
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal', bias_initializer='zeros',
             unit_forget_bias=True,
             kernel_regularizer=None, recurrent_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, recurrent_constraint=None,
             bias_constraint=None,
             dropout=0., recurrent_dropout=0., implementation=1, **kwargs):
    super(MultiplicativeLSTM, self).__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_spec = [InputSpec(shape=(None, self.units)),
                       InputSpec(shape=(None, self.units))]
    self.state_size = (self.units, self.units)
    self.implementation = implementation
def __init__(self, kernel_initializer='he_normal', kernel_regularizer=None,
             kernel_constraint=None, use_bias=True, bias_initializer='zeros',
             bias_regularizer=None, bias_constraint=None, use_context=True,
             context_initializer='he_normal', context_regularizer=None,
             context_constraint=None, attention_dims=None, **kwargs):
    """
    Args:
        attention_dims: The dimensionality of the inner attention-calculating
            neural network. For input `(32, 10, 300)` with `attention_dims`
            of 100, the output is `(32, 10, 100)`, i.e. the attended words
            are 100-dimensional. This is then collapsed via summation to
            `(32, 10, 1)` to indicate the attention weights for the 10 words.
            If set to None, the `features` dims are used as `attention_dims`.
            (Default value: None)
    """
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(AttentionLayer, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.use_bias = use_bias
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_context = use_context
    self.context_initializer = initializers.get(context_initializer)
    self.context_regularizer = regularizers.get(context_regularizer)
    self.context_constraint = constraints.get(context_constraint)
    self.attention_dims = attention_dims
    self.supports_masking = True
def __init__(self, kernel_size, strides=(1, 1), padding='valid',
             depth_multiplier=1, data_format=None, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             bias_initializer='zeros', depthwise_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, bias_constraint=None, **kwargs):
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
    self._padding = padding.upper()
    if K.image_data_format() == 'channels_last':
        self._strides = (1,) + strides + (1,)
    else:
        self._strides = (1, 1) + strides
    if self.data_format == 'channels_last':
        self._data_format = "NHWC"
    else:
        self._data_format = "NCHW"
def __init__(self,
             a_initializer='ones', k_initializer='ones',
             n_initializer='ones', z_initializer='zeros',
             a_regularizer=None, a_constraint=constraints.NonNeg(),
             k_regularizer=None, k_constraint=constraints.NonNeg(),
             n_regularizer=None, n_constraint=constraints.NonNeg(),
             z_regularizer=None, z_constraint=constraints.NonNeg(),
             shared_axes=None,
             a_shared=True, k_shared=True, n_shared=True, z_shared=True,
             z_one=False, **kwargs):
    super(Hill, self).__init__(**kwargs)
    self.supports_masking = True
    # Each parameter family resolves its own initializer, regularizer,
    # and constraint.
    self.a_initializer = initializers.get(a_initializer)
    self.a_regularizer = regularizers.get(a_regularizer)
    self.a_constraint = constraints.get(a_constraint)
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.n_initializer = initializers.get(n_initializer)
    self.n_regularizer = regularizers.get(n_regularizer)
    self.n_constraint = constraints.get(n_constraint)
    self.z_initializer = initializers.get(z_initializer)
    self.z_regularizer = regularizers.get(z_regularizer)
    self.z_constraint = constraints.get(z_constraint)
    self.a_shared = a_shared
    self.k_shared = k_shared
    self.n_shared = n_shared
    self.z_shared = z_shared
    self.z_one = z_one
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)
def __init__(self, units, output_dim,
             activation='tanh',
             return_probabilities=False,
             name='AttentionDecoder',
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    """
    Implements an AttentionDecoder that takes in a sequence encoded by an
    encoder and outputs the decoded states.
    :param units: dimension of the hidden state and the attention matrices
    :param output_dim: the number of labels in the output space

    references:
        Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
        "Neural machine translation by jointly learning to align and
        translate." arXiv preprint arXiv:1409.0473 (2014).
    """
    self.units = units
    self.output_dim = output_dim
    self.return_probabilities = return_probabilities
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    # no separate recurrent_* arguments: the recurrent weights reuse the
    # kernel regularizer and constraint
    self.recurrent_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super(AttentionDecoder, self).__init__(**kwargs)
    self.name = name
    self.return_sequences = True  # must return sequences
def __init__(self, nb_filter, nb_row, nb_col,
             init='glorot_uniform', activation=None, weights=None,
             border_mode='valid', subsample=(1, 1), dim_ordering='default',
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, epsilon=1e-3, momentum=0.99,
             beta_init='zero', gamma_init='one',
             gamma_regularizer=None, beta_regularizer=None, **kwargs):
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    if border_mode not in {'valid', 'same', 'full'}:
        raise ValueError('Invalid border mode for YOLOConvolution2D:',
                         border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    if dim_ordering not in {'tf', 'th'}:
        raise ValueError('dim_ordering must be in {tf, th}.')
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    # added for BatchNormalization
    self.supports_masking = True
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    self.epsilon = epsilon
    self.momentum = momentum
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.uses_learning_phase = True
    super(YOLOConvolution2D, self).__init__(**kwargs)