我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用keras.initializers.get()。
def __init__(self, output_dim, init='glorot_uniform', activation='relu',
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             input_dim=None, **kwargs):
    """Configure a SparseFullyConnectedLayer.

    String/instance identifiers are resolved through the Keras ``get``
    helpers (initializers, activations, regularizers, constraints) and
    stored on the instance; actual weights are created later in ``build``.
    """
    # Resolve identifiers into concrete callables/objects.
    self.W_initializer = initializers.get(init)
    self.b_initializer = initializers.get('zeros')
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    # Plain configuration.
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(SparseFullyConnectedLayer, self).__init__(**kwargs)
def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
             r_max_value=3., d_max_value=5., t_delta=1., weights=None,
             beta_init='zero', gamma_init='one', gamma_regularizer=None,
             beta_regularizer=None, **kwargs):
    """Configure a BatchRenormalization layer.

    ``r_max_value``/``d_max_value``/``t_delta`` are the renormalization
    clipping parameters; ``mode == 0`` enables the learning-phase flag.
    """
    self.supports_masking = True
    self.beta_init = initializers.get(beta_init)
    self.gamma_init = initializers.get(gamma_init)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    # Scalar hyperparameters.
    self.epsilon = epsilon
    self.mode = mode
    self.axis = axis
    self.momentum = momentum
    self.r_max_value = r_max_value
    self.d_max_value = d_max_value
    self.t_delta = t_delta
    self.initial_weights = weights
    if self.mode == 0:
        self.uses_learning_phase = True
    super(BatchRenormalization, self).__init__(**kwargs)
def __init__(self, alpha_initializer=0.2, beta_initializer=5.0,
             alpha_regularizer=None, alpha_constraint=None,
             beta_regularizer=None, beta_constraint=None,
             shared_axes=None, **kwargs):
    """Configure a ParametricSoftplus activation layer.

    ``shared_axes`` may be None, a single axis, or a list/tuple of axes
    along which alpha/beta parameters are shared.
    """
    super(ParametricSoftplus, self).__init__(**kwargs)
    self.supports_masking = True
    # alpha parameter configuration.
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    # beta parameter configuration.
    self.beta_initializer = initializers.get(beta_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    # Normalize shared_axes to None or a list.
    if shared_axes is None:
        self.shared_axes = None
    elif isinstance(shared_axes, (list, tuple)):
        self.shared_axes = list(shared_axes)
    else:
        self.shared_axes = [shared_axes]
def __init__(self, filters, centers_initializer='zeros',
             centers_regularizer=None, centers_constraint=None,
             stds_initializer='ones', stds_regularizer=None,
             stds_constraint=None, gauss_scale=100, **kwargs):
    """Configure a GaussianReceptiveFields layer with per-filter
    Gaussian centers and standard deviations."""
    self.filters = filters
    self.gauss_scale = gauss_scale
    super(GaussianReceptiveFields, self).__init__(**kwargs)
    # Centers configuration.
    self.centers_initializer = initializers.get(centers_initializer)
    self.centers_regularizer = regularizers.get(centers_regularizer)
    self.centers_constraint = constraints.get(centers_constraint)
    # Standard-deviations configuration.
    self.stds_initializer = initializers.get(stds_initializer)
    self.stds_regularizer = regularizers.get(stds_regularizer)
    self.stds_constraint = constraints.get(stds_constraint)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None,
             activity_regularizer=None, W_quad_constraint=None,
             W_lin_constraint=None, bias=True, input_dim=None, **kwargs):
    """Configure a GQM (generalized quadratic model) layer.

    NOTE(review): ``bias`` is accepted but never stored on the instance —
    confirm whether it is intentionally ignored.
    """
    # Legacy Keras-1 ``initializations`` module is used here.
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM, self).__init__(**kwargs)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None,
             activity_regularizer=None, W_quad_constraint=None,
             W_lin_constraint=None, bias=True, input_dim=None, **kwargs):
    """Configure a convolutional GQM layer (expects 5-D input).

    NOTE(review): ``bias`` is accepted but never stored — confirm intent.
    """
    self.init = initializations.get(init)  # legacy Keras-1 API
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=5)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM_conv, self).__init__(**kwargs)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None,
             activity_regularizer=None, W_quad_constraint=None,
             W_lin_constraint=None, bias=True, input_dim=None, **kwargs):
    """Configure a 4-D GQM layer (expects 5-D input including batch).

    NOTE(review): ``bias`` is accepted but never stored — confirm intent.
    """
    self.init = initializations.get(init)  # legacy Keras-1 API
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=5)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM_4D, self).__init__(**kwargs)
def __init__(self, units, kernel_initializer='glorot_uniform',
             kernel_regularizer=None, kernel_constraint=constraints.NonNeg(),
             k_initializer='zeros', k_regularizer=None, k_constraint=None,
             tied_k=False, activity_regularizer=None, **kwargs):
    """Configure a SoftMinMax layer.

    NOTE(review): the default ``kernel_constraint`` is a single
    ``NonNeg()`` instance evaluated at definition time and shared by all
    layers that rely on the default — confirm that NonNeg is stateless.
    """
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(SoftMinMax, self).__init__(**kwargs)
    self.units = units
    # Kernel configuration.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    # k-parameter configuration.
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.tied_k = tied_k
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, output_dim, init='glorot_uniform', activation=None,
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure a DenseNonNeg layer (dense layer variant)."""
    self.init = initializations.get(init)  # legacy Keras-1 API
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DenseNonNeg, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure a Feedback layer."""
    self.init = initializations.get(init)  # legacy Keras-1 API
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Feedback, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    """Configure a DivisiveNormalization layer."""
    self.init = initializations.get(init)  # legacy Keras-1 API
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DivisiveNormalization, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', U_regularizer=None,
             b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None,
             b_end_constraint=None, weights=None, **kwargs):
    """Configure a ChainCRF layer (linear-chain CRF over 3-D input)."""
    self.supports_masking = True
    self.uses_learning_phase = True
    self.input_spec = [InputSpec(ndim=3)]
    self.init = initializations.get(init)  # legacy Keras-1 API
    # Transition matrix U and boundary biases.
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    super(ChainCRF, self).__init__(**kwargs)
def __init__(self, nb_kernels, kernel_dim, init='glorot_uniform',
             weights=None, W_regularizer=None, activity_regularizer=None,
             W_constraint=None, input_dim=None, **kwargs):
    """Configure a MinibatchDiscrimination layer (GAN auxiliary layer)."""
    self.init = initializers.get(init)
    self.nb_kernels = nb_kernels
    self.kernel_dim = kernel_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(MinibatchDiscrimination, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', U_regularizer=None,
             b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None,
             b_end_constraint=None, weights=None, **kwargs):
    """Configure a ChainCRF layer (Keras-2 ``initializers`` variant)."""
    super(ChainCRF, self).__init__(**kwargs)
    self.init = initializers.get(init)
    # Transition matrix U and boundary biases.
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    self.supports_masking = True
    self.uses_learning_phase = True
    self.input_spec = [InputSpec(ndim=3)]
def __init__(self, W_regularizer=None, u_regularizer=None,
             b_regularizer=None, W_constraint=None, u_constraint=None,
             b_constraint=None, W_dropout=0., u_dropout=0., bias=True,
             **kwargs):
    """Configure an AttentionWithContext layer.

    Dropout rates are clamped into [0, 1]. W uses an orthogonal
    initializer; the context vector u uses glorot_uniform.
    """
    self.supports_masking = True
    self.W_init = initializers.get('orthogonal')
    self.u_init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    # Clamp dropout rates to the valid probability range.
    self.W_dropout = min(1., max(0., W_dropout))
    self.u_dropout = min(1., max(0., u_dropout))
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
def __init__(self, state_sync=False, decode=False, output_length=None,
             return_states=False, readout=False,
             readout_activation='linear', teacher_force=False,
             state_initializer=None, **kwargs):
    """Configure a RecurrentModel wrapper.

    Raises:
        Exception: if ``decode`` is True but ``output_length`` is None.

    Fix: the original validated ``decode and output_length is None`` twice
    (once before storing the attributes and again inside the ``if decode``
    branch); the redundant second check is removed. ``type(x) in
    [list, tuple]`` is replaced by ``isinstance``, which also accepts
    subclasses — a backward-compatible broadening.
    """
    self.state_sync = state_sync
    self.cells = []
    if decode and output_length is None:
        raise Exception('output_length should be specified for decoder')
    self.decode = decode
    self.output_length = output_length
    if decode:
        # A decoder always emits the full output sequence.
        kwargs['return_sequences'] = True
    self.return_states = return_states
    super(RecurrentModel, self).__init__(**kwargs)
    self.readout = readout
    self.readout_activation = activations.get(readout_activation)
    self.teacher_force = teacher_force
    self._optional_input_placeholders = {}
    if state_initializer:
        if isinstance(state_initializer, (list, tuple)):
            # Per-state initializers; None entries fall back to zeros.
            state_initializer = [initializers.get(init) if init
                                 else initializers.get('zeros')
                                 for init in state_initializer]
        else:
            state_initializer = initializers.get(state_initializer)
    self._state_initializer = state_initializer
def __init__(self, output_dim, window_size=3, stride=1,
             kernel_initializer='uniform', bias_initializer='zero',
             activation='linear', activity_regularizer=None,
             kernel_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None, use_bias=True,
             input_dim=None, input_length=None, **kwargs):
    """Configure a GCNN (gated convolutional) layer over 3-D input."""
    self.output_dim = output_dim
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    # Initializers / activation.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    # Regularizers / constraints.
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(GCNN, self).__init__(**kwargs)
def __init__(self, units, window_size=2, stride=1, return_sequences=False,
             go_backwards=False, stateful=False, unroll=False,
             activation='tanh', kernel_initializer='uniform',
             bias_initializer='zero', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, dropout=0,
             use_bias=True, input_dim=None, input_length=None, **kwargs):
    """Configure a QRNN (quasi-recurrent) layer over 3-D input."""
    # Recurrent behaviour flags.
    self.return_sequences = return_sequences
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    # Core configuration.
    self.units = units
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.dropout = dropout
    self.supports_masking = True
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(QRNN, self).__init__(**kwargs)
def __init__(self, axis=-1, gamma_init='one', beta_init='zero',
             gamma_regularizer=None, beta_regularizer=None, epsilon=1e-6,
             **kwargs):
    """Configure a LayerNormalization layer.

    ``axis`` is normalized to a list via ``to_list`` so a single axis and
    a sequence of axes are handled uniformly.
    """
    super(LayerNormalization, self).__init__(**kwargs)
    self.axis = to_list(axis)
    self.gamma_init = initializers.get(gamma_init)
    self.beta_init = initializers.get(beta_init)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.epsilon = epsilon
    self.supports_masking = True
def __init__(self, ratio, data_format=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    """Configure an SE (squeeze-and-excitation) block.

    ``ratio`` is the bottleneck reduction ratio of the excitation MLP.
    """
    super(SE, self).__init__(**kwargs)
    self.ratio = ratio
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = True
def __init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero',
             gamma_init='one', gamma_regularizer=None,
             beta_regularizer=None, **kwargs):
    """Configure a FixedBatchNormalization layer (frozen statistics)."""
    self.supports_masking = True
    self.beta_init = initializers.get(beta_init)
    self.gamma_init = initializers.get(gamma_init)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.epsilon = epsilon
    self.axis = axis
    self.initial_weights = weights
    super(FixedBatchNormalization, self).__init__(**kwargs)
def __init__(self, vocab_words, initializer):
    """Hold a vocabulary and an initializer for unknown-word vectors.

    ``vocab_words`` is deduplicated into a set; the word→vector cache
    starts empty and is filled elsewhere.
    """
    self._vocab_words = set(vocab_words)
    self._word_vector_of = {}
    self._initializer = initializers.get(initializer)
def vectorize_words(self, words):
    """Return an array of word vectors for ``words``.

    Known words come from the ``self._word_vector_of`` cache; unknown
    words receive freshly initialized vectors drawn from
    ``self._initializer`` and evaluated through the Keras backend session.

    Fix: the original used ``len(filter(...))``, which raises TypeError on
    Python 3 because ``filter`` returns an iterator; the missing entries
    are now counted directly.
    """
    vectors = [self._word_vector_of.get(word) for word in words]
    num_unknowns = sum(1 for v in vectors if v is None)
    inits = self._initializer(shape=(num_unknowns, self._embedding_size))
    inits = K.get_session().run(inits)
    inits = iter(inits)
    # Fill each missing slot with the next fresh vector, in order.
    for i in range(len(vectors)):
        if vectors[i] is None:
            vectors[i] = next(inits)
    return np.array(vectors)
def __init__(self, kernel_size, strides=(1, 1), padding='valid',
             depth_multiplier=1, data_format=None, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             bias_initializer='zeros', depthwise_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, bias_constraint=None, **kwargs):
    """Configure a DepthwiseConv2D layer.

    Delegates the shared conv options to the parent with ``filters=None``
    (the output channel count is depth_multiplier * input channels).
    """
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
def __init__(self, filters, kernel_size, kernel_initializer='glorot_uniform',
             activation=None, weights=None, padding='valid', strides=(1, 1),
             data_format=None, kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, use_bias=True,
             **kwargs):
    """Configure a CosineConvolution2D layer.

    Raises:
        ValueError: if ``padding`` is not 'valid', 'same' or 'full'.
    """
    if data_format is None:
        data_format = K.image_data_format()
    if padding not in {'valid', 'same', 'full'}:
        raise ValueError('Invalid border mode for CosineConvolution2D:',
                         padding)
    self.filters = filters
    self.kernel_size = kernel_size
    self.nb_row, self.nb_col = self.kernel_size
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    self.padding = padding
    self.strides = tuple(strides)
    self.data_format = normalize_data_format(data_format)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(CosineConvolution2D, self).__init__(**kwargs)
def __init__(self, kernel_size, strides=(1, 1), padding='valid',
             depth_multiplier=1, data_format=None, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             bias_initializer='zeros', depthwise_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, bias_constraint=None, **kwargs):
    """Configure a DepthwiseConv2D layer and precompute the TF-style
    padding/strides/data-format strings used by the backend op."""
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)
    # Backend-level (TensorFlow) argument forms.
    self._padding = _preprocess_padding(self.padding)
    self._strides = (1,) + self.strides + (1,)
    self._data_format = "NHWC"
def get_initializer(initializer):
    """Resolve an initializer identifier across Keras versions.

    Keras 2 exposes ``keras.initializers``; Keras 1 used
    ``keras.initializations``. The module-level ``keras_2`` flag selects
    which API to call.
    """
    if keras_2:
        from keras import initializers
        return initializers.get(initializer)
    from keras import initializations
    return initializations.get(initializer)
def __init__(self, weights=None, axis=-1, momentum=0.9, beta_init='zero',
             gamma_init='one', **kwargs):
    """Configure a Scale layer (learned per-axis gamma/beta)."""
    self.momentum = momentum
    self.axis = axis
    self.beta_init = initializers.get(beta_init)
    self.gamma_init = initializers.get(gamma_init)
    self.initial_weights = weights
    super(Scale, self).__init__(**kwargs)
def __init__(self, filters_simple, filters_complex, nb_row, nb_col,
             init='glorot_uniform', activation='relu', weights=None,
             padding='valid', strides=(1, 1),
             data_format=K.image_data_format(), kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             W_constraint=None, bias_constraint=None, bias=True, **kwargs):
    """Configure a Convolution2DEnergy layer (simple + complex filters).

    NOTE(review): ``initializers.get(init, data_format=data_format)`` is
    not the standard Keras signature — this presumably relies on a
    project-local ``initializers`` module; confirm before porting.
    NOTE(review): ``W_constraint`` is accepted but overridden by a fixed
    ``UnitNormOrthogonal`` constraint — confirm intent.
    """
    if padding not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution2DEnergy:',
                        padding)
    self.filters_simple = filters_simple
    self.filters_complex = filters_complex
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializers.get(init, data_format=data_format)
    self.activation = activations.get(activation)
    assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
    self.padding = padding
    self.strides = tuple(strides)
    assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {tf, th}'
    self.data_format = data_format
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.UnitNormOrthogonal(filters_complex,
                                                       data_format)
    self.bias_constraint = constraints.get(bias_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(Convolution2DEnergy, self).__init__(**kwargs)
def __init__(self, rank, kernel_size=3, data_format=None,
             kernel_initialization=.1, bias_initialization=1,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    """Configure a rank-generic GDN convolution base layer.

    Strides and dilation are fixed to 1 and padding to 'same'; the
    kernel/bias are constant-initialized from the given scalars.
    """
    super(_ConvGDN, self).__init__(**kwargs)
    self.rank = rank
    # Normalize spatial arguments for the given rank.
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(1, rank, 'strides')
    self.padding = conv_utils.normalize_padding('same')
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(1, rank,
                                                    'dilation_rate')
    self.kernel_initializer = initializers.Constant(kernel_initialization)
    self.bias_initializer = initializers.Constant(bias_initialization)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, filters, kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=kconstraints.NonNeg(), k_initializer='zeros',
             k_regularizer=None, k_constraint=None, tied_k=False,
             activity_regularizer=None, strides=1, padding='valid',
             dilation_rate=1, data_format=K.image_data_format(), **kwargs):
    """Configure a Conv2DSoftMinMax layer.

    NOTE(review): the default ``kernel_constraint`` (a ``NonNeg()``
    instance) and ``data_format`` are evaluated once at definition time.
    """
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(Conv2DSoftMinMax, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.tied_k = tied_k
    self.activity_regularizer = regularizers.get(activity_regularizer)
    # Normalize spatial arguments to 2-tuples.
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                    'dilation_rate')
    self.padding = conv_utils.normalize_padding(padding)
    self.input_spec = InputSpec(min_ndim=2)
    self.data_format = data_format
    self.supports_masking = True
def __init__(self, init='one', power_init=1, weights=None, axis=-1,
             fit=True, **kwargs):
    """Configure a PowerReLU activation layer."""
    self.supports_masking = True
    self.init = initializations.get(init)  # legacy Keras-1 API
    self.initial_weights = weights
    self.axis = axis
    self.power_init = power_init
    self.fit = fit
    super(PowerReLU, self).__init__(**kwargs)
def __init__(self, quadratic_filters_ex=2, quadratic_filters_sup=2,
             W_quad_ex_initializer='glorot_uniform',
             W_quad_sup_initializer='glorot_uniform',
             W_lin_initializer='glorot_uniform',
             W_quad_ex_regularizer=None, W_quad_sup_regularizer=None,
             W_lin_regularizer=None, W_quad_ex_constraint=None,
             W_quad_sup_constraint=None, W_lin_constraint=None, **kwargs):
    """Configure a RustSTC layer (excitatory/suppressive quadratic +
    linear filter banks)."""
    self.quadratic_filters_ex = quadratic_filters_ex
    self.quadratic_filters_sup = quadratic_filters_sup
    # Initializers.
    self.W_quad_ex_initializer = initializers.get(W_quad_ex_initializer)
    self.W_quad_sup_initializer = initializers.get(W_quad_sup_initializer)
    self.W_lin_initializer = initializers.get(W_lin_initializer)
    # Constraints.
    self.W_quad_ex_constraint = constraints.get(W_quad_ex_constraint)
    self.W_quad_sup_constraint = constraints.get(W_quad_sup_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    # Regularizers.
    self.W_quad_ex_regularizer = regularizers.get(W_quad_ex_regularizer)
    self.W_quad_sup_regularizer = regularizers.get(W_quad_sup_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.input_spec = [InputSpec(ndim=2)]
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(RustSTC, self).__init__(**kwargs)
def __init__(self, weights=None, kernel_initializer='glorot_uniform',
             alpha_initializer='ones', alpha_regularizer=None,
             alpha_constraint=None, beta_delta_initializer='ones',
             beta_delta_regularizer=None, beta_delta_constraint=None,
             gamma_eta_initializer='ones', gamma_eta_regularizer=None,
             gamma_eta_constraint=None, rho_initializer='ones',
             rho_regularizer=None, rho_constraint=None, **kwargs):
    """Configure a NakaRushton nonlinearity layer.

    NOTE(review): ``weights`` and ``kernel_initializer`` are accepted but
    never stored on the instance — confirm whether they are vestigial.
    """
    # Initializers.
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.beta_delta_initializer = initializers.get(beta_delta_initializer)
    self.gamma_eta_initializer = initializers.get(gamma_eta_initializer)
    self.rho_initializer = initializers.get(rho_initializer)
    # Constraints.
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.beta_delta_constraint = constraints.get(beta_delta_constraint)
    self.gamma_eta_constraint = constraints.get(gamma_eta_constraint)
    self.rho_constraint = constraints.get(rho_constraint)
    # Regularizers.
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.beta_delta_regularizer = regularizers.get(beta_delta_regularizer)
    self.gamma_eta_regularizer = regularizers.get(gamma_eta_regularizer)
    self.rho_regularizer = regularizers.get(rho_regularizer)
    self.input_spec = [InputSpec(ndim=2)]
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(NakaRushton, self).__init__(**kwargs)
def __init__(self, filters, sum_axes, filter_axes, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_activation=None,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    """Configure a FilterDims layer.

    ``sum_axes`` and ``filter_axes`` are copied into sorted lists so the
    caller's sequences are not mutated.
    """
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(FilterDims, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    self.kernel_activation = activations.get(kernel_activation)
    self.filters = filters
    self.sum_axes = sorted(sum_axes)
    self.filter_axes = sorted(filter_axes)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, filters_simple, filters_complex, sum_axes, filter_axes,
             activation='relu', use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_activation=None, kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    """Configure a FilterDimsV1 layer.

    NOTE(review): ``kernel_constraint`` is accepted but overridden by a
    fixed ``UnitNormOrthogonal`` constraint — confirm intent.
    """
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(FilterDimsV1, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    self.kernel_activation = activations.get(kernel_activation)
    self.filters_simple = filters_simple
    self.filters_complex = filters_complex
    self.sum_axes = sorted(sum_axes)
    self.filter_axes = sorted(filter_axes)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = kconstraints.UnitNormOrthogonal(
        self.filters_complex)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, output_dim, num_components, init='glorot_uniform',
             weights=None, bias=True, input_dim=None, activation=None,
             **kwargs):
    """Configure this Dense variant with mixture components.

    Fix: the original body read ``activations.get(activation)`` although
    ``activation`` was not a parameter, so every construction raised
    NameError. ``activation`` is now an explicit keyword argument,
    appended after the existing parameters so positional callers are
    unaffected; the default ``None`` resolves to Keras's linear
    activation.
    """
    self.init = initializations.get(init)  # legacy Keras-1 API
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.num_components = num_components
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
def __init__(self, return_attention=False, **kwargs):
    """Configure an AttentionWeightedAverage layer.

    When ``return_attention`` is true the layer also exposes the
    attention weights (handled elsewhere in the layer).
    """
    self.init = initializers.get('uniform')
    self.supports_masking = True
    self.return_attention = return_attention
    super(AttentionWeightedAverage, self).__init__(**kwargs)
def __init__(self, filters, num_neighbors, neighbors_ix_mat,
             activation=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    """Configure a GraphConv layer.

    Raises:
        Exception: if the active Keras backend is not Theano.
    """
    if K.backend() != 'theano':
        raise Exception("GraphConv Requires Theano Backend.")
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(GraphConv, self).__init__(**kwargs)
    self.filters = filters
    self.num_neighbors = num_neighbors
    self.neighbors_ix_mat = neighbors_ix_mat
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=3)
def get_initial_state(self, inputs):
    """Build the initial RNN state [y0, s0] from the input tensor.

    Fix: removed a leftover debug `print` of the input shape — library
    code should not write to stdout on every call.

    # Arguments
        inputs: 3-D tensor, assumed (samples, timesteps, input_dims) —
            TODO confirm with the caller.

    # Returns
        [y0, s0] where y0 is a zero tensor of shape
        (samples, output_dim) and s0 is the tanh-projected first
        timestep.
    """
    # Initial hidden state: project the first timestep through W_s.
    s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))

    # Initial output: zeros of shape (samples, output_dim), derived from
    # `inputs` so the symbolic batch size is preserved.
    y0 = K.zeros_like(inputs)               # (samples, timesteps, input_dims)
    y0 = K.sum(y0, axis=(1, 2))             # (samples,)
    y0 = K.expand_dims(y0)                  # (samples, 1)
    y0 = K.tile(y0, [1, self.output_dim])   # (samples, output_dim)
    return [y0, s0]
def __init__(self, weights=None, axis=-1, momentum=0.9,
             beta_init='zero', gamma_init='one', **kwargs):
    """Per-axis scale/shift layer (Keras-1 style).

    # Arguments
        weights: optional initial weights applied later in `build`.
        axis: axis along which scaling is applied.
        momentum: moving-statistics momentum.
        beta_init: initializer name for the shift parameter.
        gamma_init: initializer name for the scale parameter.
    """
    # Resolve initializer identifiers (Keras-1 `initializations` API).
    self.gamma_init = initializations.get(gamma_init)
    self.beta_init = initializations.get(beta_init)
    self.axis = axis
    self.momentum = momentum
    self.initial_weights = weights
    super(Scale, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None,
             b_constraint=None, bias=True, input_dim=None, **kwargs):
    """Highway layer (Keras-1 style configuration).

    Resolves initializer / activation / regularizer / constraint
    identifiers into objects and records the remaining options.
    """
    self.input_dim = input_dim
    self.bias = bias
    self.initial_weights = weights

    self.init = initializers.get(init)
    self.activation = activations.get(activation)

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    # Highway layers operate on 2-D input: (batch, features).
    self.input_spec = InputSpec(ndim=2)
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Highway, self).__init__(**kwargs)
def __init__(self, output_dim, support=1, init='glorot_uniform',
             activation='linear', weights=None, W_regularizer=None,
             b_regularizer=None, bias=False, **kwargs):
    """Graph-convolution layer configuration.

    # Arguments
        output_dim: number of output features per node.
        support: filter support, i.e. number of weight matrices
            (must be >= 1).
        init: initializer identifier.
        activation: activation identifier.
        weights: optional initial weights applied in `build`.
        bias: whether to learn a bias term.
    """
    if not support >= 1:
        raise AssertionError
    self.init = initializers.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim     # features per node
    self.support = support           # number of weight matrices
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.bias = bias
    self.initial_weights = weights
    # Defined during build().
    self.input_dim = None
    self.W = None
    self.b = None
    super(GraphConvolution, self).__init__(**kwargs)
def __init__(self, weights=None, axis=-1, momentum=0.9,
             beta_init='zero', gamma_init='one', **kwargs):
    """Scale layer: learnable per-axis gamma (scale) and beta (shift).

    Resolves the two initializer names via the Keras-1
    `initializations` registry and stashes the remaining options for
    `build`.
    """
    super(Scale, self).__init__(**kwargs)
    self.momentum = momentum
    self.axis = axis
    self.initial_weights = weights
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
def __init__(self, kernel_initializer=initializers.Constant(1.0),
             kernel_regularizer=None, kernel_constraint=None,
             bias_initializer='zeros', bias_regularizer=None,
             bias_constraint=None, **kwargs):
    """Scale layer with separate kernel (scale) and bias (shift) options.

    The kernel defaults to a constant 1.0 initializer so the layer
    starts as the identity transform.
    """
    super(Scale, self).__init__(**kwargs)
    # Kernel (multiplicative) parameters.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    # Bias (additive) parameters.
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
def __init__(self, step_dim, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True, **kwargs):
    """Attention mechanism for temporal data, after Raffel et al.
    (https://arxiv.org/abs/1512.08756). Supports masking.

    # Input shape
        3D tensor: `(samples, steps, features)`.
    # Output shape
        2D tensor: `(samples, features)`.

    Place on top of an RNN layer (GRU/LSTM/SimpleRNN) that has
    `return_sequences=True`; dimensions are inferred from that layer's
    output shape, e.g.::

        model.add(LSTM(64, return_sequences=True))
        model.add(Attention(step_dim))
    """
    self.supports_masking = True
    self.bias = bias
    self.step_dim = step_dim
    # Actual feature dimension is filled in during build().
    self.features_dim = 0

    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    super(Attention, self).__init__(**kwargs)
def __init__(self, step_dim, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True, **kwargs):
    """Temporal attention layer (Raffel et al.,
    https://arxiv.org/abs/1512.08756), with masking support.

    Input is a 3D tensor `(samples, steps, features)`; output is a 2D
    tensor `(samples, features)`. Stack it on an RNN layer returning
    sequences::

        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
    """
    self.supports_masking = True
    self.step_dim = step_dim
    # Resolved during build() from the RNN's output shape.
    self.features_dim = 0
    self.bias = bias

    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.b_constraint = constraints.get(b_constraint)

    super(Attention, self).__init__(**kwargs)