Python keras.initializers module: Constant() code examples

The following 10 code examples, extracted from open-source Python projects, show how keras.initializers.Constant() is used in practice.
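
Constant(value) fills a weight tensor with a single fixed value, or with a NumPy array of matching shape, which makes it useful for warm-starting particular weights. A minimal sketch, assuming a standard Keras 2 install (the layer sizes and the bias value 0.1 are arbitrary choices for illustration):

from keras.models import Sequential
from keras.layers import Dense
from keras.initializers import Constant

model = Sequential()
# Every bias in this layer starts at 0.1 instead of the default zeros.
model.add(Dense(32, input_dim=16, bias_initializer=Constant(value=0.1)))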

Project: DeepTrade_keras    Author: happynoom
def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss='risk_estimation'):
        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(lr, n_layers, n_hidden, rate_dropout))
        self.model = Sequential()
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
        #               moving_variance_initializer=Constant(value=0.25)))
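        # BatchRenormalization is not part of core Keras (it ships with
        # keras-contrib); 'relu_limited' and the 'risk_estimation' loss are
        # custom objects registered elsewhere in this project.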
        self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
        self.model.add(Activation('relu_limited'))
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['accuracy'])
Project: head-segmentation    Author: szywind
def fcn_32s(input_dim, nb_classes=2):
    inputs = Input(shape=(input_dim, input_dim, 3))
    vgg16 = VGG16(weights=None, include_top=False, input_tensor=inputs)
    pretrain_model_path = "../weights/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
    if not os.path.exists(pretrain_model_path):
        raise RuntimeError("No pretrained model loaded.")
    vgg16.load_weights(pretrain_model_path)
    x = Conv2D(filters=nb_classes,
               kernel_size=(1, 1))(vgg16.output)
    x = Conv2DTranspose(filters=nb_classes,
                        kernel_size=(64, 64),
                        strides=(32, 32),
                        padding='same',
                        activation='sigmoid',
                        kernel_initializer=initializers.Constant(bilinear_upsample_weights(32, nb_classes)))(x)
    model = Model(inputs=inputs, outputs=x)
    for layer in model.layers[:15]:
        layer.trainable = False
    return model
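
This example (and the Keras-GAN-Animeface-Character one below) passes bilinear_upsample_weights(factor, nb_classes) to Constant() so that the transposed convolution starts out as exact bilinear interpolation. The helper itself is not shown on this page; the sketch below is the standard bilinear-kernel construction, with the name and signature assumed from the calls in these examples. Note that filter_size reproduces the kernel sizes used here: (64, 64) for factor 32 and (4, 4) for factor 2.

import numpy as np

def bilinear_upsample_weights(factor, number_of_classes):
    # Kernel of shape (size, size, classes, classes) that performs bilinear
    # upsampling by `factor`, mapping each channel onto itself.
    filter_size = 2 * factor - factor % 2
    center = factor - 1 if filter_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:filter_size, :filter_size]
    kernel = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    weights = np.zeros((filter_size, filter_size,
                        number_of_classes, number_of_classes), dtype=np.float32)
    for i in range(number_of_classes):
        weights[:, :, i, i] = kernel
    return weights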
Project: recurrentshop    Author: farizrahman4u
def RHN(input_dim, hidden_dim, depth):
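    # batch_size, weight_init, weight_decay, gradient_clip and transform_bias are
    # hyperparameters defined earlier in the original script; RHNCell and
    # RecurrentModel come from recurrentshop.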
    # Wrapped model
    inp = Input(batch_shape=(batch_size, input_dim))
    state = Input(batch_shape=(batch_size, hidden_dim))
    drop_mask = Input(batch_shape=(batch_size, hidden_dim))
    # To avoid all zero mask causing gradient to vanish
    inverted_drop_mask = Lambda(lambda x: 1.0 - x, output_shape=lambda s: s)(drop_mask)
    drop_mask_2 = Lambda(lambda x: x + 0., output_shape=lambda s: s)(inverted_drop_mask)
    dropped_state = multiply([state, inverted_drop_mask])
    y, new_state = RHNCell(units=hidden_dim, recurrence_depth=depth,
                           kernel_initializer=weight_init,
                           kernel_regularizer=l2(weight_decay),
                           kernel_constraint=max_norm(gradient_clip),
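                           # A constant (typically negative) transform-gate bias
                           # keeps the highway gates mostly closed early in training.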
                           bias_initializer=Constant(transform_bias),
                           recurrent_initializer=weight_init,
                           recurrent_regularizer=l2(weight_decay),
                           recurrent_constraint=max_norm(gradient_clip))([inp, dropped_state])
    return RecurrentModel(input=inp, output=y,
                          initial_states=[state, drop_mask],
                          final_states=[new_state, drop_mask_2])


Project: recurrentshop    Author: farizrahman4u
def QRNcell():
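    # batch_size and embedding_dim are module-level constants defined earlier in the original script.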
    xq = Input(batch_shape=(batch_size, embedding_dim * 2))
    # Split into context and query
    xt = Lambda(lambda x, dim: x[:, :dim], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] // 2))(xq)
    qt = Lambda(lambda x, dim: x[:, dim:], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] // 2))(xq)

    h_tm1 = Input(batch_shape=(batch_size, embedding_dim))

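    # bias_initializer=Constant(2.5) starts the update gate z near
    # sigmoid(2.5) ~ 0.92, so the new candidate state dominates early in training.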
    zt = Dense(1, activation='sigmoid', bias_initializer=Constant(2.5))(multiply([xt, qt]))
    zt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(zt)
    ch = Dense(embedding_dim, activation='tanh')(concatenate([xt, qt], axis=-1))
    rt = Dense(1, activation='sigmoid')(multiply([xt, qt]))
    rt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1), arguments={'dim': embedding_dim})(rt)
    ht = add([multiply([zt, ch, rt]), multiply([Lambda(lambda x: 1 - x, output_shape=lambda s: s)(zt), h_tm1])])
    return RecurrentModel(input=xq, output=ht, initial_states=[h_tm1], final_states=[ht], return_sequences=True)


Project: kfs    Author: the-moliver
def __init__(self, rank,
                 kernel_size=3,
                 data_format=None,
                 kernel_initialization=.1,
                 bias_initialization=1,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(_ConvGDN, self).__init__(**kwargs)
        self.rank = rank
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(1, rank, 'strides')
        self.padding = conv_utils.normalize_padding('same')
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(1, rank, 'dilation_rate')
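        # The GDN kernel and bias parameterize a divisive-normalization denominator,
        # so they start at small positive constants (0.1 and 1) rather than zeros.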
        self.kernel_initializer = initializers.Constant(kernel_initialization)
        self.bias_initializer = initializers.Constant(bias_initialization)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)
Project: Keras-GAN-Animeface-Character    Author: forcecore
def bilinear2x(x, nfilters):
    '''
    Ugh, I don't like making layers.
    My credit goes to: https://kivantium.net/keras-bilinear
    '''
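    # Same bilinear_upsample_weights helper as in the fcn_32s example above;
    # a (4, 4) kernel with stride (2, 2) corresponds to factor=2.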
    return Conv2DTranspose(nfilters, (4, 4),
        strides=(2, 2),
        padding='same',
        kernel_initializer=Constant(bilinear_upsample_weights(2, nfilters)))(x)
Project: academic    Author: xinchrome
def __init__(self, sequences_value, pred_length, delta=1., sequence_weights=None, proxy_layer=None, sample_stddev=None, **kwargs):
        """
        Can only be used as the first layer of a model.

        sequences_value is indexed as [sequence, event, type, feature].

        The sequences contain only training events.
        """
        self.sequences_value = np.array(sequences_value, dtype='float32')
        self.sequences_initializer = Constant(self.sequences_value)
        shape = self.sequences_value.shape
        self.nb_sequence = shape[0]
        self.nb_event = shape[1]
        self.nb_type = shape[2]
        self.nb_feature = shape[3]
        self.pred_length = pred_length
        self.delta = delta
        self.proxy_layer = proxy_layer
        self.sample_stddev = sample_stddev

        if self.proxy_layer:
            super(HawkesLayer, self).__init__(**kwargs)
            return

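        # Warm-start the Hawkes process parameters from the given weights,
        # or fall back to fixed per-sequence, per-type constants below.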
        if sequence_weights:
            assert len(sequence_weights) == self.nb_sequence
            assert len(sequence_weights[0]['spont']) == self.nb_type

            self.spont_initializer = Constant(np.array([x['spont'] for x in sequence_weights]))
            self.Theta_initializer = Constant(np.array([x['theta'] for x in sequence_weights]))
            self.W_initializer = Constant(np.array([x['w'] for x in sequence_weights]))
            self.Alpha_initializer = Constant(np.array([x['alpha'] for x in sequence_weights]))
        else:
            self.spont_initializer = Constant(np.full((self.nb_sequence, self.nb_type), 1.3))
            self.Theta_initializer = Constant(np.full((self.nb_sequence, self.nb_type), 0.05))
            self.W_initializer = Constant(np.full((self.nb_sequence, self.nb_type), 1.))
            self.Alpha_initializer = Constant(np.full((self.nb_sequence, self.nb_type, self.nb_type), 1.))

        super(HawkesLayer, self).__init__(**kwargs)
Project: zhihu-machine-learning-challenge-2017    Author: HouJP
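# A learned elementwise scale: Constant(1.0) for the kernel (with zero bias)
# makes the layer the identity at initialization.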
def __init__(self, 
                 kernel_initializer=initializers.Constant(1.0),
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 **kwargs):
        super(Scale, self).__init__(**kwargs)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
Project: SSD-Keras_Tensorflow    Author: jedol
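# SSD-style L2 normalization with a learnable per-channel scale; following the
# SSD paper it is initialized to 20, hence Constant(scale) with scale=20.0.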
def __init__(self, scale=20.0, scale_regularizer=None, **kwargs):
        self.scale_initializer = Constant(scale)
        self.scale_regularizer = scale_regularizer
        super(Normalize2D, self).__init__(**kwargs)
Project: LIE    Author: EmbraceLife
def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss=risk_estimation): # risk_estimation, risk_estimation_bhs

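        # risk_estimation / risk_estimation_bhs are custom loss functions, and
        # relu_limited / buy_hold_sell custom activations, all defined elsewhere
        # in this project.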
        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(lr, n_layers, n_hidden, rate_dropout))

        # build a model with Sequential()
        self.model = Sequential()

        # todo: ask why dropout on input layer?
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))

        # build a number of LSTM layers
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        # add another LSTM layer
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))

        #######################
        # original deep trader
        #######################
        # # add a dense layer, with BatchRenormalization, relu_limited
        # self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))

        # self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
        # self.model.add(Activation(relu_limited))

        #######################
        # revised version 1
        #######################
        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        self.model.add(Activation('sigmoid'))

        #######################
        # revised 2 for classification style solution
        #######################
        # self.model.add(Dense(5, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(Activation('softmax'))

        #######################
        # revised 1.5 for buy_hold_sell activation function
        #######################
        # self.model.add(Activation(buy_hold_sell))

        # compile model
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['accuracy'])