Python keras.models 模块,Input() 实例源码

我们从Python开源项目中,提取了以下44个代码示例,用于说明如何使用 keras.models.Input()。

项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled model: masked sequence -> RNNCell-wrapped RNNEncoder."""
    sequence_input = Input(shape=(self.max_length, self.feature_size))
    masked = Masking(0.0)(sequence_input)
    # Wrap the recurrent layer and a projection Dense in a single cell.
    cell = RNNCell(
        rnn_layer(self.hidden_size),
        Dense(self.encoding_size),
        dense_dropout=0.1,
    )
    encoded = RNNEncoder(cell)(masked)
    model = Model(sequence_input, encoded)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled model applying an RNNCell over every timestep of a masked sequence."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    # return_sequences=True keeps per-timestep outputs for the dense projection.
    cell = RNNCell(
        recurrent_layer=rnn_layer(self.hidden_size, return_sequences=True),
        dense_layer=Dense(units=self.encoding_size),
        dense_dropout=0.1,
    )
    outputs = cell(Masking(0.0)(seq_in))
    model = Model(seq_in, outputs)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled encoder-decoder model over a masked sequence."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    masked = Masking(0.0)(seq_in)
    # Encode to `encoding_size`, then decode back to the feature dimension.
    latent = RNNEncoder(rnn_layer(self.encoding_size))(masked)
    reconstruction = RNNDecoder(rnn_layer(self.feature_size))(latent)
    model = Model(seq_in, reconstruction)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled model: masked sequence -> bidirectional RNN encoder."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    cell = RNNCell(
        rnn_layer(self.hidden_units),
        Dense(self.cell_units),
        dense_dropout=0.1,
    )
    outputs = BidirectionalRNNEncoder(cell)(Masking(0.0)(seq_in))
    model = Model(seq_in, outputs)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled conv/pool pipeline with masking, encoded to a sequence."""
    image_in = Input(shape=(self.x, self.y, self.channel_size))
    masked_image = MaskConv(self.mask_value)(image_in)
    # A parallel mask-to-sequence branch feeds the final ConvEncoder.
    seq_mask = MaskToSeq(MaskConv(self.mask_value))(image_in)
    convolved = MaskConvNet(
        Conv2D(self.filters, self.kernel, strides=self.strides)
    )(masked_image)
    pooled = MaskPooling(
        MaxPool2D(self.mask_kernel, self.mask_strides, self.padding)
    )(convolved)
    encoded = ConvEncoder()([pooled, seq_mask])
    model = Model(image_in, encoded)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled masked conv -> masked max-pool -> flatten model."""
    image_in = Input(shape=(self.x, self.y, self.channel_size))
    masked_image = MaskConv(self.mask_value)(image_in)
    convolved = MaskConvNet(
        Conv2D(self.filters, self.kernel, strides=self.strides)
    )(masked_image)
    pooled = MaskPooling(
        MaxPool2D(self.mask_kernel, self.mask_strides, self.padding)
    )(convolved)
    flattened = MaskFlatten()(pooled)
    model = Model(image_in, flattened)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled model: LSTMPeephole cell encoding, zero-padded back to max_length."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    # return_sequences=False collapses the sequence to a single encoding vector.
    cell = RNNCell(
        LSTMPeephole(self.hidden_size, return_sequences=False),
        Dense(self.encoding_size),
    )
    padded = PaddingZero(self.max_length)(cell(seq_in))
    model = Model(seq_in, padded)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:keras_detect_tool_wear    作者:kidozh    | 项目源码 | 文件源码
def build_simple_rnn_model(timestep, input_dim, output_dim, dropout=0.4, lr=0.001):
    """Build and compile a single-LSTM regression model.

    Args:
        timestep: number of timesteps per sample.
        input_dim: number of features per timestep.
        output_dim: size of the dense output layer.
        dropout: dropout rate applied after the LSTM.
        lr: learning rate for the Adam optimizer.

    Returns:
        A compiled Keras Model (MAE loss, MSE metric).
    """
    # Renamed from `input` so the builtin is not shadowed; removed dead commented-out code.
    inputs = Input((timestep, input_dim))
    hidden = LSTM(50, return_sequences=False)(inputs)
    hidden = Dropout(dropout)(hidden)
    outputs = Dense(output_dim)(hidden)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='mae', optimizer=Adam(lr=lr), metrics=['mse'])
    return model
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __init__(self, data_dim, latent_dim=2, noise_dim=None, name_prefix=None):
    """
    Args:
        data_dim: int, flattened data dimensionality
        latent_dim: int, flattened latent dimensionality
        noise_dim: int, flattened noise dimensionality (defaults to latent_dim)
        name_prefix: str, the prefix of named layers
    """
    self.data_dim = data_dim
    self.noise_dim = noise_dim or latent_dim
    self.latent_dim = latent_dim
    name_prefix = name_prefix or 'base_vae'

    # BUG FIX: the original `not hasattr(self, 'encoder') and hasattr(self, 'decoder')`
    # only raised when the encoder was missing but the decoder was present.
    # Both attributes are required, so raise whenever either one is absent.
    if not (hasattr(self, 'encoder') and hasattr(self, 'decoder')):
        raise AttributeError("Initialise the attributes `encoder` and `decoder` in the child classes first!")

    self.data_input = Input(shape=(data_dim,), name='{}_data_input'.format(name_prefix))
    self.latent_input = Input(shape=(latent_dim,), name='{}_latent_prior_input'.format(name_prefix))

    # define the testing models (inference/generation, i.e. is_learning=False)
    self.inference_model = Model(inputs=self.data_input,
                                 outputs=self.encoder(self.data_input, is_learning=False))
    self.generative_model = Model(inputs=self.latent_input,
                                  outputs=self.decoder(self.latent_input, is_learning=False))
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __call__(self, *args, **kwargs):
    """
    Make the Decoder model callable on lists of Input layers or tensors.

    Args:
        *args: a list of input layers or tensors or numpy arrays, or a single input layer, tensor or numpy array.
    Keyword Args:
        is_learning: bool, whether the model is used for training or data generation. The output is either
            the reconstruction log likelihood or the output probabilities in the data space respectively.

    Returns:
        A Decoder model in `training` or `data generation` mode.
    """
    # Fixed misspelled local `is_learninig` -> `is_learning` (consistent with the Encoder's __call__).
    is_learning = kwargs.get('is_learning', True)
    if is_learning:
        return self.ll_estimator(args[0])
    else:
        return self.generator(args[0])
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def synthetic_adaptive_prior_discriminator(data_dim, latent_dim):
    """Build the internal adaptive-prior discriminator model for synthetic data."""
    data_input = Input(shape=(data_dim,), name='disc_internal_data_input')
    latent_input = Input(shape=(latent_dim,), name='disc_internal_latent_input')

    # center the data around 0 in [-1, 1] as it is in [0, 1].
    centered_data = Lambda(lambda x: 2 * x - 1, name='disc_centering_data_input')(data_input)

    # Data branch: feature body + scalar squash.
    data_body = repeat_dense(centered_data, n_layers=2, n_units=256, name_prefix='disc_body_data')
    theta = Dense(4 * 256, activation='relu', name='disc_theta')(data_body)
    data_t = repeat_dense(centered_data, n_layers=2, n_units=256, name_prefix='disc_body_data_t')
    data_t = Dense(1, activation=None, name='disc_data_squash')(data_t)

    # Latent branch: feature body + scalar squash.
    latent_body = repeat_dense(latent_input, n_layers=2, n_units=256, name_prefix='disc_body_latent')
    sigma = Dense(4 * 256, activation='relu', name='disc_sigma')(latent_body)
    latent_t = repeat_dense(latent_input, n_layers=2, n_units=256, name_prefix='disc_body_latent_t')
    latent_t = Dense(1, activation=None, name='disc_latent_squash')(latent_t)

    # Combine the theta/sigma interaction with both scalar terms and a noise penalty.
    interaction = Multiply(name='disc_mul_sigma_theta')([theta, sigma])
    interaction = Lambda(lambda x: ker.sum(x, axis=-1), name='disc_add_activ_sig_the')(interaction)
    output = Add(name='disc_add_data_latent_t')([data_t, latent_t, interaction])
    noise_term = Lambda(lambda x: 0.5 * ker.sum(x ** 2, axis=-1), name='disc_noise_addition')(latent_input)
    output = Add(name='disc_add_all_toghether')([output, noise_term])

    return Model(inputs=[data_input, latent_input], outputs=output, name='disc_internal_model')
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def mnist_adaptive_prior_discriminator(data_dim, latent_dim):
    """Build the internal adaptive-prior discriminator model for MNIST."""
    data_input = Input(shape=(data_dim,), name='disc_internal_data_input')
    latent_input = Input(shape=(latent_dim,), name='disc_internal_latent_input')

    # center the data around 0 in [-1, 1] as it is in [0, 1].
    centered_data = Lambda(lambda x: 2 * x - 1, name='disc_centering_data_input')(data_input)

    # Data branch: feature body + scalar squash.
    data_body = repeat_dense(centered_data, n_layers=3, n_units=512, name_prefix='disc_body_data')
    theta = Dense(4 * 512, activation='relu', name='disc_theta')(data_body)
    data_t = repeat_dense(centered_data, n_layers=3, n_units=512, name_prefix='disc_body_data_t')
    data_t = Dense(1, activation=None, name='disc_data_squash')(data_t)

    # Latent branch: feature body + scalar squash.
    latent_body = repeat_dense(latent_input, n_layers=3, n_units=512, name_prefix='disc_body_latent')
    sigma = Dense(4 * 512, activation='relu', name='disc_sigma')(latent_body)
    latent_t = repeat_dense(latent_input, n_layers=3, n_units=512, name_prefix='disc_body_latent_t')
    latent_t = Dense(1, activation=None, name='disc_latent_squash')(latent_t)

    # Combine the theta/sigma interaction with both scalar terms and a noise penalty.
    interaction = Multiply(name='disc_mul_sigma_theta')([theta, sigma])
    interaction = Lambda(lambda x: ker.sum(x, axis=-1), name='disc_add_activ_sig_the')(interaction)
    output = Add(name='disc_add_data_latent_t')([data_t, latent_t, interaction])
    noise_term = Lambda(lambda x: 0.5 * ker.sum(x ** 2, axis=-1), name='disc_noise_addition')(latent_input)
    output = Add(name='disc_add_all_toghether')([output, noise_term])

    return Model(inputs=[data_input, latent_input], outputs=output, name='disc_internal_model')
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __init__(self, data_dim, noise_dim, latent_dim, network_architecture='synthetic', name='encoder'):
    """Store encoder dimensions and set up the data input plus two noise samplers."""
    logger.info("Initialising {} model with {}-dimensional data and {}-dimensional noise input "
                "and {} dimensional latent output".format(name, data_dim, noise_dim, latent_dim))
    self.name = name
    self.data_dim = data_dim
    self.noise_dim = noise_dim
    self.latent_dim = latent_dim
    self.network_architecture = network_architecture
    self.data_input = Input(shape=(data_dim,), name='enc_data_input')

    # Both samplers share the same argument set; give each Lambda its own dict copy.
    sampler_args = {'data_dim': self.data_dim, 'noise_dim': self.noise_dim,
                    'seed': config['seed']}
    self.standard_normal_sampler = Lambda(sample_standard_normal_noise, name='enc_standard_normal_sampler')
    self.standard_normal_sampler.arguments = dict(sampler_args)
    self.standard_normal_sampler2 = Lambda(sample_standard_normal_noise, name='enc_standard_normal_sampler2')
    self.standard_normal_sampler2.arguments = dict(sampler_args)
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __call__(self, *args, **kwargs):
    """
    Make the Encoder model callable on a list of Input layers.

    Args:
        *args: a list of input layers from the super-model or numpy arrays in case of test-time inference.

    Keyword Args:
        is_learning: bool, whether the model is used for training or inference. The output is either
            the latent space or the latent space and the means and variances from which it is reparametrised.

    Returns:
        An Encoder model.
    """
    if kwargs.get('is_learning', True):
        return self.encoder_learning_model(args[0])
    return self.encoder_inference_model(args[0])
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled model that only applies 2D convolution masking."""
    image_in = Input(shape=(self.x, self.y, self.channel_size))
    masked = MaskConv(self.mask_value)(image_in)
    model = Model(image_in, masked)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled model that only applies 3D convolution masking."""
    volume_in = Input(shape=(self.x, self.y, self.z, self.channel_size))
    masked = MaskConv(self.mask_value)(volume_in)
    model = Model(volume_in, masked)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled masked 3D average-pooling model."""
    volume_in = Input(shape=(self.x, self.y, self.z, self.channel_size))
    masked = MaskConv(self.mask_value)(volume_in)
    pooled = MaskPooling(
        AvgPool3D(self.pool_size, self.strides, self.padding),
        pool_mode='avg',
    )(masked)
    model = Model(volume_in, pooled)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled masked 2D average-pooling model."""
    image_in = Input(shape=(self.x, self.y, self.channel_size))
    masked = MaskConv(self.mask_value)(image_in)
    pooled = MaskPooling(
        AvgPool2D(self.pool_size, self.strides, self.padding),
        pool_mode='avg',
    )(masked)
    model = Model(image_in, pooled)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled masked 2D max-pooling model."""
    image_in = Input(shape=(self.x, self.y, self.channel_size))
    masked = MaskConv(self.mask_value)(image_in)
    pooled = MaskPooling(
        MaxPool2D(self.pool_size, self.strides, self.padding),
        pool_mode='max',
    )(masked)
    model = Model(image_in, pooled)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled model: masked sequence -> plain recurrent layer (all timesteps)."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    recurrent = rnn_layer(self.encoding_size, return_sequences=True)
    outputs = recurrent(Masking(0.0)(seq_in))
    model = Model(seq_in, outputs)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled model: masked sequence -> RNNEncoder."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    encoded = RNNEncoder(rnn_layer(self.encoding_size))(Masking(0.0)(seq_in))
    model = Model(seq_in, encoded)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled model: masked sequence -> bidirectional RNN encoder."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    encoded = BidirectionalRNNEncoder(rnn_layer(self.cell_units))(Masking(0.0)(seq_in))
    model = Model(seq_in, encoded)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self, rnn_layer):
    """Build a compiled seq2seq model using RNNCell-wrapped encoder and decoder."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    masked = Masking(0.0)(seq_in)
    # Encoder cell projects the hidden state down to the encoding size.
    encoder_cell = RNNCell(
        rnn_layer(self.hidden_size),
        Dense(self.encoding_size),
        dense_dropout=0.1,
    )
    latent = RNNEncoder(encoder_cell)(masked)
    # Decoder cell projects back up to the feature size over decoding_length steps.
    decoder_cell = RNNCell(
        rnn_layer(self.hidden_size),
        Dense(self.feature_size),
        dense_dropout=0.1,
    )
    reconstruction = RNNDecoder(decoder_cell, time_steps=self.decoding_length)(latent)
    model = Model(seq_in, reconstruction)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled model: masked LSTM encoding, then Pick selects the output."""
    seq_in = Input(shape=(self.max_length, self.feature_size))
    encoded = RNNEncoder(
        LSTM(self.encoding_size, return_sequences=True)
    )(Masking(0.0)(seq_in))
    picked = Pick()(encoded)
    model = Model(seq_in, picked)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
    """Build a compiled model converting a masked 2D input into a sequence mask."""
    image_in = Input(shape=(self.x, self.y, self.channel_size))
    seq_mask = MaskToSeq(MaskConv(self.mask_value))(image_in)
    model = Model(image_in, seq_mask)
    model.compile('sgd', 'mean_squared_error')
    return model
项目:MachineLearning-2048    作者:codetiger    | 项目源码 | 文件源码
def __init__(self):
    """Seed the RNGs, build the 2048 environment, the Q-network and the evolution strategy."""
    # Seed both RNGs with the same wall-clock value (the original called
    # int(time.time()) twice, which could straddle a second boundary).
    seed = int(time.time())
    random.seed(seed)
    np.random.seed(seed)

    window_length = 1
    nb_hidden = 256
    nb_actions = 4  # up/down/left/right

    self.env = GameLogic(size=self.GRID_SIZE)

    # Fully-connected network: flattened board -> 2 hidden ReLU layers -> action values.
    # (Removed the dead commented-out functional-API variant.)
    self.model = Sequential()
    self.model.add(Flatten(input_shape=(window_length, self.GRID_SIZE * self.GRID_SIZE)))
    self.model.add(Dense(nb_hidden))
    self.model.add(Activation('relu'))
    self.model.add(Dense(nb_hidden))
    self.model.add(Activation('relu'))
    self.model.add(Dense(nb_actions, activation='linear'))
    print(self.model.summary())

    self.es = EvolutionStrategy(self.model.get_weights(), self.get_reward, self.POPULATION_SIZE, self.SIGMA, self.LEARNING_RATE)
    self.exploration = self.INITIAL_EXPLORATION
项目:keras-visualize-activations    作者:philipperemy    | 项目源码 | 文件源码
def get_multi_inputs_model():
    """Return a model multiplying two 10-d inputs elementwise into one sigmoid unit."""
    first = Input(shape=(10,))
    second = Input(shape=(10,))
    product = merge([first, second], mode='mul')
    prediction = Dense(1, activation='sigmoid', name='only_this_layer')(product)
    return Model(inputs=[first, second], outputs=prediction)
项目:keras_detect_tool_wear    作者:kidozh    | 项目源码 | 文件源码
def build_model(timestep, input_dim, output_dim, dropout=0.5, recurrent_layers_num=4, cnn_layers_num=6, lr=0.001):
    """Build and compile a 1D residual CNN regression model over masked time series.

    NOTE: `recurrent_layers_num` is accepted for interface compatibility but unused.
    """
    inp = Input(shape=(timestep, input_dim))
    # Mask zero-padded timesteps before the convolutional stack.
    out = TimeDistributed(Masking(mask_value=0))(inp)
    out = Conv1D(128, 1)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out, (64, 128), dropout=dropout)
    out = Dropout(dropout)(out)
    for _ in range(cnn_layers_num):
        out = repeated_block(out, (64, 128), dropout=dropout)

    out = Flatten()(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp, out)
    model.compile(Adam(lr=lr), 'mse', ['mae'])
    return model
项目:keras_detect_tool_wear    作者:kidozh    | 项目源码 | 文件源码
def build_2d_main_residual_network(batch_size,
                                width,
                                height,
                                channel_size,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    """Build and compile a 2D residual CNN regression network.

    NOTE: `batch_size` is accepted for interface compatibility but unused.
    """
    inp = Input(shape=(width, height, channel_size))

    # Mask invalid (zero) data before convolution.
    out = TimeDistributed(Masking(mask_value=0))(inp)
    out = Conv2D(128, 5, data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    # Residual stack: one first block, then `loop_depth` repeated blocks.
    out = first_2d_block(out, (64, 128), dropout=dropout)
    for _ in range(loop_depth):
        out = repeated_2d_block(out, (64, 128), dropout=dropout)

    out = Flatten()(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp, out)
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __init__(self, latent_dim, data_dim, network_architecture='synthetic', name='decoder'):
    """Store decoder dimensions and create its data and latent input layers."""
    logger.info("Initialising {} model with {}-dimensional data "
                "and {}-dimensional latent input.".format(name, data_dim, latent_dim))
    self.name = name
    self.data_dim = data_dim
    self.latent_dim = latent_dim
    self.network_architecture = network_architecture
    self.data_input = Input(shape=(self.data_dim,), name='dec_ll_estimator_data_input')
    self.latent_input = Input(shape=(self.latent_dim,), name='dec_latent_input')
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __init__(self, data_dim, latent_dim, network_architecture='synthetic', name='discriminator'):
    """Store discriminator dimensions, inputs and the adaptive prior sampler."""
    logger.info("Initialising {} model with {}-dimensional data "
                "and {}-dimensional prior/latent input.".format(name, data_dim, latent_dim))
    self.name = name
    self.data_dim = data_dim
    self.latent_dim = latent_dim
    self.network_architecture = network_architecture
    self.data_input = Input(shape=(data_dim,), name='disc_data_input')
    self.latent_input = Input(shape=(latent_dim,), name='disc_latent_input')
    self.prior_sampler = Lambda(sample_adaptive_normal_noise, name='disc_prior_sampler')
    self.prior_sampler.arguments = {'latent_dim': self.latent_dim, 'seed': config['seed']}
    # Built lazily elsewhere; both start unset.
    self.discriminator_from_prior_model = None
    self.discriminator_from_posterior_model = None
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __call__(self, *args, **kwargs):
    """
    Make the Discriminator model callable on a list of Inputs (coming from the AVB model).

    Args:
        *args: a list of Input layers
        **kwargs:

    Returns:
        A trainable Discriminator model.
    """
    if kwargs.get('from_posterior', False):
        return self.discriminator_from_posterior_model(args[0])
    return self.discriminator_from_prior_model(args[0])
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __call__(self, *args, **kwargs):
    """
    Make the Discriminator model callable on a list of Inputs (coming from the AVB model).

    Args:
        *args: a list of Input layers
        **kwargs:

    Returns:
        A trainable Discriminator model.
    """
    # Dispatch to the posterior- or prior-conditioned internal model.
    use_posterior = kwargs.get('from_posterior', False)
    model = self.discriminator_from_posterior_model if use_posterior else self.discriminator_from_prior_model
    return model(args[0])
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def synthetic_encoder(data_dim, noise_dim, latent_dim=2):
    """Build the internal encoder model: concat(data, noise) -> dense body -> latent."""
    data_input = Input(shape=(data_dim,), name='enc_internal_data_input')
    noise_input = Input(shape=(noise_dim,), name='enc_internal_noise_input')
    merged = Concatenate(axis=1, name='enc_data_noise_concatenation')([data_input, noise_input])
    body = repeat_dense(merged, n_layers=2, n_units=256, name_prefix='enc_body')
    latent = Dense(latent_dim, activation=None, name='enc_latent')(body)
    return Model(inputs=[data_input, noise_input], outputs=latent, name='enc_internal_model')
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def synthetic_moment_estimation_encoder(data_dim, noise_dim, noise_basis_dim, latent_dim=2):
    """Build two internal models: noise-basis vectors and per-basis coefficients."""
    noise_input = Input(shape=(noise_basis_dim, noise_dim,), name='enc_internal_noise_input')

    # Each noise row gets its own small fully connected network; `noise_basis_dim`
    # independent heads in total.
    def get_inp_row(inputs, **kwargs):
        row = kwargs.get('i', 0)
        # first axis is the batch size, so skip it
        return inputs[:, row]

    basis_vectors = []
    for i in range(noise_basis_dim):
        selected = Lambda(get_inp_row, arguments={'i': i},
                          name='enc_noise_basis_vec_select_{}'.format(i))(noise_input)
        head = Dense(32, activation='relu', name='enc_noise_basis_body_0_basis_{}'.format(i))(selected)
        head = Dense(32, activation='relu', name='enc_noise_basis_body_1_basis_{}'.format(i))(head)
        head = Dense(latent_dim, activation=None, name='enc_noise_basis_body_2_basis_{}'.format(i))(head)
        basis_vectors.append(head)

    noise_basis_vectors_model = Model(inputs=noise_input, outputs=basis_vectors,
                                      name='enc_noise_basis_vector_model')

    data_input = Input(shape=(data_dim,), name='enc_internal_data_input')
    # center the input around 0
    centered_data = Lambda(lambda x: 2 * x - 1, name='enc_centering_data_input')(data_input)
    # shared feature extractor feeding all coefficient heads
    extracted_features = repeat_dense(centered_data, n_layers=2, n_units=256, name_prefix='enc_body')
    latent_0 = Dense(latent_dim, name='enc_coefficients')(extracted_features)
    # One coefficient head per basis vector, plus the unindexed head appended last.
    coefficients = [Dense(latent_dim, name='enc_coefficients_{}'.format(i))(extracted_features)
                    for i in range(noise_basis_dim)]
    coefficients.append(latent_0)
    coefficients_model = Model(inputs=data_input, outputs=coefficients, name='enc_coefficients_model')

    return coefficients_model, noise_basis_vectors_model
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def synthetic_discriminator(data_dim, latent_dim):
    """Build the internal dot-product discriminator for synthetic data."""
    data_input = Input(shape=(data_dim,), name='disc_internal_data_input')
    latent_input = Input(shape=(latent_dim,), name='disc_internal_latent_input')

    # center the data around 0 in [-1, 1] as it is in [0, 1].
    centered_data = Lambda(lambda x: 2 * x - 1, name='disc_centering_data_input')(data_input)
    data_features = repeat_dense(centered_data, n_layers=2, n_units=256, name_prefix='disc_body_data')
    latent_features = repeat_dense(latent_input, n_layers=2, n_units=256, name_prefix='disc_body_latent')

    score = Dot(axes=1, name='disc_output_dot')([data_features, latent_features])
    return Model(inputs=[data_input, latent_input], outputs=score, name='disc_internal_model')
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def mnist_encoder_simple(data_dim, noise_dim, latent_dim=8):
    """Build the internal MNIST encoder: dense body plus additive resized noise."""
    data_input = Input(shape=(data_dim,), name='enc_internal_data_input')
    noise_input = Input(shape=(noise_dim,), name='enc_internal_noise_input')

    # Data path: dense body -> 100-unit layer -> latent features.
    features = repeat_dense(data_input, n_layers=2, n_units=256, activation='relu', name_prefix='enc_body')
    features = Dense(100, activation='relu', name='enc_dense_before_latent')(features)
    features = Dense(latent_dim, name='enc_latent_features')(features)

    # Noise path: linear resize to the latent dimension, then add to the features.
    resized_noise = Dense(latent_dim, activation=None, name='enc_noise_resizing')(noise_input)
    latent = Add(name='enc_add_noise_data')([features, resized_noise])

    return Model(inputs=[data_input, noise_input], outputs=latent, name='enc_internal_model')
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def mnist_discriminator_simple(data_dim, latent_dim):
    """Build the internal dot-product discriminator for MNIST."""
    data_input = Input(shape=(data_dim,), name='disc_internal_data_input')
    latent_input = Input(shape=(latent_dim,), name='disc_internal_latent_input')

    # center the data around 0 in [-1, 1] as it is in [0, 1].
    centered_data = Lambda(lambda x: 2 * x - 1, name='disc_centering_data_input')(data_input)
    data_features = repeat_dense(centered_data, n_layers=3, n_units=512, name_prefix='disc_body_data')
    latent_features = repeat_dense(latent_input, n_layers=4, n_units=512, name_prefix='disc_body_latent')

    score = Dot(axes=-1, name='disc_dot_sigma_theta')([data_features, latent_features])
    return Model(inputs=[data_input, latent_input], outputs=score, name='disc_internal_model')
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def __call__(self, *args, **kwargs):
    """
    Make the Encoder model callable on a list of Input layers.

    Args:
        *args: a list of input layers from the super-model or numpy arrays in case of test-time inference.
        **kwargs:

    Returns:
        An Encoder model.
    """
    if kwargs.get('is_learning', True):
        return self.encoder_trainable_model(args[0])
    return self.encoder_inference_model(args[0])
项目:yoctol-keras-layer-zoo    作者:Yoctol    | 项目源码 | 文件源码
def create_model(self):
        """Assemble a masked 3D-convolution model compiled with SGD / MSE.

        Returns:
            A compiled Keras Model: Input -> MaskConv -> MaskConvNet(Conv3D).
        """
        volume_in = Input(shape=(self.x, self.y, self.z, self.channel_size))
        # Mask out entries equal to the configured mask value before convolving.
        masked = MaskConv(self.mask_value)(volume_in)
        conv_layer = Conv3D(
            self.filters,
            self.kernel,
            strides=self.strides
        )
        features = MaskConvNet(conv_layer)(masked)
        net = Model(volume_in, features)
        net.compile('sgd', 'mean_squared_error')
        return net
项目:keras_detect_tool_wear    作者:kidozh    | 项目源码 | 文件源码
def build_multi_input_main_residual_network(batch_size,
                                a2_time_step,
                                d2_time_step,
                                d1_time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    '''
    A multi-input residual network for wavelet-transformed signals.

    The three wavelet components (a2, d2, d1) enter through separate inputs,
    are concatenated along the time axis, and pass through a Conv1D stem
    followed by a stack of residual blocks and a dense regression head.

    :param batch_size: kept for interface compatibility (not used by the graph)
    :param a2_time_step: number of time steps of the a2 component
    :param d2_time_step: number of time steps of the d2 component
    :param d1_time_step: number of time steps of the d1 component
    :param input_dim: feature dimension shared by all three inputs
    :param output_dim: dimension of the regression output
    :param loop_depth: number of repeated residual blocks
    :param dropout: dropout rate inside the residual blocks
    :return: a compiled Keras Model (loss='mse', optimizer='adam')
    '''
    a2_inp = Input(shape=(a2_time_step, input_dim), name='a2')
    d2_inp = Input(shape=(d2_time_step, input_dim), name='d2')
    # BUG FIX: this input was mislabelled 'a1' (copy-paste from a2). It carries
    # the d1 wavelet component, so name it 'd1'. NOTE(review): callers that
    # feed inputs by layer name must use the corrected key.
    d1_inp = Input(shape=(d1_time_step, input_dim), name='d1')

    # Stack the three components along the time axis into one sequence.
    out = concatenate([a2_inp, d2_inp, d1_inp], axis=1)

    # Conv stem before the residual stack.
    out = Conv1D(128, 5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out, (64, 128), dropout=dropout)
    for _ in range(loop_depth):
        out = repeated_block(out, (64, 128), dropout=dropout)

    # Flatten and regress to the output dimension.
    out = Flatten()(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inputs=[a2_inp, d2_inp, d1_inp], outputs=[out])
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
项目:keras_detect_tool_wear    作者:kidozh    | 项目源码 | 文件源码
def build_main_residual_network_with_lstm(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                rnn_layer_num = 2,
                                dropout=0.3):
    """Residual Conv1D network preceded by a stack of LSTM layers.

    :param batch_size: kept for interface compatibility (not used by the graph)
    :param time_step: sequence length of the input
    :param input_dim: feature dimension per time step
    :param output_dim: dimension of the regression output
    :param loop_depth: number of repeated residual blocks
    :param rnn_layer_num: number of stacked LSTM layers before the conv stem
    :param dropout: dropout rate inside the residual blocks
    :return: a compiled Keras Model (loss='mse', optimizer='adam')
    """
    seq_in = Input(shape=(time_step, input_dim))

    # Mask invalid (all-zero) entries so downstream layers can skip them.
    net = TimeDistributed(Masking(mask_value=0))(seq_in)

    # Recurrent feature extractor: stacked sequence-to-sequence LSTMs.
    for _ in range(rnn_layer_num):
        net = LSTM(128, return_sequences=True)(net)

    # Conv stem before the residual stack.
    net = Conv1D(128, 5)(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)

    net = first_block(net, (64, 128), dropout=dropout)
    for _ in range(loop_depth):
        net = repeated_block(net, (64, 128), dropout=dropout)

    # Flatten and regress to the output dimension.
    net = Flatten()(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = Dense(output_dim)(net)

    model = Model(seq_in, net)
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
项目:adversarial-variational-bayes    作者:gdikov    | 项目源码 | 文件源码
def mnist_moment_estimation_encoder(data_dim, noise_dim, noise_basis_dim, latent_dim=8):
    """Build the two sub-models of the MNIST moment-estimation encoder.

    Args:
        data_dim: flattened image size; must be 28*28 = 784.
        noise_dim: dimensionality of each noise slot.
        noise_basis_dim: number of noise basis vectors / coefficient heads.
        latent_dim: dimensionality of the latent space.

    Returns:
        A pair ``(coefficients_model, noise_basis_vectors_model)``:
        * ``noise_basis_vectors_model`` maps a (noise_basis_dim, noise_dim)
          noise tensor to ``noise_basis_dim`` latent-sized basis vectors, each
          produced by its own small fully-connected network.
        * ``coefficients_model`` maps a flattened MNIST image to
          ``noise_basis_dim + 1`` latent-sized coefficient vectors, with the
          extra head (latent_0) appended last.
    """
    noise_input = Input(shape=(noise_basis_dim, noise_dim,), name='enc_internal_noise_input')

    def select_slot(inputs, **kwargs):
        # Pick the kwargs['i']-th noise slot; axis 0 is the batch dimension.
        idx = kwargs.get('i', 0)
        return inputs[:, idx]

    basis_vectors = []
    for i in range(noise_basis_dim):
        picked = Lambda(select_slot, arguments={'i': i}, name='enc_noise_basis_vec_select_{}'.format(i))(noise_input)
        hidden = Dense(128, activation='relu', name='enc_noise_basis_body_0_basis_{}'.format(i))(picked)
        hidden = Dense(128, activation='relu', name='enc_noise_basis_body_1_basis_{}'.format(i))(hidden)
        hidden = Dense(128, activation='relu', name='enc_noise_basis_body_2_basis_{}'.format(i))(hidden)
        hidden = Dense(latent_dim, activation=None, name='enc_noise_basis_body_3_basis_{}'.format(i))(hidden)
        basis_vectors.append(hidden)

    noise_basis_vectors_model = Model(inputs=noise_input, outputs=basis_vectors,
                                      name='enc_noise_basis_vector_model')

    data_input = Input(shape=(data_dim,), name='enc_internal_data_input')
    assert data_dim == 28 ** 2, "MNIST data should be flattened to 784-dimensional vectors."
    # Map pixels from [0, 1] to [-1, 1] before the convolutional extractor.
    centered = Lambda(lambda x: 2 * x - 1, name='enc_centering_data_input')(data_input)
    image = Reshape((28, 28, 1), name='enc_data_reshape')(centered)
    conv_features = deflating_convolution(image, n_deflation_layers=3,
                                          n_filters_init=64, name_prefix='enc_data_body')
    flat_features = Reshape((-1,), name='enc_data_features_reshape')(conv_features)
    extracted_features = Dense(800, activation='relu', name='enc_expanding_before_latent')(flat_features)

    latent_0 = Dense(latent_dim, name='enc_coefficients')(extracted_features)
    # One coefficient head per basis vector, then latent_0 appended last.
    coefficient_heads = [
        Dense(latent_dim, name='enc_coefficients_{}'.format(i))(extracted_features)
        for i in range(noise_basis_dim)
    ]
    coefficient_heads.append(latent_0)
    coefficients_model = Model(inputs=data_input, outputs=coefficient_heads, name='enc_coefficients_model')

    return coefficients_model, noise_basis_vectors_model
项目:nn-transfer    作者:gzuidhof    | 项目源码 | 文件源码
def vggnet_keras():
    """Build VGG16 (channels-first, 3x224x224 input).

    Layer names follow a torchvision-style state dict ('features.N' for the
    conv trunk, 'classifier.N' for the dense head) so weights can be
    transferred by name.

    Returns:
        An uncompiled Keras Model named 'vgg16'.
    """
    img_input = Input((3, 224, 224))

    # (filters, torchvision-style conv layer names, pool layer name) per block.
    conv_blocks = [
        (64,  ('features.0', 'features.2'), 'block1_pool'),
        (128, ('features.5', 'features.7'), 'block2_pool'),
        (256, ('features.10', 'features.12', 'features.14'), 'block3_pool'),
        (512, ('features.17', 'features.19', 'features.21'), 'block4_pool'),
        (512, ('features.24', 'features.26', 'features.28'), 'block5_pool'),
    ]

    x = img_input
    for filters, conv_names, pool_name in conv_blocks:
        for conv_name in conv_names:
            x = Conv2D(filters, (3, 3), activation='relu',
                       padding='same', name=conv_name)(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name=pool_name)(x)

    # Fully-connected classifier head.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='classifier.0')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='classifier.3')(x)
    x = Dropout(0.5)(x)
    x = Dense(1000, activation=None, name='classifier.6')(x)

    return Model(img_input, x, name='vgg16')