Python keras.objectives module: binary_crossentropy() code examples

The following 35 code examples, extracted from open-source Python projects, illustrate how to use keras.objectives.binary_crossentropy().
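Before the examples, a minimal usage sketch, assuming the Keras 1.x-era API in which loss functions live in keras.objectives (the module was renamed keras.losses in Keras 2):

from keras.models import Sequential
from keras.layers import Dense
from keras.objectives import binary_crossentropy

# binary_crossentropy(y_true, y_pred) returns the mean crossentropy per
# sample, so the function object can be passed directly as a loss.
model = Sequential([Dense(1, activation='sigmoid', input_dim=10)])
model.compile(optimizer='adam', loss=binary_crossentropy)

Passing the string 'binary_crossentropy' to compile() is equivalent; the examples below import the function explicitly because they wrap it inside custom loss functions.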

Project: latplan    Author: guicho271828
def _build(self,input_shape):
        x = Input(shape=input_shape)
        N = input_shape[0] // 2

        y = Sequential([
            flatten,
            *[Sequential([BN(),
                          Dense(self.parameters['layer'],activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(x)

        self.loss = bce
        self.net = Model(x, y)
        # self.callbacks.append(self.linear_schedule([0.2,0.5], 0.1))
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
        # self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
Project: latplan    Author: guicho271828
def _build(self,input_shape):
        _encoder = self.build_encoder(input_shape)
        _decoder = self.build_decoder(input_shape)

        x = Input(shape=input_shape)
        z = Sequential([flatten, *_encoder])(x)
        y = Sequential(_decoder)(flatten(z))

        z2 = Input(shape=K.int_shape(z)[1:])
        y2 = Sequential(_decoder)(flatten(z2))

        self.loss = bce
        self.encoder     = Model(x, z)
        self.decoder     = Model(z2, y2)
        self.net = Model(x, y)
        self.autoencoder = self.net
Project: latplan    Author: guicho271828
def report(self,train_data,
               epoch=200,batch_size=1000,optimizer=Adam(0.001),
               test_data=None,
               train_data_to=None,
               test_data_to=None,):
        test_data     = train_data if test_data is None else test_data
        train_data_to = train_data if train_data_to is None else train_data_to
        test_data_to  = test_data  if test_data_to is None else test_data_to
        opts = {'verbose':0,'batch_size':batch_size}
        def test_both(msg, fn):
            print(msg.format(fn(train_data)))
            if test_data is not None:
                print((msg+" (validation)").format(fn(test_data)))
        self.autoencoder.compile(optimizer=optimizer, loss=bce)
        test_both("Reconstruction BCE: {}",
                  lambda data: self.autoencoder.evaluate(data,data,**opts))
        return self
Project: keras-molecules    Author: maxhodak
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
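For reference, the vae_loss closures in this and the following examples implement the negative evidence lower bound of a variational autoencoder. With z_mean = \mu and z_log_var = \log\sigma^2, the sampling layer draws z = \mu + \sigma \odot \epsilon with \epsilon \sim \mathcal{N}(0, I), and the loss is

    \mathcal{L}(x,\hat{x}) = D \cdot \mathrm{BCE}(x,\hat{x}) - \frac{1}{2}\sum_{j=1}^{d}\left(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\right)

where the scale factor D (max_length here; original_dim or img_rows * img_cols in later examples) turns the per-dimension mean BCE that Keras returns back into a sum over input dimensions. Examples that take K.mean rather than K.sum for the KL term additionally rescale it by 1/d.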
Project: nn_playground    Author: DingKe
def vae_loss(x, x_hat):
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat) 
    if use_loss == 'xent':
        return xent_loss + kl_loss
    elif use_loss == 'mse':
        return mse_loss + kl_loss
    else:
        raise Exception('Unknown loss: ' + use_loss)
Project: VAE_NOTES    Author: FanhuaandLuomu
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: VAE_NOTES    Author: FanhuaandLuomu
def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    # Flatten
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss

# input_shape: (100,1,28,28)
# output_shape: (100,1,28,28)
Project: latplan    Author: guicho271828
def _load(self):
        import json
        with open(self.local('aux.json'), 'r') as f:
            data = json.load(f)
            self.parameters = data["parameters"]
            self.build(tuple(data["input_shape"]))
        self.net.compile(Adam(0.0001),bce)
Project: latplan    Author: guicho271828
def report(self,train_data,
               test_data=None,
               train_data_to=None,
               test_data_to=None,
               batch_size=1000,
               **kwargs):
        test_data     = train_data if test_data is None else test_data
        train_data_to = train_data if train_data_to is None else train_data_to
        test_data_to  = test_data  if test_data_to is None else test_data_to
        opts = {'verbose':0,'batch_size':batch_size}
        def test_both(msg, fn):
            print(msg.format(fn(train_data)))
            if test_data is not None:
                print((msg+" (validation)").format(fn(test_data)))
        self.autoencoder.compile(optimizer='adam', loss=mse)
        test_both("Reconstruction MSE: {}",
                  lambda data: self.autoencoder.evaluate(data,data,**opts))
        test_both("Reconstruction MSE (gaussian 0.3): {}",
                  lambda data: self.autoencoder.evaluate(gaussian(data),data,**opts))
        test_both("Reconstruction MSE (salt 0.06): {}",
                  lambda data: self.autoencoder.evaluate(salt(data),data,**opts))
        test_both("Reconstruction MSE (pepper 0.06): {}",
                  lambda data: self.autoencoder.evaluate(pepper(data),data,**opts))
        # self.autoencoder.compile(optimizer=optimizer, loss=bce)
        # test_both("Reconstruction BCE: {}",
        #           lambda data: self.autoencoder.evaluate(data,data,**opts))
        # test_both("Noise reconstruction BCE (gaussian 0.3): {}",
        #           lambda data: self.autoencoder.evaluate(gaussian(data),data,**opts))
        # test_both("Noise reconstruction BCE (salt 0.1): {}",
        #           lambda data: self.autoencoder.evaluate(salt(data),data,**opts))
        # test_both("Noise reconstruction BCE (pepper 0.1): {}",
        #           lambda data: self.autoencoder.evaluate(pepper(data),data,**opts))
        test_both("Latent activation: {}",
                  lambda data: self.encode_binary(train_data,batch_size=batch_size,).mean())
        return self
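The gaussian, salt, and pepper helpers used above are defined elsewhere in latplan; a hypothetical sketch of what they do (the actual implementations and noise conventions may differ):

import numpy as np

def gaussian(data, sigma=0.3):
    # additive Gaussian noise, clipped back to [0, 1]
    return np.clip(data + np.random.normal(0.0, sigma, data.shape), 0, 1)

def salt(data, rate=0.06):
    # set a random fraction of entries to 1
    out = data.copy()
    out[np.random.random(data.shape) < rate] = 1
    return out

def pepper(data, rate=0.06):
    # set a random fraction of entries to 0
    out = data.copy()
    out[np.random.random(data.shape) < rate] = 0
    return out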
Project: latplan    Author: guicho271828
def _build(self,input_shape):
        data_dim = np.prod(input_shape) 
        self.gs = self.build_gs()
        self.gs2 = self.build_gs(N=data_dim)
        self.gs3 = self.build_gs(N=data_dim)

        _encoder = self.build_encoder(input_shape)
        _decoder = self.build_decoder(input_shape)

        x = Input(shape=input_shape)
        z = Sequential([flatten, *_encoder, self.gs])(x)
        y = Sequential([flatten,
                        *_decoder,
                        self.gs2,
                        Lambda(take_true),
                        Reshape(input_shape)])(z)

        z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
        y2 = Sequential([flatten,
                        *_decoder,
                        self.gs3,
                        Lambda(take_true),
                        Reshape(input_shape)])(z2)

        def rec(x, y):
            return bce(K.reshape(x,(K.shape(x)[0],data_dim,)),
                       K.reshape(y,(K.shape(x)[0],data_dim,)))
        def loss(x, y):
            return rec(x,y) + self.gs.loss() + self.gs2.loss()

        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs3.cool))
        self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)
        self.loss = loss
        self.metrics.append(rec)
        self.encoder     = Model(x, z)
        self.decoder     = Model(z2, y2)
        self.net = Model(x, y)
        self.autoencoder = self.net
Project: latplan    Author: guicho271828
def _build(self,input_shape):
        x = Input(shape=input_shape)

        y = Sequential([
            Convolution2D(self.parameters['clayer'], (3,3), padding='same', activation=self.parameters['activation']),
            BN(),
            Dropout(self.parameters['dropout']),
            MaxPooling2D((2,2)),
            Convolution2D(self.parameters['clayer'], (3,3), padding='same', activation=self.parameters['activation']),
            BN(),
            Dropout(self.parameters['dropout']),
            MaxPooling2D((2,2)),
            Convolution2D(self.parameters['clayer'], (3,3), padding='same', activation=self.parameters['activation']),
            BN(),
            Dropout(self.parameters['dropout']),
            MaxPooling2D((2,2)),
            flatten,
            Dense(self.parameters['layer'], activation=self.parameters['activation']),
            # BN(),
            # Dropout(self.parameters['dropout'])
            # *[Sequential([,])
            #   for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(x)

        def loss(x,y):
            return bce(x,y)
        self.loss = loss
        self.net = Model(x, y)
Project: latplan    Author: guicho271828
def _build(self,input_shape):
        num_actions = 128
        N = input_shape[0] - num_actions
        x = Input(shape=input_shape)
        pre    = wrap(x,tf.slice(x, [0,0], [-1,N]),name="pre")
        action = wrap(x,tf.slice(x, [0,N], [-1,num_actions]),name="action")

        ys = []
        for i in range(num_actions):
            _x = Input(shape=(N,))
            _y = Sequential([
                flatten,
                *[Sequential([BN(),
                              Dense(self.parameters['layer'],activation=self.parameters['activation']),
                              Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
                Dense(1,activation="sigmoid")
            ])(_x)
            _m = Model(_x,_y,name="action_"+str(i))
            ys.append(_m(pre))

        ys = Concatenate()(ys)
        y  = Dot(-1)([ys,action])

        self.loss = bce
        self.net = Model(x, y)
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
Project: keras    Author: GeekLiB
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: keras    Author: GeekLiB
def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: pydl    Author: rafaeltg
def _vae_loss(self, x, x_decoded_mean):
        n_inputs = self._model.get_input_shape_at(0)[1]
        z_mean = self._model.get_layer('z_mean').inbound_nodes[0].output_tensors[0]
        z_log_var = self._model.get_layer('z_log_var').inbound_nodes[0].output_tensors[0]

        xent_loss = n_inputs * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.sum(1 + z_log_var
                                - K.square(z_mean)
                                - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss
Project: Generative-models    Author: aalitaiga
def vae_loss(x_, x_reconstruct):
    rec_loss = binary_crossentropy(x_, x_reconstruct)
    kl_loss = - 0.5 * K.mean(1 + 2*K.log(z_std + 1e-10) - z_mean**2 - z_std**2, axis=-1)
    return rec_loss + kl_loss
Project: keras-customized    Author: ambrite
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: keras-customized    Author: ambrite
def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: keras-autoencoder    Author: Rentier
def vae_loss(x, x_decoded_mean):
        xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
        return xent_loss + kl_loss
Project: keras-mxnet-benchmarks    Author: sandeep-krishnamurthy
def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: keras-mxnet-benchmarks    Author: sandeep-krishnamurthy
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: neural-decoder    Author: Krastanov
def e_binary_crossentropy(self, y_true, y_pred):
        if self.p:
            y_pred = undo_normcentererr(y_pred, self.p)
            y_true = undo_normcentererr(y_true, self.p)
        return K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)
Project: neural-decoder    Author: Krastanov
def s_binary_crossentropy(self, y_true, y_pred):
        if self.p:
            y_pred = undo_normcentererr(y_pred, self.p)
            y_true = undo_normcentererr(y_true, self.p)
        s_true = K.dot(y_true, K.transpose(self.H))%2
        twopminusone = 2*y_pred-1
        s_pred = ( 1 - tf.real(K.exp(K.dot(K.log(tf.cast(twopminusone, tf.complex64)), tf.cast(K.transpose(self.H), tf.complex64)))) ) / 2
        return K.mean(K.binary_crossentropy(s_pred, s_true), axis=-1)
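The complex-valued expression for s_pred is a vectorized form of the parity identity: if bit i equals 1 with probability p_i, independently, then

    P(x_1 \oplus \cdots \oplus x_n = 1) = \frac{1 - \prod_i (1 - 2p_i)}{2}

The code evaluates the product for each syndrome bit as exp(H · log(2p − 1)), routing through complex64 because the factors 2p_i − 1 can be negative; taking the real part recovers the signed product. Since each toric-code check involves an even number of bits, \prod_i (2p_i - 1) = \prod_i (1 - 2p_i) and the two forms agree.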
Project: neural-decoder    Author: Krastanov
def create_model(L, hidden_sizes=[4], hidden_act='tanh', act='sigmoid', loss='binary_crossentropy',
                 Z=True, X=False, learning_rate=0.002,
                 normcentererr_p=None, batchnorm=0):
    in_dim = L**2 * (X+Z)
    out_dim = 2*L**2 * (X+Z)
    model = Sequential()
    model.add(Dense(int(hidden_sizes[0]*out_dim), input_dim=in_dim, kernel_initializer='glorot_uniform'))
    if batchnorm:
        model.add(BatchNormalization(momentum=batchnorm))
    model.add(Activation(hidden_act))
    for s in hidden_sizes[1:]:
        model.add(Dense(int(s*out_dim), kernel_initializer='glorot_uniform'))
        if batchnorm:
            model.add(BatchNormalization(momentum=batchnorm))
        model.add(Activation(hidden_act))
    model.add(Dense(out_dim, kernel_initializer='glorot_uniform'))
    if batchnorm:
        model.add(BatchNormalization(momentum=batchnorm))
    model.add(Activation(act))
    c = CodeCosts(L, ToricCode, Z, X, normcentererr_p)
    losses = {'e_binary_crossentropy':c.e_binary_crossentropy,
              's_binary_crossentropy':c.s_binary_crossentropy,
              'se_binary_crossentropy':c.se_binary_crossentropy}
    model.compile(loss=losses.get(loss,loss),
                  optimizer=Nadam(lr=learning_rate),
                  metrics=[c.triv_no_error, c.e_binary_crossentropy, c.s_binary_crossentropy]
                 )
    return model
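A hypothetical invocation, assuming CodeCosts and ToricCode from this project are importable; loss may name one of the custom costs above or any built-in Keras loss:

# distance-5 toric code decoder, two hidden layers of 4x and 2x the output width
model = create_model(L=5, hidden_sizes=[4, 2],
                     loss='s_binary_crossentropy', batchnorm=0.99)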
Project: keras    Author: NVIDIA
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: keras    Author: NVIDIA
def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: VariationalAutoEncoder    Author: despoisj
def VAELoss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batchSize by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = imageSize * imageSize * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss

# Convolutional models
Project: actinf    Author: x75
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: actinf    Author: x75
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: vae_example    Author: DingKe
def vae_loss(x, x_hat):
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat) 
    if use_loss == 'xent':
        return xent_loss + kl_loss
    elif use_loss == 'mse':
        return mse_loss + kl_loss
    else:
        raise Exception('Unknown loss: ' + use_loss)
Project: DLPlaying    Author: Honlan
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss
Project: Siamese    Author: ascourge21
def vae_loss(x, x_decoded_mean):
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return xent_loss + kl_loss
Project: Siamese    Author: ascourge21
def vae_loss(x, x_decoded_mean):
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)
    return xent_loss + kl_loss
Project: latplan    Author: guicho271828
def _build(self,input_shape):
        _encoder = self.build_encoder(input_shape)
        _decoder = self.build_decoder(input_shape)
        self.gs = self.build_gs()
        self.gs2 = self.build_gs()

        x = Input(shape=input_shape)
        z = Sequential([flatten, *_encoder, self.gs])(x)
        y = Sequential(_decoder)(flatten(z))

        z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
        y2 = Sequential(_decoder)(flatten(z2))
        w2 = Sequential([*_encoder, self.gs2])(flatten(y2))

        data_dim = np.prod(input_shape)
        def rec(x, y):
            #return K.mean(K.binary_crossentropy(x,y))
            return bce(K.reshape(x,(K.shape(x)[0],data_dim,)),
                       K.reshape(y,(K.shape(x)[0],data_dim,)))

        def loss(x, y):
            return rec(x,y) + self.gs.loss()

        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
        self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)
        self.loss = loss
        self.metrics.append(rec)
        self.encoder     = Model(x, z)
        self.decoder     = Model(z2, y2)
        self.autoencoder = Model(x, y)
        self.autodecoder = Model(z2, w2)
        self.net = self.autoencoder
        y2_downsample = Sequential([
            Reshape((*input_shape,1)),
            MaxPooling2D((2,2))
            ])(y2)
        shape = K.int_shape(y2_downsample)[1:3]
        self.decoder_downsample = Model(z2, Reshape(shape)(y2_downsample))
        self.features = Model(x, Sequential([flatten, *_encoder[:-2]])(x))
        if 'lr_epoch' in self.parameters:
            ratio = self.parameters['lr_epoch']
        else:
            ratio = 0.5
        self.callbacks.append(
            LearningRateScheduler(lambda epoch: self.parameters['lr'] if epoch < self.parameters['full_epoch'] * ratio else self.parameters['lr']*0.1))
        self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
Project: latplan    Author: guicho271828
def _build(self,input_shape):

        dim = np.prod(input_shape) // 2
        print("{} latent bits".format(dim))
        M, N = self.parameters['M'], self.parameters['N']

        x = Input(shape=input_shape)

        _pre = tf.slice(x, [0,0],   [-1,dim])
        _suc = tf.slice(x, [0,dim], [-1,dim])

        pre = wrap(x,_pre,name="pre")
        suc = wrap(x,_suc,name="suc")

        print("encoder")
        _encoder = self.build_encoder([dim])
        action_logit = ConditionalSequential(_encoder, pre, axis=1)(suc)

        gs = self.build_gs()
        action = gs(action_logit)

        print("decoder")
        _decoder = self.build_decoder([dim])
        suc_reconstruction = ConditionalSequential(_decoder, pre, axis=1)(flatten(action))
        y = Concatenate(axis=1)([pre,suc_reconstruction])

        action2 = Input(shape=(N,M))
        pre2    = Input(shape=(dim,))
        suc_reconstruction2 = ConditionalSequential(_decoder, pre2, axis=1)(flatten(action2))
        y2 = Concatenate(axis=1)([pre2,suc_reconstruction2])

        def rec(x, y):
            return bce(K.reshape(x,(K.shape(x)[0],dim*2,)),
                       K.reshape(y,(K.shape(x)[0],dim*2,)))
        def loss(x, y):
            kl_loss = gs.loss()
            reconstruction_loss = rec(x, y)
            return reconstruction_loss + kl_loss

        self.metrics.append(rec)
        self.callbacks.append(LambdaCallback(on_epoch_end=gs.cool))
        self.custom_log_functions['tau'] = lambda: K.get_value(gs.tau)
        self.loss = loss
        self.encoder     = Model(x, [pre,action])
        self.decoder     = Model([pre2,action2], y2)

        self.net = Model(x, y)
        self.autoencoder = self.net