The following 6 code examples, extracted from open-source Python projects, illustrate how to use models.Discriminator(). The examples span several frameworks (TensorFlow, Chainer, and PyTorch), and each project defines its own Discriminator model.
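For orientation only: none of the excerpts below include the Discriminator definition itself, and each project uses a different signature. A minimal, hypothetical PyTorch-style discriminator, assuming a plain convolutional binary classifier, might look like this:

    # Hypothetical sketch, not taken from any of the projects below:
    # a small convolutional classifier that scores inputs as real/fake.
    import torch.nn as nn

    class Discriminator(nn.Module):
        def __init__(self, in_channels=3, ndf=64):
            super(Discriminator, self).__init__()
            self.net = nn.Sequential(
                nn.Conv2d(in_channels, ndf, 4, stride=2, padding=1),
                nn.LeakyReLU(0.2),
                nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1),
                nn.LeakyReLU(0.2),
                nn.Conv2d(ndf * 2, 1, 4, stride=1, padding=0),
                nn.Sigmoid(),  # probability that the input is real
            )

        def forward(self, x):
            return self.net(x)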
Example 1

def build_model(self):
    Gen = GeneratorTypes[self.gan_type]
    config = self.config
    self.gen = Gen(config.batch_size, config.gen_hidden_size, config.gen_z_dim)

    # Build the discriminator twice with shared weights:
    # once on real data, once on generated samples.
    with tf.variable_scope('Disc') as scope:
        self.D1 = Discriminator(self.data.X, config.disc_hidden_size)
        scope.reuse_variables()
        self.D2 = Discriminator(self.gen.X, config.disc_hidden_size)
        d_var = tf.contrib.framework.get_variables(scope)

    # Standard GAN losses: the discriminator pushes real toward 1 and
    # fake toward 0; the generator pushes its fakes toward 1.
    d_loss_real = tf.reduce_mean(sxe(self.D1, 1))
    d_loss_fake = tf.reduce_mean(sxe(self.D2, 0))
    self.loss_d = d_loss_real + d_loss_fake
    self.loss_g = tf.reduce_mean(sxe(self.D2, 1))

    optimizer = tf.train.AdamOptimizer
    g_optimizer = optimizer(self.config.lr_gen)
    d_optimizer = optimizer(self.config.lr_disc)
    self.opt_d = d_optimizer.minimize(self.loss_d, var_list=d_var)
    self.opt_g = g_optimizer.minimize(self.loss_g, var_list=self.gen.tr_var,
                                      global_step=self.gen.step)

    # Run both optimizer updates as a single training op.
    with tf.control_dependencies([self.inc_step]):
        self.train_op = tf.group(self.opt_d, self.opt_g)
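Note that sxe is a project-specific helper not shown in this excerpt. From its call sites (logits plus a constant 0/1 target) it presumably wraps tf.nn.sigmoid_cross_entropy_with_logits; a hypothetical definition consistent with that usage:

    import tensorflow as tf

    def sxe(logits, label):
        # Hypothetical: sigmoid cross-entropy against a constant 0/1
        # target broadcast to the shape of the logits (TF1-style API).
        return tf.nn.sigmoid_cross_entropy_with_logits(
            logits=logits, labels=tf.ones_like(logits) * label)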
Example 2

def training_step(args, train_iter, noise_iter, opt_generator, opt_discriminator):
    # generate an image
    noise_samples = get_batch(noise_iter, args.device_id)
    generated = opt_generator.target(noise_samples)

    # get a batch of the dataset
    train_samples = get_batch(train_iter, args.device_id)

    # update the discriminator (least-squares GAN loss)
    Dreal = opt_discriminator.target(train_samples)
    Dgen = opt_discriminator.target(generated)
    Dloss = 0.5 * (F.sum((Dreal - 1.0)**2) + F.sum(Dgen**2)) / args.batchsize
    update_model(opt_discriminator, Dloss)

    # update the generator
    noise_samples = get_batch(noise_iter, args.device_id)
    generated = opt_generator.target(noise_samples)
    Gloss = 0.5 * F.sum((opt_discriminator.target(generated) - 1.0)**2) / args.batchsize
    update_model(opt_generator, Gloss)

    if train_iter.is_new_epoch:
        print("[{}] Discriminator loss: {} Generator loss: {}".format(
            train_iter.epoch, Dloss.data, Gloss.data))
        print_sample(os.path.join(args.output, "epoch_{}.png".format(train_iter.epoch)),
                     noise_samples, opt_generator)
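get_batch, update_model, and print_sample are project helpers not shown here (F is the usual chainer.functions alias). Minimal sketches of the first two, assuming standard Chainer conventions:

    import chainer

    def update_model(optimizer, loss):
        # Hypothetical sketch: the standard Chainer manual update cycle.
        optimizer.target.cleargrads()  # zero stale gradients on the model
        loss.backward()                # backprop the current loss
        optimizer.update()             # apply one optimizer step

    def get_batch(iterator, device_id):
        # Hypothetical sketch: fetch the next mini-batch and move it to
        # the device (GPU when device_id >= 0, CPU otherwise).
        return chainer.dataset.concat_examples(next(iterator), device=device_id)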
Example 3

def train_target_cnn(source, target, source_cnn, target_cnn, args, epochs=10000):
    print(":: training encoder with target domain")
    discriminator = Discriminator()

    if args.device >= 0:
        source_cnn.to_gpu()
        target_cnn.to_gpu()
        discriminator.to_gpu()

    # target_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    target_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # target_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    target_optimizer.setup(target_cnn.encoder)
    target_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    # discriminator_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    discriminator_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # discriminator_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    discriminator_optimizer.setup(discriminator)
    discriminator_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    source_train_iterator, source_test_iterator = data2iterator(
        source, args.batchsize, multiprocess=False)
    target_train_iterator, target_test_iterator = data2iterator(
        target, args.batchsize, multiprocess=False)

    updater = ADDAUpdater(source_train_iterator, target_train_iterator,
                          source_cnn, target_optimizer, discriminator_optimizer, args)

    trainer = chainer.training.Trainer(updater, (epochs, 'epoch'), out=args.output)

    trainer.extend(extensions.Evaluator(target_test_iterator, target_cnn, device=args.device))
    # trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'), trigger=(10, "epoch"))
    trainer.extend(extensions.snapshot_object(target_cnn, "target_model_epoch_{.updater.epoch}"),
                   trigger=(epochs, "epoch"))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.LogReport(trigger=(1, "epoch")))
    trainer.extend(extensions.PrintReport(
        ["epoch", "loss/discrim", "loss/encoder",
         "validation/main/loss", "validation/main/accuracy", "elapsed_time"]))

    trainer.run()
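data2iterator and ADDAUpdater are defined elsewhere in the project. The updater carries the ADDA-specific adversarial logic and is too project-specific to reconstruct here, but a plausible sketch of data2iterator, assuming each dataset argument is a (train, test) pair:

    import chainer

    def data2iterator(data, batchsize, multiprocess=False):
        # Hypothetical sketch: wrap a (train, test) dataset pair in
        # Chainer iterators; the test iterator runs through once only.
        train, test = data
        if multiprocess:
            make = chainer.iterators.MultiprocessIterator
        else:
            make = chainer.iterators.SerialIterator
        train_iterator = make(train, batchsize)
        test_iterator = make(test, batchsize, repeat=False, shuffle=False)
        return train_iterator, test_iterator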
Example 4

def __init__(self, nic, noc, ngf, ndf, beta=0.5, lamb=100, lr=1e-3,
             cuda=True, crayon=False):
    """
    Args:
        nic: Number of input channels
        noc: Number of output channels
        ngf: Number of generator filters
        ndf: Number of discriminator filters
        beta: Beta1 coefficient for the Adam optimizers
        lamb: Weight on L1 term in objective
        lr: Learning rate
        cuda: Run on the GPU when True
        crayon: Log losses to a pycrayon (TensorBoard) server when True
    """
    self.cuda = cuda
    self.start_epoch = 0

    self.crayon = crayon
    if crayon:
        self.cc = CrayonClient(hostname="localhost", port=8889)
        try:
            self.logger = self.cc.create_experiment('pix2pix')
        except Exception:
            # Experiment already exists from a previous run: recreate it.
            self.cc.remove_experiment('pix2pix')
            self.logger = self.cc.create_experiment('pix2pix')

    self.gen = self.cudafy(Generator(nic, noc, ngf))
    self.dis = self.cudafy(Discriminator(nic, noc, ndf))

    # Optimizer for the generator
    self.gen_optim = self.cudafy(optim.Adam(
        self.gen.parameters(), lr=lr, betas=(beta, 0.999)))

    # Optimizer for the discriminator
    self.dis_optim = self.cudafy(optim.Adam(
        self.dis.parameters(), lr=lr, betas=(beta, 0.999)))

    # Loss functions
    self.criterion_bce = nn.BCELoss()
    self.criterion_mse = nn.MSELoss()
    self.criterion_l1 = nn.L1Loss()

    self.lamb = lamb
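cudafy is a method of the surrounding class and is not shown in the excerpt. Since it is applied to optimizers as well as modules above, it presumably guards the .cuda() call; a minimal sketch under that assumption:

    import torch

    def cudafy(self, obj):
        # Hypothetical sketch: move obj to the GPU only when CUDA is
        # enabled and the object supports it (optimizers have no .cuda
        # and are passed through unchanged).
        if self.cuda and torch.cuda.is_available() and hasattr(obj, 'cuda'):
            return obj.cuda()
        return obj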
Example 5

def main(args):
    # if we enabled GPU mode, set the GPU to use
    if args.device_id >= 0:
        chainer.cuda.get_device(args.device_id).use()

    # Load dataset (we will only use the training set)
    if args.mnist:
        train, test = chainer.datasets.get_mnist(withlabel=False, scale=2, ndim=3)
        generator = GeneratorMNIST()
    else:
        train, test = chainer.datasets.get_cifar10(withlabel=False, scale=2, ndim=3)
        generator = GeneratorCIFAR()

    # subtracting 1, after scaling to 2 (done above), puts all pixels in the range [-1, 1]
    train -= 1.0

    num_training_samples = train.shape[0]

    # make data iterators
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # build optimizers and models
    opt_generator = chainer.optimizers.RMSprop(lr=args.learning_rate)
    opt_discriminator = chainer.optimizers.RMSprop(lr=args.learning_rate)

    opt_generator.setup(generator)
    opt_discriminator.setup(Discriminator())

    # make a random noise iterator (uniform noise between -1 and 1)
    noise_iter = RandomNoiseIterator(UniformNoiseGenerator(-1, 1, args.num_z),
                                     args.batchsize)

    # send to GPU
    if args.device_id >= 0:
        opt_generator.target.to_gpu()
        opt_discriminator.target.to_gpu()

    # make the output folder
    if not os.path.exists(args.output):
        os.makedirs(args.output, exist_ok=True)

    print("Starting training loop...")

    while train_iter.epoch < args.num_epochs:
        training_step(args, train_iter, noise_iter, opt_generator, opt_discriminator)

    print("Finished training.")
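RandomNoiseIterator and UniformNoiseGenerator also come from the project and are not shown. Plausible sketches, assuming the generator produces float32 batches of uniform noise vectors and the iterator simply never ends:

    import numpy as np
    from chainer.dataset import Iterator

    class UniformNoiseGenerator(object):
        # Hypothetical sketch of the noise source used above.
        def __init__(self, low, high, size):
            self.low, self.high, self.size = low, high, size

        def __call__(self, batchsize):
            # One batch of uniform noise vectors, float32 for Chainer.
            return np.random.uniform(
                self.low, self.high, (batchsize, self.size)).astype(np.float32)

    class RandomNoiseIterator(Iterator):
        # Hypothetical sketch: an endless iterator over fresh noise batches.
        def __init__(self, noise_generator, batchsize):
            self.noise_generator = noise_generator
            self.batchsize = batchsize

        def __next__(self):
            return self.noise_generator(self.batchsize)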
Example 6

def train(self, loader, c_epoch):
    self.dis.train()
    self.gen.train()
    self.reset_gradients()

    max_idx = len(loader)
    for idx, features in enumerate(tqdm(loader)):
        orig_x = Variable(self.cudafy(features[0]))
        orig_y = Variable(self.cudafy(features[1]))

        # --- Discriminator --- #
        # Train with real pairs
        self.dis.volatile = False
        dis_real = self.dis(torch.cat((orig_x, orig_y), 1))
        real_labels = Variable(self.cudafy(torch.ones(dis_real.size())))
        dis_real_loss = self.criterion_bce(dis_real, real_labels)

        # Train with fake pairs (detached, so no gradients reach the generator)
        gen_y = self.gen(orig_x)
        dis_fake = self.dis(torch.cat((orig_x, gen_y.detach()), 1))
        fake_labels = Variable(self.cudafy(torch.zeros(dis_fake.size())))
        dis_fake_loss = self.criterion_bce(dis_fake, fake_labels)

        # Update weights
        dis_loss = dis_real_loss + dis_fake_loss
        dis_loss.backward()
        self.dis_optim.step()
        self.reset_gradients()

        # --- Generator --- #
        # The generator is rewarded for making the discriminator label
        # its output as real, plus an L1 term toward the target image.
        self.dis.volatile = True
        dis_real = self.dis(torch.cat((orig_x, gen_y), 1))
        real_labels = Variable(self.cudafy(torch.ones(dis_real.size())))

        gen_loss = self.criterion_bce(dis_real, real_labels) + \
            self.lamb * self.criterion_l1(gen_y, orig_y)
        gen_loss.backward()
        self.gen_optim.step()
        # Clear gradients left over from the generator pass so they do
        # not leak into the next iteration's discriminator update.
        self.reset_gradients()

        # Pycrayon logging, if enabled
        if self.crayon:
            self.logger.add_scalar_value('pix2pix_gen_loss', gen_loss.data[0])
            self.logger.add_scalar_value('pix2pix_dis_loss', dis_loss.data[0])

        if idx % 50 == 0:
            tqdm.write('Epoch: {} [{}/{}]\t'
                       'D Loss: {:.4f}\t'
                       'G Loss: {:.4f}'.format(
                           c_epoch, idx, max_idx,
                           dis_loss.data[0], gen_loss.data[0]))
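reset_gradients is a method of the same class and is not shown. It presumably clears the gradients accumulated on both models between the discriminator and generator updates; a minimal sketch:

    def reset_gradients(self):
        # Hypothetical sketch: zero the gradients held by both optimizers.
        self.gen_optim.zero_grad()
        self.dis_optim.zero_grad()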