We extracted the following 35 code examples from open-source Python projects to illustrate how to use visdom.Visdom().
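Before the project examples, here is a minimal, self-contained sketch of the typical workflow (not taken from any of the projects below). It assumes a visdom server has already been started with `python -m visdom.server` and is reachable at the library's default address and port; the env name 'demo' and the window names 'greeting' and 'loss' are arbitrary placeholders, and `update='append'` is the current line-update API (older releases used `updateTrace`, as seen in one of the examples below).

import numpy as np
import visdom

# Connect to a running visdom server; the server/port values below are the defaults.
# `env` only groups the resulting windows in the web UI.
vis = visdom.Visdom(server='http://localhost', port=8097, env='demo')
assert vis.check_connection(), 'visdom server is not reachable'

# A text pane and an incrementally updated line plot.
vis.text('Hello, visdom!', win='greeting')
for step in range(10):
    vis.line(X=np.array([step]),
             Y=np.array([np.exp(-0.3 * step)]),
             win='loss',
             update='append' if step > 0 else None,
             opts=dict(title='toy loss curve', xlabel='step', ylabel='loss'))
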
def __init__(self, opt):
    # self.opt = opt
    self.display_id = opt.display_id
    self.use_html = opt.isTrain and not opt.no_html
    self.win_size = opt.display_winsize
    self.name = opt.name
    if self.display_id > 0:
        import visdom
        self.vis = visdom.Visdom()
    if self.use_html:
        self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        print('create web directory %s...' % self.web_dir)
        util.mkdirs([self.web_dir, self.img_dir])

# |visuals|: dictionary of images to display or save

def __init__(self, opt):
    # self.opt = opt
    self.display_id = opt.display_id
    self.use_html = opt.isTrain and not opt.no_html
    self.win_size = opt.display_winsize
    self.name = opt.name
    if self.display_id > 0:
        import visdom
        self.vis = visdom.Visdom(port=opt.display_port)
        self.display_single_pane_ncols = opt.display_single_pane_ncols
    if self.use_html:
        self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        print('create web directory %s...' % self.web_dir)
        util.mkdirs([self.web_dir, self.img_dir])
    self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
    with open(self.log_name, "a") as log_file:
        now = time.strftime("%c")
        log_file.write('================ Training Loss (%s) ================\n' % now)

# |visuals|: dictionary of images to display or save

def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097):
    '''
    Args:
        fields: Currently unused
        plot_type: The name of the plot type, in Visdom

    Examples:
        >>> # Image example
        >>> img_to_use = skimage.data.coffee().swapaxes(0,2).swapaxes(1,2)
        >>> image_logger = VisdomLogger('image')
        >>> image_logger.log(img_to_use)

        >>> # Histogram example
        >>> hist_data = np.random.rand(10000)
        >>> hist_logger = VisdomLogger('histogram', opts=dict(title='Random!', numbins=20))
        >>> hist_logger.log(hist_data)
    '''
    super(VisdomLogger, self).__init__(fields, win, env, opts, port)
    self.plot_type = plot_type
    self.chart = getattr(self.viz, plot_type)
    self.viz_logger = self._viz_prototype(self.chart)

def __init__(self, xp, visdom_opts, xlabel):
    super(Plotter, self).__init__()
    if visdom_opts is None:
        visdom_opts = {}
    assert visdom is not None, "visdom could not be imported"
    # visdom env is given by Experiment name unless specified
    if 'env' not in list(visdom_opts.keys()):
        visdom_opts['env'] = xp.name
    self.viz = visdom.Visdom(**visdom_opts)
    self.xlabel = None if xlabel is None else str(xlabel)
    self.windows = {}
    self.append = {}
    self.cache = defaultdict(Cache)

def __init__(self, opt):
    # self.opt = opt
    self.display_id = opt.display_id
    self.use_html = opt.isTrain and not opt.no_html
    self.win_size = opt.display_winsize
    self.name = opt.name
    self.opt = opt
    self.saved = False
    if self.display_id > 0:
        import visdom
        self.vis = visdom.Visdom(port=opt.display_port)
    if self.use_html:
        self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        print('create web directory %s...' % self.web_dir)
        util.mkdirs([self.web_dir, self.img_dir])
    self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
    with open(self.log_name, "a") as log_file:
        now = time.strftime("%c")
        log_file.write('================ Training Loss (%s) ================\n' % now)

def __init__(self, env=None, log_checkpoints=True, losses=('loss',),
             phases=('train', 'valid'), server='http://localhost',
             port=8097, max_y=None, **opts):
    if Visdom is None:
        warnings.warn("Couldn't import visdom: `pip install visdom`")
    else:
        self.viz = Visdom(server=server, port=port, env=env)
    self.legend = ['{}.{}'.format(p, l) for p in phases for l in losses]
    opts.update({'legend': self.legend})
    self.opts = opts
    self.env = env
    self.max_y = max_y
    self.log_checkpoints = log_checkpoints
    self.losses = set(losses)
    self.last = {p: {l: None for l in losses} for p in phases}
    self.pane = self._init_pane()

def __init__(self, env, cmdl):
    super(VisdomMonitor, self).__init__(env)

    self.freq = cmdl.report_freq  # in steps
    self.cmdl = cmdl

    if self.cmdl.display_plots:
        from visdom import Visdom
        self.vis = Visdom()
        self.plot = self.vis.line(
            Y=np.array([0]), X=np.array([0]),
            opts=dict(
                title=cmdl.label,
                caption="Episodic reward per 1200 steps.")
        )

    self.step_cnt = 0
    self.ep_cnt = -1
    self.ep_rw = []
    self.last_reported_ep = 0

def __init__(self, opt):
    # self.opt = opt
    self.display_id = opt.display_id
    self.use_html = not opt.no_html
    self.name = opt.name
    if self.display_id > 0:
        import visdom
        self.vis = visdom.Visdom()
    if self.use_html:
        self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        self.win_size = opt.display_winsize
        print('create web directory %s...' % self.web_dir)
        util.mkdirs([self.web_dir, self.img_dir])

# |visuals|: dictionary of images to display or save

def train(args):
    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path, is_transform=True, img_size=(args.img_rows, args.img_cols))
    n_classes = loader.n_classes
    trainloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4, shuffle=True)

    # Setup visdom for visualization
    if args.visdom:
        vis = visdom.Visdom()

        loss_window = vis.line(X=torch.zeros((1,)).cpu(),
                               Y=torch.zeros((1)).cpu(),
                               opts=dict(xlabel='minibatches',
                                         ylabel='Loss',
                                         title='Training Loss',
                                         legend=['Loss']))

    # Setup Model
    model = get_model(args.arch, n_classes)
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    model.cuda()

    optimizer = torch.optim.SGD(model.parameters(), lr=args.l_rate, momentum=0.99, weight_decay=5e-4)

    for epoch in range(args.n_epoch):
        for i, (images, labels) in enumerate(trainloader):
            images = Variable(images.cuda())
            labels = Variable(labels.cuda())

            optimizer.zero_grad()
            outputs = model(images)

            loss = cross_entropy2d(outputs, labels)

            loss.backward()
            optimizer.step()

            if args.visdom:
                vis.line(
                    X=torch.ones((1, 1)).cpu() * i,
                    Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
                    win=loss_window,
                    update='append')

            if (i+1) % 20 == 0:
                print("Epoch [%d/%d] Loss: %.4f" % (epoch+1, args.n_epoch, loss.data[0]))

        torch.save(model, "{}_{}_{}_{}.pkl".format(args.arch, args.dataset, args.feature_scale, epoch))

def __init__(self, fields=None, win=None, env=None, opts={}, port=8097):
    super(BaseVisdomLogger, self).__init__(fields)
    self.win = win
    self.env = env
    self.opts = opts
    self._viz = visdom.Visdom(port=port)

def _viz_prototype(self, vis_fn):
    ''' Outputs a function which will log the arguments to Visdom in an appropriate way.

    Args:
        vis_fn: A function, such as self.vis.image
    '''
    def _viz_logger(*args, **kwargs):
        self.win = vis_fn(*args,
                          win=self.win,
                          env=self.env,
                          opts=self.opts,
                          **kwargs)
    return _viz_logger

def __init__(self, envs=None, port=8097):
    super(VisdomSaver, self).__init__()
    self.envs = envs
    self.viz = visdom.Visdom(port=port)

def __init__(self, env):
    try:
        self.viz = visdom.Visdom()
    except:
        self.viz = None
        print("Enter Cmd: python -m visdom.server on shell")
    self.env = env
    self.win = None

def __init__(self, envs=None, interval=[(1, 'epoch')]):
    super(VisdomSaver, self).__init__(interval)
    self.envs = envs
    self.viz = visdom.Visdom()

def __init__(self, plot_type, fields, interval=None, win=None, env=None, opts={}):
    '''
    Args:
        plot_type: The name of the plot type, in Visdom
        fields: The fields to log. May either be the name of some stat (e.g. a
            ProgressMonitor will have `stat_name='progress'`), in which case all of
            the fields under `log_HOOK_fields` will be logged. Finer-grained control
            can be specified by using individual fields such as `progress.percent`.
        interval: A list of 2-tuples where each tuple contains (k, HOOK_TIME).
            k (int): The logger will be called every `k` HOOK_TIMEs.
            HOOK_TIME (string): The logger will be called at the given hook.

    Examples:
        >>> # Image example
        >>> img_to_use = skimage.data.coffee().swapaxes(0,2).swapaxes(1,2)
        >>> image_plug = ConstantMonitor(img_to_use, "image")
        >>> image_logger = VisdomLogger('image', ["image.data"], [(2, 'iteration')])

        >>> # Histogram example
        >>> hist_plug = ConstantMonitor(np.random.rand(10000), "random")
        >>> hist_logger = VisdomLogger('histogram', ["random.data"], [(2, 'iteration')],
        ...                            opts=dict(title='Random!', numbins=20))
    '''
    super(VisdomLogger, self).__init__(fields, interval, win, env, opts)
    self.plot_type = plot_type
    self.chart = getattr(self.viz, plot_type)
    self.viz_logger = self._viz_prototype(self.chart)

def __init__(self, env_name='main'):
    self.viz = Visdom()
    self.env = env_name
    self.plots = {}

def __init__(self, metrics, title, ylabel, xlabel='t', running_n=100):
    self.vis = visdom.Visdom()
    self.metrics = metrics
    self.opts = dict(
        fillarea=False,
        xlabel=xlabel,
        ylabel=ylabel,
        title=title,
    )
    self.win = None
    self.running_n = running_n
    self.vals = dict()
    self.cnts = dict()

def __init__(self, env='default', **kwargs):
    import visdom
    self.vis = visdom.Visdom(env=env, **kwargs)

    # x-axis index of each plotted quantity,
    # e.g. ('loss', 23) means the 23rd point of the 'loss' curve
    self.index = {}
    self.log_text = ''

def reinit(self, env='default', **kwargs):
    '''
    Re-initialize the visdom connection (e.g. to switch to another env).
    '''
    self.vis = visdom.Visdom(env=env, **kwargs)
    return self

def testGAN(trained_model_path=None, n_batches=40):
    weights = initialiseWeights()
    z_vector = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
    net_g_test = generator(z_vector, phase_train=True, reuse=True)
    vis = visdom.Visdom()

    sess = tf.Session()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, trained_model_path)

        # output generated chairs
        for i in range(n_batches):
            next_sigma = float(input())
            z_sample = np.random.normal(0, next_sigma, size=[batch_size, z_size]).astype(np.float32)
            g_objects = sess.run(net_g_test, feed_dict={z_vector: z_sample})
            id_ch = np.random.randint(0, batch_size, 4)
            for i in range(4):
                print(g_objects[id_ch[i]].max(), g_objects[id_ch[i]].min(), g_objects[id_ch[i]].shape)
                if g_objects[id_ch[i]].max() > 0.5:
                    d.plotVoxelVisdom(np.squeeze(g_objects[id_ch[i]] > 0.5), vis, '_'.join(map(str, [i])))

def initialize(self, state, subs):
    self.state = state
    self.subs = subs
    self.vis = visdom.Visdom(port=FLAGS.port, send=False)
    self.handlers = {
        'update': UpdateHandler,
        'save': SaveHandler,
        'close': CloseHandler,
        'win_exists': ExistsHandler,
    }

def visdom_reporting(metrics, tick, phase, tick_type=None):
    """This method will write its results to visdom

    :param metrics: A map of metrics to scores
    :param tick: The time (resolution defined by `tick_type`)
    :param phase: The phase of training (`Train`, `Valid`, `Test`)
    :param tick_type: The resolution of tick (`STEP`, `EPOCH`)
    :return:
    """
    # To use this:
    #   python -m visdom.server
    #   http://localhost:8097/
    global g_vis
    global g_vis_win
    if g_vis is None:
        import visdom
        print('Creating g_vis instance')
        g_vis = visdom.Visdom()

    for metric in metrics.keys():
        chart_id = '(%s) %s' % (phase, metric)
        if chart_id not in g_vis_win:
            print('Creating visualization for %s' % chart_id)
            g_vis_win[chart_id] = g_vis.line(X=np.array([0]),
                                             Y=np.array([metrics[metric]]),
                                             opts=dict(
                                                 fillarea=True,
                                                 legend=False,
                                                 xlabel='Time',
                                                 ylabel='Metric',
                                                 title=chart_id,
                                             ))
        else:
            g_vis.updateTrace(X=np.array([tick]),
                              Y=np.array([metrics[metric]]),
                              win=g_vis_win[chart_id])

def __init__(self, env, cmdl):
    super(EvaluationMonitor, self).__init__(env)

    self.freq = cmdl.eval_frequency  # in steps
    self.eval_steps = cmdl.eval_steps
    self.cmdl = cmdl

    if self.cmdl.display_plots:
        from visdom import Visdom
        self.vis = Visdom()
        self.plot = self.vis.line(
            Y=np.array([0]), X=np.array([0]),
            opts=dict(
                title=cmdl.label,
                caption="Episodic reward per %d steps." % self.eval_steps)
        )

    self.eval_cnt = 0
    self.crt_training_step = 0
    self.step_cnt = 0
    self.ep_cnt = 1
    self.total_rw = 0
    self.max_mean_rw = -1000

    no_of_evals = cmdl.training_steps // cmdl.eval_frequency \
        - (cmdl.eval_start - 1) // cmdl.eval_frequency
    self.eval_frame_idx = torch.LongTensor(no_of_evals).fill_(0)
    self.eval_rw_per_episode = torch.FloatTensor(no_of_evals).fill_(0)
    self.eval_rw_per_frame = torch.FloatTensor(no_of_evals).fill_(0)
    self.eval_eps_per_eval = torch.LongTensor(no_of_evals).fill_(0)

def __init__(self, port, title):
    self.keys = []
    self.values = {}
    self.viz = visdom.Visdom(port=port)
    self.iteration = 0
    self.title = title

def __init__(self, opt):
    self.vis = visdom.Visdom(port=opt.port)
    self.trainLossInit = True
    self.testLossInit = True
    self.meanVarInit = True

def main():
    """
    :return:
    """
    _visualiser = None
    if C.USE_VISDOM:
        _visualiser = Visdom(C.VISDOM_SERVER)

    _environment = neo.make(C.ENVIRONMENT,
                            connect_to_running=C.CONNECT_TO_RUNNING,
                            logging_directory=C.LOGGING_DIRECTORY,
                            debug_logging=C.USE_LOGGING)
    _environment.seed(C.RANDOM_SEED)

    if type(C.ARCH_PARAMS['input_size']) == str:
        C.ARCH_PARAMS['input_size'] = _environment.observation_space.shape
    print('observation dimensions: ', C.ARCH_PARAMS['input_size'])

    if type(C.ARCH_PARAMS['output_size']) == str:
        C.ARCH_PARAMS['output_size'] = _environment.action_space.n
    print('action dimensions: ', C.ARCH_PARAMS['output_size'])

    _model = C.ARCH(C.ARCH_PARAMS)
    if C.LOAD_PREVIOUS_MODEL_IF_AVAILABLE:
        _model.load_state_dict(load_model(C))

    _target_model = C.ARCH(C.ARCH_PARAMS)
    _target_model.load_state_dict(_model.state_dict())

    if C.USE_CUDA_IF_AVAILABLE:
        _model = _model.cuda()
        _target_model.cuda()

    _trained_model = training_loop(_model, _target_model, _environment, _visualiser)

    _environment.close()

    save_model(_trained_model, C)

def main():
    """
    :return:
    """
    _visualiser = None
    if C.USE_VISDOM:
        _visualiser = Visdom(C.VISDOM_SERVER)

    _environment = neo.make(C.ENVIRONMENT,
                            connect_to_running=C.CONNECT_TO_RUNNING,
                            logging_directory=C.LOGGING_DIRECTORY,
                            debug_logging=C.USE_LOGGING)
    _environment.seed(C.RANDOM_SEED)

    if type(C.ARCH_PARAMS['input_size']) == str:
        C.ARCH_PARAMS['input_size'] = _environment.observation_space.shape
    print('observation dimensions: ', C.ARCH_PARAMS['input_size'])

    if type(C.ARCH_PARAMS['output_size']) == str:
        C.ARCH_PARAMS['output_size'] = _environment.action_space.n
    print('action dimensions: ', C.ARCH_PARAMS['output_size'])

    _agent = PolicyAgent(C.ARCH_PARAMS['input_size'], C.ARCH_PARAMS['output_size'])

    _trained_model = training_loop(_agent, _environment)

    _environment.render(close=True)
    _environment.close()

    save_model(_trained_model, C)

def __init__(self, server, port, outdir):
    self.vis = visdom.Visdom(port=port, server=server)

    titles = ['VAE -- KL Div', 'VAE -- Weighted L2', 'VAE -- L2']
    self.vis_plot_vae = []
    for title in titles:
        self.vis_plot_vae.append(self.vis.line(
            X=np.array([0.], dtype='f'),
            Y=np.array([0.], dtype='f'),
            opts=dict(
                xlabel='Iteration',
                ylabel='Loss',
                title=title)))

    self.vis_plot_test_vae = self.vis.line(
        X=np.array([0.], dtype='f'),
        Y=np.array([0.], dtype='f'),
        opts=dict(
            xlabel='Iteration',
            ylabel='Test Loss',
            title='VAE Test Loss'))

    self.vis_plot_mdn = []
    titles = ['MDN Loss', 'MDN -- L2']
    for title in titles:
        self.vis_plot_mdn.append(self.vis.line(
            X=np.array([0.], dtype='f'),
            Y=np.array([0.], dtype='f'),
            opts=dict(
                xlabel='Iteration',
                ylabel='Loss',
                title=title)))

    self.fp_vae = open('%s/log_vae.txt' % outdir, 'w')
    self.fp_vae.write('Iteration; KLDiv; WeightedL2; L2;\n')
    self.fp_vae.flush()

    self.fp_test_vae = open('%s/log_test_vae.txt' % outdir, 'w')
    self.fp_test_vae.write('Iteration; Loss;\n')
    self.fp_test_vae.flush()

    self.fp_mdn = open('%s/log_mdn.txt' % outdir, 'w')
    self.fp_mdn.write('Iteration; Loss; L2 Loss;\n')
    self.fp_mdn.flush()

def __init__(self):
    self.verbose = 0             # 0(warning) | 1(info) | 2(debug)

    # training signature
    self.machine = "daim"        # "machine_id"
    self.timestamp = "17080800"  # "yymmdd##"
    # training configuration
    self.mode = 1                # 1(train) | 2(test model_file)
    self.config = 1

    self.seed = 1
    self.render = False          # whether render the window from the original envs or not
    self.visualize = True        # whether do online plotting and stuff or not
    self.save_best = False       # save model w/ highest reward if True, otherwise always save the latest model

    self.agent_type, self.env_type, self.game, self.circuit_type = CONFIGS[self.config]

    self.use_cuda = torch.cuda.is_available()
    self.dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

    # prefix for model/log/visdom
    self.refs = self.machine + "_" + self.timestamp  # NOTE: using this as env for visdom
    self.root_dir = os.getcwd()

    # model files
    # NOTE: will save the current model to model_name
    self.model_name = self.root_dir + "/models/" + self.refs + ".pth"
    # NOTE: will load pretrained model_file if not None
    self.model_file = None  # self.root_dir + "/models/{TODO:FILL_IN_PRETAINED_MODEL_FILE}.pth"
    if self.mode == 2:
        self.model_file = self.model_name  # NOTE: so only need to change self.mode to 2 to test the current training
        assert self.model_file is not None, "Pre-Trained model is None, Testing aborted!!!"
        self.refs = self.refs + "_test"  # NOTE: using this as env for visdom for testing, to avoid accidentally redraw on the training plots

    # logging configs
    self.log_name = self.root_dir + "/logs/" + self.refs + ".log"
    self.logger = loggerConfig(self.log_name, self.verbose)
    self.logger.warning("<===================================>")

    if self.visualize:
        self.vis = visdom.Visdom()
        self.logger.warning("bash$: python -m visdom.server")          # activate visdom server on bash
        self.logger.warning("http://localhost:8097/env/" + self.refs)  # open this address on browser

def main():
    """
    :return:
    """
    _visualiser = None
    if C.USE_VISDOM:
        _visualiser = Visdom(C.VISDOM_SERVER)

    _environment = neo.make(C.ENVIRONMENT,
                            connect_to_running=C.CONNECT_TO_RUNNING,
                            logging_directory=C.LOGGING_DIRECTORY,
                            debug_logging=C.USE_LOGGING)
    _environment.seed(C.RANDOM_SEED)

    if type(C.ARCH_PARAMS['input_size']) == str:
        C.ARCH_PARAMS['input_size'] = _environment.observation_space.shape
    print('observation dimensions: ', C.ARCH_PARAMS['input_size'])

    if type(C.ARCH_PARAMS['output_size']) == str:
        C.ARCH_PARAMS['output_size'] = _environment.action_space.n
    print('action dimensions: ', C.ARCH_PARAMS['output_size'])

    _model = C.ARCH(C.ARCH_PARAMS)
    if C.LOAD_PREVIOUS_MODEL_IF_AVAILABLE:
        _model.load_state_dict(load_model(C))

    _target_model = C.ARCH(C.ARCH_PARAMS)
    _target_model.load_state_dict(_model.state_dict())

    if C.USE_CUDA_IF_AVAILABLE:
        _model = _model.cuda()
        _target_model.cuda()

    _trained_model = training_loop(_model, _target_model, _environment, _visualiser)

    # _environment.render(close=True)
    _environment.close()

    save_model(_trained_model, C)