The following code examples, extracted from open-source Python projects, illustrate how to use IPython.display.clear_output().
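Before the project-specific examples, here is a minimal sketch of the pattern most of them share (this snippet is not taken from any of the projects below): redraw a matplotlib figure inside a loop and call clear_output(wait=True) so each frame replaces the previous output of the notebook cell instead of stacking below it.

import time
import matplotlib.pyplot as plt
from IPython import display

for step in range(10):
    plt.clf()
    plt.plot(range(step + 1))        # redraw the figure for the current step
    display.clear_output(wait=True)  # wait=True defers clearing until new output arrives, reducing flicker
    display.display(plt.gcf())       # push the updated figure to the cell
    time.sleep(0.1)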
def displayResult(self):
    fig = plt.figure(101)
    plt.subplot(221)
    plt.imshow(np.abs(self.reconstruction), origin='lower')
    plt.draw()
    plt.title('Reconstruction Magnitude')

    plt.subplot(222)
    plt.imshow(np.angle(self.reconstruction), origin='lower')
    plt.draw()
    plt.title('Reconstruction Phase')

    plt.subplot(223)
    plt.imshow(np.abs(self.aperture), origin='lower')
    plt.title("Aperture Magnitude")
    plt.draw()

    plt.subplot(224)
    plt.imshow(np.angle(self.aperture), origin='lower')
    plt.title("Aperture Phase")
    plt.draw()

    fig.canvas.draw()
    #fig.tight_layout()
    # display.display(fig)
    # display.clear_output(wait=True)
    # time.sleep(.00001)
def drawState(fruitRow, fruitColumn, basket):
    global gridSize
    # column is the x axis
    fruitX = fruitColumn
    # Invert matrix style points to coordinates
    fruitY = (gridSize - fruitRow + 1)
    statusTitle = "Wins: " + str(winCount) + " Losses: " + str(loseCount) + " TotalGame: " + str(numberOfGames)
    axis.set_title(statusTitle, fontsize=30)
    for p in [
        patches.Rectangle(
            ((ground - 1), (ground)), 11, 10,
            facecolor="#000000"  # Black (ground)
        ),
        patches.Rectangle(
            (basket - 1, ground), 2, 0.5,
            facecolor="#FF0000"  # Red (basket)
        ),
        patches.Rectangle(
            (fruitX - 0.5, fruitY - 0.5), 1, 1,
            facecolor="#FF0000"  # Red (fruit)
        ),
    ]:
        axis.add_patch(p)
    display.clear_output(wait=True)
    display.display(pl.gcf())
def plot_dynamics(vecs, vec2, n=5, MAX_ITER=100):
    """
    Plot how the distances between pairs change with each n iterations
    of the optimization method.
    """
    for i in xrange(MAX_ITER):
        if (i % n == 0):
            plt.clf()
            plt.xlim([-0.01, 1.2])
            plt.plot(vecs[i], vec2, 'ro', color='blue')
            plt.grid()
            display.clear_output(wait=True)
            display.display(plt.gcf())
    plt.clf()
    fig, ax = plt.subplots(1, 2, figsize=(13, 4))
    for i in xrange(2):
        ax[i].set_xlim([-0.01, 1.2])
        ax[i].plot(vecs[-i], vec2, 'ro', color='blue')
        ax[i].set_title(str(i * MAX_ITER) + ' iterations')
        ax[i].set_xlabel('Cosine distance', fontsize=14)
        ax[i].set_ylabel('Assessor grade', fontsize=14)
        ax[i].grid()
def plot(self, show=True):
    """Assumes nothing in self.settings is None (i.e., there are no keys
    in settings such that settings[key] == None)."""
    kwargs = {e['encoding']: _get_plot_command(e) for e in self.settings['encodings']}
    mark_opts = {k: v for k, v in self.settings['mark'].items()}
    mark = mark_opts.pop('mark')
    Chart_mark = getattr(altair.Chart(self.df), mark)
    self.chart = Chart_mark(**mark_opts).encode(**kwargs)
    if show and self.show:
        clear_output()
        display(self.chart)
def _add_dim(self, button):
    i = len(self.controller.children) - 1
    encoding = _get_encodings()[i]
    shelf = self._create_shelf(i=i)
    kids = self.controller.children
    teens = list(kids)[:-1] + [shelf] + [list(kids)[-1]]
    self.controller.children = teens
    # clear_output()
    # display(self.controller)
    self.settings['encodings'] += [{'encoding': encoding}]
    self.plot(self.settings)
def _update_interactive(displays, options):
    displays = displays or []
    if options.get('interactive'):
        from IPython import display
        display.clear_output(wait=True)
        displays.insert(0, plt.gcf())
        display.display(*displays)
def do_training(solver, step_size, nb_step=0):
    solver.step(step_size)
    heat_map = solver.test_nets[0].blobs["score-final"].data[0, :, :, :].transpose(1, 2, 0)
    heat_map_normalize = normalize_heatmap(heat_map)
    # heat_map_normalize = heat_map
    minimum = np.min(heat_map[:, :, 0])
    nb_subplot = 4
    plt.figure(figsize=(10, 10))
    image_test = solver.test_nets[0].blobs["data"].data[0, 0, :, :]
    image_test_label = solver.test_nets[0].blobs["label"].data[0, 0, :, :]

    plt.subplot(1, nb_subplot, 1)
    plt.imshow(image_test)
    plt.title("image test")

    plt.subplot(1, nb_subplot, 2)
    plt.imshow(image_test_label)
    plt.title("Label of the test image")

    plt.subplot(1, nb_subplot, 3)
    plt.imshow(np.append(heat_map_normalize,
                         np.zeros((heat_map_normalize.shape[0], heat_map_normalize.shape[1], 1)), 2))
    plt.title("Heat map")
    # plt.subplot(1, nb_subplot, 4)
    # plt.imshow(np.append(heat_map_normalize, np.zeros(heat_map_normalize.shape[0], heat_map_normalize.shape[1], 1), 3))
    # plt.title("score")

    plt.subplot(1, nb_subplot, 4)
    plt.imshow(solver.test_nets[0].blobs["score-final"].data[0, :, :, :].transpose(1, 2, 0).argmax(2),
               vmin=0, vmax=1)
    plt.title("After : " + str(nb_step + step_size) + " iterations")

    display.display(plt.gcf())
    display.clear_output(wait=True)
    time.sleep(1)

###
# save_image : place where to save the image
###
def clear_output():
    pass
def plot_durations():
    plt.figure(2)
    plt.clf()
    durations_t = torch.FloatTensor(episode_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())

    plt.pause(0.001)  # pause a bit so that plots are updated
    if is_ipython:
        display.clear_output(wait=True)
        display.display(plt.gcf())

######################################################################
# Training loop
# ^^^^^^^^^^^^^
#
# Finally, the code for training our model.
#
# Here, you can find an ``optimize_model`` function that performs a
# single step of the optimization. It first samples a batch, concatenates
# all the tensors into a single one, computes :math:`Q(s_t, a_t)` and
# :math:`V(s_{t+1}) = \max_a Q(s_{t+1}, a)`, and combines them into our
# loss. By definition we set :math:`V(s) = 0` if :math:`s` is a terminal
# state.
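The comment block above summarizes the DQN optimization step that this plotting helper accompanies. A rough, hypothetical sketch of such an ``optimize_model`` function is given below; the names (memory, Transition, policy_net, target_net, optimizer, BATCH_SIZE, GAMMA) are assumptions for illustration and are not defined in the example above.

# Hypothetical sketch of the optimization step described in the comment above.
# Assumes a replay buffer `memory`, a namedtuple `Transition(state, action, next_state, reward)`,
# networks `policy_net`/`target_net`, an `optimizer`, and constants BATCH_SIZE, GAMMA.
import torch
import torch.nn.functional as F

def optimize_model():
    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    batch = Transition(*zip(*transitions))  # concatenate the sampled transitions field-wise
    non_final_mask = torch.tensor([s is not None for s in batch.next_state], dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)
    # Q(s_t, a_t): value of the action that was actually taken
    state_action_values = policy_net(state_batch).gather(1, action_batch)
    # V(s_{t+1}) = max_a Q(s_{t+1}, a); zero for terminal states by definition
    next_state_values = torch.zeros(BATCH_SIZE)
    next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()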
def find_threshold(Lsmall=3, Llarge=5, p=0.8, high=1, low=0.79, samples=1000, logfile=None):
    '''Use binary search (between two sizes of codes) to find the threshold for the toric code.'''
    ps = []
    samples_small = []
    samples_large = []

    def step(p):
        ps.append(p)
        samples_small.append(stat_estimator(sample(Lsmall, p, samples=samples)))
        samples_large.append(stat_estimator(sample(Llarge, p, samples=samples)))

    def intersection(xs, y1s, y2s, log=True):
        d = np.linalg.det
        if log:
            y1s, y2s = np.log([y1s, y2s])
        ones = np.array([1., 1.])
        dx = d([xs, ones])
        dy1 = d([y1s, ones])
        dy2 = d([y2s, ones])
        x = (d([xs, y1s]) - d([xs, y2s])) / (dy2 - dy1)
        y = (d([xs, y1s]) * dy2 - d([xs, y2s]) * dy1) / dx / (dy2 - dy1)
        if log:
            y = np.exp(y)
        return x, y

    step(p)
    if logfile:
        with open(logfile, 'w') as f:
            ss = samples_small[0]
            sl = samples_large[0]
            f.write(str((np.vstack([ps,
                                    [ss[0]], [ss[1] - ss[0]], [ss[2] - ss[0]],
                                    [sl[0]], [sl[1] - sl[0]], [sl[2] - sl[0]]]),
                         (ss[0] + sl[0]) / 2,
                         ps[0])))
    else:
        f = plt.figure()
        s = f.add_subplot(1, 1, 1)
    while not (samples_large[-1][1] < samples_small[-1][0] < samples_large[-1][2]
               or samples_small[-1][1] < samples_large[-1][0] < samples_small[-1][2]):
        if samples_small[-1][0] < samples_large[-1][0]:
            p, high = low + (ps[-1] - low) / 2, p
        else:
            p, low = ps[-1] + (high - ps[-1]) / 2, p
        step(p)
        _argsort = np.argsort(ps)
        _ps = np.array(ps)[_argsort]
        _ss = np.array(samples_small)
        _small = _ss[_argsort, 0]
        _small_err = np.abs(_ss[_argsort, 1:].T - _small)
        _sl = np.array(samples_large)
        _large = _sl[_argsort, 0]
        _large_err = np.abs(_sl[_argsort, 1:].T - _large)
        ix, iy = intersection(ps[-2:],
                              [_[0] for _ in samples_small[-2:]],
                              [_[0] for _ in samples_large[-2:]])
        if logfile:
            with open(logfile, 'w') as f:
                f.write(str((np.vstack([_ps, _small, _small_err, _large, _large_err]), iy, ix)))
        else:
            s.clear()
            s.errorbar(_ps, _small, yerr=_small_err, alpha=0.6, label=str(Lsmall))
            s.errorbar(_ps, _large, yerr=_large_err, alpha=0.6, label=str(Llarge))
            s.plot([ix], [iy], 'ro', alpha=0.5)
            s.set_title('intersection at p = %f' % ix)
            s.set_yscale('log')
            display.clear_output(wait=True)
            display.display(f)
    return ps, samples_small, samples_large
def update_stats(self, stats, tid=0):
    self.e += 1
    # update stats store
    for k in stats.keys():
        self.stats[k].add(stats[k])
    # only plot from thread 0
    if self.stats == None or tid > 0:
        return
    # plot if its time
    if self.e >= self.next_plot:
        self.next_plot = self.e + self.stats_rate
        if self.ipy_clear:
            from IPython import display
            display.clear_output(wait=True)
        fig = plt.figure(1)
        fig.canvas.set_window_title("A3C Training Stats for %s" % (self.experiment))
        plt.clf()

        plt.subplot(2, 2, 1)
        self.stats["tr"].plot()
        plt.title("Total Reward per Episode")
        plt.xlabel("Episode")
        plt.ylabel("Total Reward")
        plt.legend(loc=2)

        plt.subplot(2, 2, 2)
        self.stats["ft"].plot()
        plt.title("Finishing Time per Episode")
        plt.xlabel("Episode")
        plt.ylabel("Finishing Time")
        plt.legend(loc=2)

        plt.subplot(2, 2, 3)
        self.stats["maxvf"].plot2(fill_col='lightblue', label='Avg Max VF')
        self.stats["minvf"].plot2(fill_col='slategrey', label='Avg Min VF')
        plt.title("Value Function Outputs")
        plt.xlabel("Episode")
        plt.ylabel("Value Fn")
        plt.legend(loc=2)

        ax = plt.subplot(2, 2, 4)
        self.stats["cost"].plot2()
        plt.title("Training Loss")
        plt.xlabel("Training Epoch")
        plt.ylabel("Loss")
        try:
            # ax.set_yscale("log", nonposy='clip')
            plt.tight_layout()
        except:
            pass
        plt.show(block=False)
        plt.draw()
        plt.pause(0.001)
def update_stats(self, stats, tid=0):
    self.e += 1
    # update stats store
    for k in stats.keys():
        self.stats[k].add(stats[k])
    # only plot from thread 0
    if self.stats == None or tid > 0:
        return
    # plot if its time
    if self.e >= self.next_plot:
        self.next_plot = self.e + self.stats_rate
        if self.ipy_clear:
            from IPython import display
            display.clear_output(wait=True)
        fig = plt.figure(1)
        fig.canvas.set_window_title("DQN Training Stats for %s" % (self.experiment))
        plt.clf()

        plt.subplot(2, 2, 1)
        self.stats["tr"].plot()
        plt.title("Total Reward per Episode")
        plt.xlabel("Episode")
        plt.ylabel("Total Reward")
        plt.legend(loc=2)

        plt.subplot(2, 2, 2)
        self.stats["ft"].plot()
        plt.title("Finishing Time per Episode")
        plt.xlabel("Episode")
        plt.ylabel("Finishing Time")
        plt.legend(loc=2)

        plt.subplot(2, 2, 3)
        self.stats["maxvf"].plot2(fill_col='lightblue', label='Avg Max VF')
        self.stats["minvf"].plot2(fill_col='slategrey', label='Avg Min VF')
        plt.title("Value Function Outputs")
        plt.xlabel("Episode")
        plt.ylabel("Value Fn")
        plt.legend(loc=2)

        ax = plt.subplot(2, 2, 4)
        self.stats["cost"].plot2()
        plt.title("Training Loss")
        plt.xlabel("Training Epoch")
        plt.ylabel("Loss")
        try:
            # ax.set_yscale("log", nonposy='clip')
            plt.tight_layout()
        except:
            pass
        plt.show(block=False)
        plt.draw()
        plt.pause(0.001)
def run(self, train=True, movie=False, enableLog=False):
    self.env.reset(0, 0)
    self.reset_seq()
    total_reward = 0

    for i in range(300):
        # keep a copy of the current state sequence before acting
        old_seq = self.seq.copy()

        # let the agent choose an action
        action = self.agent.get_action(old_seq, train)

        # apply the action to the environment and collect the reward
        self.env.update_state(action)
        reward = self.env.get_reward()
        total_reward += reward

        # get the new state and append it to the sequence
        state = self.env.get_state()
        self.push_seq(state)
        new_seq = self.seq.copy()

        # store the transition in the agent's local experience
        self.agent.experience_local(old_seq, action, reward, new_seq)

        if enableLog:
            self.log.append(np.hstack([old_seq[0], action, reward]))

        # render the environment as an animation in the notebook
        if movie:
            display.clear_output(wait=True)
            display.display(self.env.get_svg())
            time.sleep(0.01)

    # record the episode's total reward in the agent's global experience
    self.agent.experience_global(total_reward)

    if train:
        # update the model and decay the exploration rate
        self.agent.update_model(old_seq, action, reward, new_seq)
        self.agent.reduce_epsilon()

    if enableLog:
        return total_reward, self.log
    return total_reward