The following 50 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.text().
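The snippets below assume the usual aliases (import matplotlib.pyplot as plt, import numpy as np) plus whatever project-specific helpers each repository defines. As a quick orientation before the project examples, here is a minimal sketch of our own (not taken from any of the projects): plt.text(x, y, s, **kwargs) draws the string s at data coordinates (x, y), while passing transform=ax.transAxes interprets the coordinates as fractions of the axes box, a pattern several of the examples use.

import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 2 * np.pi, 100)
fig, ax = plt.subplots()
ax.plot(x, np.sin(x))

# Label placed at the data point (pi, 0), centered horizontally
plt.text(np.pi, 0.0, 'zero crossing', ha='center', va='bottom')

# Label pinned 5% in from the left, 95% up, independent of the data limits
plt.text(0.05, 0.95, 'axes-relative label', transform=ax.transAxes, va='top')

plt.show()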
def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True):
    fig = plt.figure(figsize=(10, 3))
    for i in range(len(detected_events)):
        d = detected_events[i]
        plt.axvspan(d[0], d[1], 0, 0.5)
        plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i],
                 horizontalalignment='center', verticalalignment='center')
    for i in range(len(ground_truth_events)):
        gt = ground_truth_events[i]
        plt.axvspan(gt[0], gt[1], 0.5, 1)
        plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i],
                 horizontalalignment='center', verticalalignment='center')
    plt.tight_layout()
    if show:
        plt.show()
    else:
        plt.draw()
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    # Get figure 1 and clear it before drawing, so colorbar() can find the image
    f = plt.figure(1)
    f.clf()
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)
    # Show confidences
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j - 0.1, i + 0.2, c, fontsize=16, fontweight='bold', color='#b70000')
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)
    # Show confidences
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j - 0.1, i + 0.2, c, fontsize=16, fontweight='bold', color='#b70000')
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=True)
def plot_axes_scaling(self, iabscissa=1):
    from matplotlib import pyplot
    if not hasattr(self, 'D'):
        self.load()
    dat = self
    if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
        pyplot.text(0, dat.D[-1, 5],
                    'all axes scaling values equal to %s' % str(dat.D[-1, 5]),
                    verticalalignment='center')
        return self  # nothing interesting to plot
    self._enter_plotting()
    pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
    # pyplot.hold(True)
    pyplot.grid(True)
    ax = array(pyplot.axis())
    # ax[1] = max(minxend, ax[1])
    pyplot.axis(ax)
    pyplot.title('Principle Axes Lengths')
    # pyplot.xticks(xticklocs)
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self
def plot_histogram(counter, label, plot=None):
    import matplotlib.pyplot as plt
    plt.figure()
    nums = list(counter.keys())
    counts = list(counter.values())
    indices = range(len(counts))
    bars = plt.bar(indices, counts, align="center")
    plt.xticks(indices, nums)
    top = 1.06 * max(counts)
    plt.ylim(min(counts), top)
    plt.xlabel("number of %s" % label)
    plt.ylabel("count")
    for bar in bars:
        count = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2., count,
                 "%.1f%%" % (100.0 * count / sum(counts)),
                 ha="center", va="bottom")
    if plot:
        plt.savefig(plot + "histogram_" + label + ".png")
    else:
        plt.show()
def plot_trace(n=0, lg=False):
    plt.plot(trueC[n], c=col[2], clip_on=False, zorder=5, label='Truth')
    plt.plot(solution, c=col[0], clip_on=False, zorder=7, label='Estimate')
    plt.plot(y, c=col[7], alpha=.7, lw=1, clip_on=False, zorder=-10, label='Data')
    if lg:
        plt.legend(frameon=False, ncol=3, loc=(.1, .62), columnspacing=.8)
    spks = np.append(0, solution[1:] - g * solution[:-1])
    plt.text(800, 2.2, 'Correlation: %.3f' % (np.corrcoef(trueSpikes[n], spks)[0, 1]), size=24)
    plt.gca().set_xticklabels([])
    simpleaxis(plt.gca())
    plt.ylim(0, 2.85)
    plt.xlim(0, 1500)
    plt.yticks([0, 2], [0, 2])
    plt.xticks([300, 600, 900, 1200], ['', ''])


# init params
def plot_convergence(history, prefix='', prefix2=''):
    plt.figure(figsize=(8, 5))
    ax = plt.subplot(111)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.plot(history["TC"], '-', lw=2.5, color=tableau20[0])
    x = len(history["TC"])
    y = np.max(history["TC"])
    plt.text(0.5 * x, 0.8 * y, "TC", fontsize=18, fontweight='bold', color=tableau20[0])
    if "additivity" in history:  # dict.has_key() is Python 2 only
        plt.plot(history["additivity"], '-', lw=2.5, color=tableau20[1])
        plt.text(0.5 * x, 0.3 * y, "additivity", fontsize=18, fontweight='bold', color=tableau20[1])
    plt.ylabel('TC', fontsize=12, fontweight='bold')
    plt.xlabel('# Iterations', fontsize=12, fontweight='bold')
    plt.suptitle('Convergence', fontsize=12)
    filename = '{}/summary/convergence{}.pdf'.format(prefix, prefix2)
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    plt.savefig(filename, bbox_inches="tight")
    plt.close('all')
    return True
def plot(file_path, iterations):
    im = Image.open(file_path)
    im = np.array(im, dtype=np.uint8)
    plt.figure(figsize=(20, 16))
    plt.subplot(121)
    plt.imshow(im)
    plt.axis('off')
    plt.subplot(122)
    plt.imshow(np.zeros((640, 300, 3)))
    height = 14
    for i in range(len(labels)):
        plt.text(0, height * i + height / 2, labels[i],
                 family='Times New Roman', size=14, color='#ffffff')
    plt.axis('off')
    # plt.savefig(idx)
    plt.show()
def info(self, burn=1000, plot=False):
    """ Print the summary statistics and optionally plot the results """
    rows = len(self.varnames)
    cols = 2
    chain = np.array(self.chain[burn:])
    nsize = chain.shape[0]
    # print(rows, cols)
    print('%4s %16s %12s %12s [%12s, %12s, %12s]' % ('no', 'name', 'mean', 'stddev', '16%', '50%', '84%'))
    for i, name in enumerate(self.varnames):
        temp = np.percentile(chain[:, i], [16.0, 84.0, 50.0])
        print('%4i %16s %12g %12g [%12g, %12g, %12g]' % (i, name, np.mean(chain[:, i]), (temp[1] - temp[0]) / 2.0, temp[0], temp[2], temp[1]))
        if plot:
            ax = plt.subplot(rows, cols, 2 * i + 1)
            # plt.text(0.05, 0.9, r'$\tau$=' + '%5.1f' % (acor.acor(chain[:, i])[0]), transform=ax.transAxes)
            plt.plot(chain[:, i])
            plt.ylabel(self.model.descr[name][3])
            plt.xlabel('Iteration')
            ax = plt.subplot(rows, cols, 2 * i + 2)
            plt.hist(chain[:, i], bins=100, histtype='step')
            plt.text(0.05, 0.9, sround(np.mean(chain[:, i]), temp[0], temp[1]), transform=ax.transAxes)
            plt.xlabel(self.model.descr[name][3])
            # plt.text(0.05, 0.9, '%6g %3g (%4g-%4g)' % (np.mean(chain[:, i]), (temp[1] - temp[0]) / 2.0, temp[0], temp[1]), transform=ax.transAxes)
def hist_test():
    mu, sigma = 100, 15
    x = mu + sigma * np.random.randn(10000)
    # histogram of the data ('normed' was removed in Matplotlib 3.x; use density instead)
    n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)
    plt.xlabel('Smarts')
    plt.ylabel('Probability')
    plt.title('Histogram of IQ')
    plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
    plt.axis([40, 160, 0, 0.03])
    plt.grid(True)
    plt.show()
def update(self, conf_mat, classes, normalize=False):
    """This function prints and plots the confusion matrix.

    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(conf_mat, interpolation='nearest', cmap=self.cmap)
    plt.title(self.title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=1)[:, np.newaxis]
    thresh = conf_mat.max() / 2.
    for i, j in itertools.product(range(conf_mat.shape[0]), range(conf_mat.shape[1])):
        plt.text(j, i, conf_mat[i, j],
                 horizontalalignment="center",
                 color="white" if conf_mat[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.draw()
def iv2txt(self, TES):
    '''
    extract the I-V data from a given TES to a text file with two columns
    '''
    if not self.exist_iv_data():
        return None

    fname = 'QUBIC_TES%03i_array-%s_ASIC%i_%.0fmK_IV_%s.txt' \
            % (TES, self.detector_name, self.asic, 1000 * self.temperature,
               self.obsdate.strftime('%Y%m%dT%H%M%S'))
    h = open(fname, 'w')
    Ites = self.Ites(TES)
    if not isinstance(Ites, np.ndarray):
        return None
    Vtes = self.Vtes(TES)
    for idx in range(len(Ites)):
        h.write('%.6e %.6e\n' % (Vtes[idx], Ites[idx]))
    h.close()
    return fname


###################################################
### helper functions to return info from the filter
###################################################
def iqplot(data, spec='.', labels=None):
    """Plot signal points.

    :param data: complex baseband signal points
    :param spec: plot specifier (see :func:`matplotlib.pyplot.plot`)
    :param labels: label for each signal point

    >>> import arlpy
    >>> arlpy.comms.iqplot(arlpy.comms.psk(8))
    >>> arlpy.comms.iqplot(arlpy.comms.qam(16), 'rx')
    >>> arlpy.comms.iqplot(arlpy.comms.psk(4), labels=['00', '01', '11', '10'])
    """
    import matplotlib.pyplot as plt
    data = _np.asarray(data)
    if labels is None:
        plt.plot(data.real, data.imag, spec)
    else:
        if labels == True:
            labels = range(len(data))
        for i in range(len(data)):
            plt.text(data[i].real, data[i].imag, str(labels[i]))
    plt.axis([-2, 2, -2, 2])
    plt.grid()
    plt.show()
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
    reward = 0
    signal.fillna(value=0, inplace=True)

    if eval == False:
        bt = twp.Backtest(pd.Series(data=[x for x in xdata[time_step - 2:time_step]],
                                    index=signal[time_step - 2:time_step].index.values),
                          signal[time_step - 2:time_step], signalType='shares')
        reward = ((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])

    if terminal_state == 1 and eval == True:
        # save a figure of the test set
        bt = twp.Backtest(pd.Series(data=[x for x in xdata], index=signal.index.values),
                          signal, signalType='shares')
        reward = bt.pnl.iloc[-1]
        plt.figure(figsize=(3, 4))
        bt.plotTrades()
        plt.axvline(x=400, color='black', linestyle='--')
        plt.text(250, 400, 'training data')
        plt.text(450, 400, 'test data')
        plt.suptitle(str(epoch))
        plt.savefig('plt/' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
        plt.close('all')

    # print(time_step, terminal_state, eval, reward)
    return reward
def plot_hist(runornot):
    while runornot:
        plt.figure(1)
        timelist = ['year', 'month', 'day', 'hour', 'dayofweek']
        layoutlist = [231, 232, 234, 235, 236]
        for timeiterm in timelist:
            plt.subplot(layoutlist[timelist.index(timeiterm)])
            plt.hist(blog[timeiterm].values, bins=len(set(blog[timeiterm].values)),
                     facecolor='blue', alpha=0.5)
            # plt.xlabel()
            plt.ylabel('freq')
            plt.title(timeiterm)
            # plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
            # plt.axis([40, 160, 0, 0.03])
            # plt.grid(True)
        plt.show()
        # plt.title('Histogram')
        plt.show()
def summary(data):
    '''docstring for plot_table'''
    data_win = data[data.exit_profit > 0]
    data_lose = data[data.exit_profit < 0]
    zero_df = data[data.exit_profit == 0]
    total_num = len(data)
    av_period = data['period'].mean()
    # plt.text(12, 3.4, 'Table Title', size=8)
    # NOTE: the original Chinese labels were garbled in extraction; the labels
    # below simply describe the quantities each line computes.
    print("******************************************")
    print("Gross profit: " + str(data_win.exit_profit.sum() * 300))
    print("Gross loss: " + str(data_lose.exit_profit.sum() * 300))
    print("Net profit: " + str((data.exit_profit.sum()) * 300))
    print("******************************************")
    print("Total trades: " + str(total_num))
    print("Winning trades: " + str(len(data_win)))
    print("Losing trades: " + str(len(data_lose)))
    print("Average profit per trade: %s" % str(data_win.exit_profit.sum() / total_num * 300))
    print("Avg win / avg loss: " + str(abs(data_win.exit_profit.sum() / len(data_win) / (data_lose.exit_profit.sum() / len(data_lose)))))
    print("Win rate: " + str(len(data_win) / float(total_num) * 100) + "%")
    print("Average holding period: " + str(av_period))
    print("******************************************")
def show_labes(image, probs, lables, true_label):
    gs = gridspec.GridSpec(1, 3)
    ax1 = plt.subplot(gs[1])
    x = list(reversed(lables))
    y = list(reversed(probs))
    colors = ['#edf8fb', '#ccece6', '#99d8c9', '#66c2a4', '#41ae76']
    # colors = ['#624ea7', 'g', 'yellow', 'k', 'maroon']
    # colors = list(reversed(colors))
    width = 0.4  # the width of the bars
    ind = np.arange(len(y))  # the x locations for the groups
    ax1.barh(ind, y, width, align='center', color=colors)
    ax1.set_yticks(ind + width / 2)
    ax1.set_yticklabels(x, minor=False)
    for i, v in enumerate(y):
        ax1.text(v, i, '%5.2f%%' % v, fontsize=14)
    plt.title('Probability Output', fontsize=20)
    ax2 = plt.subplot(gs[2])
    ax2.axis('off')
    ax2.imshow(image)
    # fig = plt.gcf()
    # fig.set_size_inches(8, 6)
    plt.title(true_label, fontsize=20)
    plt.show()
def show_labes(image, probs, lables, true_label):
    fig = plt.figure()
    gs = gridspec.GridSpec(1, 3)
    ax1 = plt.subplot(gs[1])
    x = list(reversed(lables))
    y = list(reversed(probs))
    colors = ['#edf8fb', '#ccece6', '#99d8c9', '#66c2a4', '#41ae76']
    # colors = ['#624ea7', 'g', 'yellow', 'k', 'maroon']
    # colors = list(reversed(colors))
    width = 0.4  # the width of the bars
    ind = np.arange(len(y))  # the x locations for the groups
    ax1.barh(ind, y, width, align='center', color=colors)
    ax1.set_yticks(ind + width / 2)
    ax1.set_yticklabels(x, minor=False)
    for i, v in enumerate(y):
        ax1.text(v + 1, i, '%5.2f%%' % v, fontsize=14)
    plt.title('Probability Output', fontsize=20)
    ax2 = plt.subplot(gs[2])
    ax2.axis('off')
    ax2.imshow(image)
    plt.title(true_label, fontsize=20)
    plt.show()
    # if true_label != lables[0]:
    #     unique_filename = uuid.uuid4()
    #     fig.savefig('predit_worng/' + str(unique_filename) + '.jpg')
def shot_heatmap(df, sigma=1, log=False, player_pic=True, ax=None, cmap='jet'):
    '''
    This function plots a heatmap based on the shot chart.
    input - dataframe with x and y coordinates.
    optional - log (default false) plots heatmap in log scale.
               player (default true) adds player's picture and name if true
               sigma - the sigma of the Gaussian kernel. In feet (default=1)
    '''
    n, _, _ = np.histogram2d(0.1 * df['LOC_X'].values, 0.1 * df['LOC_Y'].values,
                             bins=[500, 500], range=[[-25, 25], [-5.25, 44.75]])
    KDE = ndimage.filters.gaussian_filter(n, 10.0 * sigma)
    N = 1.0 * KDE / np.sum(KDE)
    if ax is None:
        ax = plt.gca(xlim=[30, -30], ylim=[-7, 43], xticks=[], yticks=[], aspect=1.0)
    court(ax, outer_lines=True, color='black', lw=2.0, direction='down')
    ax.axis('off')
    if log:
        ax.imshow(np.rot90(np.log10(N + 1)), cmap=cmap, extent=[25.0, -25.0, -5.25, 44.75])
    else:
        ax.imshow(np.rot90(N), cmap=cmap, extent=[25.0, -25.0, -5.25, 44.75])
    if player_pic:
        player_id = df.PLAYER_ID.values[0]
        pic = players_picture(player_id)
        ax.imshow(pic, extent=[15, 25, 30, 37.8261])
    ax.text(0, -7, 'By: Doingthedishes', color='white',
            horizontalalignment='center', fontsize=20, fontweight='bold')
def show_labes(image, probs, lables, true_label):
    gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1], height_ratios=[1, 1])
    ax1 = plt.subplot(gs[0])
    x = list(reversed(lables))
    y = list(reversed(probs))
    colors = ['#edf8fb', '#b2e2e2', '#66c2a4', '#2ca25f', '#006d2c']
    # colors = ['#624ea7', 'g', 'yellow', 'k', 'maroon']
    # colors = list(reversed(colors))
    width = 0.4  # the width of the bars
    ind = np.arange(len(y))  # the x locations for the groups
    ax1.barh(ind, y, width, align='center', color=colors)
    ax1.set_yticks(ind + width / 2)
    ax1.set_yticklabels(x, minor=False)
    for i, v in enumerate(y):
        ax1.text(v, i, '%5.2f%%' % v, fontsize=14)
    plt.title('Probability Output', fontsize=20)
    ax2 = plt.subplot(gs[1])
    ax2.axis('off')
    ax2.imshow(image)
    # fig = plt.gcf()
    # fig.set_size_inches(8, 6)
    plt.title(true_label, fontsize=20)
    plt.show()
def plot(self, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    plt.imshow(self.cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(self.classes))
    plt.xticks(tick_marks, self.classes, rotation=45)
    plt.yticks(tick_marks, self.classes)
    if normalize:
        self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]
    thresh = self.cm.max() / 2.
    for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):
        plt.text(j, i, self.cm[i, j],
                 horizontalalignment="center",
                 color="white" if self.cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def colorwheel(col='black'):
    """
    Color wheel for phases in hsv colormap.
    From: pyVincent/ptycho.py
    """
    xwheel = np.linspace(-1, 1, 100)
    ywheel = np.linspace(-1, 1, 100)[:, np.newaxis]
    rwheel = np.sqrt(xwheel ** 2 + ywheel ** 2)
    phiwheel = -np.arctan2(ywheel, xwheel)  # Need the - sign because imshow starts at (top,left)
    # rhowheel = rwheel * np.exp(1j * phiwheel)
    rhowheel = 1 * np.exp(1j * phiwheel)
    plt.gca().set_axis_off()
    rgba = complex2rgbalin(rhowheel * (rwheel < 1))
    plt.imshow(rgba, aspect='equal')
    plt.text(1.1, 0.5, '$0$', fontsize=14, horizontalalignment='center',
             verticalalignment='center', transform=plt.gca().transAxes, color=col)
    plt.text(-.1, 0.5, r'$\pi$', fontsize=16, horizontalalignment='center',
             verticalalignment='center', transform=plt.gca().transAxes, color=col)
def draw_sub_graph(visit_total, domain_count, cname_count, ip_count, sub_graph_count):
    N = 5
    ind = np.arange(1, N + 1)
    width = 0.7
    plt.figure(1, figsize=(8, 6))
    data = [domain_count, cname_count, ip_count, sub_graph_count, visit_total]
    plt.bar(ind, data, width, color='c', align='center')
    x_min, x_max = ind.min(), ind.max()
    plt.xlim(x_min - 1, x_max + 1)
    plt.ylabel('The numbers')
    plt.xlabel('Categories')
    plt.xticks(ind, ('Domain', 'CNAME', 'IP', 'Sub_Graph', 'DNS Hits'))
    plt.yticks()
    # annotate each bar with its value
    for a, b in zip(ind, data):
        plt.text(a, b, str(b))
    plt.savefig('./graph/domain_overall.png', dpi=75)
    plt.show()
def Grap(space, X1, X2, t, x0, xmax, ymin, ymax, i, rank):
    """
    Create the graph of the solution at a given time step t and save it.

    Arguments:
        space (array(1,n))  : Interval [x0,xmax]
        X1, X2 (array(1,n)) : Numerical solutions found at the time step t,
                              their length must be equal to the length of space
        t (float)           : Time
        x0, xmax (float)    : Left and right boundaries of the space interval
        ymin, ymax (float)  : Minimal value and maximal value of the solutions
        i (int)             : Used to name the graph file
        rank (int)          : Rank of the core that runs the program
    """
    plt.plot(space, X1, label="Preys")
    plt.plot(space, X2, label="Predators")
    plt.text(x0 + (xmax - x0) / 10, ymax, "t=" + str("%.3f" % t) + "s",
             horizontalalignment='center', verticalalignment='center')
    plt.title("Lotka-Volterra System")
    plt.xlabel('Space')
    plt.ylabel('Number of individuals')
    plt.ylim(ymin, ymax + 0.1 * abs(ymax))
    plt.xlim(x0, xmax)
    plt.legend(loc=1, prop={'size': 7.5})
    plt.savefig('rde' + str(rank) + "0" * (10 - len(str(i))) + str(i) + '.png', transparent=False)
    plt.clf()
def show_pca(X, sentences):
    plt.figure()
    plt.plot(X[:, 0], X[:, 1], 'x')
    for x, sentence in zip(X, sentences):
        plt.text(x[0] - 0.01, x[1] - 0.01, sentence,
                 horizontalalignment='center', verticalalignment='top')
    plt.show()
def show_pca(X, sentences):
    plt.figure()
    plt.plot(X[:, 0], X[:, 1], 'x')
    for x, sentence in zip(X, sentences):
        plt.text(x[0] + 0.01, x[1] - 0.01, sentence,
                 horizontalalignment='left', verticalalignment='top')
    plt.show()
def correct_function():
    # order is para-prim, para-comp, cheat-prim, cheat-comp, scenario-prim, scenario-comp
    SEMPRE = [85.04, 66.98, 77.5, 49.01, 60, 33]
    DEEP_SEMPRE = [95.23, 75.64, 50, 47.05, 42.85, 16.66]

    X = np.arange(3)
    width = (0.8 - 0.1) / 4

    s_p = [SEMPRE[0], SEMPRE[2], SEMPRE[4]]
    s_c = [SEMPRE[1], SEMPRE[3], SEMPRE[5]]
    d_p = [DEEP_SEMPRE[0], DEEP_SEMPRE[2], DEEP_SEMPRE[4]]
    d_c = [DEEP_SEMPRE[1], DEEP_SEMPRE[3], DEEP_SEMPRE[5]]

    plt.bar(X, s_p, width=width, color='#85c1e5')
    plt.bar(X + width, d_p, width=width, color='#254e7b')
    plt.bar(X + 2 * width + 0.1, s_c, width=width, color='#85c1e5')
    plt.bar(X + 3 * width + 0.1, d_c, width=width, color='#254e7b')

    width = (0.8 - 0.1) / 4
    plt.xticks(np.array([width, 3 * width + 0.1, 1 + width, 1 + 3 * width + 0.1, 2 + width, 2 + 3 * width + 0.1]),
               ["Prim.", "Comp.", "Prim.", "Comp.", "Prim.", "Comp."])
    plt.text(0.4, -10, "Paraphrasing", ha='center', fontsize=18)
    plt.text(1.4, -10, "Scenarios", ha='center', fontsize=18)
    plt.text(2.4, -10, "Composition", ha='center', fontsize=18)
    plt.ylim(0, 100)
    plt.xlim(-0.1, 2.9)
    # plt.tight_layout()

    plt.legend(["SEMPRE", "Neural Net"], loc="upper right")
    plt.savefig('./figures/correct-function.pdf')
def accuracy_against_sempre():
    # order is para-prim, para-comp, cheat-prim, cheat-comp, scenario-prim, scenario-comp
    SEMPRE = [71.4, 50.2, 67.5, 33.3, 34.28, 30.5]
    DEEP_SEMPRE = [89.11, 55.27, 47.5, 29.4, 34.28, 16.66]

    X = np.arange(3)
    width = (0.8 - 0.1) / 4

    s_p = [SEMPRE[0], SEMPRE[2], SEMPRE[4]]
    s_c = [SEMPRE[1], SEMPRE[3], SEMPRE[5]]
    d_p = [DEEP_SEMPRE[0], DEEP_SEMPRE[2], DEEP_SEMPRE[4]]
    d_c = [DEEP_SEMPRE[1], DEEP_SEMPRE[3], DEEP_SEMPRE[5]]

    plt.bar(X, s_p, width=width, color='#85c1e5')
    plt.bar(X + width, d_p, width=width, color='#254e7b')
    plt.bar(X + 2 * width + 0.1, s_c, width=width, color='#85c1e5')
    plt.bar(X + 3 * width + 0.1, d_c, width=width, color='#254e7b')

    width = (0.8 - 0.1) / 4
    plt.xticks(np.array([width, 3 * width + 0.1, 1 + width, 1 + 3 * width + 0.1, 2 + width, 2 + 3 * width + 0.1]),
               ["Prim.", "Comp.", "Prim.", "Comp.", "Prim.", "Comp."])
    plt.text(0.4, -10, "Paraphrasing", ha='center', fontsize=18)
    plt.text(1.4, -10, "Scenarios", ha='center', fontsize=18)
    plt.text(2.4, -10, "Composition", ha='center', fontsize=18)
    plt.ylim(0, 100)
    plt.xlim(-0.1, 2.9)
    # plt.tight_layout()

    plt.legend(["SEMPRE", "Neural Net"], loc="upper right")
    plt.savefig('./figures/accuracy-combined.pdf')
def extensibility():
    # order is new device acc, new device recall, new domain acc, new domain recall
    SEMPRE = [100 * 117. / 214., 100 * (10. + 63.) / (15. + 104.), 100 * (42. + 232.) / (535. + 75.), 100 * (32. + 136.) / (286. + 48.)]
    DEEP_SEMPRE = [38, 47, 55, 74]

    X = np.arange(2)
    width = (0.8 - 0.1) / 4

    s_a = [SEMPRE[0], SEMPRE[2]]
    s_r = [SEMPRE[1], SEMPRE[3]]
    d_a = [DEEP_SEMPRE[0], DEEP_SEMPRE[2]]
    d_r = [DEEP_SEMPRE[1], DEEP_SEMPRE[3]]

    plt.bar(X, s_a, width=width, color='#85c1e5')
    plt.bar(X + width, d_a, width=width, color='#254e7b')
    plt.bar(X + 2 * width + 0.1, s_r, width=width, color='#85c1e5')
    plt.bar(X + 3 * width + 0.1, d_r, width=width, color='#254e7b')

    width = (0.8 - 0.1) / 4
    # four tick positions, one per bar pair in the two groups
    plt.xticks(np.array([width, 3 * width + 0.1, 1 + width, 1 + 3 * width + 0.1]),
               ["Accuracy", "Recall", "Accuracy", "Recall"])
    plt.text(0.4, -10, "New Device", ha='center', fontsize=18)
    plt.text(1.4, -10, "New Domain", ha='center', fontsize=18)
    plt.ylim(0, 100)
    plt.xlim(-0.1, 1.9)
    # plt.tight_layout()

    plt.legend(["SEMPRE", "Neural Net"], loc="upper right")
    plt.savefig('./figures/extensibility.pdf')
def show_pca(X, programs):
    plt.figure()
    plt.plot(X[:, 0], X[:, 1], 'x')
    for x, program in zip(X, programs):
        plt.text(x[0] - 0.01, x[1] - 0.01, program,
                 horizontalalignment='center', verticalalignment='top')
    plt.show()
def add_panning_to_svg(source, destin=None):
    """Add pan and zoom to an svg file by embedding SVGPan in the file.

    Args:
        source: Path to the input file.
        destin: Path to the output file. Defaults to source.
    """
    if destin is None:
        destin = source
    with io.open(source) as f:
        source_lines = list(f)
    destin_lines = []
    add = destin_lines.append
    for line in source_lines:
        if re.search('SVGPan library', line):
            raise ValueError('{} already supports panning'.format(source))
        if line.startswith('<svg '):
            add('<svg height="100%" width="100%" version="1.1"'
                ' xmlns="http://www.w3.org/2000/svg"'
                ' xmlns:xlink="http://www.w3.org/1999/xlink">\n')
            add('<script type="text/ecmascript"><![CDATA[\n')
            add(open(SVGPAN).read())
            add(']]></script>\n')
            add('<g id="viewport" transform="scale(1,1) translate(0,0)">\n')
        elif line.startswith('</svg>'):
            add('</g>\n')
            add(line)
        else:
            add(line)
    with io.open(destin, 'w') as f:
        for line in destin_lines:
            f.write(line)
def axes4x4(labels=("t", "x"), xmin=-4, xmax=4, ymin=-4, ymax=4, fontsize=20):
    """Set axes to [-4,4]×[-4,4] and label them

    args
    ====

    - labels — axes labels (x, y)
    """
    plt.axis([xmin, xmax, ymin, ymax])
    center_spines()
    xscale = (xmax - xmin) / 8.
    yscale = (ymax - ymin) / 8.
    plt.text(xmax - 0.2 * xscale, 0.2 * yscale, "$%s$" % labels[0],
             fontsize=fontsize, verticalalignment='bottom')
    plt.text(0.1 * xscale, ymax - 0.3 * yscale, "$%s$" % labels[1],
             fontsize=fontsize)
def draw_axes(xmin, xmax, ymin, ymax, labels=("x", "y")):
    plt.axis([xmin, xmax, ymin, ymax])
    center_spines()
    plt.text(xmax, 0, "$%s$" % labels[0], fontsize=20,
             verticalalignment='bottom', horizontalalignment='right')
    plt.text(0, ymax, "$%s$" % labels[1], fontsize=20,
             verticalalignment='top', horizontalalignment='right')
def handle_cs_error(self):
    cs = self.ui.cs_input.text()
    try:
        self.cs = float(cs)
        if self.cs < 0:
            return QtGui.QMessageBox.question(self, 'Error !',
                                              'Please input a confidence score >=0',
                                              QtGui.QMessageBox.Ok)
    except ValueError:
        return QtGui.QMessageBox.question(self, 'Error !',
                                          'Please input a confidence score >=0',
                                          QtGui.QMessageBox.Ok)
def confirm_gs(self):
    self.gs = self.ui.gs_input.text()
    try:
        self.gs = float(self.gs)
        if self.gs <= 0:
            return QtGui.QMessageBox.question(self, 'Error !',
                                              'Please input a genome size bigger than 0 !',
                                              QtGui.QMessageBox.Ok)
    except ValueError:
        return QtGui.QMessageBox.question(self, 'Error !',
                                          'Please check your input genome size !',
                                          QtGui.QMessageBox.Ok)
def plot_correlations(self, iabscissa=1):
    """spectrum of correlation matrix and largest correlation"""
    if not hasattr(self, 'corrspec'):
        self.load()
    if len(self.corrspec) < 2:
        return self
    x = self.corrspec[:, iabscissa]
    y = self.corrspec[:, 6:]  # principle axes
    ys = self.corrspec[:, :6]  # "special" values

    from matplotlib.pyplot import semilogy, text, grid, axis, title
    self._enter_plotting()
    semilogy(x, y, '-c')
    # hold(True)
    semilogy(x[:], np.max(y, 1) / np.min(y, 1), '-r')
    text(x[-1], np.max(y[-1, :]) / np.min(y[-1, :]), 'axis ratio')
    if ys is not None:
        semilogy(x, 1 + ys[:, 2], '-b')
        text(x[-1], 1 + ys[-1, 2], '1 + min(corr)')
        semilogy(x, 1 - ys[:, 5], '-b')
        text(x[-1], 1 - ys[-1, 5], '1 - max(corr)')
        semilogy(x[:], 1 + ys[:, 3], '-k')
        text(x[-1], 1 + ys[-1, 3], '1 + max(neg corr)')
        semilogy(x[:], 1 - ys[:, 4], '-k')
        text(x[-1], 1 - ys[-1, 4], '1 - min(pos corr)')
    grid(True)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    title('Spectrum (roots) of correlation matrix')
    # pyplot.xticks(xticklocs)
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self
def plot(self, plot_cmd=None, tf=lambda y: y):
    """plot the data we have, return ``self``"""
    from matplotlib import pyplot
    if not plot_cmd:
        plot_cmd = self.plot_cmd
    colors = 'bgrcmyk'
    pyplot.gcf().clear()
    res = self.res

    flatx, flatf = self.flattened()
    minf = np.inf
    for i in flatf:
        minf = min((minf, min(flatf[i])))
    addf = 1e-9 - minf if minf <= 1e-9 else 0
    for i in sorted(res.keys()):  # we plot not all values here
        if isinstance(i, int):
            color = colors[i % len(colors)]
            arx = sorted(res[i].keys())
            plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
            pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
            plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
    pyplot.ylabel('f + ' + str(addf))
    pyplot.draw()
    pyplot.ion()
    pyplot.show()
    return self
def makedistplots(ppdf1, pamt1, bincrates):
    #### This is how we'll normalize to get changes per degree warming.
    dry = ppdf1[0] * 100  # Change in dry days

    # % rain rates in mm/d for x axis ticks and labeling
    otn = np.linspace(1, 9, 9)
    xtickrates = np.append(0, otn * .1)
    xtickrates = np.append(xtickrates, otn)
    xtickrates = np.append(xtickrates, otn * 10)
    xtickrates = np.append(xtickrates, otn * 100)
    xticks = np.interp(xtickrates, bincrates, range(0, len(bincrates)))  # % bin numbers associated with nice number rain rate
    xticks, indices = np.unique(xticks, return_index=True)
    xtickrates = xtickrates[indices]

    ### Bin width - needed to normalize the rain amount distribution
    db = (bincrates[2] - bincrates[1]) / bincrates[1]

    ### Now we plot
    plt.figure(figsize=(4, 6))
    plt.clf()

    ax = plt.subplot(211)
    plt.plot(range(0, len(pamt1)), pamt1 / db, 'k')
    # plt.ylim((-.05,.15))
    plt.xlim((4, 130))
    # plt.setp(ax, xticks=xticks, xticklabels=['0','0.1','','','','','','','','','','1','','','','','','','','','10','','','','','','','','','100','','','','','','','','','1000'])
    plt.setp(ax, xticks=xticks, xticklabels=[''])
    # plt.xlabel('Rain rate (mm/d)')
    plt.title('Rain amount (mm/d)')

    ax = plt.subplot(212)
    plt.plot(range(0, len(ppdf1)), ppdf1 * 100, 'k')
    plt.plot((0, len(ppdf1)), (0, 0), '0.5')
    plt.xlim((4, 130))

    ### Annotate with the dry day frequency
    ymin, ymax = ax.get_ylim()
    t = plt.text(4, ymax * 0.95, "{:.1f}".format(dry) + '%')
    plt.setp(t, va='top', ha='left')

    plt.setp(ax, xticks=xticks, xticklabels=['0','0.1','','','','','','','','','','1','','','','','','','','','10','','','','','','','','','100','','','','','','','','','1000'])
    plt.xlabel('Rain rate (mm/d)')
    plt.title('Rain frequency (%)')

    plt.savefig("rainmetricdemo.pdf")
    return


### Call the function to make the rain distribution
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def plot_confusion_matrix(cm, classes=np.asarray(['spliced', 'non-spliced']), normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def trendline(xd, yd, order=1, c='r', alpha=1, plot_r=False, text_pos=None):
    """Make a line of best fit"""
    # Calculate trendline
    coeffs = np.polyfit(xd, yd, order)
    intercept = coeffs[-1]
    slope = coeffs[-2]
    if order == 2:
        power = coeffs[0]
    else:
        power = 0

    minxd = np.min(xd)
    maxxd = np.max(xd)
    xl = np.array([minxd, maxxd])
    yl = power * xl ** 2 + slope * xl + intercept

    # Plot trendline
    plt.plot(xl, yl, color=c, alpha=alpha)

    # Calculate R Squared
    r = sp.stats.pearsonr(xd, yd)[0]

    if plot_r == False:
        # Plot R^2 value
        if text_pos == None:
            text_pos = (0.9 * maxxd + 0.1 * minxd, 0.9 * np.max(yd) + 0.1 * np.min(yd),)
        plt.text(text_pos[0], text_pos[1], '$R = %0.2f$' % r)
    else:
        # Return the R^2 value:
        return r
def plot_words(word1, words, fitted, cmap, sims):
    # TODO: remove this and just set the plot axes directly
    plt.scatter(fitted[:, 0], fitted[:, 1], alpha=0)
    plt.suptitle("%s" % word1, fontsize=30, y=0.1)
    plt.axis('off')
    annotations = []
    isArray = type(word1) == list
    for i in range(len(words)):  # xrange in the Python 2 original
        pt = fitted[i]
        ww, decade = [w.strip() for w in words[i].split("|")]
        color = cmap((int(decade) - 1840) / 10 + CMAP_MIN)
        word = ww
        sizing = sims[words[i]] * 30
        # word1 is the word we are plotting against
        if ww == word1 or (isArray and ww in word1):
            annotations.append((ww, decade, pt))
            word = decade
            color = 'black'
            sizing = 15
        plt.text(pt[0], pt[1], word, color=color, size=int(sizing))
    return annotations
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    np.set_printoptions(precision=2)

    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        logger.info("Normalized confusion matrix")
    else:
        logger.info('Confusion matrix, without normalization')
    logger.info(cm)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def autolabel(rects):
    for rect in rects:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2., 1.03 * height, '%s' % int(height))


#barGraphics('??','??',['A','B','C','D','E','F'],[29,30,40,47,38,23],'????')
#linePlotGraphics("xLabel","yLabel",[1,2,3,4,5,6,7,8,9,10],[1.1,1.9,2.6,3.6,9.8,14,24,40,80,150],graphicTitle='??')
#scatterPlotsGraphics("xLabel","yLabel",[1,2,3,4,5,6,7,8,9,10],[1,11.9,2,6.3,6,9.8,14,4,8,5],graphicTitle='??')
def autolabel(rects):
    for rect in rects:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2., 1.03 * height, '%s' % int(height))