The following code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.xscale(). They are excerpts from larger modules: assume the usual imports (import numpy as np; import matplotlib.pyplot as plt), and note that some names refer to helpers or globals defined elsewhere in their projects.
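Before the project examples, here is a minimal, self-contained sketch of the call itself (the data and labels are invented for illustration): plt.xscale("log") switches the current axes to a logarithmic x-axis, and "symlog" is the variant to reach for when values cross or include zero, as several examples below do.

import numpy as np
import matplotlib.pyplot as plt

# Invented data for illustration: x spans four decades, so a log x-axis is natural.
x = np.logspace(0, 4, 50)
y = 1.0 / x

plt.plot(x, y, "-o")
plt.xscale("log")  # use "symlog" instead when the data cross or include zero
plt.xlabel("x (log scale)")
plt.ylabel("1/x")
plt.show()
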
def plot_training_parameters(self):
    fr = open("training_param.csv", "r")
    fr.readline()
    lines = fr.readlines()
    fr.close()
    n = 100
    nu = np.empty(n, dtype=np.float64)
    gamma = np.empty(n, dtype=np.float64)
    diff = np.empty([n, n], dtype=np.float64)
    for row in range(len(lines)):
        m = lines[row].strip().split(",")
        i = row // n  # integer division; the original "row / n" yields a float index under Python 3
        j = row % n
        nu[i] = Decimal(m[0])
        gamma[j] = Decimal(m[1])
        diff[i][j] = Decimal(m[2])
    plt.pcolor(gamma, nu, diff, cmap="coolwarm")
    plt.title("The Difference of Gaussian Classifier with Different nu, gamma")
    plt.xlabel("gamma")
    plt.ylabel("nu")
    plt.xscale("log")
    plt.yscale("log")
    plt.colorbar()
    plt.show()

def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x), 100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0)
                     for s in sol]
    # list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-",
                color='blue', linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x>-6)&(x<2)], y_min[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x>-6)&(x<2)], y_max[(x>-6)&(x<2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"],
                     100*(sol[0]["params"]["m_"] - sol[0]["params"]["m__std"]),
                     100*(sol[0]["params"]["m_"] + sol[0]["params"]["m__std"]),
                     color='lightgray', alpha=1, zorder=-1, label="RTD SD")
    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
    # ax.set_title(title+" step method", fontsize=14)

def plot():
    ''' '''
    # Register the functions
    builtins.__dict__.update(globals())
    # Loop over various dataset sizes
    Narr = np.logspace(0, 5, 5)
    tpp = np.zeros_like(Narr)
    tbm = np.zeros_like(Narr)
    tps = np.zeros_like(Narr)
    for i, N in enumerate(Narr):
        tpp[i] = timeit.timeit('run_pp(%d)' % N, number=10) / 10.
        if batman is not None:
            tbm[i] = timeit.timeit('run_bm(%d)' % N, number=10) / 10.
        if ps is not None:
            tps[i] = timeit.timeit('run_ps(%d)' % N, number=10) / 10.
    pl.plot(Narr, tpp, '-o', label='planetplanet')
    if batman is not None:
        pl.plot(Narr, tbm, '-o', label='batman')
    if ps is not None:
        pl.plot(Narr, tps, '-o', label='pysyzygy')
    pl.legend()
    pl.yscale('log')
    pl.xscale('log')
    pl.ylabel('Time [seconds]', fontweight='bold')
    pl.xlabel('Number of datapoints', fontweight='bold')

def training_process_3d(data, fontsizefig=18):
    """
    :param data: List of arrays, each containing a "loss trajectory" as a numpy array [3 x T]
                 (Note: the loss trajectories can have different lengths)
    :return:
    """
    n_traj = len(data)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for i in range(n_traj):
        assert data[i].shape[0] == 3
        # ax.plot(data[i][0, :], data[i][1, :], data[i][2, :], c='r', marker='-o')
        ax.plot(data[i][0, :], data[i][1, :], data[i][2, :])
    ax.set_xlabel('Training set', fontsize=fontsizefig)
    ax.set_ylabel('Test set', fontsize=fontsizefig)
    ax.set_zlabel('Validation set', fontsize=fontsizefig)
    # plt.xscale('log')
    # plt.yscale('log')
    # plt.zscale('log')
    plt.show()
    return 0

def SimilarityPlot(self, SpectralDict):
    fig = plt.figure(figsize=(18, 9))
    # Add each data set to the Spectral Quality Plot
    for n, data_set in enumerate(SpectralDict['data_sets']):
        plt.scatter(SpectralDict['x_data'][n], SpectralDict['y_data'][n],
                    color=self.color_codes[n], label=data_set)
    # Horizontal 800 Similarity line
    # (the "hold" keyword is from older matplotlib; it was removed in 3.0)
    plt.axhline(y=800, xmin=0, xmax=1, hold=None, color=self.red_hex_code, label='800 Similarity')
    # Make your plot pretty
    plt.legend(loc='upper left')
    plt.ylabel('Similarity vs. Main NIST Hit')
    plt.xlabel('Concentration (pg)')
    plt.title('%s - Spectral Quality' % SpectralDict['analyte_name'])
    plt.xscale('log')
    plt.xlim(SpectralDict['x_axis_min'], SpectralDict['x_axis_max'])
    plt.savefig(SpectralDict['file_name'], bbox_inches='tight')

def exponential_hist(times, a, b, **params):
    cutoff = 0.03  # cutoff frequency in ms
    if len(times) == 0:
        return
    bins = np.logspace(a, b, 100)
    hist = plt.hist(times, bins=bins, alpha=0.5, **params)
    plt.xscale("log")
    params.pop("label")
    color = params.pop("color")
    total = integrate_hist(hist, cutoff)
    if sum(times > cutoff) == 0:
        return
    tmean = times[times > cutoff].mean()
    T = np.logspace(a-3, b, 1000)
    fT = np.exp(-T/tmean)*T/tmean
    fT *= total/integrate_values(T, fT, cutoff)
    plt.plot(T, fT, label="exp. fit, mean = %.2f ms" % (tmean,),
             color="dark" + color, **params)
    plt.xlim(10**a, 10**b)

def loadplots(name, show=True):
    defaultstyle = "-x"
    DIR = os.path.join(DATADIR, "plots")
    dic = load_dict(DIR, name)
    meta = dic["meta"]
    plots = dic["plots"]
    for plot in plots:
        plt.figure()
        for line in plot["data"]:
            style = line["style"] if "style" in line else defaultstyle
            plt.plot(line["x"], line["y"], style, label=line["label"])
        plt.xlabel(plot["xlabel"])
        plt.ylabel(plot["ylabel"])
        if "xscale" in plot:
            plt.xscale(plot["xscale"])
        if "yscale" in plot:
            plt.yscale(plot["yscale"])
        plt.legend()
    if show:
        plt.show()
    return meta

# TODO: to make this truly magical, we could recursively modify all parents

def plot_gr_indicator(self):
    """Make plot for the evolution of the GR indicator as a function of steps"""
    gr_fname = self.config_param.chain_fname + '_GR.dat'
    data = np.loadtxt(gr_fname, unpack=True)
    steps = data[0]
    grs = data[1:]
    plt.figure(1, figsize=(6, 6))
    for i in range(len(grs)):  # "xrange" in the original Python 2 source
        plt.plot(steps, grs[i], label=str(i))
    plt.legend(loc='best')
    plt.xscale('log')
    plt.xlabel(r'$N(\rm{steps})$')
    plt.ylabel(r'$R_{\rm GR}$')
    output_name = (self.config_param.processed_product_path + '/GR_' +
                   self.config_param.chain_short_fname + '.pdf')
    plt.savefig(output_name, bbox_inches='tight', dpi=100)
    plt.clf()
    print('Written %s' % output_name)

def plot(dims, sequence, factorization):
    import matplotlib
    matplotlib.use('Agg')  # NOQA
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set_style("darkgrid")
    plt.ylabel("Speed improvement")
    plt.xlabel("Size of embedding layers")
    plt.title("Fitting speed (1.0 = no change)")
    plt.xscale('log')
    plt.plot(dims, 1.0 / sequence, label='Sequence model')
    plt.plot(dims, 1.0 / factorization, label='Factorization model')
    plt.legend(loc='lower right')
    plt.savefig('speed.png')
    plt.close()

def plot(self, title, debug):
    """Plot training time of chainer and pytorch."""
    # plot training time.
    plt.plot(self.chainer_log.t, self.chainer_log.v, label='chainer')
    plt.plot(self.pytorch_log.t, self.pytorch_log.v, label='pytorch')
    # plot settings.
    plt.title(title)
    plt.legend()
    plt.xlabel('(log scale) training time [sec]')
    plt.ylabel('loss function value')
    plt.xscale('log')
    # save plot.
    if debug:
        plt.show()
    else:
        filename = '_'.join(title.split(' ')) + '.png'
        plt.savefig(os.path.join(self.output, filename))

def show_errors_by_feature(error_data):
    plt.figure(1, figsize=(20, 10))
    plt.subplot(321)
    plt.xlabel('Producto_ID', fontsize=18)
    plt.ylabel('Error', fontsize=18)
    # plt.yscale('log')
    # plt.xscale('log')
    plt.scatter(error_data['Producto_ID'].values, error_data['mean'].values, alpha=0.5)
    plt.subplot(322)
    plt.xlabel('Cluster', fontsize=18)
    plt.ylabel('Error Sum', fontsize=18)
    plt.scatter(error_data['Cluster'].values, error_data['mean'].values, alpha=0.5)
    plt.subplot(323)
    plt.xlabel('Percent', fontsize=18)
    plt.ylabel('Error Sum', fontsize=18)
    plt.scatter(error_data['percent'].values, error_data['mean'].values, alpha=0.5)
    plt.tight_layout()
    plt.show()

def print_submission_data(sub_df=None, sub_file=None, show=False, command=0):
    if sub_df is None:
        sub_df = pd.read_csv(sub_file)
    print(sub_df.describe())  # Python 2 "print" statement in the original source
    sub_df = sub_df.sample(100000)
    plt.figure(1, figsize=(20, 10))
    plt.subplot(321)
    plt.xlabel('id', fontsize=18)
    plt.ylabel('Demanda_uni_equil', fontsize=18)
    # plt.yscale('log')
    # plt.xscale('log')
    plt.scatter(sub_df['id'].values, sub_df['Demanda_uni_equil'].values, alpha=0.5)
    plt.tight_layout()
    plt.savefig('submission_stats-' + str(command) + '.png')
    if show:
        plt.show()

def visualize_regret(mode):
    max_num = 0
    for strategy in strategy_list:
        cum_regrets = np.load("results/regret_" + strategy + ".npy")
        plt.plot(cum_regrets, label=strategy)
        max_num = max(max_num, cum_regrets[-1])
    # asymptotic_lower_bounds = [sum([(true_params[0] - true_params[k]) * (np.log(t) - np.log(100)) / kl(true_params[k], true_params[0]) for k in range(1, arm_num)]) for t in range(1, int(T))]
    # ab = [0.1*99/kl(0.4, 0.5) * (np.log(t) - np.log(100)) for t in range(int(T))]
    ab = get_asymptoic_lower_bound(mode)  # sic: the helper's name is misspelled in its project
    max_num = max(ab[-1], max_num)
    plt.plot(ab, label="asymptotic bound")
    # plt.xticks([1e2, 1e3, 1e4, 1e5], ["1e2", "1e3", "1e4", "1e5"])
    xticks_name = ["1e" + str(t) for t in range(2, int(np.log10(T)))]
    xticks = [float(xtick) for xtick in xticks_name]
    plt.xticks(xticks, xticks_name)
    plt.xlim(xmin=100)
    # plt.ylim(0, 10000)
    plt.ylim(0, max_num*2)
    plt.xscale("log")
    plt.legend(loc="best")
    # plt.show()
    plt.savefig("figures/regret_bernoulli.png")

# simple model test

def plotEndtoendSq(weightedEndtoendSq, weightedEndtoendSqStd, fittedWeightedEndtoendSq, popSize):
    x = np.linspace(1, c.nBeads, c.nBeads)
    # Plot the end-to-end distance squared
    plt.figure(5)
    plt.xlabel('Number of beads')
    plt.xlim(0, c.nBeads)
    plt.ylabel('End-to-end distance squared')
    plt.errorbar(x, weightedEndtoendSq, yerr=weightedEndtoendSqStd, label='Data')
    plt.plot(x, fittedWeightedEndtoendSq, color="r", label='Fit')
    plt.plot(popSize, color="g", label='Population')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlim([3, c.nBeads])
    plt.legend(loc='best')

def plotGyradiusSq(weightedGyradiusSq, weightedGyradiusSqStd, fittedGyradius, popSize):
    x = np.linspace(1, c.nBeads, c.nBeads)
    # Plot the gyradius
    plt.figure(6)
    plt.xlabel('Number of beads')
    plt.xlim(0, c.nBeads)
    plt.ylabel('Ensemble average $R_g^2$')
    plt.errorbar(x, weightedGyradiusSq, yerr=weightedGyradiusSqStd, label='Data')
    plt.plot(x, fittedGyradius, color="r", label='Fit')
    plt.plot(popSize, color="g", label='Population')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlim([3, c.nBeads])
    plt.legend(loc='best')

def PlotValidationCurve(param_range, train_mean, train_std, test_mean, test_std):
    # todo: automatically save plot to disk for exportation
    # plot data
    plot_params = [item[0] for item in param_range]
    plot.plot(plot_params, train_mean, color='blue', marker='o', markersize=5,
              label='training accuracy')
    plot.fill_between(plot_params, train_mean + train_std, train_mean - train_std,
                      alpha=0.15, color='blue')
    plot.plot(plot_params, test_mean, color='green', linestyle='--', marker='s',
              markersize=5, label='validation accuracy')
    plot.fill_between(plot_params, test_mean + test_std, test_mean - test_std,
                      alpha=0.15, color='green')
    # show plot
    plot.grid()
    plot.xscale('log', basex=2)  # since hidden units are all powers of 2 ("basex" became "base" in matplotlib 3.3)
    plot.xlabel('Parameter: Number of hidden units')
    plot.ylabel('Accuracy')
    plot.legend(loc='lower right')
    plot.ylim([0, 1.0])
    plot.show()

def plot_scatter(x, y, limits, corder, cmap='rainbow_r', symbol='o', xlabel='', ylabel='',
                 legendlabel='', title='', filename='', simple=False,
                 xscale='log', yscale='linear'):
    plt.axis(limits)
    plt.scatter(x, y, c=corder, cmap=cmap, alpha=1, label=legendlabel, edgecolors='black')
    plt.xscale(xscale)
    plt.yscale(yscale)
    if legendlabel != '':
        plt.legend(numpoints=1, fontsize='medium')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    if filename != '':
        plt.savefig(filename)
    if show_plots:
        plt.show()
    plt.close()

################################################################################

def plot_ranks(hist, scale='log'):
    """Plots frequency vs. rank.

    hist: map from word to frequency
    scale: string 'linear' or 'log'
    """
    t = rank_freq(hist)
    rs, fs = zip(*t)
    plt.clf()
    plt.xscale(scale)
    plt.yscale(scale)
    plt.title('Zipf plot')
    plt.xlabel('rank')
    plt.ylabel('frequency')
    plt.plot(rs, fs, 'r-', linewidth=3)
    plt.show()

def plot_deviance(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4, 3))
        deviance = sol.MDL.trace('deviance')[:]
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"]+1, sampler_state["_iter"]+1, sampler_state["_thin"])
        plt.plot(x, deviance, "-", color="C3",
                 label="Model deviance\nDIC = %.2f\nBPIC = %.2f" % (sol.MDL.DIC, sol.MDL.BPIC))
        plt.xlabel("Iteration")
        plt.ylabel("Model deviance")
        plt.legend(numpoints=1, loc="best", fontsize=9)
        plt.grid('on')
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
    if save:
        save_where = '/Figures/ModelDeviance/'
        working_path = getcwd().replace("\\", "/") + "/"
        save_path = working_path + save_where
        print("\nSaving model deviance figure in:\n", save_path)
        if not path.exists(save_path):
            makedirs(save_path)
        fig.savefig(save_path + 'ModelDeviance-%s-%s.%s' % (model, filename, save_as),
                    dpi=fig_dpi, bbox_inches='tight')
    try:
        plt.close(fig)
    except:
        pass
    if draw:
        return fig
    else:
        return None

def plot_logp(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4, 3))
        logp = logp_trace(sol.MDL)
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"]+1, sampler_state["_iter"]+1, sampler_state["_thin"])
        plt.plot(x, logp, "-", color="C3")
        plt.xlabel("Iteration")
        plt.ylabel("Log-likelihood")
        plt.legend(numpoints=1, loc="best", fontsize=9)
        plt.grid('on')
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
    if save:
        save_where = '/Figures/LogLikelihood/'
        working_path = getcwd().replace("\\", "/") + "/"
        save_path = working_path + save_where
        print("\nSaving logp trace figure in:\n", save_path)
        if not path.exists(save_path):
            makedirs(save_path)
        fig.savefig(save_path + 'LogLikelihood-%s-%s.%s' % (model, filename, save_as),
                    dpi=fig_dpi, bbox_inches='tight')
    try:
        plt.close(fig)
    except:
        pass
    if draw:
        return fig
    else:
        return None

def plot_debye(s, ax):
    x = np.log10(s["data"]["tau"])
    x = np.linspace(min(x), max(x), 100)
    y = 100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0)
    # ax.errorbar(10**x[(x>-6)&(x<2)], y[(x>-6)&(x<2)], None, None, "-",
    #             color='lightgray', linewidth=1, label="RTD estimations (%d)" % len(sol))
    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])

def set_plot_CC_T_rho_max(self, linestyle=[], burn_limit=0.997, color=['r'], marker=['o'], markevery=500):
    '''
    Plots
    end_model - array, controls how far in models a run is plotted; if -1, till the end
    symbs_1 - set symbols of runs
    '''
    if len(linestyle) == 0:
        linestyle = 200*['-']
    plt.figure('CC evol')
    for i in range(len(self.runs_H5_surf)):
        sefiles = se(self.runs_H5_out[i])
        t1_model = -1
        sefiles.get('temperature')
        sefiles.get('density')
        mini = sefiles.get('mini')
        zini = sefiles.get('zini')
        model = sefiles.se.cycles
        model_list = []
        for k in range(0, len(model), 1):
            model_list.append(model[k])
        rho1 = sefiles.get(model_list, 'rho')          # [:(t1_model-t0_model)]
        T1 = sefiles.get(model_list, 'temperature')    # [:(t1_model-t0_model)]
        rho = []
        T = []
        T_unit = sefiles.get('temperature_unit')
        labeldone = False
        for k in range(len(model_list)):
            t9 = np.array(T1[k])*T_unit/1e9
            T.append(max(t9))
            rho.append(max(rho1[k]))
        label = str(mini) + r'$M_{\odot}$, Z=' + str(zini)
        plt.plot(T, rho, label=label, color=color[i], marker=marker[i], markevery=markevery)
    plt.xlabel('$T_{9,max} (GK)$')
    plt.ylabel(r'$\rho [cm^{-3}]$')
    plt.yscale('log')
    plt.xscale('log')
    plt.legend(loc=2)

def saveplots(name="plot", meta=None, uid=False):
    # collect data from every open figure
    plots = []
    figs = map(plt.figure, plt.get_fignums())
    for fig in figs:
        for ax in fig.axes:
            plot = dict(
                xlabel=ax.get_xlabel(),
                ylabel=ax.get_ylabel(),
                xscale=ax.get_xscale(),
                yscale=ax.get_yscale(),
                data=[])
            for line in ax.lines:
                x, y = line.get_data()
                marker = line.get_marker()
                if marker == "None":
                    marker = ""
                data = dict(
                    x=x,
                    y=y,
                    style=marker + line.get_linestyle(),
                    label=line.get_label())
                plot["data"].append(data)
            plots.append(plot)
    # add metadata if provided
    meta = {} if meta is None else meta
    data = dict(plots=plots, meta=meta)
    # save to txt file in DATADIR
    DIR = os.path.join(DATADIR, "plots")
    name = name + "_" + str(unique_id()) if uid else name
    save_dict(data, DIR, name)
    return plots

def compare_cdfs(self, sample, log=True):
    t = grid(sample, 20, 0.005, log=log)
    tt = grid(sample, 100, 0.005, log=log)
    plt.plot(t, empirical_cdf(t, sample), "o")
    self.plot_cdf(tt)
    if log:
        plt.xscale("log")

def compare_pdfs(self, sample, log=True):
    t = grid(sample, 20, 0.005, log=log)
    tt = grid(sample, 100, 0.005, log=log)
    t, epdf = empirical_pdf(t, sample, log=log)
    plt.plot(t, epdf, "o")
    self.plot_pdf(tt, log=log)
    if log:
        plt.xscale("log")

def analyze_eigenvals(*, pwru50_data=None, file=None, title=True):
    if not pwru50_data:
        pwru50_data = os.path.join(os.path.dirname(__file__), 'tests', 'data',
                                   'pwru50_400000000000000.0.npz')
    nucs, matpwru50 = load_sparse_csr(pwru50_data)
    matdecay = decay_matrix()
    for desc, mat in {'pwru50': matpwru50, 'decay': matdecay}.items():
        plt.clf()
        print("analyzing eigenvalues of", desc)
        # older SciPy exposed eigs under scipy.sparse.linalg.eigen;
        # newer releases use scipy.sparse.linalg.eigs directly
        eigvals, eigvects = scipy.sparse.linalg.eigen.eigs(mat, mat.shape[0] - 2)
        plt.scatter(np.real(eigvals), np.imag(eigvals))
        plt.yscale('symlog', linthreshy=1e-20)  # "linthreshy" became "linthresh" in matplotlib 3.3
        plt.xscale('symlog')
        plt.xlim([np.min(np.real(eigvals))*2, 1])
        plt.ylim([np.min(np.imag(eigvals))*10, np.max(np.imag(eigvals))*10])
        plt.xticks([0] + [-10**i for i in range(1, 1 + int(np.ceil(np.log10(-plt.xlim()[0]))), 2)])
        plt.yticks([-10**i for i in range(-19, int(np.log10(-plt.ylim()[0])), 2)]
                   + [0]
                   + [10**i for i in range(-19, int(np.log10(plt.ylim()[1])), 2)])
        plt.minorticks_off()
        if title:
            plt.title("Eigenvalues of transmutation matrix for " + desc)
        plt_show_in_terminal()
        if file:
            path, ext = os.path.splitext(file)
            plt.savefig(path + '_' + desc + ext)

def make_experiment4_figure(logfile):
    """Generate high quality plot of data to reproduce figure 8.

    The logfile is a CSV of the format
    [congestion_control, loss_rate, goodput, rtt, capacity, specified_bw]
    """
    results = {}
    cubic = {"loss": [], "goodput": []}
    bbr = {"loss": [], "goodput": []}
    # For available options on the plot() method, see:
    # https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
    # We prefer to use explicit keyword syntax to help code readability.
    # Create a figure.
    fig_width = 8
    fig_height = 5
    fig, axes = plt.subplots(figsize=(fig_width, fig_height))
    results = parse_results_csv(logfile)
    xmark_ticks = get_loss_percent_xmark_ticks(results)
    cubic = results['cubic']
    bbr = results['bbr']
    debug_print_verbose("CUBIC: %s" % cubic)
    debug_print_verbose("BBR: %s" % bbr)
    matplotlib.rcParams.update({'figure.autolayout': True})
    plt.plot(cubic['loss'], cubic['goodput'], color='blue', linestyle='solid',
             marker='o', markersize=7, label='CUBIC')
    plt.plot(bbr['loss'], bbr['goodput'], color='red', linestyle='solid',
             marker='x', markersize=7, label='BBR')
    plt.xscale('log')
    apply_axes_formatting(axes, deduplicate_xmark_ticks(xmark_ticks))
    plot_titles(plt, xaxis="Loss Rate (%) - Log Scale", yaxis="Goodput (Mbps)")
    plot_legend(plt, axes)
    save_figure(plt, name="figures/experiment4.png")

def convolveFilter(flux, wl, filter):
    input = np.loadtxt(filter)
    response_wl = input[:, 0] * 1.e-4
    response_trans = input[:, 1]
    minwl = response_wl[0]
    maxwl = response_wl[len(response_wl) - 1]
    intwl = np.copy(wl)
    intwl[intwl > maxwl] = -1
    intwl[intwl < minwl] = -1
    wlrange = intwl > 0
    intwl = intwl[wlrange]
    transmission = np.zeros(len(flux))
    interpfunc = interpolate.interp1d(response_wl, response_trans, kind='linear')
    transmission[wlrange] = interpfunc(intwl)
    tot_trans = integrate.simps(transmission, wl)
    tot_flux = integrate.simps(transmission*flux, wl)
    return tot_flux/tot_trans
    # plt.plot(wl, transmission, 'bo')
    # plt.plot(wl, transmission, 'k-')
    # plt.plot(response_wl, response_trans, 'r+')
    # plt.xscale('log')
    # plt.show()

def test_alphas(alphas, modelname, cutoff=0.50, n_iter=1):
    # effect of alpha
    prec = np.empty(len(alphas))
    idx = 0
    for alpha in alphas:
        prec[idx] = ts.run_test(train_in, train_out, cv_in, cv_out, modelname,
                                embeddings, run_parse=False, alpha=alpha,
                                cutoff=cutoff, n_iter=n_iter)
        idx += 1
    plt.plot(alphas, prec, 'ro')
    plt.xscale('log')
    plt.show()

def test_C(Cs, modelname, n_iter=1):
    prec = np.empty(len(Cs))
    idx = 0
    for C in Cs:
        prec[idx] = ts.run_test(train_in, train_out, cv_in, cv_out, modelname,
                                embeddings, run_parse=False,
                                model_type='passive-aggressive', C=C, n_iter=n_iter)
        idx += 1
    plt.plot(Cs, prec, 'ro')
    plt.xscale('log')
    plt.show()

def plot_accuracy_by_freq_compare(freqs1, accuracies1, freqs2, accuracies2, label1, label2,
                                  title, filename=None, scale_acc=1.00, yscale_base=10.0,
                                  alpha=0.8, tags=None):
    plt.plot(freqs1, accuracies1, marker='o', color='r', label=label1,
             linestyle='None', fillstyle='none', alpha=alpha)
    plt.plot(freqs2, accuracies2, marker='+', color='c', label=label2,
             linestyle='None', fillstyle='none', alpha=alpha)
    if tags:
        print('tags:', tags, 'len:', len(tags))
        print('len(freqs1):', len(freqs1), 'len(freqs2)', len(freqs2))
        print('len(accuracies1):', len(accuracies1), 'len(accuracies2)', len(accuracies2))
        if len(tags) == len(freqs1) and len(tags) == len(freqs2):
            print('annotating tags')
            for i, tag in enumerate(tags):
                # the original indexed undefined names "freqs"/"accuracies" here;
                # the first series appears to be what was intended
                plt.annotate(tag, (freqs1[i], accuracies1[i]))
    plt.xscale('symlog')
    # plt.yscale('log', basey=yscale_base)
    plt.legend(loc='lower right', prop={'size': 14})
    plt.xlabel('Frequency', size='large', fontweight='demibold')
    plt.ylabel('Accuracy', size='large', fontweight='demibold')
    plt.ylim(ymax=1.01*scale_acc)
    plt.title(title, fontweight='demibold')
    plt.tight_layout()
    if filename:
        print('saving plot to:', filename)
        plt.savefig(filename)

def plot_accuracy_by_tag_compare(accuracies1, accuracies2, tags, tag_freq_dict, label1, label2,
                                 title, filename=None, scale_acc=1.00, yscale_base=10.0, alpha=0.5):
    # from adjustText import adjust_text
    tag_freqs = [tag_freq_dict[tag] for tag in tags]
    # plt.plot(tag_freqs, accuracies1, marker='o', color='r', label=label1, linestyle='None', fillstyle='none', alpha=alpha)
    # plt.plot(tag_freqs, accuracies2, marker='+', color='y', label=label2, linestyle='None', fillstyle='none', alpha=alpha)
    # plt.plot(tag_freqs, accuracies2-accuracies1, marker='o', color='c', label=label2, linestyle='None', fillstyle='none', alpha=alpha)
    plt.scatter(tag_freqs, accuracies2 - accuracies1,
                s=np.pi * (0.5*(accuracies2 - accuracies1) + 10)**2,
                c=np.random.rand(len(tag_freqs)), alpha=0.5)
    print('annotating tags')  # Python 2 "print" statements in the original source
    texts = []
    for i, tag in enumerate(tags):
        # plt.annotate(tag, (tag_freqs[i], accuracies1[i]), xytext=(-10, 10),
        #              textcoords='offset points', ha='right', va='bottom',
        #              arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
        # plt.annotate(tag, (tag_freqs[i], accuracies1[i]))
        # plt.annotate(tag, (tag_freqs[i], accuracies2[i]))
        plt.annotate(tag, (tag_freqs[i], accuracies2[i] - accuracies1[i]),
                     horizontalalignment='center', verticalalignment='center',
                     size=10 + 0.05*(accuracies2[i] - accuracies1[i]))
        # texts.append(plt.text(tag_freqs[i], accuracies1[i], tag))
    # adjust_text(texts, force_text=0.05, arrowprops=dict(arrowstyle="-|>", color='r', alpha=0.5))
    plt.xscale('symlog')
    # plt.yscale('log', basey=yscale_base)
    # plt.legend(loc='lower right', prop={'size': 14})
    plt.xlabel('Frequency', size='large', fontweight='demibold')
    plt.ylabel('Increase in Accuracy', size='large', fontweight='demibold')
    # plt.ylim(ymax=1.05*scale_acc)
    plt.ylim(ymax=1.15*max(accuracies2 - accuracies1))
    plt.xlim(min(tag_freqs) / 2, max(tag_freqs) * 5)
    plt.title(title, fontweight='demibold')
    plt.tight_layout()
    if filename:
        print('saving plot to:', filename)
        plt.savefig(filename)

def analyze_error():
    df = pd.read_csv('forecast_with_data.csv')
    df['error'] = np.abs(np.log(df['actual'].values + 1) - np.log(df['predictions'].values + 1))
    df['Slopes'] = np.round(df['Slopes'].values)
    print(df.describe())  # Python 2 "print" statement in the original source
    plt.figure(1, figsize=(20, 10))
    plt.subplot(321)
    plt.xlabel('Error', fontsize=18)
    plt.ylabel('Slope', fontsize=18)
    # plt.yscale('log')
    # plt.xscale('log')
    plt.scatter(df['Slopes'].values, df['error'].values, alpha=0.5)
    groupe2d = df.groupby(['Slopes'])['error'].mean()
    plt.subplot(322)
    plt.xlabel('Slope', fontsize=18)
    plt.ylabel('Mean Error', fontsize=18)
    plt.scatter(groupe2d.index.values, groupe2d.values, alpha=0.5)
    df['groupedStd'] = np.round(df['groupedStd'].values)
    groupe2d = df.groupby(['groupedStd'])['error'].mean()
    plt.subplot(323)
    plt.xlabel('groupedStd', fontsize=18)
    plt.ylabel('Mean Error', fontsize=18)
    plt.scatter(groupe2d.index.values, groupe2d.values, alpha=0.5)
    df['groupedMeans'] = np.round(df['groupedMeans'].values)
    groupe2d = df.groupby(['groupedMeans'])['error'].mean()
    plt.subplot(324)
    plt.xlabel('groupedMeans', fontsize=18)
    plt.ylabel('Mean Error', fontsize=18)
    plt.scatter(groupe2d.index.values, groupe2d.values, alpha=0.5)

def show_error_by_feature(df, feature_name, chartloc, redo_x=True):
    start = time.time()
    group1 = df.groupby([feature_name])['error']
    # the original called .sort("mean"); DataFrame.sort() was removed in modern pandas
    errors_by_feature = g2df_sum_mean(group1).sort_values("mean")
    # plt.subplot(chartloc)
    # plt.xlabel(feature_name + " in sorted order by Mean", fontsize=18)
    # plt.ylabel('Mean Error/Rank', fontsize=18)
    # plt.yscale('log')
    # plt.xscale('log')
    # plt.scatter(errors_by_feature[feature_name].values, errors_by_feature['mean'].values, alpha=0.5)
    if redo_x:
        x = range(0, errors_by_feature.shape[0])
    else:
        x = errors_by_feature[feature_name]
    data = [(x, errors_by_feature['mean'].values), (x, np.log(errors_by_feature['rank'].values))]
    draw_scatterplot(data, "Mean Error, Rank vs. " + feature_name, chartloc, c=['b', 'r'])
    # plt.scatter(x, errors_by_feature['mean'].values, alpha=0.5, color='b', s=5)
    # plt.scatter(x, np.log(errors_by_feature['rank'].values), alpha=0.5, color='r', s=5)
    print("show_error_by_feature", feature_name, "took", (time.time() - start), "s")
    return errors_by_feature

def plot_result(ngc_num, n):
    with open("../results/%s.dat" % ngc_num) as f:
        i = 0
        x = np.zeros(18)
        B4 = np.zeros(18)
        scale = np.zeros(18)
        for line in f:
            xt, yt, scalet = line.split()
            x[i] = float(xt)
            B4[i] = float(yt)*float(scalet)
            scale[i] = float(scalet)
            i += 1
    # the "with" block closes the file; the original's extra f.close() was redundant
    plt.figure(figsize=(3, 3))
    plt.scatter(x, B4, color='black')
    plt.xscale('log')
    plt.title("a4/a value for NGC %s" % ngc_num)
    plt.ylabel("a4/a")
    plt.xlabel('semimajor axis (arcsec)')
    if a4[n] < 10:
        aa = np.ones(np.shape(x))*a4[n]*0.01
        plt.plot(x, aa, color='red')
    plt.savefig('../results/%s.png' % ngc_num, bbox_inches='tight')
    plt.show()

def create_depth_chart(db, cj_amount, args=None):
    if args is None:
        args = {}
    rows = db.execute('SELECT * FROM orderbook;').fetchall()
    sqlorders = [o for o in rows if o["ordertype"] in filtered_offername_list]
    orderfees = sorted([calc_cj_fee(o['ordertype'], o['cjfee'], cj_amount) / 1e8
                        for o in sqlorders
                        if o['minsize'] <= cj_amount <= o['maxsize']])
    if len(orderfees) == 0:
        return 'No orders at amount ' + str(cj_amount / 1e8)
    fig = plt.figure()
    scale = args.get("scale")
    if (scale is not None) and (scale[0] == "log"):
        orderfees = [float(fee) for fee in orderfees]
        if orderfees[0] > 0:
            ratio = orderfees[-1] / orderfees[0]
            step = ratio ** 0.0333  # 1/30
            bins = [orderfees[0] * (step ** i) for i in range(30)]
        else:
            ratio = orderfees[-1] / 1e-8  # single satoshi placeholder
            step = ratio ** 0.0333  # 1/30
            bins = [1e-8 * (step ** i) for i in range(30)]
            bins[0] = orderfees[0]  # replace placeholder
        plt.xscale('log')
    else:
        bins = 30
    if len(orderfees) == 1:  # these days we have liquidity, but just in case...
        plt.hist(orderfees, bins, rwidth=0.8, range=(0, orderfees[0] * 2))
    else:
        plt.hist(orderfees, bins, rwidth=0.8)
    plt.grid()
    plt.title('CoinJoin Orderbook Depth Chart for amount=' + str(cj_amount / 1e8) + 'btc')
    plt.xlabel('CoinJoin Fee / btc')
    plt.ylabel('Frequency')
    return get_graph_html(fig)

def cdf(v, title='', xlabel='', ylabel='', xlim=(), ylim=(),
        xscale='linear', yscale='linear', linewidth=1.5, outfile=None):
    fs = count(v)
    values, freqs = zip(*sorted(fs.items()))  # Split values and frequencies, sorting by the values
    cum = np.cumsum(freqs, dtype=np.float64)
    cum /= np.sum(freqs)
    pp.clf()
    matplotlib.rc('font', size=24)
    pp.title(title)  # , {'fontsize': 22}
    pp.xlabel(xlabel)
    pp.ylabel(ylabel)
    pp.xscale(xscale)
    pp.yscale(yscale)
    pp.grid()
    # pp.tight_layout(pad=0.2)
    # pp.yscale('log')
    if xlim:
        pp.xlim(xlim)
    if ylim:
        pp.ylim(ylim)
    pp.tight_layout(pad=0.10)
    pp.plot(values, cum, lw=linewidth)
    # pp.show()
    if outfile:
        pp.savefig(outfile)

def rank(v, title='', xlabel='', ylabel='', xlim=(), ylim=(),
         xscale='linear', yscale='linear', linewidth=2, outfile=''):
    v.sort(reverse=True)
    pp.clf()
    pp.title(title)
    pp.xlabel(xlabel)
    pp.ylabel(ylabel)
    pp.xscale(xscale)
    pp.yscale(yscale)
    pp.grid()
    if xlim:
        pp.xlim(xlim)
    if ylim:
        pp.ylim(ylim)
    # Remove zeros; a list comprehension keeps this working under Python 3,
    # where the original filter(...) returns a lazy iterator that np.cumsum cannot handle
    v = [x for x in v if x > 0]
    cum = np.cumsum(v, dtype=np.float64)
    cum /= cum[-1]
    pp.plot(np.arange(1, len(cum) + 1), cum, lw=linewidth)
    # pp.plot(values, cum, lw=linewidth)
    if outfile:
        pp.savefig(outfile)
    else:
        pp.show()  # the original called a bare show(), which is undefined without a star import

def plot(self):
    for column_index, column_name in enumerate(self.get_column_names()):
        if self.hold_on is False:
            plt.figure()
        plt.title("Parameter %s" % column_name)
        for index_technique, technique in enumerate(self.technique_list):
            y = self.Values[:, index_technique, column_index]
            plt.plot(self.row_values, y, label="Exp: %s" % technique)
        for index_statistic, statistic in enumerate(self.statistic_list):
            y = np.ravel(self.Statistics[:, index_statistic, column_index])
            plt.plot(self.row_values, y, label="Theo: %s" % statistic)
        plt.xlabel(self.row_name)
        plt.legend(loc="best")
        try:
            plt.xscale(self.xscale)
            plt.yscale(self.yscale)
        except:
            print("cannot change scale")
        if self.ylim is not None:
            plt.ylim(self.ylim)

def plot_degree_poly_l(Y):
    """Same as plot_degree_poly, but for a list of random graphs, i.e. plots error bars."""
    x, y, yerr = random_degree(Y)
    plt.xscale('log')
    plt.yscale('log')
    fit = np.polyfit(np.log(x), np.log(y), deg=1)
    plt.plot(x, np.exp(fit[0]*np.log(x) + fit[1]), 'm:',
             label='model power %.2f' % fit[0])  # fit[0] is the fitted slope (the power); the original printed the intercept fit[1]
    leg = plt.legend(loc='upper right', prop={'size': 10})
    plt.errorbar(x, y, yerr=yerr, fmt='o')
    plt.xlim(left=1)
    plt.ylim((.9, 1e3))
    plt.xlabel('Degree')
    plt.ylabel('Counts')

def plot_regularization_path(columns, X, y):
    fig = plt.figure()
    ax = plt.subplot(111)
    colors = [
        'blue', 'green', 'red', 'cyan',
        'magenta', 'yellow', 'black', 'pink',
        'lightgreen', 'lightblue', 'gray', 'indigo', 'orange',
    ]
    weights = []
    params = []
    for c in np.arange(-4, 6):
        lr = LogisticRegression(penalty='l1', C=10**c, random_state=0)
        lr.fit(X, y)
        weights.append(lr.coef_[1])
        params.append(10**c)
    weights = np.array(weights)
    for column, color in zip(range(weights.shape[1]), colors):
        plt.plot(
            params,
            weights[:, column],
            label=columns[column + 1],
            color=color,
        )
    plt.axhline(0, color='black', linestyle='--', linewidth=3)
    plt.xlim([10**-5, 10**5])
    plt.ylabel('weight coefficient')
    plt.xlabel('C')
    plt.xscale('log')
    plt.legend(loc='upper left')
    ax.legend(
        loc='upper center',
        bbox_to_anchor=(1.38, 1.03),
        ncol=1,
        fancybox=True,
    )
    plt.show()

def plot_lr_regularization():
    iris = datasets.load_iris()
    X = iris.data[:, [2, 3]]
    y = iris.target
    X_train, _, y_train, _ = train_test_split(
        X, y, test_size=0.3, random_state=0,
    )
    sc = StandardScaler()
    sc.fit(X_train)
    X_train_std = sc.transform(X_train)
    weights = []
    params = []
    for c in np.logspace(-5, 4, num=10):
        lr = LogisticRegression(C=c, random_state=0)
        lr.fit(X_train_std, y_train)
        weights.append(lr.coef_[1])
        params.append(c)
    weights = np.array(weights)
    plt.plot(params, weights[:, 0], label='petal length')
    plt.plot(params, weights[:, 1], linestyle='--', label='petal width')
    plt.ylabel('weight coefficient')
    plt.xlabel('C')
    plt.legend(loc='upper left')
    plt.xscale('log')
    plt.show()

def gen_depthwise():
    gtx_ = np.asarray(gtx[:, 0:2]).transpose()
    tx1_ = np.asarray(tx1[:, 0:2]).transpose()
    tk1_ = np.asarray(tk1[:, 0:2]).transpose()
    raspi_ = np.asarray(raspi[:, 0:2]).transpose()
    out = [gtx_, tx1_, tk1_, raspi_]
    for i in range(len(platform_dd)):
        plt.ylabel('log time per image(ms)')
        plt.yscale('log')
        plt.ylim([0.5, 300*10**(i+1) + 8**(i+1)])  # manipulating axis
        plt.xlabel('batch size')
        plt.xscale('log')
        plt.xlim([0.5, 256])
        plt.xticks(x_dd, x_dd)
        plt.figtext(.5, .93, platform_dd[i], fontsize=18, ha='center')
        plt.figtext(.5, .9, 'mobilenet improvement by depthwise convolution', fontsize=10, ha='center')
        plt.minorticks_off()
        line = plt.plot(x_dd, out[i][0], '--o', label='mobilenet')
        line1 = plt.plot(x_dd, out[i][1], '--o', label='mobilenet depthwise')
        plt.legend()
        # plt.show()
        plt.savefig('mobilenet_' + platform_dd[i] + '.png', bbox_inches='tight')
        plt.clf()
        plt.close()

## run stuff here
# gen_platform()
# gen_cost()
# gen_small_cost()
# gen_depthwise()

def plot_step_loss(outdir):
    plt.cla()
    plt.xlabel("step")
    plt.ylabel("losses")
    files = glob.glob(outdir + "/*evaluator*")
    cmap = plt.get_cmap('jet')
    colors = cmap(np.linspace(0, 1.0, len(files)))
    plt.yscale('log')
    # plt.xscale('log')
    for i, fname in enumerate(files):
        label = fname.split("/")[-1]
        times, losses, precisions, steps = extract_times_losses_precision(fname)
        plt.plot(steps, losses, linestyle='solid', label=label, color=colors[i])
    plt.legend(loc="upper right", fontsize=8)
    plt.savefig("step_losses.png")

def plot_time_loss(outdir):
    plt.cla()
    plt.xlabel("time (s)")
    plt.ylabel("loss")
    files = glob.glob(outdir + "/*evaluator*")
    cmap = plt.get_cmap('jet')
    colors = cmap(np.linspace(0, 1.0, len(files)))
    plt.yscale('log')
    # plt.xscale('log')
    for i, fname in enumerate(files):
        label = fname.split("/")[-1]
        times, losses, precisions, steps = extract_times_losses_precision(fname)
        plt.plot(times, losses, linestyle='solid', label=label, color=colors[i])
    plt.legend(loc="upper right", fontsize=8)
    plt.savefig("time_loss.png")

def filter_genes_dispersion(result, log=False, save=None, show=None):
    """Plot dispersions vs. means for genes.

    Produces Supp. Fig. 5c of Zheng et al. (2017) and MeanVarPlot() of Seurat.

    Parameters
    ----------
    result : np.recarray
        Result of sc.pp.filter_genes_dispersion.
    log : bool
        Plot on logarithmic axes.
    """
    gene_subset = result.gene_subset
    means = result.means
    dispersions = result.dispersions
    dispersions_norm = result.dispersions_norm
    for id, d in enumerate([dispersions_norm, dispersions]):
        pl.figure(figsize=rcParams['figure.figsize'])
        for label, color, mask in zip(['highly variable genes', 'other genes'],
                                      ['black', 'grey'],
                                      [gene_subset, ~gene_subset]):
            if False:
                means_, disps_ = np.log10(means[mask]), np.log10(d[mask])
            else:
                means_, disps_ = means[mask], d[mask]
            pl.scatter(means_, disps_, label=label, c=color, s=1)
        if log:  # there's a bug in autoscale
            pl.xscale('log')
            pl.yscale('log')
            min_dispersion = np.min(dispersions)
            y_min = 0.95*min_dispersion if min_dispersion > 0 else 1e-1
            pl.xlim(0.95*np.min(means), 1.05*np.max(means))
            pl.ylim(y_min, 1.05*np.max(dispersions))
        pl.legend()
        pl.xlabel(('$log_{10}$ ' if False else '') + 'mean expression of gene')
        pl.ylabel(('$log_{10}$ ' if False else '') + 'dispersion of gene'
                  + (' (normalized)' if id == 0 else ' (not normalized)'))
        utils.savefig_or_show('filter_genes_dispersion', show=show, save=save)