We have extracted the following 44 code examples from open-source Python projects to illustrate how to use matplotlib.pylab.ylim().
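Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the call being illustrated: with arguments, ylim() sets the y-axis limits of the current axes; with no arguments, it returns them. The same call is also available as matplotlib.pyplot.ylim().

import numpy as np
from matplotlib import pylab

x = np.linspace(0, 2 * np.pi, 200)
pylab.plot(x, np.sin(x))

pylab.ylim(-1.5, 1.5)        # set the y-axis limits explicitly
bottom, top = pylab.ylim()   # with no arguments, return the current limits
print(bottom, top)           # -1.5 1.5

pylab.show()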
def test_plot_error_ellipse(self):
    # Generate random data
    x = np.random.normal(0, 1, 300)
    s = np.array([2.0, 2.0])
    y1 = np.random.normal(s[0] * x)
    y2 = np.random.normal(s[1] * x)
    data = np.array([y1, y2])

    # Calculate covariance and plot error ellipse
    cov = np.cov(data)
    plot_error_ellipse([0.0, 0.0], cov)

    debug = False
    if debug:
        plt.scatter(data[0, :], data[1, :])
        plt.xlim([-8, 8])
        plt.ylim([-8, 8])
        plt.show()
        plt.clf()
def plotLine(self, x_vals, y_vals, x_label, y_label, title, filename=None):
    plt.clf()
    plt.xlabel(x_label)
    plt.xlim(((min(x_vals) - 0.5), (max(x_vals) + 0.5)))
    plt.ylabel(y_label)
    plt.ylim(((min(y_vals) - 0.5), (max(y_vals) + 0.5)))
    plt.title(title)

    plt.plot(x_vals, y_vals, c='k', lw=2)
    #plt.plot(x_vals, len(x_vals) * y_vals[0], c='r', lw=2)

    if filename == None:
        plt.show()
    else:
        plt.savefig(self.outputPath + filename)
def plot_clustering(x, y, title, mx=None, ymax=None, xmin=None, km=None):
    pylab.figure(num=None, figsize=(8, 6))
    if km:
        pylab.scatter(x, y, s=50, c=km.predict(list(zip(x, y))))
    else:
        pylab.scatter(x, y, s=50)

    pylab.title(title)
    pylab.xlabel("Occurrence word 1")
    pylab.ylabel("Occurrence word 2")
    pylab.autoscale(tight=True)
    pylab.ylim(ymin=0, ymax=1)
    pylab.xlim(xmin=0, xmax=1)
    pylab.grid(True, linestyle='-', color='0.75')
    return pylab
def plot_roc(auc_score, name, tpr, fpr, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.plot(fpr, tpr)
    pylab.fill_between(fpr, tpr, alpha=0.5)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('ROC curve (AUC = %0.2f) / %s' % (auc_score, label),
                verticalalignment="bottom")
    pylab.legend(loc="lower right")
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def plot_roc(y_test, y_pred, label=''):
    """Compute ROC curve and ROC area"""
    fpr, tpr, _ = roc_curve(y_test, y_pred)
    roc_auc = auc(fpr, tpr)

    # Plot of a ROC curve for a specific class
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic' + label)
    plt.legend(loc="lower right")
    plt.show()
def plotSpeedupFigure(AllInfo, maxWorker=1, **kwargs):
    pylab.figure(2)
    xs = AllInfo['nWorker']
    ts_mono = AllInfo['t_monolithic']

    xgrid = np.linspace(0, maxWorker + 0.1, 100)
    pylab.plot(xgrid, xgrid, 'y--', label='ideal parallel')

    for method in getMethodNames(**kwargs):
        speedupRatio = ts_mono / AllInfo['t_' + method]
        pylab.plot(xs, speedupRatio, 'o-',
                   label=method,
                   color=ColorMap[method],
                   markeredgecolor=ColorMap[method])

    pylab.xlim([-0.2, maxWorker + 0.5])
    pylab.ylim([0, maxWorker + 0.5])
    pylab.legend(loc='upper left')
    pylab.xlabel('Number of Workers')
    pylab.ylabel('Speedup over Monolithic')
def plotBoundVsAlph(alphaVals=np.linspace(.001, 3, 1000), beta1=0.5):
    exactVals = cD_exact(alphaVals, beta1)
    boundVals = cD_bound(alphaVals, beta1)
    assert np.all(exactVals >= boundVals)

    pylab.plot(alphaVals, exactVals, 'k-', linewidth=LINEWIDTH)
    pylab.plot(alphaVals, boundVals, 'r--', linewidth=LINEWIDTH)
    pylab.xlabel("alpha", fontsize=FONTSIZE)
    pylab.ylabel(" ", fontsize=FONTSIZE)
    pylab.xlim([np.min(alphaVals) - 0.1, np.max(alphaVals) + 0.1])
    pylab.ylim([np.min(exactVals) - 0.05, np.max(exactVals) + 0.05])
    pylab.xticks(np.arange(np.max(alphaVals) + 1))

    pylab.legend(['c_D exact', 'c_D surrogate'],
                 fontsize=LEGENDSIZE,
                 loc='lower right')
    pylab.tick_params(axis='both', which='major', labelsize=TICKSIZE)
def plot_sequence(seqID, Data, dimID=0, maxT=200):
    Xseq = Data.X[Data.doc_range[seqID]:Data.doc_range[seqID + 1]]
    Zseq = Data.TrueParams['Z'][Data.doc_range[seqID]:Data.doc_range[seqID + 1]]

    Xseq = Xseq[:maxT, dimID]  # Xseq is 1D after this statement!
    Zseq = Zseq[:maxT]

    # Plot X, colored by segments Z
    changePts = np.flatnonzero(np.abs(np.diff(Zseq)))
    changePts = np.hstack([0, changePts + 1])
    for ii, loc in enumerate(changePts[:-1]):
        nextloc = changePts[ii + 1]
        ts = np.arange(loc, nextloc)
        xseg = Xseq[loc:nextloc]
        kseg = int(Zseq[loc])
        color = GaussViz.Colors[kseg % len(GaussViz.Colors)]
        pylab.plot(ts, xseg, '.-', color=color, markersize=8)
        pylab.plot([nextloc - 1, nextloc],
                   [Xseq[nextloc - 1], Xseq[nextloc]], 'k:')
    pylab.ylim([-2, 14])
def plot_tree_data(data, indicies_x, indicies_y, model, plot_indicies=False):
    plt.subplot(3, 1, 1)
    plt.plot(data, "o", color="blue", label="data")
    plt.plot(model, color="red", label="model")
    plt.ylim([-10, 10])

    if plot_indicies:
        plt.plot(
            indicies_x,
            indicies_y,
            "o",
            color="green",
            label="fitness predictors"
        )

    plt.title("Data and Model Output")
    plt.legend()
def test_abu_evolution(self):
    from nugridpy import ppn, utils
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pylab as mpy
    import os

    # Perform tests within temporary directory
    with TemporaryDirectory() as tdir:
        # wget the data for a ppn run from the CADC VOspace
        os.system("wget -q --content-disposition --directory '" + tdir + "' "
                  + "'http://www.canfar.phys.uvic.ca/vospace/synctrans?TARGET="
                  + "vos%3A%2F%2Fcadc.nrc.ca%21vospace%2Fnugrid%2Fdata%2Fprojects%2Fppn%2Fexamples%2F"
                  + "ppn_Hburn_simple%2Fx-time.dat&DIRECTION=pullFromVoSpace&PROTOCOL"
                  + "=ivo%3A%2F%2Fivoa.net%2Fvospace%2Fcore%23httpget'")

        #nugrid_dir= os.path.dirname(os.path.dirname(ppn.__file__))
        #NuPPN_dir= nugrid_dir + "/NuPPN"
        #test_data_dir= NuPPN_dir + "/examples/ppn_Hburn_simple/RUN_MASTER"

        symbs = utils.symbol_list('lines2')
        x = ppn.xtime(tdir)
        specs = ['PROT', 'HE 4', 'C 12', 'N 14', 'O 16']
        i = 0
        for spec in specs:
            x.plot('time', spec, logy=True, logx=True,
                   shape=utils.linestyle(i)[0], show=False, title='')
            i += 1
        mpy.ylim(-5, 0.2)
        mpy.legend(loc=0)
        mpy.xlabel('$\log t / \mathrm{min}$')
        mpy.ylabel('$\log X \mathrm{[mass fraction]}$')
        abu_evol_file = 'abu_evolution.png'
        mpy.savefig(abu_evol_file)
        self.assertTrue(os.path.exists(abu_evol_file))
def plot_prof_1(self, mod, species, xlim1, xlim2, ylim1, ylim2, symbol=None):
    """
    plot one species for cycle between xlim1 and xlim2

    Parameters
    ----------
    mod : string or integer
        Model to plot, same as cycle number.
    species : list
        Which species to plot.
    xlim1, xlim2 : float
        Mass coordinate range.
    ylim1, ylim2 : float
        Mass fraction coordinate range.
    symbol : string, optional
        Which symbol you want to use.  If None symbol is set to '-'.
        The default is None.

    """
    DataPlot.plot_prof_1(self, species, mod, xlim1, xlim2, ylim1, ylim2, symbol)
    """
    tot_mass=self.se.get(mod,'total_mass')
    age=self.se.get(mod,'age')
    mass=self.se.get(mod,'mass')
    Xspecies=self.se.get(mod,'iso_massf',species)

    pyl.plot(mass,np.log10(Xspecies),'-',label=species)
    pyl.xlim(xlim1,xlim2)
    pyl.ylim(ylim1,ylim2)
    pyl.legend()

    pl.xlabel('$Mass$ $coordinate$', fontsize=20)
    pl.ylabel('$X_{i}$', fontsize=20)
    pl.title('Mass='+str(tot_mass)+', Time='+str(age)+' years, cycle='+str(mod))
    """
def plot_prof_sparse(self, mod, species, xlim1, xlim2, ylim1, ylim2, sparse, symbol):
    """
    plot one species for cycle between xlim1 and xlim2.

    Parameters
    ----------
    species : list
        which species to plot.
    mod : string or integer
        Model (cycle) to plot.
    xlim1, xlim2 : float
        Mass coordinate range.
    ylim1, ylim2 : float
        Mass fraction coordinate range.
    sparse : integer
        Sparsity factor for points.
    symbol : string
        which symbol you want to use?

    """
    mass = self.se.get(mod, 'mass')
    Xspecies = self.se.get(mod, 'yps', species)
    pyl.plot(mass[0:len(mass):sparse],
             np.log10(Xspecies[0:len(Xspecies):sparse]), symbol)
    pyl.xlim(xlim1, xlim2)
    pyl.ylim(ylim1, ylim2)
    pyl.legend()
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
    """
    Generate a lexical dispersion plot.

    :param text: The source text
    :type text: list(str) or enum(str)
    :param words: The target words
    :type words: list of str
    :param ignore_case: flag to set if case should be ignored when searching text
    :type ignore_case: bool
    """

    try:
        from matplotlib import pylab
    except ImportError:
        raise ValueError('The plot function requires matplotlib to be installed.'
                         'See http://matplotlib.org/')

    text = list(text)
    words.reverse()

    if ignore_case:
        words_to_comp = list(map(str.lower, words))
        text_to_comp = list(map(str.lower, text))
    else:
        words_to_comp = words
        text_to_comp = text

    points = [(x, y) for x in range(len(text_to_comp))
              for y in range(len(words_to_comp))
              if text_to_comp[x] == words_to_comp[y]]
    if points:
        x, y = list(zip(*points))
    else:
        x = y = ()
    pylab.plot(x, y, "b|", scalex=.1)
    pylab.yticks(list(range(len(words))), words, color="b")
    pylab.ylim(-1, len(words))
    pylab.title(title)
    pylab.xlabel("Word Offset")
    pylab.show()
def plot_pr(auc_score, name, phase, precision, recall, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.plot(recall, precision, lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R curve (AUC=%0.2f) / %s' % (auc_score, label))
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(CHART_DIR, "pr_%s_%s.png" % (filename, phase)),
                  bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
    pylab.clf()
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    pylab.plot(
        data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
    pylab.legend(["train error", "test error"], loc="upper right")
    pylab.grid()
    pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
def plot_roc(auc_score, name, fpr, tpr):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('Receiver operating characteristic (AUC=%0.2f)\n%s' % (
        auc_score, name))
    pylab.legend(loc="lower right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.fill_between(tpr, fpr, alpha=0.5)
    pylab.plot(fpr, tpr, lw=1)
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + name.replace(" ", "_") + ".png"))
def plot_pr(auc_score, name, precision, recall, label=None):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R (AUC=%0.2f) / %s' % (auc_score, label))
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.plot(recall, precision, lw=1)
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(CHART_DIR, "pr_" + filename + ".png"))
def plot_k_complexity(ks, train_errors, test_errors):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('k')
    pylab.ylabel('Error')
    pylab.title('Errors for for different values of $k$')
    pylab.plot(
        ks, test_errors, "--", ks, train_errors, "-", lw=1)
    pylab.legend(["test error", "train error"], loc="upper right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.savefig(
        os.path.join(CHART_DIR, "kcomplexity.png"), bbox_inches="tight")
def plot_pr(auc_score, name, precision, recall, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.plot(recall, precision, lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R curve (AUC = %0.2f) / %s' % (auc_score, label))
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "pr_" + filename + ".png"), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
    pylab.clf()
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    pylab.plot(
        data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
    pylab.legend(["train error", "test error"], loc="upper right")
    pylab.grid(True)
    pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
def plot_roc(auc_score, name, fpr, tpr):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('Receiver operating characteristic (AUC=%0.2f)\n%s' % (
        auc_score, name))
    pylab.legend(loc="lower right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.fill_between(tpr, fpr, alpha=0.5)
    pylab.plot(fpr, tpr, lw=1)
    pylab.savefig(os.path.join(CHART_DIR, "roc_" + name.replace(" ", "_") + ".png"))
def plot_bias_variance(data_sizes, train_errors, test_errors, name, title):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    pylab.plot(
        data_sizes, test_errors, "--", data_sizes, train_errors, "b-", lw=1)
    pylab.legend(["train error", "test error"], loc="upper right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.savefig(
        os.path.join(CHART_DIR, "bv_" + name.replace(" ", "_") + ".png"),
        bbox_inches="tight")
def plot_k_complexity(ks, train_errors, test_errors):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('k')
    pylab.ylabel('Error')
    pylab.title('Errors for for different values of k')
    pylab.plot(
        ks, test_errors, "--", ks, train_errors, "-", lw=1)
    pylab.legend(["train error", "test error"], loc="upper right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.savefig(os.path.join(CHART_DIR, "kcomplexity.png"), bbox_inches="tight")
def plot_entropy_vs_rVals(self):
    if not doViz:
        self.skipTest("Required module matplotlib unavailable.")
    H = np.sum(calcRlogR(self.R), axis=1)
    Hnew_exact = np.sum(calcRlogR(self.Rnew_Exact), axis=1)
    Hnew_approx = np.sum(calcRlogR(self.Rnew_Approx), axis=1)

    rVals = self.rVals
    np.set_printoptions(precision=4, suppress=True)
    print ''
    print '--- rVals'
    print rVals[:3], rVals[-3:]
    print '--- R original'
    print self.R[:3]
    print self.R[-3:, :]
    print '--- R proposal'
    print self.Rnew_Exact[:3]
    print self.Rnew_Exact[-3:, :]

    pylab.plot(rVals, H, 'k-', label='H original')
    pylab.plot(rVals, Hnew_exact, 'b-', label='H proposal exact')
    pylab.plot(rVals, Hnew_approx, 'r-', label='H proposal approx')
    pylab.legend(loc='best')
    pylab.xlim([rVals.min() - .01, rVals.max() + .01])
    ybuf = 0.05 * H.max()
    pylab.ylim([ybuf, H.max() + ybuf])
    pylab.show(block=True)
def plotSpeedupFigure(AllInfo, maxWorker=1, **kwargs):
    pylab.figure(2)
    xs = AllInfo['nWorker']
    ts_mono = AllInfo['t_monolithic']

    xgrid = np.linspace(0, maxWorker + 0.1, 100)
    pylab.plot(xgrid, xgrid, 'y--', label='ideal parallel')

    for method in getMethodNames(**kwargs):
        speedupRatio = ts_mono / AllInfo['t_' + method]
        pylab.plot(xs, speedupRatio, 'o-',
                   label=method,
                   color=ColorMap[method],
                   markeredgecolor=ColorMap[method])

    pylab.xlim([-0.2, maxWorker + 0.5])
    pylab.ylim([0, maxWorker + 0.5])
    pylab.legend(loc='upper left')
    pylab.xlabel('Number of Workers')
    pylab.ylabel('Speedup over Monolithic')

    if kwargs['savefig']:
        title = 'BenchmarkPlot_%s_%s_minDur=%.2f_Speedup.eps'\
            % (platform.node(), kwargs['task'], kwargs['minSliceDuration'])
        pylab.savefig(title,
                      format='eps',
                      bbox_inches='tight',
                      pad_inches=0)
def plot_convergence_data(errors, label):
    error_line, = plt.plot(errors, label=label)
    # plt.xlim([0, 100])
    plt.xlim([100, 1000])
    plt.ylim([0, 100])
    plt.xlabel("Generation")
    plt.ylabel("Error")
    plt.legend(loc=0, prop={'size': 8})
def plot_model_no_control(model, plot_title='', name_suffix=''):
    # plot function
    mx, vx = model.get_posterior_x()
    mins = np.min(mx, axis=0) - 0.5
    maxs = np.max(mx, axis=0) + 0.5
    nGrid = 50
    xspaced = np.linspace(mins[0], maxs[0], nGrid)
    yspaced = np.linspace(mins[1], maxs[1], nGrid)
    xx, yy = np.meshgrid(xspaced, yspaced)
    Xplot = np.vstack((xx.flatten(), yy.flatten())).T
    mf, vf = model.predict_f(Xplot)

    fig = plt.figure()
    plt.imshow((mf[:, 0]).reshape(*xx.shape),
               vmin=mf.min(), vmax=mf.max(), origin='lower',
               extent=[mins[0], maxs[0], mins[1], maxs[1]], aspect='auto')
    plt.colorbar()
    plt.contour(
        xx, yy, (mf[:, 0]).reshape(*xx.shape),
        colors='k', linewidths=2, zorder=100)
    zu = model.dyn_layer.zu
    plt.plot(zu[:, 0], zu[:, 1], 'wo', mew=0, ms=4)
    for i in range(mx.shape[0] - 1):
        plt.plot(mx[i:i + 2, 0], mx[i:i + 2, 1],
                 '-bo', ms=3, linewidth=2, zorder=101)
    plt.xlabel(r'$x_{t, 1}$')
    plt.ylabel(r'$x_{t, 2}$')
    plt.xlim([mins[0], maxs[0]])
    plt.ylim([mins[1], maxs[1]])
    plt.title(plot_title)
    plt.savefig('/tmp/hh_gpssm_dim_0' + name_suffix + '.pdf')

    fig = plt.figure()
    plt.imshow((mf[:, 1]).reshape(*xx.shape),
               vmin=mf.min(), vmax=mf.max(), origin='lower',
               extent=[mins[0], maxs[0], mins[1], maxs[1]], aspect='auto')
    plt.colorbar()
    plt.contour(
        xx, yy, (mf[:, 1]).reshape(*xx.shape),
        colors='k', linewidths=2, zorder=100)
    zu = model.dyn_layer.zu
    plt.plot(zu[:, 0], zu[:, 1], 'wo', mew=0, ms=4)
    for i in range(mx.shape[0] - 1):
        plt.plot(mx[i:i + 2, 0], mx[i:i + 2, 1],
                 '-bo', ms=3, linewidth=2, zorder=101)
    plt.xlabel(r'$x_{t, 1}$')
    plt.ylabel(r'$x_{t, 2}$')
    plt.xlim([mins[0], maxs[0]])
    plt.ylim([mins[1], maxs[1]])
    plt.title(plot_title)
    plt.savefig('/tmp/hh_gpssm_dim_1' + name_suffix + '.pdf')
def run_xor():
    from operator import xor
    from scipy import special

    # create dataset
    print "generating dataset..."
    n = 25
    Y = np.zeros((0, 3))
    for i in [0, 1]:
        for j in [0, 1]:
            a = i * np.ones((n, 1))
            b = j * np.ones((n, 1))
            c = xor(bool(i), bool(j)) * np.ones((n, 1))
            Y_ij = np.hstack((a, b, c))
            Y = np.vstack((Y, Y_ij))
    Y = 2 * Y - 1

    # inference
    print "inference ..."
    M = 10
    D = 2
    lvm = aep.SGPLVM(Y, D, M, lik='Probit')
    lvm.optimise(method='L-BFGS-B', alpha=0.1, maxiter=200)

    # predict given inputs
    mx, vx = lvm.get_posterior_x()
    lims = [-1.5, 1.5]
    x = np.linspace(*lims, num=101)
    y = np.linspace(*lims, num=101)
    X, Y = np.meshgrid(x, y)
    X_ravel = X.ravel()
    Y_ravel = Y.ravel()
    inputs = np.vstack((X_ravel, Y_ravel)).T
    my, vy = lvm.predict_f(inputs)
    t = my / np.sqrt(1 + vy)
    Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
    for d in range(3):
        plt.figure()
        plt.scatter(mx[:, 0], mx[:, 1])
        zu = lvm.sgp_layer.zu
        plt.plot(zu[:, 0], zu[:, 1], 'ko')
        plt.contour(X, Y, np.log(Z[:, d] + 1e-16).reshape(X.shape))
        plt.xlim(*lims)
        plt.ylim(*lims)

    # Y_test = np.array([[1, -1, 1], [-1, 1, 1], [-1, -1, -1], [1, 1, -1]])
    # # impute missing data
    # for k in range(3):
    #     Y_test_k = Y_test
    #     missing_mask = np.ones_like(Y_test_k)
    #     missing_mask[:, k] = 0
    #     my_pred, vy_pred = lvm.impute_missing(
    #         Y_test_k, missing_mask,
    #         alpha=0.1, no_iters=100, add_noise=False)
    #     print k, my_pred, vy_pred, Y_test_k

    plt.show()
def Main(self):
    """
    Main demo for the Hodgkin Huxley neuron model
    """
    X = odeint(self.dALLdt, [-65, 0.05, 0.6, 0.32], self.t, args=(self,))
    V = X[:, 0]
    m = X[:, 1]
    h = X[:, 2]
    n = X[:, 3]
    ina = self.I_Na(V, m, h)
    ik = self.I_K(V, n)
    il = self.I_L(V)

    plt.figure()

    plt.subplot(3, 1, 1)
    plt.title('Hodgkin-Huxley Neuron')
    plt.plot(self.t, V, 'k')
    plt.ylabel('V (mV)')
    plt.xticks([])

    # plt.subplot(4,1,2)
    # plt.plot(self.t, ina, 'c', label='$I_{Na}$')
    # plt.plot(self.t, ik, 'y', label='$I_{K}$')
    # plt.plot(self.t, il, 'm', label='$I_{L}$')
    # plt.ylabel('Current')
    # plt.xticks([])
    # plt.legend(loc='upper center', ncol=3, prop=fontP)

    plt.subplot(3, 1, 2)
    plt.plot(self.t, m, 'r', label='m')
    plt.plot(self.t, h, 'g', label='h')
    plt.plot(self.t, n, 'b', label='n')
    plt.ylabel('Gating Value')
    plt.xticks([])
    plt.legend(loc='upper center', ncol=3, prop=fontP)

    plt.subplot(3, 1, 3)
    i_inj_values = [self.I_inj(t) for t in self.t]
    plt.plot(self.t, i_inj_values, 'k')
    plt.xlabel('t (ms)')
    plt.ylabel('$I_{inj}$ ($\\mu{A}/cm^2$)')
    plt.ylim(-2, 42)
    plt.savefig('/tmp/hh_data_all.pdf')

    plt.figure()
    plt.plot(V, n, 'ok', alpha=0.2)
    plt.xlabel('V')
    plt.ylabel('n')
    np.savetxt('hh_data.txt',
               np.vstack((V, m, n, h, np.array(i_inj_values))).T,
               fmt='%.5f')
    plt.show()
    plt.savefig('/tmp/hh_data_V_n.pdf')
def main(args):
    e = Eligibility(length=args.length)
    if args.mode == "dexp":
        e.efunc_ = e.efunc_double_exp
    elif args.mode == "rect":
        e.efunc_ = e.efunc_rect
    elif args.mode == "ramp":
        e.efunc_ = e.efunc_ramp
    elif args.mode == "exp":
        e.efunc_ = e.efunc_exp
    e.gen_efunc_table()

    x = np.arange(args.length)
    print x
    et = e.efunc(x)

    # plot and test with array argument
    cmstr = "ko"
    pl.plot(x, et, cmstr, lw=1.)
    if args.mode == "rect":
        # negative time for readability without lines
        pl.plot(np.arange(-5, x[0]), np.zeros(5,), cmstr, lw=1.)
        # pl.plot([-10, -1, x[0]], [0, 0, et[0]], cmstr, lw=1.)
        pl.plot([x[-1], x[0] + args.length], [et[-1], 0.], cmstr, lw=1.)
    pl.plot(x + args.length, np.zeros((len(et))), cmstr, lw=1.)
    pl.ylim((-0.005, np.max(et) * 1.1))
    # pl.plot(x, et, "k-", lw=1.)
    # pl.yticks([])
    # line at zero
    # pl.axhline(0., c="black")
    pl.xlabel("t [steps]")
    pl.ylabel("Eligibility")
    if args.plotsave:
        pl.gcf().set_size_inches((6, 2))
        pl.gcf().savefig("eligibility_window.pdf", dpi=300, bbox_inches="tight")
    pl.show()

    # check perf: loop, test with single integer arguments
    import time
    now = time.time()
    for i in range(100):
        for j in range(args.length):
            e.efunc(j)
    print "table took:", time.time() - now
    now = time.time()
    for i in range(100):
        for j in range(args.length):
            e.efunc_(j)
    print "feval took:", time.time() - now
def plotPolicy(self, policy, prefix):
    plt.clf()
    for idx in xrange(len(policy)):
        i, j = self.env.getStateXY(idx)

        dx = 0
        dy = 0
        if policy[idx] == 0:  # up
            dy = 0.35
        elif policy[idx] == 1:  # right
            dx = 0.35
        elif policy[idx] == 2:  # down
            dy = -0.35
        elif policy[idx] == 3:  # left
            dx = -0.35
        elif self.matrixMDP[i][j] != -1 and policy[idx] == 4:  # termination
            circle = plt.Circle(
                (j + 0.5, self.numRows - i + 0.5 - 1), 0.025, color='k')
            plt.gca().add_artist(circle)

        if self.matrixMDP[i][j] != -1:
            plt.arrow(j + 0.5, self.numRows - i + 0.5 - 1, dx, dy,
                      head_width=0.05, head_length=0.05, fc='k', ec='k')
        else:
            plt.gca().add_patch(
                patches.Rectangle(
                    (j, self.numRows - i - 1),  # (x,y)
                    1.0,  # width
                    1.0,  # height
                    facecolor="gray"
                )
            )

    plt.xlim([0, self.numCols])
    plt.ylim([0, self.numRows])

    for i in xrange(self.numCols):
        plt.axvline(i, color='k', linestyle=':')
    plt.axvline(self.numCols, color='k', linestyle=':')

    for j in xrange(self.numRows):
        plt.axhline(j, color='k', linestyle=':')
    plt.axhline(self.numRows, color='k', linestyle=':')

    plt.savefig(self.outputPath + prefix + 'policy.png')
    plt.close()
def SVD_Plot(imagepath_list):
    names = []
    sing_vals = []
    i = 0
    for image_path in imagepath_list:
        # READ IMAGE AND COMPUTE SYMMETRICAL MATRICES
        if type(image_path) == str:
            img = Image.open(image_path)
            img = img.convert("L")
            ncols = img.size[0]
            nrows = img.size[1]
            A = np.asarray(img.getdata()).reshape(nrows, ncols)
            names.append(image_path.split("/")[1].split(".")[0])
        else:
            i += 1
            A = image_path
            ncols = image_path.shape[1]
            nrows = image_path.shape[0]
            names.append("random %s" % i)

        Q1 = A.dot(A.T)
        Q2 = A.T.dot(A)

        # FIND V AND SINGULAR VALUES
        sigma_2, v = np.linalg.eig(Q2)
        singular_vals = np.sqrt(sigma_2)
        sing_vals.append(singular_vals)

    for i in range(len(imagepath_list)):
        val = sing_vals[i]
        maxi = max(val)
        mini = min(val)
        normalized_val = (val - mini) / (maxi - mini)
        plab.plot(normalized_val, label=names[i])

    plab.title("Singular Value Distributions")
    plab.xlabel("Singular Value Rank")
    plab.ylabel("Singular Value")
    plab.xlim(0, 30)
    plab.ylim(0, 1)
    plab.legend()
    plab.show()
def illustrate(Colors=Colors):
    if hasattr(Colors, 'colors'):
        Colors = Colors.colors
    from matplotlib import pylab
    rcParams = pylab.rcParams
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams['text.usetex'] = False
    rcParams['xtick.labelsize'] = 20
    rcParams['ytick.labelsize'] = 20
    rcParams['legend.fontsize'] = 25

    import bnpy
    Data = get_data(T=1000, nDocTotal=8)
    for k in xrange(K):
        zmask = Data.TrueParams['Z'] == k
        pylab.plot(Data.X[zmask, 0], Data.X[zmask, 1], '.',
                   color=Colors[k],
                   markeredgecolor=Colors[k],
                   alpha=0.4)
        sigEdges = np.flatnonzero(transPi[k] > 0.0001)
        for j in sigEdges:
            if j == k:
                continue
            dx = mus[j, 0] - mus[k, 0]
            dy = mus[j, 1] - mus[k, 1]
            pylab.arrow(mus[k, 0], mus[k, 1],
                        0.8 * dx,
                        0.8 * dy,
                        head_width=2, head_length=4,
                        facecolor=Colors[k], edgecolor=Colors[k])

            tx = 0 - mus[k, 0]
            ty = 0 - mus[k, 1]
            xy = (mus[k, 0] - 0.2 * tx, mus[k, 1] - 0.2 * ty)
            '''
            pylab.annotate(
                u'\u27F2',
                xy=(mus[k, 0], mus[k, 1]),
                color=Colors[k],
                fontsize=35,
            )
            '''
    pylab.gca().yaxis.set_ticks_position('left')
    pylab.gca().xaxis.set_ticks_position('bottom')
    pylab.axis('image')
    pylab.ylim([-38, 38])
    pylab.xlim([-38, 38])