We extracted the following 50 code examples from open source Python projects to illustrate how to use matplotlib.pylab.plot().
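Before the project-specific examples, here is a minimal, self-contained sketch of the basic call pattern they all share. The data below is invented purely for illustration and does not come from any of the listed projects.

import numpy as np
from matplotlib import pylab

# Invented example data: one period of a sine wave.
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)

# pylab.plot draws y against x and returns a list of Line2D objects;
# format strings ('b-') and keyword arguments (label=...) work as in pyplot.
lines = pylab.plot(x, y, 'b-', label='sin(x)')
pylab.xlabel('x')
pylab.ylabel('sin(x)')
pylab.legend(loc='best')
pylab.show()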
def hrd_key(self, key_str):
    """
    plot an HR diagram

    Parameters
    ----------
    key_str : string
        A label string

    """
    pyl.plot(self.data[:,self.cols['log_Teff']-1],\
             self.data[:,self.cols['log_L']-1],label = key_str)
    pyl.legend()
    pyl.xlabel('log Teff')
    pyl.ylabel('log L')
    x1,x2=pl.xlim()
    if x2 > x1:
        self._xlimrev()
def compare_images(path = '.'):
    S_limit = 10.
    file_list = glob.glob(os.path.join(path, 'Abu*'))
    file_list_master = glob.glob(os.path.join(path, 'MasterAbu*'))
    file_list.sort()
    file_list_master.sort()
    S=[]
    print("Identifying images with rmq > "+'%3.1f'%S_limit)
    ierr_count = 0
    for i in range(len(file_list)):
        this_S,fimg1,fimg2 = compare_entropy(file_list[i],file_list_master[i])
        if this_S > S_limit:
            warnings.warn(file_list[i]+" and "+file_list_master[i]+" differ by "+'%6.3f'%this_S)
            ierr_count += 1
        S.append(this_S)
    if ierr_count > 0:
        print("Error: at least one image differs by more than S_limit")
        sys.exit(1)
    #print ("S: ",S)
    #plb.plot(S,'o')
    #plb.xlabel("image number")
    #plb.ylabel("modified log KL-divergence to previous image")
    #plb.show()
def plot_prof_2(self, mod, species, xlim1, xlim2):
    """
    Plot one species for cycle between xlim1 and xlim2

    Parameters
    ----------
    mod : string or integer
        Model to plot, same as cycle number.
    species : list
        Which species to plot.
    xlim1, xlim2 : float
        Mass coordinate range.

    """
    mass=self.se.get(mod,'mass')
    Xspecies=self.se.get(mod,'yps',species)
    pyl.plot(mass,Xspecies,'-',label=str(mod)+', '+species)
    pyl.xlim(xlim1,xlim2)
    pyl.legend()
def plot(traj, x, y, **kwargs):
    """ Create a matplotlib plot of property x against property y

    Args:
        x,y (str): names of the properties
        **kwargs (dict): kwargs for :meth:`matplotlib.pylab.plot`

    Returns:
        List[matplotlib.lines.Lines2D]: the lines that were plotted
    """
    from matplotlib import pylab
    xl = yl = None
    if type(x) is str:
        strx = x
        x = getattr(traj, x)
        xl = '%s / %s' % (strx, getattr(x, 'units', 'dimensionless'))
    if type(y) is str:
        stry = y
        y = getattr(traj, y)
        yl = '%s / %s' % (stry, getattr(y, 'units', 'dimensionless'))
    plt = pylab.plot(x, y, **kwargs)
    pylab.xlabel(xl); pylab.ylabel(yl); pylab.grid()
    return plt
def plot(m, Xtrain, ytrain):
    xx = np.linspace(-0.5, 1.5, 100)[:, None]
    mean, var = m.predict_y(xx)
    mean = np.reshape(mean, (xx.shape[0], 1))
    var = np.reshape(var, (xx.shape[0], 1))
    if isinstance(m, aep.SDGPR):
        zu = m.sgp_layers[0].zu
    elif isinstance(m, vfe.SGPR_collapsed):
        zu = m.zu
    else:
        zu = m.sgp_layer.zu
    mean_u, var_u = m.predict_f(zu)

    plt.figure()
    plt.plot(Xtrain, ytrain, 'kx', mew=2)
    plt.plot(xx, mean, 'b', lw=2)
    # pdb.set_trace()
    plt.fill_between(
        xx[:, 0],
        mean[:, 0] - 2 * np.sqrt(var[:, 0]),
        mean[:, 0] + 2 * np.sqrt(var[:, 0]),
        color='blue', alpha=0.2)
    plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
    plt.xlim(-0.1, 1.1)
def run_regression_1D_aep_two_layers():
    np.random.seed(42)

    print "create dataset ..."
    Xtrain, ytrain, Xtest, ytest = create_dataset()

    alpha = 1  # other alpha is not valid here
    M = 20
    model = aep.SDGPR(Xtrain, ytrain, M, hidden_sizes=[2])
    model.optimise(method='L-BFGS-B', alpha=1, maxiter=5000, disp=False)
    my, vy = model.predict_y(Xtest)
    my = np.reshape(my, ytest.shape)
    vy = np.reshape(vy, ytest.shape)
    rmse = np.sqrt(np.mean((my - ytest)**2))
    ll = np.mean(-0.5 * np.log(2 * np.pi * vy) - 0.5 * (ytest - my)**2 / vy)
    nlml, _ = model.objective_function(model.get_hypers(), Xtrain.shape[0], alpha)
    print 'alpha=%.3f, train ml=%3f, test rmse=%.3f, ll=%.3f' % (alpha, nlml, rmse, ll)
    # plot(model, Xtrain, ytrain)
    # plt.show()

    # should produce something like this
    # alpha=1.000, train ml=-51.385404, test rmse=0.168, ll=0.311
def run_regression_1D_aep_two_layers_stoc():
    np.random.seed(42)

    print "create dataset ..."
    Xtrain, ytrain, Xtest, ytest = create_dataset()

    alpha = 1  # other alpha is not valid here
    M = 20
    model = aep.SDGPR(Xtrain, ytrain, M, hidden_sizes=[2])
    model.optimise(method='adam', alpha=1, maxiter=5000, disp=False)
    my, vy = model.predict_y(Xtest)
    my = np.reshape(my, ytest.shape)
    vy = np.reshape(vy, ytest.shape)
    rmse = np.sqrt(np.mean((my - ytest)**2))
    ll = np.mean(-0.5 * np.log(2 * np.pi * vy) - 0.5 * (ytest - my)**2 / vy)
    nlml, _ = model.objective_function(model.get_hypers(), Xtrain.shape[0], alpha)
    print 'alpha=%.3f, train ml=%3f, test rmse=%.3f, ll=%.3f' % (alpha, nlml, rmse, ll)
    # plot(model, Xtrain, ytrain)
    # plt.show()

    # should produce something like this
    # alpha=1.000, train ml=-69.444086, test rmse=0.170, ll=0.318
def plot(param, show = 1):
    """Returns the plot of spectrum as a pyplot object or plot it on the screen

    Keyword arguments:
    param -- Output spectrum file
    show -- Optional, plot the spectrum on the screen. Enabled by default.
    """
    s = sed.SED()
    s.grmonty(param)
    plt = pylab.plot(s.lognu, s.ll)
    if show == 1:
        pylab.show()
    else:
        return plt
def plot_position(self, pos_true, pos_est):
    N = pos_est.shape[1]
    pos_true = pos_true[:, :N]
    pos_est = pos_est[:, :N]

    # Figure
    plt.figure()
    plt.suptitle("Position")

    # Ground truth
    plt.plot(pos_true[0, :], pos_true[1, :],
             color="red", marker="o", label="Ground truth")

    # Estimated
    plt.plot(pos_est[0, :], pos_est[1, :],
             color="blue", marker="o", label="Estimated")

    # Plot labels and legends
    plt.xlabel("East (m)")
    plt.ylabel("North (m)")
    plt.axis("equal")
    plt.legend(loc=0)
def test_project(self):
    # Load points
    points_file = join(test.TEST_DATA_PATH, "house/house.p3d")
    points = np.loadtxt(points_file).T

    # Setup camera
    K = np.eye(3)
    R = np.eye(3)
    t = np.array([0, 0, 0])
    camera = PinholeCameraModel(320, 240, K)
    x = camera.project(points, R, t)

    # Assert
    self.assertEqual(x.shape, (3, points.shape[1]))
    self.assertTrue(np.all(x[2, :] == 1.0))

    # Plot projection
    debug = False
    # debug = True
    if debug:
        plt.figure()
        plt.plot(x[0], x[1], 'k. ')
        plt.show()
def plot(self, track, track_cam_states, estimates):
    plt.figure()

    # Feature
    feature = T_global_camera * track.ground_truth
    plt.plot(feature[0], feature[1],
             marker="o", color="red", label="feature")

    # Camera states
    for cam_state in track_cam_states:
        pos = T_global_camera * cam_state.p_G
        plt.plot(pos[0], pos[1],
                 marker="o", color="blue", label="camera")

    # Estimates
    for i in range(len(estimates)):
        cam_state = track_cam_states[i]
        cam_pos = T_global_camera * cam_state.p_G
        estimate = (T_global_camera * estimates[i]) + cam_pos
        plt.plot(estimate[0], estimate[1],
                 marker="o", color="green")

    plt.legend(loc=0)
    plt.show()
def x_corr(a,b,center_time_s=1000.0,window_len_s=50.0,plot=True):

    center_index = int(center_time_s/a.dt)
    window_index = int(window_len_s/(a.dt))

    print "center_index is", center_index
    print "window_index is", window_index

    t1 = a.trace_x[(center_index - window_index) : (center_index + window_index)]
    t2 = b.trace_x[(center_index - window_index) : (center_index + window_index)]
    print t1
    time_window = np.linspace((-window_len_s/2.0), (window_len_s/2), len(t1))

    #print time_window
    #plt.plot(time_window, t1)
    #plt.plot(time_window, t2)
    #plt.show()

    x_corr_time = correlate(t1, t2)

    delay = (np.argmax(x_corr_time) - (len(x_corr_time)/2) ) * a.dt
    #print "the delay is ", delay
    return delay
def plotValueFunction(self, valueFunction, prefix):
    '''3d plot of a value function.'''
    fig, ax = plt.subplots(subplot_kw = dict(projection = '3d'))
    X, Y = np.meshgrid(np.arange(self.numCols), np.arange(self.numRows))
    Z = valueFunction.reshape(self.numRows, self.numCols)

    for i in xrange(len(X)):
        for j in xrange(len(X[i])/2):
            tmp = X[i][j]
            X[i][j] = X[i][len(X[i]) - j - 1]
            X[i][len(X[i]) - j - 1] = tmp

    my_col = cm.jet(np.random.rand(Z.shape[0],Z.shape[1]))

    ax.plot_surface(X, Y, Z, rstride = 1, cstride = 1,
                    cmap = plt.get_cmap('jet'))
    plt.gca().view_init(elev=30, azim=30)
    plt.savefig(self.outputPath + prefix + 'value_function.png')
    plt.close()
def plotLine(self, x_vals, y_vals, x_label, y_label, title, filename=None):
    plt.clf()

    plt.xlabel(x_label)
    plt.xlim(((min(x_vals) - 0.5), (max(x_vals) + 0.5)))
    plt.ylabel(y_label)
    plt.ylim(((min(y_vals) - 0.5), (max(y_vals) + 0.5)))

    plt.title(title)
    plt.plot(x_vals, y_vals, c='k', lw=2)
    #plt.plot(x_vals, len(x_vals) * y_vals[0], c='r', lw=2)

    if filename == None:
        plt.show()
    else:
        plt.savefig(self.outputPath + filename)
def plot_entropy():
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))

    title = "Entropy $H(X)$"
    pylab.title(title)
    pylab.xlabel("$P(X=$coin will show heads up$)$")
    pylab.ylabel("$H(X)$")
    pylab.xlim(xmin=0, xmax=1.1)
    x = np.arange(0.001, 1, 0.001)
    y = -x * np.log2(x) - (1 - x) * np.log2(1 - x)
    pylab.plot(x, y)
    # pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
    # [0,1,2,3,4]])
    pylab.autoscale(tight=True)
    pylab.grid(True)

    filename = "entropy_demo.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.plot(fpr, tpr)
    pylab.fill_between(fpr, tpr, alpha=0.5)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('ROC curve (AUC = %0.2f) / %s' %
                (auc_score, label), verticalalignment="bottom")
    pylab.legend(loc="lower right")
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def plotKChart(self, misClassDict, saveFigPath):
    kList = []
    misRateList = []
    for k, misClassNum in misClassDict.iteritems():
        kList.append(k)
        misRateList.append(1.0 - 1.0/k*misClassNum)

    fig = plt.figure(saveFigPath)
    plt.plot(kList, misRateList, 'r--')
    plt.title(saveFigPath)
    plt.xlabel('k Num.')
    plt.ylabel('Misclassified Rate')
    plt.legend(saveFigPath)
    plt.grid(True)
    plt.savefig(saveFigPath)
    plt.show()

################################### PART3 TEST ########################################
# ??
def linear_testing():
    x_axis = np.linspace(1, 51, 100)
    x_nice = np.linspace(x_axis[0], x_axis[-1], 100)
    mod, params = qudi_fitting.make_linear_model()
    print('Parameters of the model', mod.param_names,
          ' with the independent variable', mod.independent_vars)

    params['slope'].value = 2  # + abs(np.random.normal(0,1))
    params['offset'].value = 50  #+ abs(np.random.normal(0, 200))
    #print('\n', 'beta', params['beta'].value, '\n', 'lifetime',
    #      params['lifetime'].value)
    data_noisy = (mod.eval(x=x_axis, params=params)
                  + 10 * np.random.normal(size=x_axis.shape))

    result = qudi_fitting.make_linear_fit(axis=x_axis, data=data_noisy,
                                          add_parameters=None)

    plt.plot(x_axis, data_noisy, 'ob')
    plt.plot(x_nice, mod.eval(x=x_nice, params=params), '-g')
    print(result.fit_report())
    plt.plot(x_axis, result.best_fit, '-r', linewidth=2.0)
    plt.plot(x_axis, result.init_fit, '-y', linewidth=2.0)

    plt.show()
def plot_penalty_vl(debug, tag, fold_exp):
    plt.close("all")
    vl = np.array(debug["penalty"])
    fig = plt.figure(figsize=(15, 10.8), dpi=300)
    names = debug["names"]
    for i in range(vl.shape[1]):
        if vl.shape[1] > 1:
            plt.plot(vl[:, i], label="layer_"+str(names[i]))
        else:
            plt.plot(vl[:], label="layer_"+str(names[i]))
    plt.xlabel("mini-batches")
    plt.ylabel("value of penalty")
    plt.title("Penalty value over layers: " +
              "_".join([str(k) for k in names]) + ". tag:" + tag)
    plt.legend(loc='upper right', fancybox=True, shadow=True, prop={'size': 8})
    plt.grid(True)
    fig.savefig(fold_exp+"/penalty.png", bbox_inches='tight')
    plt.close('all')
    del fig
def add_cifar_10(x, x_cifar_10, sh=True):
    """Add cifar 10 as background."""
    sz = x.shape
    mask = (x == 0) * 1.
    # binarize cifar
    back = x_cifar_10.reshape(x_cifar_10.shape[0], 3, 32, 32).mean(1)
    back = back[:, 2:30, 2:30]  # take 28x28 from the center.
    back /= 255.
    back = back.astype(np.float32)
    # shuffle the index
    if sh:
        ind = np.random.randint(0, x_cifar_10.shape[0], sz[0])  # the index
        for i in range(10):
            np.random.shuffle(ind)
    else:  # used only to plot images for paper.
        assert x_cifar_10.shape[0] == sz[0]
        ind = np.arange(0, sz[0])  # the index
    back_sh = back[ind]
    back_sh = back_sh.reshape(back_sh.shape[0], -1)
    back_ready = np.multiply(back_sh, mask)
    out = np.clip(x + back_ready, 0., 1.)
    return out
def plot_roc(y_test, y_pred, label=''):
    """Compute ROC curve and ROC area"""

    fpr, tpr, _ = roc_curve(y_test, y_pred)
    roc_auc = auc(fpr, tpr)

    # Plot of a ROC curve for a specific class
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic' + label)
    plt.legend(loc="lower right")
    plt.show()
def gen_overview_plot_image(ax,imagefile,imgext=0,cubelayer=1,title='Img Title?',fontsize=6,lthick=2,alpha=0.5,
                            cmap='coolwarm'):
    """
    Plotting commands for image (cube layer) overview plotting

    --- INPUT ---
    cubelayer    If the content of the file is a cube, provide the cube layer to plot.
                 If cubelayer = 'fmax' the layer with most flux is plotted
    """
    ax.set_title(title,fontsize=fontsize)

    if os.path.isfile(imagefile):
        imgdata = pyfits.open(imagefile)[imgext].data

        if len(imgdata.shape) == 3:  # it is a cube
            imgdata = imgdata[cubelayer,:,:]

        ax.imshow(imgdata, interpolation='None',cmap=cmap,aspect='equal', origin='lower')

        ax.set_xlabel('x-pixel')
        ax.set_ylabel('y-pixel ')
        ax.set_xticks([])
        ax.set_yticks([])
    else:
        textstr = 'No image\nfound'
        ax.text(1.0,22,textstr,horizontalalignment='center',verticalalignment='center',fontsize=fontsize)
        ax.set_ylim([28,16])

        ax.plot([0.0,2.0],[28,16],'r--',lw=lthick)
        ax.plot([2.0,0.0],[28,16],'r--',lw=lthick)

        ax.set_xlabel(' ')
        ax.set_ylabel(' ')
        ax.set_xticks([])
        ax.set_yticks([])
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def energy_profile(self,ixaxis):
    """
    Plot radial profile of key energy generations eps_nuc,
    eps_neu etc.

    Parameters
    ----------
    ixaxis : 'mass' or 'radius'
    """
    mass = self.get('mass')
    radius = self.get('radius') * ast.rsun_cm
    eps_nuc = self.get('eps_nuc')
    eps_neu = self.get('non_nuc_neu')

    if ixaxis == 'mass':
        xaxis = mass
        xlab = 'Mass / M$_\odot$'
    else:
        xaxis = old_div(radius, 1.e8)  # Mm
        xlab = 'radius / Mm'

    pl.plot(xaxis, np.log10(eps_nuc), 'k-', label='$\epsilon_\mathrm{nuc}>0$')
    pl.plot(xaxis, np.log10(-eps_nuc), 'k--', label='$\epsilon_\mathrm{nuc}<0$')
    pl.plot(xaxis, np.log10(eps_neu), 'r-', label='$\epsilon_\\nu$')

    pl.xlabel(xlab)
    pl.ylabel('$\log(\epsilon_\mathrm{nuc},\epsilon_\\nu)$')
    pl.legend(loc='best').draw_frame(False)
def CO_ratio(self,ifig,ixaxis):
    """
    plot surface C/O ratio in Figure ifig with x-axis quantity ixaxis

    Parameters
    ----------
    ifig : integer
        Figure number in which to plot
    ixaxis : string
        what quantity is to be on the x-axis, either 'time' or 'model'
        The default is 'model'

    """
    def C_O(model):
        surface_c12=model.get('surface_c12')
        surface_o16=model.get('surface_o16')
        CORatio=old_div((surface_c12*4.),(surface_o16*3.))
        return CORatio

    if ixaxis=='time':
        xax=self.get('star_age')
    elif ixaxis=='model':
        xax=self.get('model_number')
    else:
        raise IOError("ixaxis not recognised")

    pl.figure(ifig)
    pl.plot(xax,C_O(self))
def hrd_new(self, input_label="", skip=0):
    """
    plot an HR diagram with options to skip the first N lines and
    add a label string

    Parameters
    ----------
    input_label : string, optional
        Diagram label. The default is "".
    skip : integer, optional
        Skip the first n lines. The default is 0.

    """
    xl_old=pyl.gca().get_xlim()
    if input_label == "":
        my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z'])
    else:
        my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z'])+"; "+str(input_label)
    pyl.plot(self.data[skip:,self.cols['log_Teff']-1],self.data[skip:,self.cols['log_L']-1],label = my_label)
    pyl.legend(loc=0)
    xl_new=pyl.gca().get_xlim()
    pyl.xlabel('log Teff')
    pyl.ylabel('log L')
    if any(array(xl_old)==0):
        pyl.gca().set_xlim(max(xl_new),min(xl_new))
    elif any(array(xl_new)==0):
        pyl.gca().set_xlim(max(xl_old),min(xl_old))
    else:
        pyl.gca().set_xlim([max(xl_old+xl_new),min(xl_old+xl_new)])
def t_lumi(self,num_frame,xax):
    """
    Luminosity evolution as a function of time or model.

    Parameters
    ----------
    num_frame : integer
        Number of frame to plot this plot into.
    xax : string
        Either model or time to indicate what is to be used on the
        x-axis

    """
    pyl.figure(num_frame)

    if xax == 'time':
        xaxisarray = self.get('star_age')
    elif xax == 'model':
        xaxisarray = self.get('model_number')
    else:
        print('kippenhahn_error: invalid string for x-axis selection. needs to be "time" or "model"')

    logLH = self.get('log_LH')
    logLHe = self.get('log_LHe')

    pyl.plot(xaxisarray,logLH,label='L_(H)')
    pyl.plot(xaxisarray,logLHe,label='L(He)')
    pyl.ylabel('log L')
    pyl.legend(loc=2)

    if xax == 'time':
        pyl.xlabel('t / yrs')
    elif xax == 'model':
        pyl.xlabel('model number')
def plot_prof_1(self, mod, species, xlim1, xlim2, ylim1, ylim2, symbol=None):
    """
    plot one species for cycle between xlim1 and xlim2

    Parameters
    ----------
    mod : string or integer
        Model to plot, same as cycle number.
    species : list
        Which species to plot.
    xlim1, xlim2 : float
        Mass coordinate range.
    ylim1, ylim2 : float
        Mass fraction coordinate range.
    symbol : string, optional
        Which symbol you want to use. If None symbol is set to '-'.
        The default is None.

    """
    DataPlot.plot_prof_1(self,species,mod,xlim1,xlim2,ylim1,ylim2,symbol)
    """
    tot_mass=self.se.get(mod,'total_mass')
    age=self.se.get(mod,'age')
    mass=self.se.get(mod,'mass')
    Xspecies=self.se.get(mod,'iso_massf',species)

    pyl.plot(mass,np.log10(Xspecies),'-',label=species)
    pyl.xlim(xlim1,xlim2)
    pyl.ylim(ylim1,ylim2)
    pyl.legend()

    pl.xlabel('$Mass$ $coordinate$', fontsize=20)
    pl.ylabel('$X_{i}$', fontsize=20)
    pl.title('Mass='+str(tot_mass)+', Time='+str(age)+' years, cycle='+str(mod))
    """
def plot_prof_sparse(self, mod, species, xlim1, xlim2, ylim1, ylim2, sparse, symbol):
    """
    plot one species for cycle between xlim1 and xlim2.

    Parameters
    ----------
    species : list
        which species to plot.
    mod : string or integer
        Model (cycle) to plot.
    xlim1, xlim2 : float
        Mass coordinate range.
    ylim1, ylim2 : float
        Mass fraction coordinate range.
    sparse : integer
        Sparsity factor for points.
    symbol : string
        which symbol you want to use?

    """
    mass=self.se.get(mod,'mass')
    Xspecies=self.se.get(mod,'yps',species)
    pyl.plot(mass[0:len(mass):sparse],np.log10(Xspecies[0:len(Xspecies):sparse]),symbol)
    pyl.xlim(xlim1,xlim2)
    pyl.ylim(ylim1,ylim2)
    pyl.legend()
def abup_se_plot(mod,species):
    """
    plot species from one ABUPP file and the se file.

    You must use this function in the directory where the ABP files
    are and an ABUP file for model mod must exist.

    Parameters
    ----------
    mod : integer
        Model to plot, you need to have an ABUPP file for that model.
    species : string
        The species to plot.

    Notes
    -----
    The species is set to 'C-12'.

    """

    # Marco, you have already implemented finding headers and columns in
    # ABUP files. You may want to transplant that into here?
    species='C-12'

    filename = 'ABUPP%07d0000.DAT' % mod
    print(filename)
    mass,c12=np.loadtxt(filename,skiprows=4,usecols=[1,18],unpack=True)
    c12_se=self.se.get(mod,'iso_massf','C-12')
    mass_se=self.se.get(mod,'mass')

    pyl.plot(mass,c12)
    pyl.plot(mass_se,c12_se,'o',label='cycle '+str(mod))
    pyl.legend()
def draw(m, name, extra=None):
    FIG.clf()
    matrix = m
    orig_shape = np.shape(matrix)
    # lose the channel shape in the end of orig_shape
    new_shape = orig_shape[:-1]
    matrix = np.reshape(matrix, new_shape)
    ax = FIG.add_subplot(1,1,1)
    ax.set_aspect('equal')
    plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
    # plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
    plt.colorbar()

    if extra != None:
        greens, reds = extra
        grn_x, grn_y, = greens
        red_x, red_y = reds
        plt.scatter(x=grn_x, y=grn_y, c='g', s=40)
        plt.scatter(x=red_x, y=red_y, c='r', s=40)
    # # put a blue dot at (10, 20)
    # plt.scatter([10], [20])
    # # put a red dot, size 40, at 2 locations:
    # plt.scatter(x=[3, 4], y=[5, 6], c='r', s=40)
    # # plt.plot()

    plt.savefig(name)
def plot1D_mat(a, b, M, title=''):
    """ Plot matrix M with the source and target 1D distribution

    Creates a subplot with the source distribution a on the left and
    target distribution b on the top. The matrix M is shown in between.

    Parameters
    ----------
    a : np.array, shape (na,)
        Source distribution
    b : np.array, shape (nb,)
        Target distribution
    M : np.array, shape (na,nb)
        Matrix to plot
    """
    na, nb = M.shape

    gs = gridspec.GridSpec(3, 3)

    xa = np.arange(na)
    xb = np.arange(nb)

    ax1 = pl.subplot(gs[0, 1:])
    pl.plot(xb, b, 'r', label='Target distribution')
    pl.yticks(())
    pl.title(title)

    ax2 = pl.subplot(gs[1:, 0])
    pl.plot(a, xa, 'b', label='Source distribution')
    pl.gca().invert_xaxis()
    pl.gca().invert_yaxis()
    pl.xticks(())

    pl.subplot(gs[1:, 1:], sharex=ax1, sharey=ax2)
    pl.imshow(M, interpolation='nearest')
    pl.axis('off')

    pl.xlim((0, nb))
    pl.tight_layout()
    pl.subplots_adjust(wspace=0., hspace=0.2)
def plot2D_samples_mat(xs, xt, G, thr=1e-8, **kwargs):
    """ Plot matrix M in 2D with lines using alpha values

    Plot lines between source and target 2D samples with a color
    proportional to the value of the matrix G between samples.

    Parameters
    ----------
    xs : ndarray, shape (ns,2)
        Source samples positions
    b : ndarray, shape (nt,2)
        Target samples positions
    G : ndarray, shape (na,nb)
        OT matrix
    thr : float, optional
        threshold above which the line is drawn
    **kwargs : dict
        parameters given to the plot functions (default color is black if
        nothing given)
    """
    if ('color' not in kwargs) and ('c' not in kwargs):
        kwargs['color'] = 'k'
    mx = G.max()
    for i in range(xs.shape[0]):
        for j in range(xt.shape[0]):
            if G[i, j] / mx > thr:
                pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]],
                        alpha=G[i, j] / mx, **kwargs)
def fst_delay_snd(fst, snd, samp_rate, max_delay):
    # Verify argument shape.
    s1, s2 = fst.shape, snd.shape
    if len(s1) != 1 or len(s2) != 1 or s1[0] != s2[0]:
        raise Exception("Argument shape invalid, in 'fst_delay_snd' function")

    half_len = int(s1[0]/2)
    a = numpy.array(fst, dtype=numpy.double)
    b = numpy.array(snd, dtype=numpy.double)
    corr = numpy.correlate(a, b, 'same')
    max_pos = numpy.argmax(corr)

    # plot(s1[0], samp_rate, a, b, corr)

    return corr, (max_pos - half_len) / samp_rate
def plot(l, samp, w1, w2, cor):
    time_range = numpy.arange(0, l) * (1.0 / samp)

    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, w1)
    pl.subplot(212)
    pl.plot(time_range, w2, c="r")
    pl.xlabel("time")

    pl.figure(2)
    pl.plot(time_range, cor)
    pl.show()
def main():
    sampling, maxvalue, wave_data = record.record()

    # Pick out two channels for our study.
    w1, w2 = wave_data[1:3]
    nframes = w1.shape[0]

    # Cut one channel in the tail, while the other in the head,
    # to guarantee same length and first delays second.
    cut_time_len = 0.2  # second
    cut_len = int(cut_time_len * sampling)
    wp1 = w1[:-cut_len]
    wp2 = w2[cut_len:]

    # Get their reduced (amplitude) version, and
    # calculate correlation.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)

    # Plot the channels, also the correlation.
    time_range = numpy.arange(0, nframes - cut_len)*(1.0/sampling)

    # Still shows the original signal
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()

    # Print delay
    print("Chan 1 delay chan 2 by {0}".format(delay_time))
def main():
    sampling, maxvalue, wave_data = record.record()

    # Pick out two channels for our study.
    w1, w2 = wave_data[0:2]
    nframes = w1.shape[0]

    # Pad one channel in the head, while the other in the tail,
    # to guarantee same length.
    pad_time_len = 0.01  # second
    pad_len = int(pad_time_len * sampling)
    pad_arr = numpy.zeros(pad_len)
    wp1 = numpy.concatenate((pad_arr, w1))
    wp2 = numpy.concatenate((w2, pad_arr))

    # Get their reduced (amplitude) version, and
    # calculate correlation.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)

    # Plot the channels, also the correlation.
    time_range = numpy.arange(0, nframes + pad_len)*(1.0/sampling)

    # Still shows the original signal
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()

    # Print delay
    print("Chan 1 delay chan 2 by {0}".format(delay_time))
def lms(x1: numpy.array, x2: numpy.array, N: int):
    # Verify argument shape.
    s1, s2 = x1.shape, x2.shape
    if len(s1) != 1 or len(s2) != 1 or s1[0] != s2[0]:
        raise Exception("Argument shape invalid, in 'lms' function")

    l = s1[0]

    # Coefficient matrix
    W = numpy.mat(numpy.zeros([1, 2 * N + 1]))
    # Coefficient (time) matrix
    Wt = numpy.mat(numpy.zeros([l, 2 * N + 1]))
    # Feedback (time) matrix
    y = numpy.mat(numpy.zeros([l, 1]))
    # Error (time) matrix
    e = numpy.mat(numpy.zeros([l, 1]))

    # Traverse channel data
    for i in range(N, l-N):
        x1_vec = numpy.asmatrix(x1[i-N:i+N+1])
        y[i] = x1_vec * numpy.transpose(W)
        e[i] = x2[i] - y[i]
        W += mu * e[i] * x1_vec
        Wt[i] = W

    # Find the coefficient matrix which has max maximum.
    Wt_maxs = numpy.max(Wt, axis=1)
    row_idx = numpy.argmax(Wt_maxs)
    max_W = Wt[row_idx]
    delay_count = numpy.argmax(max_W) - N

    plot(l, x1, x2, y, e)
    return delay_count
def init():
    global fig1, ln_o, ln_x
    ln_o, = plt.plot([], [], 'ro')
    ln_x, = plt.plot([], [], 'bx')
    plt.xlim(-disp_bound, disp_bound)
    plt.ylim(-disp_bound, disp_bound)
    plt.xlabel('x')
    plt.ylabel('y')
    return ln_o,
def plot_channel(audio, sampling):
    channels, nframes = audio.shape[0], audio.shape[1]
    time_range = numpy.arange(0, nframes) * (1.0 / sampling)

    for i in range(1, channels + 1):
        pl.figure(i)
        pl.plot(time_range, audio[i - 1])
        pl.xlabel("time{0}".format(i))

    pl.show()
def plot_angular_velocities(title, angular_velocities,
                            angular_velocities_filtered, block=True):
    fig = plt.figure()

    title_position = 1.05

    fig.suptitle(title, fontsize='24')

    a1 = plt.subplot(1, 2, 1)
    a1.set_title(
        "Angular Velocities Before Filtering \nvx [red], vy [green], vz [blue]",
        y=title_position)
    plt.plot(angular_velocities[:, 0], c='r')
    plt.plot(angular_velocities[:, 1], c='g')
    plt.plot(angular_velocities[:, 2], c='b')

    a2 = plt.subplot(1, 2, 2)
    a2.set_title(
        "Angular Velocities After Filtering \nvx [red], vy [green], vz [blue]",
        y=title_position)
    plt.plot(angular_velocities_filtered[:, 0], c='r')
    plt.plot(angular_velocities_filtered[:, 1], c='g')
    plt.plot(angular_velocities_filtered[:, 2], c='b')

    plt.subplots_adjust(left=0.025, right=0.975, top=0.8, bottom=0.05)

    if plt.get_backend() == 'TkAgg':
        mng = plt.get_current_fig_manager()
        max_size = mng.window.maxsize()
        max_size = (max_size[0], max_size[1] * 0.45)
        mng.resize(*max_size)
    plt.show(block=block)
def plot(self):
    from matplotlib.pylab import show, plot, stem
    pass
def run_regression_1D_collapsed():
    np.random.seed(42)

    print "create dataset ..."
    Xtrain, ytrain, Xtest, ytest = create_dataset()

    alphas = [0.001, 0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 1]
    for alpha in alphas:
        M = 20
        model = vfe.SGPR_collapsed(Xtrain, ytrain, M)
        model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=1000, disp=False)
        my, vy = model.predict_y(Xtest, alpha)
        my = np.reshape(my, ytest.shape)
        vy = np.reshape(vy, ytest.shape)
        rmse = np.sqrt(np.mean((my - ytest)**2))
        ll = np.mean(-0.5 * np.log(2 * np.pi * vy) - 0.5 * (ytest - my)**2 / vy)
        nlml, _ = model.objective_function(model.get_hypers(), alpha)
        print 'alpha=%.3f, train ml=%3f, test rmse=%.3f, ll=%.3f' % (alpha, nlml, rmse, ll)
        # plot(model, Xtrain, ytrain)
        # plt.show()

    # should produce something like this
    # alpha=0.001, train ml=-64.573021, test rmse=0.169, ll=0.348
    # alpha=0.100, train ml=-64.616618, test rmse=0.169, ll=0.348
    # alpha=0.200, train ml=-64.626655, test rmse=0.169, ll=0.348
    # alpha=0.300, train ml=-64.644053, test rmse=0.169, ll=0.348
    # alpha=0.500, train ml=-64.756588, test rmse=0.169, ll=0.348
    # alpha=0.700, train ml=-68.755871, test rmse=0.169, ll=0.350
    # alpha=0.800, train ml=-72.153441, test rmse=0.167, ll=0.349
    # alpha=1.000, train ml=-71.305002, test rmse=0.169, ll=0.303
def run_regression_1D_stoc():
    np.random.seed(42)

    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1)
    Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        xx = np.linspace(-0.5, 1.5, 100)[:, None]
        mean, var = m.predict_f(xx)
        zu = m.sgp_layers[0].zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var[:, 0]),
            mean[:, 0] + 2 * np.sqrt(var[:, 0]),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        plt.xlim(-0.1, 1.1)

    # inference
    print "create model and optimize ..."
    M = 20
    hidden_size = [2]
    model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
    model.optimise(method='adam', alpha=1.0,
                   maxiter=50000, mb_size=M, adam_lr=0.001)
    plot(model)
    plt.show()
    plt.savefig('/tmp/aep_dgpr_1D_stoc.pdf')
def run_cluster_MC():
    import GPy
    # create dataset
    print "creating dataset..."
    N = 100
    k1 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 10, 10, 0.1, 0.1]), ARD=True)
    k2 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 0.1, 10, 0.1, 10]), ARD=True)
    k3 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[0.1, 0.1, 10, 10, 10]), ARD=True)
    X = np.random.normal(0, 1, (N, 5))
    A = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T
    B = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T
    C = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T

    Y = np.vstack((A, B, C))
    labels = np.hstack((np.zeros(A.shape[0]), np.ones(
        B.shape[0]), np.ones(C.shape[0]) * 2))

    # inference
    np.random.seed(42)
    print "inference ..."
    M = 30
    D = 5
    alpha = 0.5
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='adam', adam_lr=0.05, maxiter=2000,
                 prop_mode=config.PROP_MC)
    ls = np.exp(lvm.sgp_layer.ls)
    print ls
    inds = np.argsort(ls)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
    zu = lvm.sgp_layer.zu
    plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
    # plt.show()
    plt.savefig('/tmp/gplvm_cluster_MC.pdf')
def run_pinwheel():
    def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class,
                      rate, rs=np.random.RandomState(0)):
        """Based on code by Ryan P. Adams."""
        rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

        features = rs.randn(num_classes * num_per_class, 2) \
            * np.array([radial_std, tangential_std])
        features[:, 0] += 1
        labels = np.repeat(np.arange(num_classes), num_per_class)

        angles = rads[labels] + rate * np.exp(features[:, 0])
        rotations = np.stack([np.cos(angles), -np.sin(angles),
                              np.sin(angles), np.cos(angles)])
        rotations = np.reshape(rotations.T, (-1, 2, 2))

        return np.einsum('ti,tij->tj', features, rotations)

    # create dataset
    print "creating dataset..."
    Y = make_pinwheel(radial_std=0.3, tangential_std=0.05, num_classes=3,
                      num_per_class=50, rate=0.4)

    # inference
    print "inference ..."
    M = 20
    D = 2
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='L-BFGS-B')

    mx, vx = lvm.get_posterior_x()
    fig = plt.figure()
    ax = fig.add_subplot(121)
    ax.plot(Y[:, 0], Y[:, 1], 'bx')
    ax = fig.add_subplot(122)
    ax.errorbar(mx[:, 0], mx[:, 1], xerr=np.sqrt(
        vx[:, 0]), yerr=np.sqrt(vx[:, 1]), fmt='xk')
    plt.show()
def run_semicircle():
    # create dataset
    print "creating dataset..."
    N = 20
    cos_val = [0.97, 0.95, 0.94, 0.89, 0.8,
               0.88, 0.92, 0.96, 0.7, 0.65,
               0.3, 0.25, 0.1, -0.25, -0.3,
               -0.6, -0.67, -0.75, -0.97, -0.98]
    cos_val = np.array(cos_val).reshape((N, 1))
    # cos_val = 2*np.random.rand(N, 1) - 1
    angles = np.arccos(cos_val)
    sin_val = np.sin(angles)
    Y = np.hstack((sin_val, cos_val))
    Y += 0.05 * np.random.randn(Y.shape[0], Y.shape[1])

    # inference
    print "inference ..."
    M = 10
    D = 2
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='L-BFGS-B', maxiter=2000)
    # lvm.optimise(method='adam', maxiter=2000)

    plt.figure()
    plt.plot(Y[:, 0], Y[:, 1], 'sb')
    mx, vx = lvm.get_posterior_x()
    for i in range(mx.shape[0]):
        mxi = mx[i, :]
        vxi = vx[i, :]
        mxi1 = mxi + np.sqrt(vxi)
        mxi2 = mxi - np.sqrt(vxi)
        mxis = np.vstack([mxi.reshape((1, D)),
                          mxi1.reshape((1, D)),
                          mxi2.reshape((1, D))])
        myis, vyis = lvm.predict_f(mxis)
        plt.errorbar(myis[:, 0], myis[:, 1],
                     xerr=np.sqrt(vyis[:, 0]), yerr=np.sqrt(vyis[:, 1]), fmt='.k')
    plt.show()
def run_frey():
    # import dataset
    data = pods.datasets.brendan_faces()
    # Y = data['Y'][:50, :]
    Y = data['Y']

    Yn = Y - np.mean(Y, axis=0)
    Yn /= np.std(Y, axis=0)
    Y = Yn

    # inference
    print "inference ..."
    M = 30
    D = 20
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='L-BFGS-B', maxiter=10)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1])
    plt.plot(zu[:, 0], zu[:, 1], 'ko')

    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 20
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            canvas[(nx - i - 1) * sx:(nx - i) * sx,
                   j * sy:(j + 1) * sy] = x_mean.reshape(sx, sy)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()
    plt.show()
def run_step_1D_collapsed():
    np.random.seed(42)

    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1) * 3 - 1.5
    Y = step(X)
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        xx = np.linspace(-3, 3, 100)[:, None]
        mean, var = m.predict_f(xx, alpha)
        zu = m.zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var),
            mean[:, 0] + 2 * np.sqrt(var),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        # no_samples = 20
        # f_samples = m.sample_f(xx, no_samples)
        # for i in range(no_samples):
        #     plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
        plt.xlim(-3, 3)

    # inference
    print "create model and optimize ..."
    M = 20
    alpha = 0.01
    model = vfe.SGPR_collapsed(X, Y, M)
    model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=1000)
    plot(model)
    plt.show()