The following 50 code examples, extracted from Python open-source projects, demonstrate how to use matplotlib.pyplot.contourf().
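Before the project examples, here is a minimal, self-contained sketch of the basic contourf() call pattern (a grid from np.meshgrid, a 2D array of values, a level count, and a colormap). The Gaussian test field and all parameter choices below are illustrative assumptions, not taken from any of the projects.

import numpy as np
import matplotlib.pyplot as plt

# Build a regular grid and evaluate an illustrative scalar field on it.
x = np.linspace(-3.0, 3.0, 200)
y = np.linspace(-3.0, 3.0, 200)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X**2 + Y**2))  # any 2D array shaped like X and Y works here

# Filled contours: 20 automatically chosen levels and an explicit colormap.
cs = plt.contourf(X, Y, Z, 20, cmap='viridis')
plt.colorbar(cs, label='field value')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Minimal contourf() example')
plt.show()
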
def showHeightMap(x, y, z, zi):
    ''' show height map in matplotlib '''
    zi = zi.transpose()
    plt.imshow(zi, vmin=z.min(), vmax=z.max(), origin='lower',
               extent=[y.min(), y.max(), x.min(), x.max()])
    plt.colorbar()
    CS = plt.contour(zi, 15, linewidths=0.5, colors='k',
                     extent=[y.min(), y.max(), x.min(), x.max()])
    CS = plt.contourf(zi, 15, cmap=plt.cm.rainbow,
                      extent=[y.min(), y.max(), x.min(), x.max()])
    z = z.transpose()
    plt.scatter(y, x, c=z)
    # invert axes
    #plt.gca().invert_xaxis()
    #plt.gca().invert_yaxis()
    plt.show()
    return

def plot(self, nmin=-3.5, nmax=1.5):
    """Plots the field magnitude."""
    x, y = meshgrid(
        linspace(XMIN/ZOOM+XOFFSET, XMAX/ZOOM+XOFFSET, 200),
        linspace(YMIN/ZOOM, YMAX/ZOOM, 200))
    z = zeros_like(x)
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            z[i, j] = log10(self.magnitude([x[i, j], y[i, j]]))
    levels = arange(nmin, nmax+0.2, 0.2)
    cmap = pyplot.cm.get_cmap('plasma')
    pyplot.contourf(x, y, numpy.clip(z, nmin, nmax),
                    10, cmap=cmap, levels=levels, extend='both')

# pylint: disable=too-few-public-methods

def saturation_index_countour(lab, elem1, elem2, Ks, labels=False):
    plt.figure()
    plt.title('Saturation index %s%s' % (elem1, elem2))
    resoluion = 100
    n = math.ceil(lab.time.size / resoluion)
    plt.xlabel('Time')
    z = np.log10((lab.species[elem1]['concentration'][:, ::n] + 1e-8) * (
        lab.species[elem2]['concentration'][:, ::n] + 1e-8) / lab.constants[Ks])
    lim = np.max(abs(z))
    lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
    X, Y = np.meshgrid(lab.time[::n], -lab.x)
    plt.xlabel('Time')
    CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(sns.color_palette(
        "RdBu_r", 101)), origin='lower', levels=lim, extend='both')
    if labels:
        plt.clabel(CS, inline=1, fontsize=10, colors='w')
    # cbar = plt.colorbar(CS)
    if labels:
        plt.clabel(CS, inline=1, fontsize=10, colors='w')
    cbar = plt.colorbar(CS)
    plt.ylabel('Depth')
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    cbar.ax.set_ylabel('Saturation index %s%s' % (elem1, elem2))
    return ax

def contour_plot_of_rates(lab, r, labels=False, last_year=False):
    plt.figure()
    plt.title('{}'.format(r))
    resoluion = 100
    n = math.ceil(lab.time.size / resoluion)
    if last_year:
        k = n - int(1 / lab.dt)
    else:
        k = 1
    z = lab.estimated_rates[r][:, k - 1:-1:n]
    # lim = np.max(np.abs(z))
    # lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
    X, Y = np.meshgrid(lab.time[k::n], -lab.x)
    plt.xlabel('Time')
    CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(
        sns.color_palette("Blues", 51)))
    if labels:
        plt.clabel(CS, inline=1, fontsize=10, colors='w')
    cbar = plt.colorbar(CS)
    plt.ylabel('Depth')
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    cbar.ax.set_ylabel('Rate %s [M/V/T]' % r)
    return ax

def contour_plot_of_delta(lab, element, labels=False, last_year=False):
    plt.figure()
    plt.title('Rate of %s consumption/production' % element)
    resoluion = 100
    n = math.ceil(lab.time.size / resoluion)
    if last_year:
        k = n - int(1 / lab.dt)
    else:
        k = 1
    z = lab.species[element]['rates'][:, k - 1:-1:n]
    lim = np.max(np.abs(z))
    lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
    X, Y = np.meshgrid(lab.time[k:-1:n], -lab.x)
    plt.xlabel('Time')
    CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(sns.color_palette(
        "RdBu_r", 101)), origin='lower', levels=lim, extend='both')
    if labels:
        plt.clabel(CS, inline=1, fontsize=10, colors='w')
    cbar = plt.colorbar(CS)
    plt.ylabel('Depth')
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    cbar.ax.set_ylabel('Rate of %s change $[\Delta/T]$' % element)
    return ax

def save_texture(x, y, hfield, fname, path=None):
    '''
    @param path, str (optional). If not provided, DEFAULT_PATH is used.
        Make sure this matches the <texturedir> of the <compiler> element
        in the env XML
    '''
    path = _checkpath(path)
    plt.figure()
    plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()
    # for some reason plt.grid does not work here, so generate gridlines manually
    for i in np.arange(xmin, xmax, 0.5):
        plt.plot([i, i], [ymin, ymax], 'k', linewidth=0.1)
    for i in np.arange(ymin, ymax, 0.5):
        plt.plot([xmin, xmax], [i, i], 'k', linewidth=0.1)
    plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.close()

def plot_decision_boundary(X, Y, model):
    # X - some data in a 2-dimensional np.array
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                         np.arange(y_min, y_max, 0.01))
    # here "model" is your model's prediction (classification) function
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.axis('off')
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)

def gen_blurred_diag_pxy(s):
    X = 1024
    Y = X
    # generate pdf
    from scipy.stats import multivariate_normal
    pxy = np.zeros((X, Y))
    rv = multivariate_normal(cov=s)
    for x in range(X):
        pxy[x, :] = np.roll(rv.pdf(np.linspace(-X/2, X/2, X+1)[:-1]), int(X/2+x))
    pxy = pxy/np.sum(pxy)
    # plot p(x,y)
    import matplotlib.pyplot as plt
    plt.figure()
    plt.contourf(pxy)
    plt.ion()
    plt.title("p(x,y)")
    plt.show()
    return pxy

def _plot_policy(self, policy, n_iter):
    policy_matrix = numpy.zeros((10, 10), dtype='float')
    for stateid in range(len(self.states)):
        dealer_showing, player_state = self.states[stateid].split('#')
        dealer_showing = 0 if dealer_showing == 'A' else int(dealer_showing)-1
        player_state = int(player_state)
        if player_state >= 12 and player_state < 22:
            for actionid in range(len(self.actions)):
                if policy[stateid, actionid] == 1.0:
                    policy_matrix[player_state-12, dealer_showing] = actionid
    fig = plt.figure()
    print(policy_matrix)
    plt.contourf(range(10), range(12, 22), policy_matrix, 1, cmap='coolwarm',
                 corner_mask=True)
    plt.title('policy in iteration %i' % n_iter)
    plt.xlabel('dealer showing')
    plt.ylabel('player sum')
    plt.show()
    # fig.savefig('experiments/policy%i' % n_iter)

def _plot_policy(self, policy, n_iter):
    policy_matrix = numpy.zeros((10, 10), dtype='float')
    for stateid in range(len(self.states)):
        dealer_showing, player_state = self.states[stateid].split('#')
        dealer_showing = 0 if dealer_showing == 'A' else int(dealer_showing)-1
        player_state = int(player_state)
        if player_state >= 12 and player_state < 22:
            for actionid in range(len(self.actions)):
                if policy[stateid, actionid] == 1.0:
                    policy_matrix[player_state-12, dealer_showing] = actionid
    fig = plt.figure()
    # print policy_matrix
    plt.contourf(range(10), range(12, 22), policy_matrix, 1, cmap='coolwarm',
                 corner_mask=True)
    plt.title('policy in iteration %i' % n_iter)
    plt.xlabel('dealer showing')
    plt.ylabel('player sum')
    plt.show()
    # fig.savefig('experiments/policy%i' % n_iter)

def plot_decision_boundary(pred_func, X, y, bounds, filename=None):
    if plt is None:
        return
    fig = plt.figure()
    h = 0.01
    # Generate a grid of points with distance h between them
    x_min, x_max, y_min, y_max = bounds
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
    if filename:
        plt.savefig(filename)
        plt.close()
    else:
        plt.show()
    return fig

def plot_decision_boundary(pred_func, X, y, bounds, filename=None):
    if plt is None:
        return
    plt.figure()
    h = 0.01
    # Generate a grid of points with distance h between them
    x_min, x_max, y_min, y_max = bounds
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
    if filename:
        plt.savefig(filename)
        plt.close()
    else:
        plt.show()

def animateFrequency(self, filename):
    "Animate a 2D nuclear wavefunction as it evolves in time"
    plottingAmplitude = np.abs(self.filteredNonresphasingFrequencySignals
                               + self.filteredResphasingFrequencySignals)
    zMin = np.min(plottingAmplitude)
    zMax = np.max(plottingAmplitude)
    contourLevels = 100
    contourSpacings = np.linspace(zMin, zMax, contourLevels)
    yVals = self.probeFrequencies
    xVals = self.pumpFrequencies
    fig = plt.figure()
    im = plt.contourf(xVals, yVals, plottingAmplitude[0], contourSpacings)
    ax = fig.gca()

    def animate(i, data, ax, fig):
        ax.cla()
        im = ax.contourf(xVals, yVals, data[i], contourSpacings)
        plt.title(str(i))
        return im,

    anim = animation.FuncAnimation(fig,
                                   animate,
                                   frames=self.rawNonresphasingFrequencySignals.shape[0],
                                   interval=20,
                                   blit=True,
                                   fargs=(plottingAmplitude, ax, fig))
    anim.save(filename, fps=20)

def plotSpaceFunction(self, spaceFunction):
    d = len(spaceFunction.shape)
    if d > 2:
        print("NO PLOTTING FOR MORE THAN TWO DIMENSIONS")
        return None
    if d == 1:
        x = self.xValues
        y = spaceFunction
        fig = plt.figure()
        plt.plot(x, y)
        return fig
    else:
        x = self.xValues
        y = self.xValues
        z = spaceFunction
        fig = plt.figure()
        plt.contourf(x, y, z)
        return fig

def plot_heatmap(self, data_matrix, title="", xlab="", ylab="", colormap=plt.cm.jet):
    """Plot heatmap of data matrix.

    :param self: object.
    :param data_matrix: 2D array to be plotted.
    :param title: Figure title.
    :param xlab: X axis label.
    :param ylab: Y axis label.
    :param colormap: matplotlib color map.
    :returns: None
    :rtype: object
    """
    fig = plt.figure()
    p = plt.contourf(data_matrix)
    plt.colorbar(p, orientation='vertical', cmap=colormap)
    self._set_properties_and_close(fig, title, xlab, ylab)

def plot_decision_boundary(pred_func, X, y):
    """
    Set min and max values and give it some padding.
    """
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

def plotDecisionBoundary():
    svc = svm.SVC(kernel='linear', C=1, gamma=0).fit(X, y)
    # create a mesh to plot in
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    h = (x_max - x_min) / 100  # mesh step size
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    plt.subplot(1, 1, 1)
    Z = svc.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.title('SVC with linear kernel')
    plt.show()

def visualize2D(fig, ax, xs, ys, bins=200,
                xlabel='x', ylabel='y',
                xlim=None, ylim=None):
    H, xedges, yedges = numpy.histogram2d(xs, ys, bins)
    H = numpy.rot90(H)
    H = numpy.flipud(H)
    Hmasked = numpy.ma.masked_where(H == 0, H)
    ax.pcolormesh(xedges, yedges, Hmasked)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if xlim is None:
        xlim = (min(xs), max(xs))
    if ylim is None:
        ylim = (min(ys), max(ys))
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
    fig.colorbar(pyplot.contourf(Hmasked))

def plot(X, Y, pred_func):
    # determine canvas borders
    mins = np.amin(X, 0)
    mins = mins - 0.1 * np.abs(mins)
    maxs = np.amax(X, 0)
    maxs = maxs + 0.1 * maxs
    ## generate dense grid
    xs, ys = np.meshgrid(np.linspace(mins[0], maxs[0], 300),
                         np.linspace(mins[1], maxs[1], 300))
    # evaluate model on the dense grid
    Z = pred_func(np.c_[xs.flatten(), ys.flatten()])
    Z = Z.reshape(xs.shape)
    # Plot the contour and training examples
    plt.contourf(xs, ys, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=Y, s=50,
                cmap=colors.ListedColormap(['orange', 'blue']))
    plt.show()

def plot_decision_boundary(pred_func, X, y, title=None):
    """Plot the decision boundary of a classifier.

    :param pred_func: prediction function
    :param X: input data X
    :param y: labels y
    :return: None
    """
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
    if title:
        plt.title(title)
    plt.show()

def plot_decision_boundary(pred_func, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
    plt.show()

# Helper function to evaluate the total loss on the dataset

def plot(X, Y, pred_func):
    # determine canvas borders
    mins = np.amin(X, 0)
    mins = mins - 0.1 * np.abs(mins)
    maxs = np.amax(X, 0)
    maxs = maxs + 0.1 * maxs
    ## generate dense grid
    xs, ys = np.meshgrid(np.linspace(mins[0], maxs[0], 300),
                         np.linspace(mins[1], maxs[1], 300))
    # evaluate model on the dense grid
    Z = pred_func(np.c_[xs.flatten(), ys.flatten()])
    Z = Z.reshape(xs.shape)
    # Plot the contour and training examples
    plt.contourf(xs, ys, Z, cmap=plt.cm.Spectral, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=Y, s=40, cmap=plt.cm.Spectral)

def lidar_plot(ds, var, step, img_filename):
    """ Plot lidar profile as heatmap. """
    colormap = plt.get_cmap('jet')
    x = ds['Time'][::step]
    y = ds['Altitude'][:]
    z = ds[var][:, ::step]
    plt.contourf(x, y, z, colormap=colormap)
    plt.xlim(x.min(), x.max())
    plt.ylim(0, 10000)
    figure = plt.figure(1, (10, 8), 80)
    # remove any margins
    plt.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)
    figure.savefig(img_filename)
    plt.close()

def plot_proba_map(i, lat, lon, clusters, class_prob, label, lat_event, lon_event):
    plt.clf()
    class_prob = class_prob / np.sum(class_prob)
    assert np.isclose(np.sum(class_prob), 1)
    risk_map = np.zeros_like(clusters, dtype=np.float64)
    for cluster_id in range(len(class_prob)):
        x, y = np.where(clusters == cluster_id)
        risk_map[x, y] = class_prob[cluster_id]
    plt.contourf(lon, lat, risk_map, cmap='YlOrRd', alpha=0.9,
                 origin='lower', vmin=0.0, vmax=1.0)
    plt.colorbar()
    plt.plot(lon_event, lat_event, marker='+', c='k', lw='5')
    plt.contour(lon, lat, clusters, colors='k', hold='on')
    plt.xlim((min(lon), max(lon)))
    plt.ylim((min(lat), max(lat)))
    png_name = os.path.join(args.output,
                            '{}_pred_{}_label_{}.eps'.format(i, np.argmax(class_prob), label))
    plt.savefig(png_name)
    plt.close()

def update(t, fu_hat):
    """Callback to do some intermediate processing."""
    f_hat, u_hat = fu_hat[:]  # views
    fu[:] = TT.backward(fu_hat, fu)
    f, u = fu[:]  # views
    ekin = 0.5*energy_fourier(T.comm, f_hat)
    es = 0.5*energy_fourier(T.comm, 1j*K*u_hat)
    eg = gamma*np.sum(0.5*u**2 - 0.25*u**4)/np.prod(np.array(N))
    eg = comm.allreduce(eg)
    gradu[:] = TV.backward(1j*K*u_hat, gradu)
    ep = comm.allreduce(np.sum(f*gradu)/np.prod(np.array(N)))
    ea = comm.allreduce(np.sum(np.array(X)*(0.5*f**2 + 0.5*gradu**2
                        - (0.5*u**2 - 0.25*u**4)*f))/np.prod(np.array(N)))
    if rank == 0:
        image.ax.clear()
        image.ax.contourf(X[1][..., 0], X[0][..., 0], u[..., N[2]//2], 100)
        plt.pause(1e-6)
        #plt.savefig('Klein_Gordon_{}_real_{}.png'.format(N[0], tstep))
        print("Time = %2.2f Total energy = %2.8e Linear momentum %2.8e Angular momentum %2.8e"
              % (t, ekin+es+eg, ep, ea))

def plot_Locations(env, force):
    X, Y = np.meshgrid(np.arange(0, env.nX), np.arange(0, env.nY))
    Z = env.getTerrainCellElevations()
    plt.figure()
    plt.contourf(X, Y, Z)
    c = ['r', 'b']
    a = 0
    for F in force:
        x = []
        y = []
        for C in np.arange(0, len(F.company)):
            for P in np.arange(0, len(F.company[C].platoon)):
                for S in np.arange(0, len(F.company[C].platoon[P].section)):
                    for M in np.arange(0, len(F.company[C].platoon[P].section[S].unit.member)):
                        if F.company[C].platoon[P].section[S].unit.member[M].status != 2:
                            x.append(F.company[C].platoon[P].section[S].unit.member[M].location[0])
                            y.append(F.company[C].platoon[P].section[S].unit.member[M].location[1])
        plt.scatter(x, y, c=c[a], marker='.')
        plt.scatter(F.hq.member.location[0], F.hq.member.location[1], c=c[a], marker='x')
        plt.scatter(F.objective.ctr[0], F.objective.ctr[1], c=c[a])
        a += 1
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.title('Asset Location')
    plt.show()

def plot_Detected(env, force):
    X, Y = np.meshgrid(np.arange(0, env.nX), np.arange(0, env.nY))
    Z = env.getTerrainCellElevations()
    plt.figure()
    plt.contourf(X, Y, Z)
    c = ['b', 'r']  # Colours are reversed (locations represent adversary)
    a = 0
    for F in force:
        x = []
        y = []
        for l in F.detected_location:
            x.append(l[0])
            y.append(l[1])
        plt.scatter(x, y, c=c[a], marker='.')
        a += 1
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.title('Detected Enemy Asset Location')
    plt.show()

def plot_SectionCOA(force, env):
    X, Y = np.meshgrid(np.arange(0, env.nX), np.arange(0, env.nY))
    Z = env.getTerrainCellElevations()
    c = ['r', 'b']
    a = 0
    for F in force:
        plt.figure()
        plt.contourf(X, Y, Z)
        for C in F.company:
            for P in C.platoon:
                for i in P.assignment:
                    path = i[0]
                    x_loc = []
                    y_loc = []
                    for s in path:
                        x_loc.append(P.sector_loc[s][0])
                        y_loc.append(P.sector_loc[s][1])
                    plt.plot(x_loc, y_loc, c=c[a])
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.title('Section COAs')
        plt.show()
        a += 1

def visualize2d(self, x=None, y=None, plot_scale=2, plot_precision=0.01):
    x = self._x if x is None else x
    y = self._y if y is None else y
    plot_num = int(1 / plot_precision)
    xf = np.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num)
    yf = np.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num)
    input_x, input_y = np.meshgrid(xf, yf)
    input_xs = np.c_[input_x.ravel(), input_y.ravel()]
    if self._x.shape[1] != 2:
        return
    output_ys_2d = np.argmax(self.predict(input_xs), axis=1).reshape(len(xf), len(yf))
    output_ys_3d = self.predict(input_xs)[..., 0].reshape(len(xf), len(yf))
    xf, yf = np.meshgrid(xf, yf, sparse=True)
    plt.contourf(input_x, input_y, output_ys_2d, cmap=cm.Spectral)
    plt.scatter(x[..., 0], x[..., 1], c=np.argmax(y, axis=1), s=40, cmap=cm.Spectral)
    plt.axis("off")
    plt.show()
    if self._y.shape[1] == 2:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(xf, yf, output_ys_3d, cmap=cm.coolwarm)
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        plt.show()

def visualize2d(self, x=None, y=None, plot_scale=2, plot_precision=0.01):
    x = self._x if x is None else x
    y = self._y if y is None else y
    plot_num = int(1 / plot_precision)
    xf = np.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num)
    yf = np.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num)
    input_x, input_y = np.meshgrid(xf, yf)
    input_xs = np.c_[input_x.ravel(), input_y.ravel()]
    if self._x.shape[1] != 2:
        return
    output_ys_2d = np.argmax(self.predict(input_xs), axis=1).reshape(len(xf), len(yf))
    output_ys_3d = self.predict(input_xs)[:, 0].reshape(len(xf), len(yf))
    xf, yf = np.meshgrid(xf, yf, sparse=True)
    plt.contourf(input_x, input_y, output_ys_2d, cmap=cm.Spectral)
    plt.scatter(x[:, 0], x[:, 1], c=np.argmax(y, axis=1), s=40, cmap=cm.Spectral)
    plt.axis("off")
    plt.show()
    if self._y.shape[1] == 2:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(xf, yf, output_ys_3d, cmap=cm.coolwarm)
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        plt.show()

def contour_plot(lab, element, labels=False, days=False, last_year=False):
    plt.figure()
    plt.title(element + ' concentration')
    resoluion = 100
    n = math.ceil(lab.time.size / resoluion)
    if last_year:
        k = n - int(1 / lab.dt)
    else:
        k = 1
    if days:
        X, Y = np.meshgrid(lab.time[k::n] * 365, -lab.x)
        plt.xlabel('Time')
    else:
        X, Y = np.meshgrid(lab.time[k::n], -lab.x)
        plt.xlabel('Time')
    z = lab.species[element]['concentration'][:, k - 1:-1:n]
    CS = plt.contourf(X, Y, z, 51, cmap=ListedColormap(
        sns.color_palette("Blues", 51)), origin='lower')
    if labels:
        plt.clabel(CS, inline=1, fontsize=10, colors='w')
    cbar = plt.colorbar(CS)
    plt.ylabel('Depth')
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    cbar.ax.set_ylabel('%s [M/V]' % element)
    if element == 'Temperature':
        plt.title('Temperature contour plot')
        cbar.ax.set_ylabel('Temperature, C')
    if element == 'pH':
        plt.title('pH contour plot')
        cbar.ax.set_ylabel('pH')
    return ax

def plot_nps(X, Y, NPS):
    """Plots the 2D frequency plot for the NPS.
    Returns the figure reference."""
    fig_nps = plt.figure()
    plt.contourf(X, Y, NPS, cmap='inferno')
    plt.xlabel('spatial frequency [cycles/length]')
    plt.ylabel('spatial frequency [cycles/length]')
    plt.axis(tight=True)
    plt.gca().set_aspect('equal')
    plt.colorbar()
    plt.title('Noise Power Spectrum')
    return fig_nps

def save_heightfield(x, y, hfield, fname, path=None):
    '''
    @param path, str (optional). If not provided, DEFAULT_PATH is used.
        Make sure the path + fname match the <file> attribute of the <asset>
        element in the env XML where the height field is defined
    '''
    path = _checkpath(path)
    plt.figure()
    # terrain_cmap is necessary to make sure tops get light color
    plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
    plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.close()

def save_figure(x, y, state, position1, position2):
    fig = plt.figure(figsize=figsize, dpi=dpi)
    plt.contourf(x, y, state)
    plt.plot([position1], [position2], marker, markersize=markersize)
    fig.savefig(''.join([location, str(datetime.now()), filetype]))

def plot_objective():
    x, y = np.meshgrid(np.arange(-6, 6, 0.1), np.arange(-6, 6, 0.1))
    z = np.array([[objective.feedback([y[i, j], x[i, j]])
                   for i in range(x.shape[0])]
                  for j in range(x.shape[1])])
    plt.contourf(x, y, z, cmap=plt.cm.Blues,
                 levels=np.linspace(z.min(), z.max(), 30))
    plt.setp(plt.gca(), xticks=(), yticks=(), xlim=(-5, 5), ylim=(-5, 5))

def plot_decision_boundary(pred_func):
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

def weibull_contour(Y, U, is_discrete, true_alpha, true_beta,
                    logx=True, samples=200, lines=True):
    xlist = np.linspace(true_alpha / np.e, true_alpha * np.e, samples)
    ylist = np.linspace(true_beta / np.e, true_beta * np.e, samples)
    x_grid, y_grid = np.meshgrid(xlist, ylist)
    loglik = x_grid * 0
    if is_discrete:
        fun = weibull.discrete_loglik
    else:
        fun = weibull.continuous_loglik
    for i in xrange(len(Y)):
        loglik = loglik + \
            fun(Y[i], x_grid, y_grid, U[i])
    z_grid = loglik / len(Y)
    plt.figure()
    if logx:
        x_grid = np.log(x_grid)
        true_alpha = np.log(true_alpha)
        xlab = r'$\log(\alpha)$'
    else:
        xlab = r'$\alpha$'
    cp = plt.contourf(x_grid, y_grid, z_grid, 100, cmap='jet')
    plt.colorbar(cp)
    if lines:
        plt.axvline(true_alpha, linestyle='dashed', c='black')
        plt.axhline(true_beta, linestyle='dashed', c='black')
    plt.xlabel(xlab)
    plt.ylabel(r'$\beta$')

def plot_decision_boundary(pred_func):
    # Set min and max values and give it some padding
    x_min, x_max = train_X[:, 0].min() - .5, train_X[:, 0].max() + .5
    y_min, y_max = train_X[:, 1].min() - .5, train_X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(train_X[:, 0], train_X[:, 1], c=train_y, cmap=plt.cm.Spectral)
    plt.show()

def gen_dir_pxy():
    # param
    X = 128
    Y = 16
    cx = 1000.
    cys = np.logspace(-2., 1., num=X, base=10)
    # build pxy
    px = np.random.dirichlet(cx*np.ones(X))
    py_x = np.zeros((Y, X))
    for x in range(X):
        py_x[:, x] = np.random.dirichlet(cys[x]*np.ones(Y))
    pxy = np.multiply(np.tile(px, (Y, 1)), py_x).T
    # plot p(x,y)
    import matplotlib.pyplot as plt
    plt.figure()
    plt.contourf(pxy)
    plt.ion()
    plt.title("p(x,y)")
    plt.show()
    # plot histogram of H(p(y|x)) over x
    plt.hist(entropy(py_x), bins='auto')
    plt.title("entropies of conditionals p(y|x)")
    plt.show()
    # calc ixy
    py = pxy.sum(axis=0)
    hy = entropy(py)
    hy_x = np.dot(px, entropy(py_x))
    ixy = hy-hy_x
    print("I(X;Y) = %.3f" % ixy)
    return pxy

def gen_gaussian_pxy():
    # param
    cov = np.array([[1.5, 1.1], [1.1, 1]])
    X = 128
    Y = 128
    xlow = -2
    xhigh = 2
    ylow = -2
    yhigh = 2
    #x, y = np.mgrid[-1.5:1.5:.01, -1.5:1.5:.01]
    x, y = np.meshgrid(np.linspace(xlow, xhigh, X), np.linspace(ylow, yhigh, Y))
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x
    pos[:, :, 1] = y
    # generate pdf
    from scipy.stats import multivariate_normal
    import matplotlib.pyplot as plt
    rv = multivariate_normal(cov=cov)
    pxy = rv.pdf(pos)
    pxy = pxy/np.sum(pxy)
    # plot to make sure everything looks right
    plt.figure()
    plt.contourf(x, y, rv.pdf(pos))
    plt.ion()
    plt.show()
    # calc ixy analytically and numerically
    cx = abs(cov[0, 0])
    cy = abs(cov[1, 1])
    c = np.linalg.det(cov)
    ixy_true = .5*math.log2((cx*cy)/c)
    print("I(X;Y) = %.3f (analytical)" % ixy_true)
    px = pxy.sum(axis=1)
    py = pxy.sum(axis=0)
    py_x = np.multiply(pxy.T, np.tile(1./px, (Y, 1)))
    hy = entropy(py)
    hy_x = np.dot(px, entropy(py_x))
    ixy_emp = hy-hy_x
    print("I(X;Y) = %.3f (empirical)" % ixy_emp)
    return pxy

def plot_pxy(self, save=False, path=None):
    fig = plt.figure()
    if self.pxy is not None:
        if self.s == 2:
            plt.xlabel('Y', fontsize=14, fontweight='bold')
            plt.ylabel('X', fontsize=14, fontweight='bold')
        plt.contourf(self.pxy)
        plt.show()
        if save:
            if path is None:
                raise ValueError('must specify path to save figure')
            else:
                fig.savefig(path + self.name + '_pxy_s%i' % self.s + '.pdf',
                            bbox_inches='tight')
    else:
        print("pxy not yet defined")

def plot_interpolated(self, aperture_centers, aperture_means):
    """
    This function ...
    :param aperture_centers:
    :param aperture_means:
    :return:
    """
    x_values = np.array([center.x for center in aperture_centers])
    y_values = np.array([center.y for center in aperture_centers])
    x_ticks = np.arange(0, self.frame.xsize, 1)
    y_ticks = np.arange(0, self.frame.ysize, 1)
    z_grid = mlab.griddata(x_values, y_values, aperture_means, x_ticks, y_ticks)
    self.sky = Frame(z_grid)
    from matplotlib.backends import backend_agg as agg
    from matplotlib import cm
    # plot
    #fig = Figure()
    # create the figure
    fig = plt.figure()
    agg.FigureCanvasAgg(fig)  # attach the rasterizer
    ax = fig.add_subplot(1, 1, 1)  # make axes to plot on
    ax.set_title("Interpolated Contour Plot of Experimental Data")
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    cmap = cm.get_cmap("hot")  # get the "hot" color map
    contourset = ax.contourf(x_ticks, y_ticks, z_grid, 10, cmap=cmap)
    cbar = fig.colorbar(contourset)
    cbar.set_ticks([0, 100])
    fig.axes[-1].set_ylabel("Z")  # last axes instance is the colorbar
    plt.show()

# -----------------------------------------------------------------

def _plot_policy(self, policy, n_iter):
    policy_matrix = numpy.zeros((self.max_car+1, self.max_car+1), dtype='float')
    for stateid in range(len(self.states)):
        state = [int(t) for t in self.states[stateid].split('#')]
        for actionid in range(len(self.actions)):
            if policy[stateid, actionid] == 1.0:
                policy_matrix[state[0], state[1]] = self.actions[actionid]
    fig = plt.figure()
    plt.contourf(range(self.max_car+1), range(self.max_car+1), policy_matrix, 10,
                 cmap='coolwarm')
    plt.title('policy in iteration %i' % n_iter)
    plt.xlabel('#cars at A')
    plt.ylabel('#cars at B')
    # plt.show()
    fig.savefig('experiments/policy%i' % n_iter)

def perform_adaboost(self, X_train_std, y_train, X_test_std, y_test):
    ## perform adaboost
    ada = AdaBoostClassifier(n_estimators=10)
    ada.fit(X_train_std, y_train)
    train_score = cross_val_score(ada, X_train_std, y_train)
    print('The training accuracy is {:.2f}%'.format(train_score.mean()*100))
    test_score = cross_val_score(ada, X_test_std, y_test)
    print('The test accuracy is {:.2f}%'.format(test_score.mean()*100))
    X = X_test_std
    y = y_test
    resolution = 0.01
    #Z = svm.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'green', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y_test))])
    X = X_test_std
    y = y_test
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = ada.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.5, c=cmap(idx),
                    marker=markers[idx], label=cl)
    plt.show()

def perform_random_forest(self, X_train_std, y_train, X_test_std, y_test):
    ## perform random forest
    rfc = RandomForestClassifier(n_estimators=10, max_depth=None,
                                 min_samples_split=2, random_state=0)
    # we create an instance of Neighbours Classifier and fit the data.
    rfc.fit(X_train_std, y_train)
    train_score = cross_val_score(rfc, X_train_std, y_train)
    print('The training accuracy is {:.2f}%'.format(train_score.mean()*100))
    test_score = cross_val_score(rfc, X_test_std, y_test)
    print('The test accuracy is {:.2f}%'.format(test_score.mean()*100))
    X = X_test_std
    y = y_test
    resolution = 0.01
    #Z = svm.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'green', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y_test))])
    X = X_test_std
    y = y_test
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = rfc.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.5, c=cmap(idx),
                    marker=markers[idx], label=cl)
    plt.show()

def perform_logistic(self, X_train_std, y_train, X_test_std, y_test):
    h = .02  # step size in the mesh
    logreg = linear_model.LogisticRegression(C=1e5)
    # we create an instance of Neighbours Classifier and fit the data.
    logreg.fit(X_train_std, y_train)
    print('The training accuracy is {:.2f}%'.format(logreg.score(X_train_std, y_train)*100))
    print('The test accuracy is {:.2f}%'.format(logreg.score(X_test_std, y_test)*100))
    X = X_test_std
    y = y_test
    resolution = 0.01
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    #Z = svm.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'green', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y_test))])
    X = X_test_std
    y = y_test
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = logreg.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.5, c=cmap(idx),
                    marker=markers[idx], label=cl)
    plt.show()

def perform_svm(self, X_train_std, y_train, X_test_std, y_test):
    svm = SVC(kernel='rbf', random_state=0, gamma=.10, C=1.0)
    svm.fit(X_train_std, y_train)
    print('The training accuracy is {:.2f}%'.format(svm.score(X_train_std, y_train)*100))
    print('The test accuracy is {:.2f}%'.format(svm.score(X_test_std, y_test)*100))
    X = X_test_std
    y = y_test
    resolution = 0.01
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = svm.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'green', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y_test))])
    X = X_test_std
    y = y_test
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = svm.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.5, c=cmap(idx),
                    marker=markers[idx], label=cl)
    plt.show()
