The following 50 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.axis().
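Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the three ways pyplot.axis() is most often called in them: with a list of limits, with no argument to read back the current limits, and with a string option such as 'off' or 'equal'.

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 100)
plt.plot(x, np.sin(x))

# 1. Set explicit limits as [xmin, xmax, ymin, ymax].
plt.axis([0, 2 * np.pi, -1.5, 1.5])

# 2. With no argument, return the current limits (xmin, xmax, ymin, ymax).
limits = plt.axis()
print(limits)

# 3. String options: 'off' hides the axes (common before imshow/savefig),
#    'equal' forces equal aspect (common for pie charts and scatter plots).
plt.axis('off')

plt.show()

The examples that follow are shown verbatim from their source projects, reformatted for readability.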
def showData(self):
    print('???,????···')
    mask = imread(self.picfile)
    imgcolor = ImageColorGenerator(mask)
    wcc = WordCloud(font_path='./msyhl.ttc',
                    mask=mask,
                    background_color='white',
                    max_font_size=200,
                    max_words=300,
                    color_func=imgcolor)
    wc = wcc.generate_from_frequencies(self.data)
    plt.figure()
    plt.imshow(wc)
    plt.axis('off')
    print('?????')
    plt.show()

def plot_pts(points, values, colorbar=True, subplot_loc=None, mytitle=None,
             show_axis='on', vmin=None, vmax=None, xlim=(0, 1), ylim=(0, 1)):
    if subplot_loc is not None:
        plt.subplot(subplot_loc)

    pp = plt.scatter(points[:, 0], points[:, 1], c=values.get_local(),
                     marker=",", s=20, vmin=vmin, vmax=vmax)

    plt.axis(show_axis)

    if colorbar:
        plt.colorbar(pp, fraction=.1, pad=0.2)
    else:
        plt.gca().set_aspect('equal')

    if mytitle is not None:
        plt.title(mytitle, fontsize=20)

    if xlim is not None:
        plt.xlim(xlim)

    if ylim is not None:
        plt.ylim(ylim)

    return pp

def layout_tree(correlation):
    """Layout tree for visualization with e.g. matplotlib.

    Args:
        correlation: A [V, V]-shaped numpy array of latent correlations.

    Returns:
        A [V, 3]-shaped numpy array of spectral positions of vertices.
    """
    assert len(correlation.shape) == 2
    assert correlation.shape[0] == correlation.shape[1]
    assert correlation.dtype == np.float32
    laplacian = -correlation
    np.fill_diagonal(laplacian, 0)
    np.fill_diagonal(laplacian, -laplacian.sum(axis=0))
    evals, evects = scipy.linalg.eigh(laplacian, eigvals=[1, 2, 3])
    assert np.all(evals > 0)
    assert evects.shape[1] == 3
    return evects

def play(self, nb_rounds):
    img_saver = save_image()
    img_saver.next()

    game_cnt = it.count(1)
    for i in xrange(nb_rounds):
        game = self.game(width=self.width, height=self.height)
        screen, _ = game.next()
        img_saver.send(screen)
        frame_cnt = it.count()
        try:
            state = np.asarray([screen] * self.nb_frames)
            while True:
                frame_cnt.next()
                act_idx = np.argmax(
                    self.model.predict(state[np.newaxis]), axis=-1)[0]
                screen, _ = game.send(self.actions[act_idx])
                state = np.roll(state, 1, axis=0)
                state[0] = screen
                img_saver.send(screen)
        except StopIteration:
            print 'Saved %4i frames for game %3i' % (
                frame_cnt.next(), game_cnt.next())
    img_saver.close()

def plotGeneratedImages(epoch, example=100, dim=(10, 10), figsize=(10, 10)):
    noise = np.random.normal(0, 1, size=(example, randomDim))
    generatedImage = generator.predict(noise)
    generatedImage = generatedImage.reshape(example, 28, 28)

    plt.figure(figsize=figsize)
    for i in range(example):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generatedImage[i], interpolation='nearest', cmap='gray')
        # drop the x and y axis
        plt.axis('off')
    plt.tight_layout()

    if not os.path.exists('generated_image'):
        os.mkdir('generated_image')
    plt.savefig('generated_image/wgan_generated_img_epoch_%d.png' % epoch)

def alleviate_conditioning_in_coordinates(self, condition=1e8):
    """pass scaling from `C` to `sigma_vec`.

    As a result, `C` is a correlation matrix, i.e., all diagonal
    entries of `C` are `1`.
    """
    if max(self.dC) / min(self.dC) > condition:
        # allows for much larger condition numbers, if axis-parallel
        if hasattr(self, 'sm') and isinstance(self.sm, sampler.GaussFullSampler):
            old_coordinate_condition = max(self.dC) / min(self.dC)
            old_condition = self.sm.condition_number
            factors = self.sm.to_correlation_matrix()
            self.sigma_vec *= factors
            self.pc /= factors
            self._updateBDfromSM(self.sm)
            utils.print_message('\ncondition in coordinate system exceeded'
                                ' %.1e, rescaled to %.1e, '
                                '\ncondition changed from %.1e to %.1e'
                                % (old_coordinate_condition,
                                   max(self.dC) / min(self.dC),
                                   old_condition, self.sm.condition_number),
                                iteration=self.countiter)

def plot_axes_scaling(self, iabscissa=1):
    from matplotlib import pyplot
    if not hasattr(self, 'D'):
        self.load()
    dat = self
    if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
        pyplot.text(0, dat.D[-1, 5],
                    'all axes scaling values equal to %s'
                    % str(dat.D[-1, 5]),
                    verticalalignment='center')
        return self  # nothing interesting to plot
    self._enter_plotting()
    pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
    # pyplot.hold(True)
    pyplot.grid(True)
    ax = array(pyplot.axis())
    # ax[1] = max(minxend, ax[1])
    pyplot.axis(ax)
    pyplot.title('Principle Axes Lengths')
    # pyplot.xticks(xticklocs)
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self

def ddGa_rel(self):
    """Summary
    Calculates and returns the free energy of association relative to
    the parent free energy of association.

    Returns
    -------
    ndarray
        Array of free energies corresponding to the mutant IDs from the
        Alascan.getMutIDs() method.
    """
    Gsolv = self.Gsolv
    Gref = self.Gref
    Gcoul = self.Gcoul

    dGsolv = Gsolv - Gref

    dGsolu = Gsolv[:, 0] - Gsolv[:, 1:].sum(axis=1)
    dGcoul = Gcoul[:, 0] - Gcoul[:, 1:].sum(axis=1)
    ddGsolv = dGsolv[:, 0] - dGsolv[:, 1:].sum(axis=1)

    dGbind = ddGsolv + dGcoul
    dGbind_rel = dGbind - dGbind[0]
    return dGbind_rel

def _plot_cmc(cmcs, colors, labels, title, fontsize=10, position=None):
    if position is None:
        position = 'lower right'
    # open new page for current plot
    figure = pyplot.figure()

    max_R = 0
    # plot the CMC curves
    for i in range(len(cmcs)):
        probs = bob.measure.cmc(cmcs[i])
        R = len(probs)
        pyplot.semilogx(range(1, R + 1), probs, figure=figure,
                        color=colors[i], label=labels[i])
        max_R = max(R, max_R)

    # change axes accordingly
    ticks = [int(t) for t in pyplot.xticks()[0]]
    pyplot.xlabel('Rank')
    pyplot.ylabel('Probability')
    pyplot.xticks(ticks, [str(t) for t in ticks])
    pyplot.axis([0, max_R, -0.01, 1.01])
    pyplot.legend(loc=position, prop={'size': fontsize})
    pyplot.title(title)

    return figure

def _plot_epc(scores_dev, scores_eval, colors, labels, title,
              fontsize=10, position=None):
    if position is None:
        position = 'upper center'
    # open new page for current plot
    figure = pyplot.figure()

    # plot the DET curves
    for i in range(len(scores_dev)):
        x, y = bob.measure.epc(scores_dev[i][0], scores_dev[i][1],
                               scores_eval[i][0], scores_eval[i][1], 100)
        pyplot.plot(x, y, color=colors[i], label=labels[i])

    # change axes accordingly
    pyplot.xlabel('alpha')
    pyplot.ylabel('HTER')
    pyplot.title(title)
    pyplot.axis([-0.01, 1.01, -0.01, 0.51])
    pyplot.grid(True)
    pyplot.legend(loc=position, prop={'size': fontsize})
    pyplot.title(title)

    return figure

def _opt(self, i, _activation, _delta):
    if not isinstance(self._layers[i], ConvLayer):
        self._weights[i] *= self._regularization_param
        self._weights[i] += self._w_optimizer.run(
            i, _activation.reshape(_activation.shape[0], -1).T.dot(_delta)
        )
        if self._whether_apply_bias:
            self._bias[i] += self._b_optimizer.run(
                i, np.sum(_delta, axis=0, keepdims=True)
            )
    else:
        self._weights[i] *= self._regularization_param
        if _delta[1] is not None:
            self._weights[i] += self._w_optimizer.run(i, _delta[1])
        if self._whether_apply_bias and _delta[2] is not None:
            self._bias[i] += self._b_optimizer.run(i, _delta[2])

# API

def eval_classificationT(self, y, p_y):
    """Calculate the error (100 - accuracy) of the DNN in the case of
    classification.

    :type y: vector
    :param y: vector (r,) of labels

    :type p_y: matrix
    :param p_y: matrix of the output of the network. Each row is a vector
        of probabilities (probabilities of the classes)
    """
    y_ = T.argmax(p_y, axis=1)
    # Accuracy
    error = 1 - T.mean(T.eq(y_, y) * 1.)
    error = error * 100.

    return error

def eval_segmentation_bin(self, y, model_output, th=.5,
                          path='../data/predict/'):
    '''Evaluate the performance of a binary segmentation.
    The default threshold is .5. The evaluation metric is the
    mean squared error.
    '''
    # binarization
    nbr, dim = y.shape
    output_bin = np.float32((model_output > th) * 1.)
    mse = self.MeanSquareError(y, model_output)
    for i in xrange(nbr):
        im_gt = Image.fromarray(np.reshape(np.uint8(y[i, :] * 255.), (128, 128)))
        im_bin = Image.fromarray(np.reshape(np.uint8(output_bin[i, :] * 255.), (128, 128)))
        im_gr = Image.fromarray(np.reshape(np.uint8(model_output[i, :] * 255.), (128, 128)))
        temp = np.concatenate((im_gt, im_bin, im_gr), axis=1)
        two_imgs = sc.misc.toimage(temp)
        sc.misc.imsave(path + str(i) + '.png', two_imgs)
        # two_imgs.show()
        # raw_input('Press ENTER to continue...')
    return mse

def segment(self, y, model_output, th=.5, path='../data/predict/'):
    '''Segment an image using the output of a neural network.
    The default threshold is .5.
    '''
    # binarization
    nbr, dim = y.shape
    output_bin = np.float32((model_output > th) * 1.)
    for i in xrange(nbr):
        im_gt = Image.fromarray(np.reshape(np.uint8(y[i, :]) * 255., (128, 128)))
        im_bin = Image.fromarray(np.reshape(np.uint8(output_bin[i, :]) * 255., (128, 128)))
        im_gr = Image.fromarray(np.reshape(np.uint8(model_output[i, :] * 255.), (128, 128)))
        temp = np.concatenate((im_gt, im_bin, im_gr), axis=1)
        two_imgs = sc.misc.toimage(temp)
        sc.misc.imsave(path + str(i) + '.png', two_imgs)
        # two_imgs.show()
        # raw_input('Press ENTER to continue...')

def updatePlot(self, data):
    """ Update the plot """
    plt.figure(self.fig.number)
    # assert (data.shape[1] == self.nbCh), 'new data does not have the same number of channels'
    # assert (data.shape[0] == self.nbPoints), 'new data does not have the same number of points'
    data = data - np.mean(data, axis=0)
    std_data = np.std(data, axis=0)
    std_data[np.where(std_data == 0)] = 1
    data = data / std_data * self.chRange / 5.0
    for i, chName in enumerate(self.chNames):
        self.chLinesDict[chName].set_ydata(data[:, i] + self.offsets[i])
    plt.draw()

def drawComplex(origData, ripsComplex, axes=[-6, 8, -6, 6]):
    plt.clf()
    plt.axis(axes)
    plt.scatter(origData[:, 0], origData[:, 1])  # plotting just for clarity
    for i, txt in enumerate(origData):
        plt.annotate(i, (origData[i][0] + 0.05, origData[i][1]))  # add labels

    # add lines for edges
    for edge in [e for e in ripsComplex if len(e) == 2]:
        # print(edge)
        pt1, pt2 = [origData[pt] for pt in [n for n in edge]]
        # plt.gca().add_line(plt.Line2D(pt1,pt2))
        line = plt.Polygon([pt1, pt2], closed=None, fill=None, edgecolor='r')
        plt.gca().add_line(line)

    # add triangles
    for triangle in [t for t in ripsComplex if len(t) == 3]:
        pt1, pt2, pt3 = [origData[pt] for pt in [n for n in triangle]]
        line = plt.Polygon([pt1, pt2, pt3], closed=False, color="blue",
                           alpha=0.3, fill=True, edgecolor=None)
        plt.gca().add_line(line)
    plt.show()

def drawComplex(data, ph, axes=[-6, 8, -6, 6]):
    plt.clf()
    plt.axis(axes)  # axes = [x1, x2, y1, y2]
    plt.scatter(data[:, 0], data[:, 1])  # plotting just for clarity
    for i, txt in enumerate(data):
        plt.annotate(i, (data[i][0] + 0.05, data[i][1]))  # add labels

    # add lines for edges
    for edge in [e for e in ph.ripsComplex if len(e) == 2]:
        # print(edge)
        pt1, pt2 = [data[pt] for pt in [n for n in edge]]
        # plt.gca().add_line(plt.Line2D(pt1,pt2))
        line = plt.Polygon([pt1, pt2], closed=None, fill=None, edgecolor='r')
        plt.gca().add_line(line)

    # add triangles
    for triangle in [t for t in ph.ripsComplex if len(t) == 3]:
        pt1, pt2, pt3 = [data[pt] for pt in [n for n in triangle]]
        line = plt.Polygon([pt1, pt2, pt3], closed=False, color="blue",
                           alpha=0.3, fill=True, edgecolor=None)
        plt.gca().add_line(line)
    plt.show()

def vis_square(data):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) thing in a grid of size
    approx. sqrt(n) by sqrt(n)"""

    # normalize data for display
    data = (data - data.min()) / (data.max() - data.min())

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
                (0, 1), (0, 1))                 # add some space between filters
               + ((0, 0),) * (data.ndim - 3))   # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with ones (white)

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

    plt.imshow(data, interpolation='nearest')
    plt.axis('off')

def __plot_canvas(self, show, save):
    if len(self.result) == 0:
        raise Exception('Please run blur_image() method first.')
    else:
        plt.close()
        plt.axis('off')
        fig, axes = plt.subplots(1, len(self.result), figsize=(10, 10))
        if len(self.result) > 1:
            for i in range(len(self.result)):
                axes[i].imshow(self.result[i])
        else:
            plt.axis('off')
            plt.imshow(self.result[0])
        if show and save:
            if self.path_to_save is None:
                raise Exception('Please create Trajectory instance with path_to_save')
            cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]),
                        self.result[0] * 255)
            plt.show()
        elif save:
            if self.path_to_save is None:
                raise Exception('Please create Trajectory instance with path_to_save')
            cv2.imwrite(os.path.join(self.path_to_save, self.image_path.split('/')[-1]),
                        self.result[0] * 255)
        elif show:
            plt.show()

def make_plot(counts):
    """
    Plot the counts for the positive and negative words for each timestep.
    Use plt.show() so that the plot will popup.
    """
    positive = []
    negative = []
    for count in counts:
        for word in count:
            if word[0] == "positive":
                positive.append(word[1])
            else:
                negative.append(word[1])

    plt.axis([-1, len(positive), 0, max(max(positive), max(negative)) + 100])
    pos, = plt.plot(positive, 'b-', marker='o', markersize=10)
    neg, = plt.plot(negative, 'g-', marker='o', markersize=10)
    plt.legend((pos, neg), ('Positive', 'Negative'), loc=2)
    plt.xticks(np.arange(0, len(positive), 1))
    plt.xlabel("Time Step")
    plt.ylabel("Word Count")
    plt.show()

def pieGraphics(Labels, ValueList, graphicTitle='??'):
    colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral',
              "blue", "green", "cyan", "magenta"]
    maxdata = max(ValueList)
    explode = []
    for v in ValueList:
        if v == maxdata:
            explode.append(0.1)
        else:
            explode.append(0)
    print(explode)
    patches, l_text, p_text = plt.pie(ValueList, labels=Labels, colors=colors,
                                      autopct='%1.1f%%', explode=explode,
                                      shadow=True, startangle=90)
    for font in l_text:
        font.set_fontproperties(FontProperties(fname=PATH_SUFFIX + 'SIMLI.TTF'))
    plt.title(graphicTitle, fontproperties=font_set, y=1.05)
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    plt.axis('equal')
    plt.show()

def pieGraphics(Labels, ValueList, graphicTitle='??'):
    # The slices will be ordered and plotted counter-clockwise.
    # labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
    # sizes = [15, 30, 45, 10]
    colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral',
              "blue", "green", "cyan", "magenta"]
    explode = (0, 0.1, 0, 0)  # only "explode" the 2nd slice (i.e. 'Hogs')

    plt.pie(ValueList, labels=Labels, colors=colors, autopct='%1.1f%%',
            shadow=True, startangle=90)
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    plt.axis('equal')
    plt.show()

# barGraphics('??','??',['A','B','C','D','E','F'],[29,30,40,47,38,23],'????')
# linePlotGraphics("xLabel","yLabel",[1,2,3,4,5,6,7,8,9,10],[1.1,1.9,2.6,3.6,9.8,14,24,40,80,150],graphicTitle='??')
# scatterPlotsGraphics("xLabel","yLabel",[1,2,3,4,5,6,7,8,9,10],[1,11.9,2,6.3,6,9.8,14,4,8,5],graphicTitle='??')

def compare_pr_auc(measures, fid=1, axis_interval=[0, 1, 0, 1],
                   marker_list=['g-', 'r-', 'b-']):
    plt.figure(fid)
    plt.title('P-R Curve')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.axis(axis_interval)
    for i in range(len(measures['precisions'])):
        plt.plot(measures['recalls'][i], measures['precisions'][i], marker_list[i],
                 label='%s pr_auc ' % (measures['model_name'][i]))
        # plt.plot(measures['precisions'][i],measures['recalls'][i],marker_list[i],label='%s pr_auc : %f'%(measures['model_name'][i], measures['pr_auc'][i]))
    # plt.plot(measures['precisions'][0],measures['recalls'][0],'r-',measures['precisions'][0],measures['recalls'][0],'ro',label='%s pr_auc : %f'%(measures['model_name'][0], measures['pr_auc'][0]))
    # plt.plot(measures['precisions'][1], measures['recalls'][1], 'g-', label='%s pr_auc : %f' % (measures['model_name'][1], measures['pr_auc'][1]))
    # plt.plot(measures['precisions'][1], measures['recalls'][1], 'g-', measures['precisions'][1], measures['recalls'][1], 'go',label='%s pr_auc : %f' % (measures['model_name'][1], measures['pr_auc'][1]))
    # plt.legend(loc='lower center', shadow=True, fontsize='x-large')
    plt.legend(loc='lower center', shadow=True)
    plt.show()

def plot_confusion_matrix(self):
    # Calculate and create confusion matrix
    conf_mat = np.zeros((len(self.categories.keys()), len(self.categories.keys())))
    for idx in range(len(self.predictions_int)):
        conf_mat[self.predictions_int[idx]][self.true_ys_int[idx]] += 1
    for idx1 in range(conf_mat.shape[0]):
        total = np.sum(conf_mat, axis=0)[idx1]
        for idx2 in range(conf_mat.shape[1]):
            conf_mat[idx1][idx2] = float(conf_mat[idx1][idx2] / total)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(conf_mat)
    fig.colorbar(cax)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.set_xticklabels([''] + self.inv_categories.values(), rotation='vertical')
    ax.set_yticklabels([''] + self.inv_categories.values())
    plt.show()

def plot_axes_scaling(self, iabscissa=1):
    if not hasattr(self, 'D'):
        self.load()
    dat = self
    self._enter_plotting()
    pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
    pyplot.hold(True)
    pyplot.grid(True)
    ax = array(pyplot.axis())
    # ax[1] = max(minxend, ax[1])
    pyplot.axis(ax)
    pyplot.title('Principle Axes Lengths')
    # pyplot.xticks(xticklocs)
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self

def plot(file_path, iterations):
    im = Image.open(file_path)
    im = np.array(im, dtype=np.uint8)

    plt.figure(figsize=(20, 16))
    plt.subplot(121)
    plt.imshow(im)
    plt.axis('off')

    plt.subplot(122)
    plt.imshow(np.zeros((640, 300, 3)))
    height = 14
    for i in range(len(labels)):
        plt.text(0, height * i + height / 2, labels[i],
                 family='Times New Roman', size=14, color='#ffffff')
    plt.axis('off')
    # plt.savefig(idx)
    plt.show()

def cellplot(fs, csf):
    """
    Plots PSF kernels
    --------------------------------------------------------------------------
    Usage:

    Call:  cellplot(fs, csf)

    Input: fs   PSF kernels, i.e. 3d array with kernels indexed by 0th index
           csf  size of kernels in x and y direction

    Output: Shows stack of PSF kernels arranged according to csf
    --------------------------------------------------------------------------
    Copyright (C) 2011 Michael Hirsch
    """
    mp.clf()
    for i in range(np.prod(csf)):
        mp.subplot(csf[0], csf[1], i + 1)
        mp.imshow(fs[i])
        mp.axis('off')
    mp.draw()

def hist_test():
    mu, sigma = 100, 15
    x = mu + sigma * np.random.randn(10000)

    # the histogram of the data
    n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)

    plt.xlabel('Smarts')
    plt.ylabel('Probability')
    plt.title('Histogram of IQ')
    plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
    plt.axis([40, 160, 0, 0.03])
    plt.grid(True)
    plt.show()

def testcase1():
    # http://www.jianshu.com/p/1ad947f98e4c
    np.random.seed(2000)
    y = np.random.standard_normal((20, 2)).cumsum(axis=0)

    plt.figure(figsize=(7, 4))
    plt.plot(y[:, 0], lw=1.5, label='1st')
    plt.plot(y[:, 1], lw=1.5, label='2nd')
    plt.plot(y, 'ro')
    plt.grid(True)
    plt.legend(loc=0)
    plt.axis('tight')
    plt.xlabel('index')
    plt.ylabel('value')
    plt.title('A Simple Plot')
    plt.show()

def plot_svd(sigma_full, sigma_dls, k, plot_loc):
    """
    Plot the variance explained by different principal components
    :param n_components: Number of components to show the variance
    :param ylim: y-axis limits
    :param fig: matplotlib Figure object
    :param ax: matplotlib Axis object
    :return: fig, ax
    """
    fig, ax = plt.subplots()
    ax.scatter(range(len(sigma_full)), sigma_full, c='red', s=36,
               edgecolors='gray', lw=0.5, label='TCC singular values')
    ax.scatter(range(len(sigma_dls)), sigma_dls, c='blue', s=36,
               edgecolors='gray', lw=0.5, label='TCC_dls singular values')
    ax.legend(loc='upper right', bbox_to_anchor=(1.05, 1))
    ax.set_xlabel('Components')
    ax.set_ylabel('Singular Values')
    plt.title('TCC Distribution Singular Values')
    fig.tight_layout()
    plt.savefig(plot_loc + 'plot_pca_variance_explained_' + str(k) + '.pdf')

def tru_plot9(X, labels, t, plot_suffix, clust_names, clust_color, plot_loc):
    """
    From clustering_on_transcript_compatibility_counts, see github for MIT license
    """
    unique_labels = np.unique(labels)
    plt.figure(figsize=(15, 10))
    for i in unique_labels:
        ind = np.squeeze(labels == i)
        plt.scatter(X[ind, 0], X[ind, 1], c=clust_color[i], s=36,
                    edgecolors='gray', lw=0.5, label=clust_names[i])
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1))
    plt.legend(loc='upper right', bbox_to_anchor=(1.19, 1.01))
    plt.title(t)
    plt.xlim([-20, 20])
    plt.ylim([-20, 20])
    plt.axis('off')
    plt.savefig(plot_loc + 't-SNE_plot_tru_plot9_' + plot_suffix + '.pdf',
                bbox_inches='tight')

# Plot function with Zeisel's colors corresponding to labels

def animate(i):
    y.append(j.readRawValues())
    if (len(y) > 100):
        y.popleft()
    x = range(len(y))
    ax1.clear()
    plt.axis([0, 100, -2050, 2050])
    ax1.plot(x, y)

def heatmap(src_sent, tgt_sent, att_weights, idx):
    plt.figure(figsize=(8, 6), dpi=80)
    att_probs = np.stack(att_weights, axis=1)

    plt.imshow(att_weights, cmap='gray', interpolation='nearest')
    # src_sent = [str(s) for s in src_sent]
    # tgt_sent = [str(s) for s in tgt_sent]
    # plt.xticks(range(0, len(tgt_sent)), tgt_sent, rotation='vertical')
    # plt.yticks(range(0, len(src_sent)), src_sent)
    plt.xticks(range(0, len(tgt_sent)), "")
    plt.yticks(range(0, len(src_sent)), "")
    plt.axis('off')
    plt.savefig("att_matrix_" + str(idx), bbox_inches='tight')
    plt.close()

def show_setup(self, halt=True):
    """Open a plot window that shows the simulation setup including
    boundaries, outputs and material regions.

    Args:
        halt: Halt script execution until plot window is closed.
    """
    pp.figure()
    self.axes = pp.gca()
    pp.axis('equal')
    self.axes.set_xlim(0, max(self.field.x.vector) / self._x_axis_factor)
    self.axes.set_ylim(0, max(self.field.y.vector) / self._y_axis_factor)
    self.axes.set_xlabel('{0} / {1}m'.format(self.x_label, self._x_axis_prefix))
    self.axes.set_ylabel('{0} / {1}m'.format(self.y_label, self._y_axis_prefix))

    if self.show_materials:
        for mat_region in self.field.material_regions:
            self.plot_region(mat_region.region)

    if self.show_boundaries:
        for name, component in self.field_components.items():
            for boundary in component.boundaries:
                self.plot_region(boundary.region)

    if self.show_output:
        for name, component in self.field_components.items():
            for output in component.outputs:
                self.plot_region(output.region)

    if halt:
        pp.show()

def plot_feature_overlap(df, cmap='binary', method='cluster'):
    """Plot feature-feature presence overlap of a pandas dataframe.

    Args:
        df: A pandas dataframe.
        cmap: A matplotlib colormap.
        method: Method of clustering, one of 'cluster' or 'tree'.
    """
    V = len(df.columns)
    present = (df == df).as_matrix().astype(np.float32)
    overlap = np.dot(present.T, present)
    assert overlap.shape == (V, V)

    # Sort features to make blocks contiguous.
    if method == 'tree':
        # TODO(fritzo) Fix this to not look awful.
        grid = make_complete_graph(V)
        weights = np.empty(grid.shape[1], dtype=np.float32)
        for k, v1, v2 in grid.T:
            weights[k] = overlap[v1, v2]
        edges = estimate_tree(grid, weights)
        order, order_inv = order_vertices(edges)
    elif method == 'cluster':
        distance = scipy.spatial.distance.pdist(overlap)
        clustering = scipy.cluster.hierarchy.complete(distance)
        order_inv = scipy.cluster.hierarchy.leaves_list(clustering)
    else:
        raise ValueError(method)
    overlap = overlap[order_inv, :]
    overlap = overlap[:, order_inv]
    assert overlap.shape == (V, V)

    pyplot.imshow(overlap**0.5, cmap=cmap)
    pyplot.axis('off')

def contract_positions(XY, edges, stepsize):
    """Perturb vertex positions by an L1-minimizing attractive force.

    This is used to slightly adjust vertex positions to provide a visual
    hint to their grouping.

    Args:
        XY: A [V, 2]-shaped numpy array of the current positions.
        edges: An [E, 2]-shaped numpy array of edges as (vertex,vertex) pairs.
    """
    E = edges.shape[0]
    V = E + 1
    assert edges.shape == (E, 2)
    assert XY.shape == (V, 2)
    old = XY
    new = old.copy()
    heads = edges[:, 0]
    tails = edges[:, 1]
    diff = old[heads] - old[tails]
    distances = (diff**2).sum(axis=1)**0.5
    spacing = distances.min()
    assert spacing > 0
    diff /= distances[:, np.newaxis]
    diff *= spacing
    new[tails] += stepsize * diff
    new[heads] -= stepsize * diff
    return new

def plot(obj, colorbar=True, subplot_loc=None, mytitle=None, show_axis='off',
         vmin=None, vmax=None, logscale=False):
    if subplot_loc is not None:
        plt.subplot(subplot_loc)

    # plt.gca().set_aspect('equal')
    if isinstance(obj, dl.Function):
        pp = mplot_function(obj, vmin, vmax, logscale)
    elif isinstance(obj, dl.CellFunctionSizet):
        pp = mplot_cellfunction(obj)
    elif isinstance(obj, dl.CellFunctionDouble):
        pp = mplot_cellfunction(obj)
    elif isinstance(obj, dl.CellFunctionInt):
        pp = mplot_cellfunction(obj)
    elif isinstance(obj, dl.Mesh):
        if (obj.geometry().dim() != 2):
            raise AttributeError('Mesh must be 2D')
        pp = plt.triplot(mesh2triang(obj), color='#808080')
        colorbar = False
    else:
        raise AttributeError('Failed to plot %s' % type(obj))

    plt.axis(show_axis)

    if colorbar:
        plt.colorbar(pp, fraction=.1, pad=0.2)
    else:
        plt.gca().set_aspect('equal')

    if mytitle is not None:
        plt.title(mytitle, fontsize=20)

    return pp

def load_mnist(self):
    data_dir = os.path.join("./data", "mnist")

    fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float)

    fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape((60000)).astype(np.float)

    fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float)

    fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'))
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape((10000)).astype(np.float)

    trY = np.asarray(trY)
    teY = np.asarray(teY)

    X = np.concatenate((trX, teX), axis=0)
    y = np.concatenate((trY, teY), axis=0)

    seed = 547
    np.random.seed(seed)
    np.random.shuffle(X)
    np.random.seed(seed)
    np.random.shuffle(y)

    # convert label to one-hot
    y_vec = np.zeros((len(y), 10), dtype=np.float)
    for i, label in enumerate(y):
        y_vec[i, int(y[i])] = 1.0

    return X / 255., y_vec

def vis_square(visu_path, data, type):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) thing in a grid of size
    approx. sqrt(n) by sqrt(n)"""

    # normalize data for display
    data = (data - data.min()) / (data.max() - data.min())

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
                (0, 1), (0, 1))                 # add some space between filters
               + ((0, 0),) * (data.ndim - 3))   # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with ones (white)

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

    plt.imshow(data[:, :, 0])
    plt.axis('off')

    if type:
        plt.savefig('./{}/weights.png'.format(visu_path), format='png')
    else:
        plt.savefig('./{}/activation.png'.format(visu_path), format='png')

def axes4x4(labels=("t", "x"), xmin=-4, xmax=4, ymin=-4, ymax=4, fontsize=20):
    """Set axes to [-4,4]×[-4,4] and label them

    args
    ====
    - labels — axes labels (x, y)
    """
    plt.axis([xmin, xmax, ymin, ymax])
    center_spines()
    xscale = (xmax - xmin) / 8.
    yscale = (ymax - ymin) / 8.
    plt.text(xmax - 0.2 * xscale, 0.2 * yscale, "$%s$" % labels[0],
             fontsize=fontsize, verticalalignment='bottom')
    plt.text(0.1 * xscale, ymax - 0.3 * yscale, "$%s$" % labels[1],
             fontsize=fontsize)

def draw_axes(xmin, xmax, ymin, ymax, labels=("x", "y")):
    plt.axis([xmin, xmax, ymin, ymax])
    center_spines()
    plt.text(xmax, 0, "$%s$" % labels[0], fontsize=20,
             verticalalignment='bottom', horizontalalignment='right')
    plt.text(0, ymax, "$%s$" % labels[1], fontsize=20,
             verticalalignment='top', horizontalalignment='right')

def center_spines(ax=None, centerx=0, centery=0):
    """Centers the axis spines at <centerx, centery> on the axis "ax", and
    places arrows at the end of the axis spines."""
    if ax is None:
        ax = plt.gca()

    # Set the axis's spines to be centered at the given point
    # (Setting all 4 spines so that the tick marks go in both directions)
    ax.spines['left'].set_position(('data', centerx))
    ax.spines['bottom'].set_position(('data', centery))
    ax.spines['right'].set_position(('data', centerx))
    ax.spines['top'].set_position(('data', centery))

    # Hide the line (but not ticks) for "extra" spines
    for side in ['right', 'top']:
        ax.spines[side].set_color('none')

    # On both the x and y axes...
    for axis, center in zip([ax.xaxis, ax.yaxis], [centerx, centery]):
        # Turn on minor and major gridlines and ticks
        axis.set_ticks_position('both')
        axis.grid(True, 'major', ls='solid', lw=0.5, color='gray')
        # axis.grid(True, 'minor', ls='solid', lw=0.1, color='gray')
        axis.set_minor_locator(mpl.ticker.AutoMinorLocator())

        # Hide the ticklabels at <centerx, centery>
        formatter = CenteredFormatter()
        formatter.center = center
        axis.set_major_formatter(formatter)

    # Add offset ticklabels at <centerx, centery> using annotation
    # (Should probably make these update when the plot is redrawn...)
    xlabel, ylabel = map(formatter.format_data, [centerx, centery])
    if centerx != 0 or centery != 0:
        annotation = '(%s, %s)' % (xlabel, ylabel)
    else:
        annotation = xlabel
    ax.annotate(annotation, (centerx, centery), xytext=(-4, -4),
                textcoords='offset points', ha='right', va='top')

def save_image(folder='images'):
    """ Coroutine of image saving """
    from matplotlib import pyplot as plt
    from matplotlib import colors

    if folder not in os.listdir('.'):
        os.mkdir(folder)
    frame_cnt = it.count()

    cmap = colors.ListedColormap(['#009688', '#E0F2F1', '#004D40'])
    bounds = [0, 0.25, 0.75, 1]
    norm = colors.BoundaryNorm(bounds, cmap.N)

    while True:
        screen = (yield)
        shape = screen.shape
        plt.imshow(
            screen, interpolation='none', cmap=cmap, norm=norm,
            aspect='equal', extent=(0, shape[1], 0, shape[0]))
        plt.grid(True)
        plt.axis('off')
        plt.savefig('%s/frame%06i.png' % (folder, frame_cnt.next()))

def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
        )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh), fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()

def imshow_plt(label, im, block=True):
    global figures
    if label not in figures:
        figures[label] = plt.imshow(im, interpolation=None, animated=True, label=label)
        plt.tight_layout()
        plt.axis('off')
    figures[label].set_data(im)
    # figures[label].canvas.draw()
    # plt.draw()
    plt.show(block=block)