The following 50 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.show().
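Before the project examples, here is a minimal, self-contained sketch of the usual call pattern (assuming only numpy and matplotlib are installed): the figure is composed first, and plt.show() is called last to open the window and hand control to the GUI event loop.

import numpy as np
import matplotlib.pyplot as plt

# Compose the figure first ...
x = np.linspace(0, 2 * np.pi, 200)
plt.plot(x, np.sin(x), label='sin(x)')
plt.legend()

# ... then display it; by default this blocks until the window is closed
# (pass block=False for a non-blocking display).
plt.show()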
def showData(self):
    # NOTE: the original progress messages were Chinese strings lost to mojibake;
    # neutral English placeholders are used here.
    print('Generating word cloud...')
    mask = imread(self.picfile)
    imgcolor = ImageColorGenerator(mask)
    wcc = WordCloud(font_path='./msyhl.ttc',
                    mask=mask,
                    background_color='white',
                    max_font_size=200,
                    max_words=300,
                    color_func=imgcolor)
    wc = wcc.generate_from_frequencies(self.data)
    plt.figure()
    plt.imshow(wc)
    plt.axis('off')
    print('Displaying word cloud...')
    plt.show()
def visualize(self, zv, path):
    self.ax1.clear()
    self.ax2.clear()
    z, v = zv
    if path:
        np.save(path + '/trajectory.npy', z)
    z = np.reshape(z, [-1, 2])
    self.ax1.hist2d(z[:, 0], z[:, 1], bins=400)
    self.ax1.set(xlim=self.xlim(), ylim=self.ylim())
    v = np.reshape(v, [-1, 2])
    self.ax2.hist2d(v[:, 0], v[:, 1], bins=400)
    self.ax2.set(xlim=self.xlim(), ylim=self.ylim())
    if self.display:
        import matplotlib.pyplot as plt
        plt.show()
        plt.pause(0.1)
    elif path:
        self.fig.savefig(path + '/visualize.png')
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1],
                              fill=False, edgecolor='g', linewidth=3)
            )
            plt.title('{} {:.3f}'.format(class_name, score))
            plt.show()
def test(path_test, input_size, hidden_size, batch_size, save_dir, model_name, maxlen):
    db = read_data(path_test)
    X = create_sequences(db[:-maxlen], win_size=maxlen, step=maxlen)
    X = np.reshape(X, (X.shape[0], X.shape[1], input_size))

    # build the model: 1 layer LSTM
    print('Build model...')
    model = Sequential()
    model.add(LSTM(hidden_size, return_sequences=False, input_shape=(maxlen, input_size)))
    model.add(Dense(maxlen))

    model.load_weights(save_dir + model_name)
    model.compile(loss='mse', optimizer='adam')

    prediction = model.predict(X, batch_size, verbose=1)
    prediction = prediction.flatten()
    # prediction_container = np.array(prediction).flatten()

    Y = db[maxlen:]
    plt.plot(prediction, label='prediction')
    plt.plot(Y, label='true')
    plt.legend()
    plt.show()
def get_feature_importance(list_of_features):
    n_estimators = 10000
    random_state = 0
    n_jobs = 4
    x_train = data_frame[list_of_features]
    y_train = data_frame.iloc[:, -1]
    feat_labels = data_frame.columns[1:]
    forest = BaggingRegressor(n_estimators=n_estimators, random_state=random_state, n_jobs=n_jobs)
    forest.fit(x_train, y_train)
    importances = forest.feature_importances_
    indices = np.argsort(importances)[::-1]
    for f in range(x_train.shape[1]):
        print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))
    plt.title("Feature Importance")
    plt.bar(range(x_train.shape[1]), importances[indices], color='lightblue', align='center')
    plt.xticks(range(x_train.shape[1]), feat_labels[indices], rotation=90)
    plt.xlim([-1, x_train.shape[1]])
    plt.tight_layout()
    plt.show()
def plot_bar_chart(label_to_value, title, x_label, y_label):
    """
    Plots a bar chart from a dict.

    Args:
        label_to_value: A dict mapping ints or strings to numerical values (int or float).
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    n = len(label_to_value)
    labels = sorted(label_to_value.keys())
    values = [label_to_value[label] for label in labels]
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.bar(range(n), values, align='center')
    plt.xticks(range(n), labels, rotation='vertical', fontsize='7')
    plt.gcf().subplots_adjust(bottom=0.2)  # make room for x-axis labels
    plt.show()
def plot_line_graph_multiple_lines(x, label_to_values, title, x_label, y_label):
    if not all(len(x) == len(values) for values in label_to_values.values()):
        raise ValueError('values of label_to_values must have length len(x)')
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    line_styles = ['-', '--', ':']
    for (i, label) in enumerate(sorted(label_to_values.keys())):
        color = colors[i % len(colors)]
        line_style = line_styles[(i // len(colors)) % len(line_styles)]
        plt.plot(x, label_to_values[label], label=label, color=color, linestyle=line_style)
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 9})
    plt.tight_layout(pad=9)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()

# x_min, x_max for example proportion_initiated_by_user
def plot_histogram(x, n_bins, title, x_label, y_label):
    """
    Plots a histogram from a list of data.

    Args:
        x: A list of floats representing the data.
        n_bins: An int representing the number of bins to plot.
        title: A string representing the title of the graph.
        x_label: A string representing the label for the x-axis.
        y_label: A string representing the label for the y-axis.
    """
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.hist(x, bins=n_bins)
    plt.show()

# probability
def decode_segmap(self, temp, plot=False):
    label_colours = self.get_pascal_labels()
    r = temp.copy()
    g = temp.copy()
    b = temp.copy()
    for l in range(0, self.n_classes):
        r[temp == l] = label_colours[l, 0]
        g[temp == l] = label_colours[l, 1]
        b[temp == l] = label_colours[l, 2]
    rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
    rgb[:, :, 0] = r / 255.0
    rgb[:, :, 1] = g / 255.0
    rgb[:, :, 2] = b / 255.0
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def decode_segmap(self, temp, plot=False):
    # TODO:(@meetshah1995)
    # Verify that the color mapping is 1-to-1
    r = temp.copy()
    g = temp.copy()
    b = temp.copy()
    for l in range(0, self.n_classes):
        r[temp == l] = 10 * (l % 10)
        g[temp == l] = l
        b[temp == l] = 0
    rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
    rgb[:, :, 0] = (r / 255.0)
    rgb[:, :, 1] = (g / 255.0)
    rgb[:, :, 2] = (b / 255.0)
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def get_masks(scans, masks_list):
    # %matplotlib inline
    scans1 = scans.copy()
    maxv = 255
    masks = np.zeros(shape=(scans.shape[0], 1, img_rows, img_cols))
    for i_m in range(len(masks_list)):
        for i in range(-masks_list[i_m][3], masks_list[i_m][3] + 1):
            for j in range(-masks_list[i_m][3], masks_list[i_m][3] + 1):
                masks[masks_list[i_m][0], 0, masks_list[i_m][2] + i, masks_list[i_m][1] + j] = 1
        for i1 in range(-masks_list[i_m][3], masks_list[i_m][3] + 1):
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2] + i1, masks_list[i_m][1] + masks_list[i_m][3]] = maxv = 255
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2] + i1, masks_list[i_m][1] - masks_list[i_m][3]] = maxv = 255
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2] + masks_list[i_m][3], masks_list[i_m][1] + i1] = maxv = 255
            scans1[masks_list[i_m][0], 0, masks_list[i_m][2] - masks_list[i_m][3], masks_list[i_m][1] + i1] = maxv = 255
    for i in range(scans.shape[0]):
        print('scan ' + str(i))
        f, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(scans1[i, 0, :, :], cmap=plt.cm.gray)
        ax[1].imshow(masks[i, 0, :, :], cmap=plt.cm.gray)
        plt.show()
    return masks
def test_penalty_env(env):
    import envs
    env = envs.create_env("Pong", location="bottom", catastrophe_type="1",
                          classifier_file=save_classifier_path + '/0/final.ckpt')
    import matplotlib.pyplot as plt

    observation = env.reset()
    for _ in range(20):
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        plt.imshow(observation[:, :, 0])
        plt.show()
        print('Cat: ', info['frame/is_catastrophe'])
        print('reward: ', reward)
        if done:
            break
def analyze(context=None, results=None):
    import matplotlib.pyplot as plt
    # Plot the portfolio and asset data.
    # (The original Chinese legend/axis labels were lost to mojibake; English
    # approximations based on the plotted series are used here.)
    ax1 = plt.subplot(211)
    results.algorithm_period_return.plot(ax=ax1, color='blue', legend=u'algorithm return')
    ax1.set_ylabel(u'return')
    results.benchmark_period_return.plot(ax=ax1, color='red', legend=u'benchmark return')
    # Show the plot.
    plt.gcf().set_size_inches(18, 8)
    plt.show()

# loading the data
def analyze(context=None, results=None):
    import matplotlib.pyplot as plt
    import logbook
    logbook.StderrHandler().push_application()
    log = logbook.Logger('Algorithm')

    fig = plt.figure()
    ax1 = fig.add_subplot(211)
    # (Original Chinese labels lost to mojibake; English approximations used.)
    results.algorithm_period_return.plot(ax=ax1, color='blue', legend=u'algorithm return')
    ax1.set_ylabel(u'return')
    results.benchmark_period_return.plot(ax=ax1, color='red', legend=u'benchmark return')
    plt.show()

# capital_base is the base value of capital
def writeBinaray(outputFile, imagePath, label):
    img = Image.open(imagePath)
    img = img.resize((imageSize, imageSize), PIL.Image.ANTIALIAS)
    img = np.array(img)
    r = img[:, :, 0].flatten()
    g = img[:, :, 1].flatten()
    b = img[:, :, 2].flatten()
    label = [label]
    out = np.array(list(label) + list(r) + list(g) + list(b), np.uint8)
    outputFile.write(out.tobytes())

    # if you want to show the encoded image, set the 'debugEncodedImage' flag
    if debugEncodedImage:
        showImage(r, g, b)
def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True):
    fig = plt.figure(figsize=(10, 3))
    for i in range(len(detected_events)):
        d = detected_events[i]
        plt.axvspan(d[0], d[1], 0, 0.5)
        plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i],
                 horizontalalignment='center', verticalalignment='center')
    for i in range(len(ground_truth_events)):
        gt = ground_truth_events[i]
        plt.axvspan(gt[0], gt[1], 0.5, 1)
        plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i],
                 horizontalalignment='center', verticalalignment='center')
    plt.tight_layout()
    if show:
        plt.show()
    else:
        plt.draw()
def main():
    fish = loadmat('./data/fish.mat')

    X1 = np.zeros((fish['X'].shape[0], fish['X'].shape[1] + 1))
    X1[:, :-1] = fish['X']
    X2 = np.ones((fish['X'].shape[0], fish['X'].shape[1] + 1))
    X2[:, :-1] = fish['X']
    X = np.vstack((X1, X2))

    Y1 = np.zeros((fish['Y'].shape[0], fish['Y'].shape[1] + 1))
    Y1[:, :-1] = fish['Y']
    Y2 = np.ones((fish['Y'].shape[0], fish['Y'].shape[1] + 1))
    Y2[:, :-1] = fish['Y']
    Y = np.vstack((Y1, Y2))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)

    reg = affine_registration(X, Y)
    reg.register(callback)
    plt.show()
def make_new_pie_from_callers(callers, call_name=None):
    # plot the stats
    fig, ax = plt.subplots()
    if call_name:
        ax.set_title('Breakdown of {} callees'.format(call_name))
    labels, sizes, callbacks = make_pie_from_callers(callers)
    wedges, _ = ax.pie(sizes, labels=labels)
    for w in wedges:
        w.set_picker(True)

    def onclick(evt):
        l = evt.artist.get_label()
        cb = callbacks[l]
        if cb:
            if l == 'other':
                l = '{}/other'.format(call_name)
            make_new_pie_from_callers(cb, call_name=l)

    fig.canvas.mpl_connect('pick_event', onclick)
    ax.axis('equal')
    plt.show()
def main():
    parser = generate_parser()
    args = parser.parse_args()
    infile1 = h5py.File(args.input1, 'r')
    infile2 = h5py.File(args.input2, 'r')
    resolutions = numpy.intersect1d(infile1['resolutions'][...], infile2['resolutions'][...])
    chroms = numpy.intersect1d(infile1['chromosomes'][...], infile2['chromosomes'][...])
    data1 = load_data(infile1, chroms, resolutions)
    data2 = load_data(infile2, chroms, resolutions)
    infile1.close()
    infile2.close()
    results = {}
    results[(args.input1.split('/')[-1].strip('.quasar'),
             args.input2.split('/')[-1].strip('.quasar'))] = correlate_samples(data1, data2)
    for resolution in data1.keys():
        for chromo in chroms:
            plt.scatter(data1[resolution][chromo][1].flatten(),
                        data2[resolution][chromo][1].flatten(),
                        alpha=0.1, color='red')
            plt.show()
            plt.savefig(args.output + '.res' + str(resolution) + '.chr' + chromo + '.pdf')
def plot_labeled_images_random(image_list, label_list, categories, n, title_str, ypixels, xpixels, seed, filename):
    random.seed(seed)
    index_sample = random.sample(range(len(image_list)), n)
    plt.figure(figsize=(2 * n, 2))
    # plt.suptitle(title_str)
    for i, ind in enumerate(index_sample):
        ax = plt.subplot(1, n, i + 1)
        plt.imshow(image_list[ind].reshape(ypixels, xpixels))
        plt.gray()
        ax.set_title(categories[label_list[ind]], fontsize=20)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_unlabeled_images_random: plots unlabeled images at random
def plot_unlabeled_images_random(image_list, n, title_str, ypixels, xpixels, seed, filename):
    random.seed(seed)
    index_sample = random.sample(range(len(image_list)), n)
    plt.figure(figsize=(2 * n, 2))
    plt.suptitle(title_str)
    for i, ind in enumerate(index_sample):
        ax = plt.subplot(1, n, i + 1)
        plt.imshow(image_list[ind].reshape(ypixels, xpixels))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_compare: given test images and their reconstruction, we plot them for visual comparison
def plot_compare(x_test, decoded_imgs, filename):
    n = 10
    plt.figure(figsize=(2 * n, 4))
    for i in range(n):
        # display original
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    if 1:
        pylab.savefig(filename, bbox_inches='tight')
    else:
        plt.show()

# plot_img: plots greyscale image
def plot_traing_info(x, ylist, path):
    """
    Loads log file and plots x and y values as provided by input.
    Saves as <path>/train_log.png
    """
    file_name = os.path.join(path, __train_log_file_name)
    try:
        with open(file_name, "rb") as f:
            log = pickle.load(f)
    except IOError:  # first time
        warnings.warn("There is no {} file here!!!".format(file_name))
        return
    plt.figure()
    x_vals = log[x]
    for y in ylist:
        y_vals = log[y]
        if len(y_vals) != len(x_vals):
            warnings.warn("One of y's: {} does not have the same length as x: {}".format(y, x))
        plt.plot(x_vals, y_vals, label=y)
        # assert len(y_vals) == len(x_vals), "not the same len"
    plt.xlabel(x)
    plt.legend()
    # plt.show()
    plt.savefig(file_name[:-3] + 'png', bbox_inches='tight')
    plt.close('all')
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    # Clear figure 1 before drawing (moved ahead of imshow so the image and
    # text annotations are not wiped before they are shown).
    f = plt.figure(1)
    f.clf()
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j - 0.1, i + 0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j - 0.1, i + 0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=True)
def plot_single_day_traffic(df):
    y_tj_l1 = df["tj_level1_count"]
    y_tj_l2 = df["tj_level2_count"]
    y_tj_l3 = df["tj_level3_count"]
    y_tj_l4 = df["tj_level4_count"]
    x_time = df["time"]
    x_district = df["district"]

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(x_time, x_district, y_tj_l1)
    # ax.plot_surface(x_time, x_district, y_tj_l1)
    print(plt.get_backend())
    plt.show()
    plt.savefig("plot_traffic.png")
def plot_3D(img, threshold=-400):
    verts, faces = measure.marching_cubes(img, threshold)
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')
    mesh = Poly3DCollection(verts[faces], alpha=0.1)
    face_color = [0.5, 0.5, 1]
    mesh.set_facecolor(face_color)
    ax.add_collection3d(mesh)
    ax.set_xlim(0, img.shape[0])
    ax.set_ylim(0, img.shape[1])
    ax.set_zlim(0, img.shape[2])
    plt.show()
def load_data():
    """Draw the Mott lobes."""
    res = np.load(r'data_%d.npy' % GRID_SIZE)
    x = res[:, 0]
    y = res[:, 1]
    z = []
    for i, entry in enumerate(res):
        z.append(kinetic_energy(entry[2:], -1.))
    plt.pcolor(
        np.reshape(x, (GRID_SIZE, GRID_SIZE)),
        np.reshape(y, (GRID_SIZE, GRID_SIZE)),
        np.reshape(z, (GRID_SIZE, GRID_SIZE))
    )
    plt.xlabel('$dt/U$')
    plt.ylabel('$\mu/U$')
    plt.show()
def plot_ecdf(x, y, xlabel='attribute', legend='x'):
    """Plot distribution ECDF.
    x should be sorted, y typically from 1/len(x) to 1.
    TODO: function should be improved to plot multiple overlaid ECDFs.
    """
    plt.plot(x, y, marker='.', linestyle='none')
    # Make nice margins
    plt.margins(0.02)
    # Annotate the plot
    plt.legend((legend,), loc='lower right')
    _ = plt.xlabel(xlabel)
    _ = plt.ylabel('ECDF')
    # Display the plot
    plt.show()
def analyseparamsneighbourhood(svdata, params, includejumps, randomstate):
    parameterndarray = transformparameterndarray(np.array(params), includejumps)
    offsets = np.linspace(-.5, .5, 10)
    for dimension in range(params.dimensioncount):
        xs, ys = [], []
        parametername = params.getdimensionname(dimension)
        print('Perturbing %s...' % parametername)
        for offset in offsets:
            newparameterndarray = np.copy(parameterndarray)
            newparameterndarray[dimension] += offset
            xs.append(inversetransformparameterndarray(newparameterndarray, includejumps)[dimension])
            y = runsvljparticlefilter(svdata, sv.Params(*inversetransformparameterndarray(newparameterndarray, includejumps)), randomstate).stochfilter.loglikelihood
            ys.append(y)
        fig = plt.figure()
        plot = fig.add_subplot(111)
        plot.plot(xs, ys)
        plot.axvline(x=inversetransformparameterndarray(parameterndarray, includejumps)[dimension], color='red')
        plot.set_xlabel(parametername)
        plot.set_ylabel('loglikelihood')
        plt.show()
def show_one_img_mask(data):
    w, h = 1918, 1280
    a = randint(0, 31)
    path = "../input/test"
    data = np.load(data).item()
    name, masks = data['name'][a], data['pred']
    img = Image.open("%s/%s" % (path, name))
    # img.show()
    plt.imshow(img)
    plt.show()
    mask = np.squeeze(masks[a])
    mask = imresize(mask, [h, w]).astype(np.float32)
    print(mask.shape, mask[0])
    img = Image.fromarray(mask * 256)  # .resize([w,h])
    plt.imshow(img)
    plt.show()
def plot_beta():
    '''plot beta over training'''
    beta = args.beta
    scale = args.scale
    beta_min = args.beta_min
    num_epoch = args.num_epoch
    epoch_size = int(float(args.num_examples) / args.batch_size)

    x = np.arange(num_epoch * epoch_size)
    y = beta * np.power(scale, x)
    y = np.maximum(y, beta_min)
    epoch_x = np.arange(num_epoch) * epoch_size
    epoch_y = beta * np.power(scale, epoch_x)
    epoch_y = np.maximum(epoch_y, beta_min)

    # plot beta descent curve
    plt.semilogy(x, y)
    plt.semilogy(epoch_x, epoch_y, 'ro')
    plt.title('beta descent')
    plt.ylabel('beta')
    plt.xlabel('epoch')
    plt.show()
def test_point_projects_to_edge(self):
    # p = (114.83299055, 26.8892277)
    p = (121.428387, 31.027371)
    a = time.time()
    edges, segments = self.sg.point_projects_to_edges(p, 0.01)
    print(time.time() - a)

    if self.show_plots:
        plt.figure()
        s2g.plot_lines(MultiLineString(segments), color='orange')  # original roads
        for i in range(0, len(edges)):
            s, e = edges[i]
            sxy = self.sg.node_xy[s]
            exy = self.sg.node_xy[e]
            plt.plot([sxy[0], exy[0]], [sxy[1], exy[1]], color='green')  # graph edges
        plt.plot(p[0], p[1], color='red', markersize=12, marker='o')  # bridges
        plt.show()
def plotBestFit(weights):
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1]); ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1]); ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0] - weights[1] * x) / weights[2]  # best-fit line (decision boundary)
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()
def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)  # no ticks
    # createPlot.ax1 = plt.subplot(111, frameon=False)  # ticks for demo purposes
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5 / plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()

# def createPlot():
#     fig = plt.figure(1, facecolor='white')
#     fig.clf()
#     createPlot.ax1 = plt.subplot(111, frameon=True)
#     plotNode(U'a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
#     plotNode(U'a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
#     plt.show()
def plotValResults(self, save_path=None, label=None):
    if label is not None:
        accs = self.training_val_results['acc'][label]
        aucs = self.training_val_results['auc'][label]
    else:
        accs = self.training_val_results['acc']
        aucs = self.training_val_results['auc']
    plt.figure()
    plt.plot([i * ACCURACY_LOGGED_EVERY_N_STEPS for i in range(len(accs))], accs)
    plt.plot([i * ACCURACY_LOGGED_EVERY_N_STEPS for i in range(len(aucs))], aucs)
    plt.xlabel('Training step')
    plt.ylabel('Validation accuracy')
    plt.legend(['Accuracy', 'AUC'])
    if save_path is None:
        plt.show()
    else:
        plt.savefig(save_path)
    plt.close()
def plotValResults(self, save_path=None, label=None):
    if label:
        accs = self.training_val_results_per_task['acc'][label]
        aucs = self.training_val_results_per_task['auc'][label]
    else:
        accs = self.training_val_results['acc']
        aucs = self.training_val_results['auc']
    plt.figure()
    plt.plot([i * self.accuracy_logged_every_n for i in range(len(accs))], accs)
    plt.plot([i * self.accuracy_logged_every_n for i in range(len(aucs))], aucs)
    plt.xlabel('Training step')
    plt.ylabel('Validation accuracy')
    plt.legend(['Accuracy', 'AUC'])
    if save_path is None:
        plt.show()
    else:
        plt.savefig(save_path)
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))  # in inches
    x = low_dim_embs[:, 0]
    y = low_dim_embs[:, 1]
    plt.scatter(x, y)
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.show()
    # plt.savefig(filename)
def mfi(df):
    df['date'] = pd.to_datetime(df.date)
    fig = plt.figure(figsize=(16, 9))
    gs = GridSpec(3, 1)  # 3 rows, 1 column
    fig.suptitle(df['date'][-1:].values[0])
    fig.set_label('MFI')
    price = fig.add_subplot(gs[:2, 0])
    price.plot(df['date'], df['close'], color='blue')
    indicator = fig.add_subplot(gs[2, 0], sharex=price)
    indicator.plot(df['date'], df['mfi'], c='pink')
    indicator.plot(df['date'], [20.] * len(df['date']), c='green')
    indicator.plot(df['date'], [80.] * len(df['date']), c='orange')
    price.grid(True)
    indicator.grid(True)
    plt.tight_layout()
    plt.show()
def atr(df):
    '''
    Average True Range
    :param df:
    :return:
    '''
    df['date'] = pd.to_datetime(df.date)
    fig = plt.figure(figsize=(16, 9))
    gs = GridSpec(3, 1)  # 3 rows, 1 column
    fig.suptitle(df['date'][-1:].values[0])
    fig.set_label('ATR')
    price = fig.add_subplot(gs[:2, 0])
    price.plot(df['date'], df['close'], color='blue')
    indicator = fig.add_subplot(gs[2, 0], sharex=price)
    indicator.plot(df['date'], df['atr'], c='pink')
    # indicator.plot(df['date'], [20.]*len(df['date']), c='green')
    # indicator.plot(df['date'], [80.]*len(df['date']), c='orange')
    price.grid(True)
    indicator.grid(True)
    plt.tight_layout()
    plt.show()
def rocr(df):
    '''
    Rate of Change Ratio
    :param df:
    :return:
    '''
    df['date'] = pd.to_datetime(df.date)
    fig = plt.figure(figsize=(16, 9))
    gs = GridSpec(3, 1)  # 3 rows, 1 column
    fig.suptitle(df['date'][-1:].values[0])
    fig.set_label('ROCR')
    price = fig.add_subplot(gs[:2, 0])
    price.plot(df['date'], df['close'], color='blue')
    indicator = fig.add_subplot(gs[2, 0], sharex=price)
    indicator.plot(df['date'], df['rocr'], c='pink')
    # indicator.plot(df['date'], [20.]*len(df['date']), c='green')
    # indicator.plot(df['date'], [80.]*len(df['date']), c='orange')
    price.grid(True)
    indicator.grid(True)
    plt.tight_layout()
    plt.show()
def main():
    losses = []
    accuracy = []
    for echo in xrange(4000):
        logger.info('Iteration = {}'.format(echo))
        train_data = simulator(M=20)
        print train_data['text'][-1]
        loss = learner(train_data, fr=0.)
        losses.append(loss)
        accuracy += train_data['acc']
        if echo % 100 == 99:
            plt.plot(accuracy)
            plt.show()
    # pkl.dump(losses, open('losses.temp.pkl'))
def pieGraph(data_count):
    """
    Graphs a pie chart of the data counts;
    only includes entries that appear more than once!

    Parameter:
    - data_count: dict
    """
    names, count = [], []
    for name, cnt in data_count.items():
        if cnt > 1:
            names.append(name)
            count.append(cnt)
    fig1, ax1 = plt.subplots()
    ax1.pie(count, labels=names, autopct='%1.1f%%', shadow=True, startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    # plt.tight_layout()
    plt.show()
def pie_graph(data_count):
    """
    Graphs a pie chart of the data counts
    (only shows schools that appear more than once)

    Parameter:
    - data_count: dict
    """
    names, count = [], []
    for name, cnt in data_count.items():
        if cnt > 1:
            names.append(name)
            count.append(cnt)
    fig1, ax1 = plt.subplots()
    ax1.pie(count, labels=names, autopct='%1.1f%%', shadow=True, startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    # plt.tight_layout()
    plt.show()
def barGraph(data_count):
    names, count_in = [], []
    data_count = sorted(data_count.items(), key=operator.itemgetter(1), reverse=True)
    for i in data_count:
        names.append(i[0])
        count_in.append(i[-1])
    plt.rcdefaults()
    fig, ax = plt.subplots()
    y_pos = np.arange(len(names))
    ax.barh(y_pos, count_in, align='center', color='green', ecolor='black')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(names)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('Categories')
    ax.set_title('# of job titles in each category')
    plt.show()
def plot_training_parameters(self):
    fr = open("training_param.csv", "r")
    fr.readline()
    lines = fr.readlines()
    fr.close()
    n = 100
    nu = np.empty(n, dtype=np.float64)
    gamma = np.empty(n, dtype=np.float64)
    diff = np.empty([n, n], dtype=np.float64)
    for row in range(len(lines)):
        m = lines[row].strip().split(",")
        i = row // n  # integer division so the result can be used as an index
        j = row % n
        nu[i] = Decimal(m[0])
        gamma[j] = Decimal(m[1])
        diff[i][j] = Decimal(m[2])
    plt.pcolor(gamma, nu, diff, cmap="coolwarm")
    plt.title("The Difference of Gaussian Classifier with Different nu, gamma")
    plt.xlabel("gamma")
    plt.ylabel("nu")
    plt.xscale("log")
    plt.yscale("log")
    plt.colorbar()
    plt.show()
def visualize_gt_roidb(imdb, gt_roidb):
    """
    visualize gt roidb
    :param imdb: the imdb to be visualized
    :param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
    :return: None
    """
    import matplotlib.pyplot as plt
    import skimage.io
    for i in range(len(gt_roidb)):
        im_path = imdb.image_path_from_index(imdb.image_set_index[i])
        im = skimage.io.imread(im_path)
        roi_rec = gt_roidb[i]
        plt.imshow(im)
        for bbox, gt_class, overlap in zip(roi_rec['boxes'], roi_rec['gt_classes'], roi_rec['gt_overlaps']):
            box = plt.Rectangle((bbox[0], bbox[1]),
                                bbox[2] - bbox[0],
                                bbox[3] - bbox[1],
                                fill=False, edgecolor='g', linewidth=3)
            plt.gca().add_patch(box)
            plt.gca().text(bbox[0], bbox[1], imdb.classes[gt_class] + ' {}'.format(overlap[0, gt_class]), color='w')
        plt.show()