The following 50 code examples, extracted from open-source Python projects, illustrate how to use pylab.xlabel().
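Before the project examples, here is a minimal, self-contained sketch of the call itself; the data and label strings below are invented purely for illustration.

import numpy as np
import pylab

# Hypothetical data, used only to demonstrate labelling the x-axis.
x = np.linspace(0, 2 * np.pi, 100)
pylab.plot(x, np.sin(x))
pylab.xlabel("Angle (radians)")  # pylab.xlabel() sets the x-axis label of the current axes
pylab.ylabel("sin(x)")
pylab.show()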
def twoDimensionalScatter(title, title_x, title_y,
                          x, y,
                          lim_x=None, lim_y=None,
                          color='b', size=20, alpha=None):
    """
    Create a two-dimensional scatter plot.

    INPUTS
    """
    pylab.figure()
    pylab.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none')

    pylab.xlabel(title_x)
    pylab.ylabel(title_y)
    pylab.title(title)

    if type(color) is not str:
        pylab.colorbar()

    if lim_x:
        pylab.xlim(lim_x[0], lim_x[1])
    if lim_y:
        pylab.ylim(lim_y[0], lim_y[1])

############################################################
def display_results_figure(results, METRIC):
    import pylab as pb
    color = iter(pb.cm.rainbow(np.linspace(0, 1, len(results))))
    plots = []
    for method in results.keys():
        x = []
        y = []
        for train_perc in sorted(results[method].keys()):
            x.append(train_perc)
            y.append(results[method][train_perc][0])
        c = next(color)
        (pi, ) = pb.plot(x, y, color=c)
        plots.append(pi)
    from matplotlib.font_manager import FontProperties
    fontP = FontProperties()
    fontP.set_size('small')
    pb.legend(plots, map(method_name_mapper, results.keys()),
              prop=fontP, bbox_to_anchor=(0.6, .65))
    pb.xlabel('#Tweets from target rumour for training')
    pb.ylabel('Accuracy')
    pb.title(METRIC.__name__)
    pb.savefig('incrementing_training_size.png')
def predicted_vs_actual_y_xgb(self, xgb, best_nrounds, xgb_params,
                              x_train_split, x_test_split,
                              y_train_split, y_test_split, title_name):
    # Split the training data into an extra set of test
    # x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
    dtrain_split = xgb.DMatrix(x_train_split, label=y_train_split)
    dtest_split = xgb.DMatrix(x_test_split)
    print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
    gbdt = xgb.train(xgb_params, dtrain_split, best_nrounds)
    y_predicted = gbdt.predict(dtest_split)
    plt.figure(figsize=(10, 5))
    plt.scatter(y_test_split, y_predicted, s=20)
    rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
    plt.title(''.join([title_name, ', Predicted vs. Actual.', ' rmse = ', str(rmse_pred_vs_actual)]))
    plt.xlabel('Actual y')
    plt.ylabel('Predicted y')
    plt.plot([min(y_test_split), max(y_test_split)], [min(y_test_split), max(y_test_split)])
    plt.tight_layout()
def display_pr_curve(precision, recall):
    # following examples from sklearn
    # TODO: f1 operating point
    import pylab as plt
    # Plot Precision-Recall curve
    plt.clf()
    plt.plot(recall, precision, label='Precision-Recall curve')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall example: Max f1={0:0.2f}'.format(max_f1))
    plt.legend(loc="lower left")
    plt.show()
def plot(self, fontsize=16):
    """Create the barplot from the stats file"""
    from sequana.lazy import pylab
    from sequana.lazy import pandas as pd
    pylab.clf()
    df = pd.DataFrame(self._parse_data()['rules'])
    ts = df.ix['mean-runtime']
    total_time = df.ix['mean-runtime'].sum()
    #ts['total'] = self._parse_data()['total_runtime'] / float(self.N)
    ts['total'] = total_time
    ts.sort_values(inplace=True)
    ts.plot.barh(fontsize=fontsize)
    pylab.grid(True)
    pylab.xlabel("Seconds (s)", fontsize=fontsize)
    try:
        pylab.tight_layout()
    except:
        pass
def on_epoch_end(self, epoch, logs={}):
    self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % epoch))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func, word_batch['the_input'][0:self.num_display_words])
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words, 1, i + 1)
        if K.image_dim_ordering() == 'th':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\' Decoded = \'%s\'' % (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 12)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % epoch))
    pylab.close()

# Input Parameters
def edgescatter(self, ps):
    for ei, X in enumerate(self.edges):
        i, j = X[:2]
        matchdRA, matchdDec = X[10:12]
        mu = X[9]
        A = self.alignments[ei]
        plt.clf()
        if len(matchdRA) > 1000:
            plothist(matchdRA, matchdDec, 101)
        else:
            plt.plot(matchdRA, matchdDec, 'k.', alpha=0.5)
        plt.axvline(0, color='0.5')
        plt.axhline(0, color='0.5')
        plt.axvline(mu[0], color='b')
        plt.axhline(mu[1], color='b')
        for nsig in [1, 2]:
            X, Y = A.getContours(nsigma=nsig)
            plt.plot(X, Y, 'b-')
        plt.xlabel('delta-RA (arcsec)')
        plt.ylabel('delta-Dec (arcsec)')
        plt.axis('scaled')
        ps.savefig()
def PlotProps(pars):
    import numpy as np
    import pylab as pl
    import vanGenuchten as vg
    psi = np.linspace(-10, 2, 200)
    pl.figure()
    pl.subplot(3, 1, 1)
    pl.plot(psi, vg.thetaFun(psi, pars))
    pl.ylabel(r'$\theta(\psi) [-]$')
    pl.subplot(3, 1, 2)
    pl.plot(psi, vg.CFun(psi, pars))
    pl.ylabel(r'$C(\psi) [1/m]$')
    pl.subplot(3, 1, 3)
    pl.plot(psi, vg.KFun(psi, pars))
    pl.xlabel(r'$\psi [m]$')
    pl.ylabel(r'$K(\psi) [m/d]$')
    # pl.show()
def plot_evaluation_episode_reward():
    pylab.clf()
    sns.set_context("poster")
    pylab.plot(0, 0)
    episodes = [0]
    average_scores = [0]
    median_scores = [0]
    for n in xrange(len(csv_evaluation)):
        params = csv_evaluation[n]
        episodes.append(params[0])
        average_scores.append(params[1])
        median_scores.append(params[2])
    pylab.plot(episodes, average_scores, sns.xkcd_rgb["windows blue"], lw=2)
    pylab.xlabel("episodes")
    pylab.ylabel("average score")
    pylab.savefig("%s/evaluation_episode_average_reward.png" % args.plot_dir)

    pylab.clf()
    pylab.plot(0, 0)
    pylab.plot(episodes, median_scores, sns.xkcd_rgb["windows blue"], lw=2)
    pylab.xlabel("episodes")
    pylab.ylabel("median score")
    pylab.savefig("%s/evaluation_episode_median_reward.png" % args.plot_dir)
def modBev_plot(ax, rangeX=[-10, 10], rangeXpx=[0, 400], numDeltaX=5,
                rangeZ=[8, 48], rangeZpx=[0, 800], numDeltaZ=9,
                fontSize=None, xlabel='x [m]', ylabel='z [m]'):
    '''
    @param ax:
    '''
    # TODO: Configurability would be nice!
    if fontSize == None:
        fontSize = 8

    ax.set_xlabel(xlabel, fontsize=fontSize)
    ax.set_ylabel(ylabel, fontsize=fontSize)

    zTicksLabels_val = np.linspace(rangeZpx[0], rangeZpx[1], numDeltaZ)
    ax.set_yticks(zTicksLabels_val)
    #ax.set_yticks([0, 100, 200, 300, 400, 500, 600, 700, 800])
    xTicksLabels_val = np.linspace(rangeXpx[0], rangeXpx[1], numDeltaX)
    ax.set_xticks(xTicksLabels_val)

    xTicksLabels_val = np.linspace(rangeX[0], rangeX[1], numDeltaX)
    zTicksLabels = map(lambda x: str(int(x)), xTicksLabels_val)
    ax.set_xticklabels(zTicksLabels, fontsize=fontSize)

    zTicksLabels_val = np.linspace(rangeZ[1], rangeZ[0], numDeltaZ)
    zTicksLabels = map(lambda x: str(int(x)), zTicksLabels_val)
    ax.set_yticklabels(zTicksLabels, fontsize=fontSize)
def plotPopScore(population, fitness=False):
    """ Plot the population score distribution

    Example:
       >>> Interaction.plotPopScore(population)

    :param population: population object (:class:`GPopulation.GPopulation`)
    :param fitness: if True, the fitness score will be used, otherwise, the raw.
    :rtype: None
    """
    score_list = getPopScores(population, fitness)
    pylab.plot(score_list, 'o')
    pylab.title("Plot of population score distribution")
    pylab.xlabel('Individual')
    pylab.ylabel('Score')
    pylab.grid(True)
    pylab.show()

# -----------------------------------------------------------------
def plotHistPopScore(population, fitness=False):
    """ Population score distribution histogram

    Example:
       >>> Interaction.plotHistPopScore(population)

    :param population: population object (:class:`GPopulation.GPopulation`)
    :param fitness: if True, the fitness score will be used, otherwise, the raw.
    :rtype: None
    """
    score_list = getPopScores(population, fitness)
    n, bins, patches = pylab.hist(score_list, 50, facecolor='green', alpha=0.75, normed=1)
    pylab.plot(bins, pylab.normpdf(bins, numpy.mean(score_list), numpy.std(score_list)), 'r--')
    pylab.xlabel('Score')
    pylab.ylabel('Frequency')
    pylab.grid(True)
    pylab.title("Plot of population score distribution")
    pylab.show()

# -----------------------------------------------------------------
def fastLapModel(xList, labels, names, multiple=0, full_set=0):
    X = numpy.array(xList)
    y = numpy.array(labels)
    featureNames = []
    featureNames = numpy.array(names)
    # take fixed holdout set 30% of data rows
    xTrain, xTest, yTrain, yTest = train_test_split(
        X, y, test_size=0.30, random_state=531)
    # for final model (no CV)
    if full_set:
        xTrain = X
        yTrain = y
    check_set(xTrain, xTest, yTrain, yTest)
    print("Fitting the model to the data set...")
    # train random forest at a range of ensemble sizes in order to see how the
    # mse changes
    mseOos = []
    m = 10 ** multiple
    nTreeList = range(500 * m, 1000 * m, 100 * m)
    # iTrees = 10000
    for iTrees in nTreeList:
        depth = None
        maxFeat = int(np.sqrt(np.shape(xTrain)[1])) + 1  # try tweaking
        RFmd = ensemble.RandomForestRegressor(n_estimators=iTrees, max_depth=depth,
                                              max_features=maxFeat, oob_score=False,
                                              random_state=531, n_jobs=-1)
        # RFmd.n_features = 5
        RFmd.fit(xTrain, yTrain)
        # Accumulate mse on test set
        prediction = RFmd.predict(xTest)
        mseOos.append(mean_squared_error(yTest, prediction))
    # plot training and test errors vs number of trees in ensemble
    plot.plot(nTreeList, mseOos)
    plot.xlabel('Number of Trees in Ensemble')
    plot.ylabel('Mean Squared Error')
    #plot.ylim([0.0, 1.1*max(mseOob)])
    plot.show()
    print("MSE")
    print(mseOos[-1])
    return xTrain, xTest, yTrain, yTest, RFmd
def plot_importance(names, model, savefig=True):
    featureNames = numpy.array(names)
    featureImportance = model.feature_importances_
    featureImportance = featureImportance / featureImportance.max()
    sorted_idx = numpy.argsort(featureImportance)
    barPos = numpy.arange(sorted_idx.shape[0]) + .5
    plot.barh(barPos, featureImportance[sorted_idx], align='center')
    plot.yticks(barPos, featureNames[sorted_idx])
    plot.xlabel('Variable Importance')
    plot.subplots_adjust(left=0.2, right=0.9, top=0.9, bottom=0.1)
    if savefig:
        dt_ = datetime.datetime.now().strftime('%d%b%y_%H%M')
        plt.savefig("../graphs/featureImportance_" + dt_ + ".png")
    plot.show()

# Plot the prediction and save the graph with a timestamp
def plotBestFit(dataSet1, dataSet2):
    dataArr1 = array(dataSet1)
    dataArr2 = array(dataSet2)
    n = shape(dataArr1)[0]
    n1 = shape(dataArr2)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    xcord3 = []; ycord3 = []
    j = 0
    for i in range(n):
        xcord1.append(dataArr1[i, 0]); ycord1.append(dataArr1[i, 1])
        xcord2.append(dataArr2[i, 0]); ycord2.append(dataArr2[i, 1])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='blue')
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()
def plot_word_frequencies(freq, user):
    samples = [item for item, _ in freq.most_common(50)]

    freqs = np.array([float(freq[sample]) for sample in samples])
    freqs /= np.max(freqs)

    ylabel = "Normalized word count"

    pylab.grid(True, color="silver")
    kwargs = dict()
    kwargs["linewidth"] = 2
    kwargs["label"] = user
    pylab.plot(freqs, **kwargs)
    pylab.xticks(range(len(samples)), [nltk.compat.text_type(s) for s in samples], rotation=90)
    pylab.xlabel("Samples")
    pylab.ylabel(ylabel)
    pylab.gca().set_yscale('log', basey=2)
def predicted_vs_actual_sale_price(self, x_train, y_train, title_name):
    # Split the training data into an extra set of test
    x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
    print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
    lasso = LassoCV(alphas=[0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1,
                            0.3, 0.6, 1], max_iter=50000, cv=10)
    # lasso = RidgeCV(alphas=[0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1,
    #                         0.3, 0.6, 1], cv=10)
    lasso.fit(x_train_split, y_train_split)
    y_predicted = lasso.predict(X=x_test_split)
    plt.figure(figsize=(10, 5))
    plt.scatter(y_test_split, y_predicted, s=20)
    rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
    plt.title(''.join([title_name, ', Predicted vs. Actual.', ' rmse = ', str(rmse_pred_vs_actual)]))
    plt.xlabel('Actual Sale Price')
    plt.ylabel('Predicted Sale Price')
    plt.plot([min(y_test_split), max(y_test_split)], [min(y_test_split), max(y_test_split)])
    plt.tight_layout()
def predicted_vs_actual_sale_price_xgb(self, xgb_params, x_train, y_train, seed, title_name):
    # Split the training data into an extra set of test
    x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
    dtrain_split = xgb.DMatrix(x_train_split, label=y_train_split)
    dtest_split = xgb.DMatrix(x_test_split)

    res = xgb.cv(xgb_params, dtrain_split, num_boost_round=1000, nfold=4, seed=seed, stratified=False,
                 early_stopping_rounds=25, verbose_eval=10, show_stdv=True)
    best_nrounds = res.shape[0] - 1
    print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
    gbdt = xgb.train(xgb_params, dtrain_split, best_nrounds)
    y_predicted = gbdt.predict(dtest_split)
    plt.figure(figsize=(10, 5))
    plt.scatter(y_test_split, y_predicted, s=20)
    rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
    plt.title(''.join([title_name, ', Predicted vs. Actual.', ' rmse = ', str(rmse_pred_vs_actual)]))
    plt.xlabel('Actual Sale Price')
    plt.ylabel('Predicted Sale Price')
    plt.plot([min(y_test_split), max(y_test_split)], [min(y_test_split), max(y_test_split)])
    plt.tight_layout()
def on_epoch_end(self, epoch, logs={}):
    self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func, word_batch['the_input'][0:self.num_display_words])
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        if K.image_dim_ordering() == 'th':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' % (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
def plot(self):
    """ Plot startup data. """
    import pylab

    print("Plotting result...", end="")
    avg_data = self.average_data()
    avg_data = self.__sort_data(avg_data, False)
    if len(self.raw_data) > 1:
        err = self.stdev_data()
        sorted_err = [err[k] for k in list(zip(*avg_data))[0]]
    else:
        sorted_err = None
    pylab.barh(range(len(avg_data)), list(zip(*avg_data))[1],
               xerr=sorted_err, align='center', alpha=0.4)
    pylab.yticks(range(len(avg_data)), list(zip(*avg_data))[0])
    pylab.xlabel("Average startup time (ms)")
    pylab.ylabel("Plugins")
    pylab.show()
    print(" done.")
def scatter_labeled_z(z_batch, label_batch, filename="labeled_z"):
    fig = pylab.gcf()
    fig.set_size_inches(20.0, 16.0)
    pylab.clf()
    colors = ["#2103c8", "#0e960e", "#e40402", "#05aaa8", "#ac02ab",
              "#aba808", "#151515", "#94a169", "#bec9cd", "#6a6551"]
    for n in range(z_batch.shape[0]):
        result = pylab.scatter(z_batch[n, 0], z_batch[n, 1],
                               c=colors[label_batch[n]], s=40, marker="o", edgecolors='none')
    classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    recs = []
    for i in range(0, len(colors)):
        recs.append(mpatches.Rectangle((0, 0), 1, 1, fc=colors[i]))
    ax = pylab.subplot(111)
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(recs, classes, loc="center left", bbox_to_anchor=(1.1, 0.5))
    pylab.xticks(pylab.arange(-4, 5))
    pylab.yticks(pylab.arange(-4, 5))
    pylab.xlabel("z1")
    pylab.ylabel("z2")
    pylab.savefig(filename)
def show_results(self):
    pl.plot(self.t1, self.n_A1, 'b--', label='A1: Time Step = 0.05')
    pl.plot(self.t1, self.n_B1, 'b', label='B1: Time Step = 0.05')
    pl.plot(self.t2, self.n_A2, 'g--', label='A2: Time Step = 0.1')
    pl.plot(self.t2, self.n_B2, 'g', label='B2: Time Step = 0.1')
    pl.plot(self.t1, self.n_A1_true, 'r--', label='True A1: Time Step = 0.05')
    pl.plot(self.t1, self.n_B1_true, 'r', label='True B1: Time Step = 0.05')
    pl.plot(self.t2, self.n_A2_true, 'c--', label='True A2: Time Step = 0.1')
    pl.plot(self.t2, self.n_B2_true, 'c', label='True B2: Time Step = 0.1')
    pl.title('Double Decay Problem - Approximation Compared with True Values at Different Time Steps')
    pl.xlim(0.0, 0.1)
    pl.ylim(0.0, 100.0)
    pl.xlabel('time ($s$)')
    pl.ylabel('Number of Nuclei')
    pl.legend(loc='best', shadow=True, fontsize='small')
    pl.grid(True)
    pl.savefig("computational_physics homework 4(improved-7).png")
def show(self):
    # pl.semilogy(self.theta, self.omega)
    # , label = '$L =%.1f m, $'%self.l + '$dt = %.2f s, $'%self.dt + '$\\theta_0 = %.2f radians, $'%self.theta[0] + '$q = %i, $'%self.q + '$F_D = %.2f, $'%self.F_D + '$\\Omega_D = %.1f$'%self.Omega_D)
    pl.plot(self.theta_phase, self.omega_phase, '.', label='$t \\approx 2\\pi n / \\Omega_D$')
    pl.xlabel('$\\theta$ (radians)')
    pl.ylabel('$\\omega$ (radians/s)')
    pl.legend()
    # pl.text(-1.4, 0.3, '$\\omega$ versus $\\theta$ $F_D = 1.2$', fontsize = 'x-large')
    pl.title('Chaotic Regime')
    # pl.show()
    # pl.semilogy(self.time_array, self.delta)
    # pl.legend(loc = 'upper center', fontsize = 'small')
    # pl.xlabel('$time (s)$')
    # pl.ylabel('$\\Delta\\theta (radians)$')
    # pl.xlim(0, self.T)
    # pl.ylim(float(input('ylim-: ')),float(input('ylim+: ')))
    # pl.ylim(1E-11, 0.01)
    # pl.text(4, -0.15, 'nonlinear pendulum - Euler-Cromer method')
    # pl.text(10, 1E-3, '$\\Delta\\theta versus time F_D = 0.5$')
    # pl.title('Simple Harmonic Motion')
    pl.title('Chaotic Regime')
def show(self):
    # pl.semilogy(self.theta, self.omega)
    # , label = '$L =%.1f m, $'%self.l + '$dt = %.2f s, $'%self.dt + '$\\theta_0 = %.2f radians, $'%self.theta[0] + '$q = %i, $'%self.q + '$F_D = %.2f, $'%self.F_D + '$\\Omega_D = %.1f$'%self.Omega_D)
    pl.plot(self.time_array, self.delta)
    # pl.show()
    # pl.semilogy(self.time_array, self.delta)
    # pl.legend(loc = 'upper center', fontsize = 'small')
    # pl.xlabel('$time (s)$')
    # pl.ylabel('$\\Delta\\theta (radians)$')
    # pl.xlim(0, self.T)
    # pl.ylim(float(input('ylim-: ')),float(input('ylim+: ')))
    # pl.ylim(1E-11, 0.01)
    # pl.text(4, -0.15, 'nonlinear pendulum - Euler-Cromer method')
    # pl.text(10, 1E-3, '$\\Delta\\theta versus time F_D = 0.5$')
    # pl.title('Simple Harmonic Motion')
    # pl.title('Chaotic Regime')
def show_log(self):
    # pl.subplot(121)
    pl.semilogy(self.time_array, self.delta, 'c')
    pl.xlabel('$time (s)$')
    pl.ylabel('$\\Delta\\theta$ (radians)')
    pl.xlim(0, self.T)
    # pl.ylim(1E-11, 0.01)
    pl.text(42, 1E-7, '$\\Delta\\theta$ versus time $F_D = 1.2$', fontsize='x-large')
    pl.title('Chaotic Regime')
    pl.show()

# def show_log_sub122(self):
#     pl.subplot(122)
#     pl.semilogy(self.time_array, self.delta, 'g')
#     pl.xlabel('$time (s)$')
#     pl.ylabel('$\\Delta\\theta$ (radians)')
#     pl.xlim(0, self.T)
#     pl.ylim(1E-6, 100)
#     pl.text(20, 1E-5, '$\\Delta\\theta$ versus time $F_D = 1.2$', fontsize = 'x-large')
#     pl.title('Chaotic Regime')
#     pl.show()
def show_complex(self):
    font = {'family': 'serif',
            'color': 'k',
            'weight': 'normal',
            'size': 16,
            }
    pl.title('The Trajectory of Targeted Baseball\n with air flow in adiabatic model', fontdict=font)
    pl.plot(self.x, self.y, label='$v_0 = %.5f m/s$' % self.v0 + ', ' + '$\\theta = %.4f \degree$' % self.theta)
    pl.xlabel('x $m$')
    pl.ylabel('y $m$')
    pl.xlim(0, 300)
    pl.ylim(-100, 20)
    pl.grid()
    pl.legend(loc='upper right', shadow=True, fontsize='small')
    pl.text(15, -90, 'scan to approach the minimum velocity and corresponding launching angle', fontdict=font)
    pl.show()
def show_simple(self):
    font = {'family': 'serif',
            'color': 'k',
            'weight': 'normal',
            'size': 16,
            }
    pl.title('The Trajectory of Targeted Baseball\n with air flow in adiabatic model', fontdict=font)
    pl.plot(self.x, self.y, label='$\\alpha = %.0f \degree$' % self.alpha)
    pl.xlabel('x $m$')
    pl.ylabel('y $m$')
    pl.xlim(0, 400)
    pl.ylim(-100, 200)
    pl.grid()
    pl.legend(loc='upper right', shadow=True, fontsize='medium')
    pl.text(5, -80, 'trajectories varying with angles of wind', fontdict=font)
    pl.show()
def show_results(self):
    font = {'family': 'serif',
            'color': 'k',
            'weight': 'normal',
            'size': 14,
            }
    pl.plot(self.x, self.y, 'c', label='firing angle = 45°')
    pl.title('The Trajectory of a Cannon Shell', fontdict=font)
    pl.xlabel('x (k$m$)')
    pl.ylabel('y ($km$)')
    pl.xlim(0, 60)
    pl.ylim(0, 20)
    pl.grid(True)
    pl.legend(loc='upper right', shadow=True, fontsize='large')
    pl.text(41, 16, 'Only with air drag', fontdict=font)
    pl.show()
def show_results(self):
    font = {'family': 'serif',
            'color': 'k',
            'weight': 'normal',
            'size': 12,
            }
    pl.plot(self.x, self.y, 'c', label='firing angle = 45°')
    pl.title('The Trajectory of a Cannon Shell', fontdict=font)
    pl.xlabel('x (k$m$)')
    pl.ylabel('y ($km$)')
    pl.xlim(0, 60)
    pl.ylim(0, 20)
    pl.grid(True)
    pl.legend(loc='upper right', shadow=True, fontsize='large')
    pl.text(34.5, 16, ' With air drag and the \n dependence of g on altitude', fontdict=font)
    pl.show()
def show_results(self):
    font = {'family': 'serif',
            'color': 'k',
            'weight': 'normal',
            'size': 12,
            }
    pl.plot(self.x, self.y, 'c', label='firing angle = 45°')
    pl.title('The Trajectory of a Cannon Shell', fontdict=font)
    pl.xlabel('x (k$m$)')
    pl.ylabel('y ($km$)')
    pl.xlim(0, 60)
    pl.ylim(0, 20)
    pl.grid(True)
    pl.legend(loc='upper right', shadow=True, fontsize='large')
    pl.text(34.5, 16, ' With both air drag and \n reduced air density-adiabatic', fontdict=font)
    pl.show()
def plotdata(obsmode, spectrum, val, odict, sdict,
             instr, fieldname, outdir, outname):
    isetting = P.isinteractive()
    P.ioff()

    P.clf()
    P.plot(obsmode, val, '.')
    P.ylabel('(pysyn-syn)/syn')
    P.xlabel('obsmode')
    P.title("%s: %s" % (instr, fieldname))
    P.savefig(os.path.join(outdir, outname + '_obsmode.ps'))

    P.clf()
    P.plot(spectrum, val, '.')
    P.ylabel('(pysyn-syn)/syn')
    P.xlabel('spectrum')
    P.title("%s: %s" % (instr, fieldname))
    P.savefig(os.path.join(outdir, outname + '_spectrum.ps'))

    matplotlib.interactive(isetting)
def plot_2D_contour(states, p, labels, inter=False):
    import pylab as pl
    from pyme.statistics import expectation as EXP
    exp = EXP((states, p))
    X = np.unique(states[0, :])
    Y = np.unique(states[1, :])
    X_len = len(X)
    Y_len = len(Y)
    Z = np.zeros((X.max() + 1, Y.max() + 1))
    for i in range(len(p)):
        Z[states[0, i], states[1, i]] = p[i]
    Z = np.where(Z < 1e-8, 0.0, Z)
    pl.clf()
    XX, YY = np.meshgrid(X, Y)
    pl.contour(range(X.max() + 1), range(Y.max() + 1), Z.T)
    pl.axhline(y=exp[1])
    pl.axvline(x=exp[0])
    pl.xlabel(labels[0])
    pl.ylabel(labels[1])
    if inter == True:
        pl.draw()
    else:
        pl.show()
def starPlot(targ_ra, targ_dec, data, iso, g_radius, nbhd):
    """Star bin plot"""
    mag_g = data[mag_g_dred_flag]
    mag_r = data[mag_r_dred_flag]

    filter = star_filter(data)
    iso_filter = (iso.separation(mag_g, mag_r) < 0.1)

    # projection of image
    proj = ugali.utils.projector.Projector(targ_ra, targ_dec)
    x, y = proj.sphereToImage(data[filter & iso_filter]['RA'], data[filter & iso_filter]['DEC'])

    plt.scatter(x, y, edgecolor='none', s=3, c='black')
    plt.xlim(0.2, -0.2)
    plt.ylim(-0.2, 0.2)
    plt.gca().set_aspect('equal')
    plt.xlabel(r'$\Delta \alpha$ (deg)')
    plt.ylabel(r'$\Delta \delta$ (deg)')
    plt.title('Stars')
def plot_time_freq(self, colors=True, ax=None):
    import pylab as pl
    if ax is None:
        fig, allax = pl.subplots(1)
        ax = allax
    # make time matrix same shape as others
    t = np.outer(self.t, np.ones(self.npeaks))
    f = self.f
    if colors:
        mag = 20 * np.log10(self.mag)
        ax.scatter(t, f, s=6, c=mag, lw=0)
    else:
        mag = 100 + 20 * np.log10(self.mag)
        ax.scatter(t, f, s=mag, lw=0)
    pl.xlabel('Time (s)')
    pl.ylabel('Frequency (Hz)')
    # if colors:
    #     cs = pl.colorbar(ax=ax)
    #     cs.set_label('Magnitude (dB)')
    # pl.show()
    return ax
def plot_time_mag(self):
    import pylab as pl
    pl.figure()
    t = np.outer(self.t, np.ones(self.npeaks))
    # f = np.log2(self.f)
    f = self.f
    mag = 20 * np.log10(self.mag)
    pl.scatter(t, mag, s=10, c=f, lw=0,
               norm=pl.matplotlib.colors.LogNorm())
    pl.xlabel('Time (s)')
    pl.ylabel('Magnitude (dB)')
    cs = pl.colorbar()
    cs.set_label('Frequency (Hz)')
    # pl.show()
    return pl.gca()
def two_plot_time_freq_mag(self, minlen=10):
    part = [pp for pp in self.partial if len(pp.f) > minlen]
    pl.figure()
    ax1 = pl.subplot(211)
    pl.hold(True)
    ax2 = pl.subplot(212, sharex=ax1)
    pl.hold(True)
    for pp in part:
        ax1.plot(pp.start_idx + np.arange(len(pp.f)), np.array(pp.f))
        ax2.plot(pp.start_idx + np.arange(len(pp.f)),
                 20 * np.log10(np.array(pp.mag)))
    ax1.hold(False)
    # ax1.xlabel('Time (s)')
    ax1.set_ylabel('Frequency (Hz)')
    ax2.set_xlabel('Time (s)')
    ax2.set_ylabel('Frequency (Hz)')
    # pl.show()
    return pl.gca()
def plot_time_freq_mag(self, minlen=10, cm=pl.cm.rainbow):
    cadd = 30
    cmax = 256
    ccur = 0
    part = [pp for pp in self.partial if len(pp.f) > minlen]
    pl.figure()
    pl.hold(True)
    for pp in part:
        # pl.plot(pp.start_idx + np.arange(len(pp.f)), np.array(pp.f))
        mag = 100 + 20 * np.log10(np.array(pp.mag))
        pl.scatter(pp.start_idx + np.arange(len(pp.f)), np.array(pp.f),
                   s=mag, c=cm(ccur), lw=0)
        ccur = np.mod(ccur + cadd, cmax)
    pl.hold(False)
    pl.xlabel('Time (s)')
    pl.ylabel('Frequency (Hz)')
    pl.show()
def visualiseNormObject(self):
    shape = (2 * self.extent, 2 * self.extent)
    pylab.ion()
    pylab.clf()
    #pylab.set_cmap("bone")
    pylab.hot()
    pylab.title("image: %s" % self.fitsFile)
    pylab.imshow(np.reshape(self.signPreserveNorm(), shape, order="F"),
                 interpolation="nearest")
    pylab.plot(np.arange(0, 2 * self.extent), self.extent * np.ones((2 * self.extent,)), "r--")
    pylab.plot(self.extent * np.ones((2 * self.extent,)), np.arange(0, 2 * self.extent), "r--")
    pylab.colorbar()
    pylab.ylim(-1, 2 * self.extent)
    pylab.xlim(-1, 2 * self.extent)
    pylab.xlabel("Pixels")
    pylab.ylabel("Pixels")
    pylab.show()
def main():
    data = pd.read_table('../Real_Values.txt').get_values()
    x = [float(d) for d in data]
    test = np.array([669, 592, 664, 1005, 699, 401, 646, 472, 598, 681, 1126, 1260, 562, 491, 714, 530, 521, 687, 776, 802, 499, 536, 871, 801, 965, 768, 381, 497, 458, 699, 549, 427, 358, 219, 635, 756, 775, 969, 598, 630, 649, 722, 835, 812, 724, 966, 778, 584, 697, 737, 777, 1059, 1218, 848, 713, 884, 879, 1056, 1273, 1848, 780, 1206, 1404, 1444, 1412, 1493, 1576, 1178, 836, 1087, 1101, 1082, 775, 698, 620, 651, 731, 906, 958, 1039, 1105, 620, 576, 707, 888, 1052, 1072, 1357, 768, 986, 816, 889, 973, 983, 1351, 1266, 1053, 1879, 2085, 2419, 1880, 2045, 2212, 1491, 1378, 1524, 1231, 1577, 2459, 1848, 1506, 1589, 1386, 1111, 1180, 1075, 1595, 1309, 2092, 1846, 2321, 2036, 3587, 1637, 1416, 1432, 1110, 1135, 1233, 1439, 894, 628, 967, 1176, 1069, 1193, 1771, 1199, 888, 1155, 1254, 1403, 1502, 1692, 1187, 1110, 1382, 1808, 2039, 1810, 1819, 1408, 803, 1568, 1227, 1270, 1268, 1535, 873, 1006, 1328, 1733, 1352, 1906, 2029, 1734, 1314, 1810, 1540, 1958, 1420, 1530, 1126, 721, 771, 874, 997, 1186, 1415, 973, 1146, 1147, 1079, 3854, 3407, 2257, 1200, 734, 1051, 1030, 1370, 2422, 1531, 1062, 530, 1030, 1061, 1249, 2080, 2251, 1190, 756, 1161, 1053, 1063, 932, 1604, 1130, 744, 930, 948, 1107, 1161, 1194, 1366, 1155, 785, 602, 903, 1142, 1410, 1256, 742, 985, 1037, 1067, 1196, 1412, 1127, 779, 911, 989, 946, 888, 1349, 1124, 761, 994, 1068, 971, 1157, 1558, 1223, 782, 2790, 1835, 1444, 1098, 1399, 1255, 950, 1110, 1345, 1224, 1092, 1446, 1210, 1122, 1259, 1181, 1035, 1325, 1481, 1278, 769, 911, 876, 877, 950, 1383, 980, 705, 888, 877, 638, 1065, 1142, 1090, 1316, 1270, 1048, 1256, 1009, 1175, 1176, 870, 856, 860])
    n_predict = 100
    extrapolation = fourierExtrapolation(x, n_predict)
    pl.figure()
    pl.plot(np.arange(len(x), len(extrapolation) + len(x)), extrapolation, 'r', label='extrapolation')
    pl.plot(x, 'b', label='Given Data', linewidth=3)
    pl.legend()
    pl.ylabel('BPM')
    pl.xlabel('Sample')
    pl.title('Fourier Extrapolation')
    pl.savefig('FourierExtrapolation.png')
    # pl.show()
    with open('Fourier_PredValues.txt', 'w') as out:
        out.write(str([e for e in extrapolation]).strip('[]'))
def ansQuest(maxTime, numTrials):
    means = []
    distLists = performSim(maxTime, numTrials)
    for t in range(maxTime + 1):
        tot = 0.0
        for distL in distLists:
            tot += distL[t]
        means.append(tot / len(distL))
    pylab.figure()
    pylab.plot(means)
    pylab.xlabel('distance')
    pylab.ylabel('time')
    pylab.title('Average Distance vs. Time (' + str(len(distLists)) + 'trials)')
def view_whitening(data):
    pylab.subplot(121)
    pylab.imshow(data['spatial'], interpolation='nearest')
    pylab.title('Spatial')
    pylab.xlabel('# Electrode')
    pylab.ylabel('# Electrode')
    pylab.colorbar()
    pylab.subplot(122)
    pylab.title('Temporal')
    pylab.plot(data['temporal'])
    pylab.xlabel('Time [ms]')
    x, y = pylab.xticks()
    pylab.xticks(x, (x - x[-1] // 2) // 10)
    pylab.tight_layout()