The following code examples, extracted from open-source Python projects, demonstrate how to use matplotlib.pyplot.fill_between().
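Before the project-level examples, a minimal self-contained sketch of the basic call pattern may help; the sine curve and the constant +-0.2 band here are invented purely for illustration:

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 2*np.pi, 200)
    y = np.sin(x)

    plt.plot(x, y, color="blue", label="sin(x)")
    # fill_between(x, y1, y2) shades the vertical band between y1 and y2 at each x
    plt.fill_between(x, y - 0.2, y + 0.2, color="blue", alpha=0.2, label="+-0.2 band")
    plt.legend(loc="best")
    plt.show()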
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x), 100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0)
                     for s in sol]
    # list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x > -6) & (x < 2)], y[(x > -6) & (x < 2)], None, None, "-",
                color='blue', linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x > -6) & (x < 2)], y_min[(x > -6) & (x < 2)],
             color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x > -6) & (x < 2)], y_max[(x > -6) & (x < 2)],
             color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"],
                     100*(sol[0]["params"]["m_"] - sol[0]["params"]["m__std"]),
                     100*(sol[0]["params"]["m_"] + sol[0]["params"]["m__std"]),
                     color='lightgray', alpha=1, zorder=-1, label="RTD SD")
    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
    # ax.set_title(title+" step method", fontsize=14)
def plot_joints_step(self, stamp):
    if self.plots == '':
        return
    mean_joints = self.get_mean_joints()
    std_joints = self.get_std_joints()
    f = plt.figure(facecolor="white", figsize=(16, 12))
    ax = f.add_subplot(111)
    ax.set_title('Mean +- {}std'.format(self.std_factor))
    color_id = 0
    for joint_id, joint_mean in enumerate(mean_joints):
        ax.plot(self.x, joint_mean, label='Joint {}'.format(joint_id),
                color=self.colors[color_id], linestyle='dashed')
        plt.fill_between(self.x,
                         joint_mean - self.std_factor*std_joints[joint_id],
                         joint_mean + self.std_factor*std_joints[joint_id],
                         alpha=0.1, color=self.colors[color_id])
        color_id = (color_id + 1) % len(self.colors)
    plt.legend(loc='upper left')
    self._mk_dirs()
    filename = '_'.join(['joints', stamp])
    plt.savefig(join(self.plots, filename) + '.svg', dpi=100, transparent=False)
    plt.close('all')
def plot_series(series):
    plt.figure(1)
    # colors = [np.array([1, 0.1, 0.1]), np.array([0.1, 1, 0.1]), np.array([0.1, 0.1, 1])]
    colors = ['m', 'g', 'r', 'b', 'y']
    for i, s in enumerate(series):
        print(s['x'], s['y'], s['std'], s['label'])
        small_number = np.ones_like(s['x']) * (s['x'][1]*0.1)
        x_axis = np.where(s['x'] == 0, small_number, s['x'])
        plt.plot(x_axis, s['y'], color=colors[i], label=s['label'])
        plt.fill_between(x_axis, s['y'] - s['std'], s['y'] + s['std'],
                         color=colors[i], alpha=0.2)
    plt.semilogx()
    plt.xlabel('MI reward bonus')
    plt.ylabel('Final intrinsic reward')
    plt.title('Final intrinsic reward in pointMDP with 10 good modes')
    plt.legend(loc='best')
    plt.show()
def set_data(self, y, variance, x=None):
    """
    update a gauss_1D with new data
    :param y:
    :param variance:
    :param x:
    :return:
    """
    n_points = len(y)
    if x is None:
        x = np.arange(n_points)
    self._handle.set_data(x, y)  # Update mean
    new_percentiles = []
    out = self.distribution.split("+")
    n_percentiles = len(out)
    # alpha must be numeric; normalize w.r.t. the number of percentiles
    sub_alpha = self.alpha / n_percentiles
    for i, percentile in enumerate(self._percentiles):
        percentile.remove()
        percentile = float(out[i])
        assert 0 <= percentile <= 100, 'Percentile must be >0 & <100. Instead is %f' % percentile
        interval = scipy.stats.norm.interval(percentile/100, loc=y, scale=np.sqrt(variance))
        interval = np.nan_to_num(interval)  # Fix stupid case of norm.interval(0) returning nan
        new_percentiles.append(plt.fill_between(x, interval[0], interval[1],
                                                color=self._handle.get_color(),
                                                alpha=sub_alpha))
    # TODO: not implemented yet
    pass
def plot_evolution(params, color=None, label=None):
    data = get_results(NAME, params, calc=do_calculations)
    times = data.times
    success = data.success
    N = float(len(success))
    t = sorted(times[success])
    p = np.arange(sum(success))/N
    t.append(endtime)
    p = np.append(p, [p[-1]])
    errp = 1.96*np.sqrt(p*(1. - p)/N)  # 95% confidence
    plt.semilogx(t, p, color=color, label=label)
    plt.fill_between(t, p - errp, p + errp, alpha=0.2, facecolor=color, linewidth=0)
    plt.xlabel("Time [ns]")
    plt.ylabel("Exit probability")
    plt.xlim(xmin=0.1, xmax=5e6)
    print("last time: %.5f ms\nend prob: %.3f\nstd. dev.: %.3f" % (
        t[-2]*1e-6, p[-2], errp[-2]))
def plot(self):
    '''
    Plots fit against input data.

    Should not be run before the :code:`.fit(` method.
    '''
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        warnings.warn("Cannot plot - no matplotlib.")
        return None
    plt.title("actual v. estimated w/ 95% confidence")
    self.estimated.plot(color='b', alpha=0.7)
    plt.fill_between(self.estimated.index.to_datetime(),
                     self.estimated + self.upper,
                     self.estimated - self.lower,
                     color='b', alpha=0.3)
    pd.Series(self.y.values.ravel(), index=self.estimated.index).plot(
        color='k', linewidth=1.5)
    plt.show()
def plotSounds(sound_list, name_list, samplerate, path, toSave=False):
    """Plots the sounds as a time series data"""
    times = np.arange(len(sound_list[0]))/float(samplerate)
    fig = plt.figure(figsize=(15, 4))
    imageCoordinate = 100 + 10*len(sound_list) + 1
    i = 0
    for sound in sound_list:
        fig.add_subplot(imageCoordinate)
        plt.fill_between(times, sound, color='k')
        plt.xlim(times[0], times[-1])
        plt.title(name_list[i])
        plt.xlabel('time (s)')
        plt.ylabel('amplitude')
        # plt.axis("off")
        plt.plot(sound)
        imageCoordinate += 1
        i += 1
    if toSave:
        plt.savefig("./plots/sounds/" + path + ".png", bbox_inches='tight')
    plt.show()
def plotSounds(sound_list, name_list, samplerate, path, toSave=False):
    """Plots the sounds as a time series data"""
    times = np.arange(len(sound_list[0]))/float(samplerate)
    fig = plt.figure(figsize=(15, 4))
    imageCoordinate = 100 + 10*len(sound_list) + 1
    i = 0
    for sound in sound_list:
        fig.add_subplot(imageCoordinate)
        plt.fill_between(times, sound, color='k')
        plt.xlim(times[0], times[-1])
        plt.title(name_list[i])
        plt.xlabel('time (s)')
        plt.ylabel('amplitude')
        # plt.axis("off")
        plt.plot(sound)
        imageCoordinate += 1
        i += 1
    if toSave:
        plt.savefig(path + ".jpg", bbox_inches='tight')
    plt.show()
def fill_plot():
    """ fill plot """
    # generate the data
    x = np.linspace(-2*np.pi, 2*np.pi, 1000, endpoint=True)
    y = np.sin(x)
    # plot the curve
    plt.plot(x, y, color="blue", alpha=1.00)
    # fill the regions between the curve and y=0
    # plt.fill_between(x, y1, y2, where=None, **kwargs)
    plt.fill_between(x, 0, y, y > 0, color="blue", alpha=.25)
    plt.fill_between(x, 0, y, y < 0, color="red", alpha=.25)
    # show the figure
    plt.show()
    return

# fill_plot()
def plotGPGO(gpgo, param):
    param_value = list(param.values())[0][1]
    x_test = np.linspace(param_value[0], param_value[1], 1000).reshape((1000, 1))
    hat = gpgo.GP.predict(x_test, return_std=True)
    y_hat, y_std = hat[0], np.sqrt(hat[1])
    l, u = y_hat - 1.96 * y_std, y_hat + 1.96 * y_std
    fig = plt.figure()
    r = fig.add_subplot(2, 1, 1)
    r.set_title('Fitted Gaussian process')
    plt.fill_between(x_test.flatten(), l, u, alpha=0.2)
    plt.plot(x_test.flatten(), y_hat, color='red', label='Posterior mean')
    plt.legend(loc=0)
    a = np.array([-gpgo._acqWrapper(np.atleast_1d(x)) for x in x_test]).flatten()
    r = fig.add_subplot(2, 1, 2)
    r.set_title('Acquisition function')
    plt.plot(x_test, a, color='green')
    gpgo._optimizeAcq(method='L-BFGS-B', n_start=1000)
    plt.axvline(x=gpgo.best, color='black', label='Found optima')
    plt.legend(loc=0)
    plt.tight_layout()
    plt.savefig(os.path.join(os.getcwd(),
                             'mthesis_text/figures/chapter3/sine/{}.pdf'.format(i)))
    plt.show()
def plotGPGO(gpgo, param, index, new=True):
    param_value = list(param.values())[0][1]
    x_test = np.linspace(param_value[0], param_value[1], 1000).reshape((1000, 1))
    y_hat, y_var = gpgo.GP.predict(x_test, return_std=True)
    std = np.sqrt(y_var)
    l, u = y_hat - 1.96 * std, y_hat + 1.96 * std
    if new:
        plt.figure()
        plt.subplot(5, 1, 1)
        plt.fill_between(x_test.flatten(), l, u, alpha=0.2)
        plt.plot(x_test.flatten(), y_hat)
    plt.subplot(5, 1, index)
    a = np.array([-gpgo._acqWrapper(np.atleast_1d(x)) for x in x_test]).flatten()
    plt.plot(x_test, a, color=colors[index - 2], label=acq_titles[index - 2])
    gpgo._optimizeAcq(method='L-BFGS-B', n_start=1000)
    plt.axvline(x=gpgo.best)
    plt.legend(loc=0)
def plot_true_diff(X_train, X_test, y_train, true_fun, mu_post, stand_devi):
    """
    plot true function and difference between true function and posterior prediction
    :param X_train: training data
    :param y_train: function value of training data
    :param true_fun: the true function obtained from the dataset_generator function
    :param mu_post: mean of posterior functions
    :param stand_devi: standard deviation of posterior functions
    :return:
    """
    plt.subplot(4, 2, 7)
    plt.plot(X_train, y_train, 'r+', ms=20)
    plt.plot(X_test, true_fun(X_test), 'b-')
    plt.gca().fill_between(X_test.flat, mu_post - 3 * stand_devi,
                           mu_post + 3 * stand_devi, color="#dddddd")
    plt.plot(X_test, mu_post, 'r--', lw=2)
    plt.title('Mean predictions plus 3 st.deviations')
    plt.axis([-5, 5, -3, 3])
def plotInterval(time, upper, lower, intervalType, color='black'):
    """
    The function to plot confidence interval.

    Args:
        time: time label
        upper: the upper bound
        lower: the lower bound
        intervalType: either 'line' or 'ribbon'
        color: the color of the plot
    """
    ALPHA = 0.4
    if time is None:
        if intervalType == 'line':
            plt.plot(upper, '--', color=color)
            plt.plot(lower, '--', color=color)
        elif intervalType == 'ribbon':
            plt.fill_between(upper, lower, facecolor=color, alpha=ALPHA)
    else:
        if intervalType == 'line':
            plt.plot(time, upper, '--', color=color)
            plt.plot(time, lower, '--', color=color)
        elif intervalType == 'ribbon':
            plt.fill_between(time, upper, lower, facecolor=color, alpha=ALPHA)
def PlotValidationCurve(param_range, train_mean, train_std, test_mean, test_std):
    # todo: automatically save plot to disk for exportation
    # plot data
    plot_params = [item[0] for item in param_range]
    plot.plot(plot_params, train_mean, color='blue', marker='o', markersize=5,
              label='training accuracy')
    plot.fill_between(plot_params, train_mean+train_std, train_mean-train_std,
                      alpha=0.15, color='blue')
    plot.plot(plot_params, test_mean, color='green', linestyle='--', marker='s',
              markersize=5, label='validation accuracy')
    plot.fill_between(plot_params, test_mean+test_std, test_mean-test_std,
                      alpha=0.15, color='green')
    # show plot
    plot.grid()
    plot.xscale('log', basex=2)  # since hidden units are all powers of 2
    plot.xlabel('Parameter: Number of hidden units')
    plot.ylabel('Accuracy')
    plot.legend(loc='lower right')
    plot.ylim([0, 1.0])
    plot.show()
def infer_states_Gaussian_ADF(Y, variance, true_states=None, do_plot=True):
    '''
    Parameters
    ----------

    Returns
    -------
    '''
    fx0 = hmm.Gaussian(0, 0)
    fA = hmm.DiffusionGaussian(variance)
    fB = hmm.PoissonObservationModel(1, 0)
    L = len(Y)
    fwd, bwd, posterior = forward_backward_abstract(Y, L, fx0, fA, fB)
    m = np.array([p.m for p in posterior])
    v = np.array([p.t for p in posterior])
    s = v**-.5
    if do_plot:
        plt.figure()
        plt.plot(m, color='k')
        plt.fill_between(range(L), m - s*1.95, m + s*1.95, color=(0.1,)*4, lw=0)
        if true_states is not None:
            plt.plot(true_states, color='r')
            plt.xlim(0, L)
    return m, v
def main(argv):
    import matplotlib.pyplot as plt
    import numpy as np
    for in_file in argv:
        with open(in_file, 'r') as f:
            lines = [l for l in f.read().split('\n') if l.find('R_std') != -1]
        iters = []
        rewards = []
        errors = []
        for l in lines:
            iters.append(float(l.split(' ')[3].replace(',', '')))
            rewards.append(float(l.split(' ')[-3].replace(',', '')))
            errors.append(float(l.split(' ')[-1].replace(',', '')))
        plt.plot(iters, rewards, label=in_file, linewidth=2.0)
        plt.fill_between(iters, np.asarray(rewards)-errors, np.asarray(rewards)+errors,
                         alpha=0.2)
    plt.title('Learning Curves')
    plt.legend(loc=4)
    plt.xlabel('Iterations[x1000]')
    plt.ylabel('Reward')
    plt.show()
def draw_diam_plot(orig_g, mG):
    df = pd.DataFrame(mG)
    gD = bfs_eff_diam(orig_g, 20, .9)
    ori_degree_seq = []
    for i in range(0, len(max(mG))):
        ori_degree_seq.append(gD)
    plt.fill_between(df.columns, df.mean() - df.sem(), df.mean() + df.sem(),
                     color='blue', alpha=0.2, label="se")
    h, = plt.plot(df.mean(), color='blue', aa=True, linewidth=4, ls='--', label="H*")
    orig, = plt.plot(ori_degree_seq, color='black', linewidth=2, ls='-', label="H")
    plt.title('Diameter Plot')
    plt.ylabel('Diameter')
    plt.xlabel('Growth')
    plt.tick_params(
        axis='x',           # changes apply to the x-axis
        which='both',       # both major and minor ticks are affected
        bottom='off',       # ticks along the bottom edge are off
        top='off',          # ticks along the top edge are off
        labelbottom='off')  # labels along the bottom edge are off
    plt.legend([orig, h], ['$H$', 'HRG $H^*$'], loc=4)
    # fig = plt.gcf()
    # fig.set_size_inches(5, 4, forward=True)
    plt.show()
def meanSwapsShadowedLine(datafile, prefix, ylabel, xmax=None, ylim=None):
    # Process data
    df = pd.read_csv(datafile, delimiter=",")
    # Filter out sentences with 0 swaps
    df = df.loc[df[prefix+"Eager"] > 0]
    grouped = df.groupby("words", as_index=True)
    idx = list(grouped.groups.keys())  # list() so the keys can be sliced below
    all_means = grouped.mean()
    all_stds = grouped.std(ddof=0)

    # Plot
    cols = [prefix+laziness for laziness in ('Eager', 'Lazy', 'Lazier')]
    labels = ('Eager', 'Lazy', 'Lazier')
    markers = ('p', '^', '8')
    color = ('aquamarine', 'gold', 'purple')
    if xmax is not None:
        plt.xlim(2, xmax+0.5)
    if ylim is not None:
        plt.ylim(ylim)
    for i in range(len(cols)):
        means = all_means[cols[i]][:xmax]
        stds = all_stds[cols[i]][:xmax]
        idx = idx[:xmax]
        plt.xlabel("Sentence Length", fontsize=16)
        plt.ylabel(ylabel, fontsize=16)
        plt.plot(idx, means, color[i], marker=markers[i], lw=1.5, label=labels[i])
        plt.fill_between(idx, means+stds, means-stds, color=color[i], alpha=0.5*1/(i+1))
    plt.legend(loc="upper left")
    # plt.show()
    plt.savefig("%sswaps.pdf" % prefix)
    plt.close()
def plot_nice_err(x, y, y_err, color='blue', ls='-', lw=1):
    plt.plot(x, y, color=color, ls=ls, lw=lw)
    plt.fill_between(x, y-y_err, y+y_err, alpha=0.1, color=color)
def plot_conditioned_joints_goal(self, goal, obtained_traj, mean_goal, std_goal, stamp):
    if self.plots == '':
        return
    self._mk_dirs()
    color_id = 0
    mean_joints = self.get_mean_joints()
    std_joints = self.get_std_joints()
    for joint_id, joint_goal in enumerate(goal):
        f = plt.figure(facecolor="white", figsize=(16, 12))
        ax = f.add_subplot(111)
        ax.set_title('Conditioning joint {}: mean, {}std, var(goal), output, goal'.format(
            joint_id, self.std_factor))
        plt.plot(self.x, mean_joints[joint_id], label='Mean joint {}'.format(joint_id),
                 color=self.colors[color_id], linestyle='dashed')
        plt.fill_between(self.x,
                         mean_joints[joint_id] - self.std_factor*std_joints[joint_id],
                         mean_joints[joint_id] + self.std_factor*std_joints[joint_id],
                         alpha=0.1, color=self.colors[color_id])
        color_goal = '0.2'  # grey 20%
        plt.plot(self.x, mean_goal[joint_id], color=color_goal,
                 label='Conditioned traj joint {}'.format(joint_id), linestyle=':')
        plt.fill_between(self.x,
                         mean_goal[joint_id] - self.std_factor*std_goal[joint_id],
                         mean_goal[joint_id] + self.std_factor*std_goal[joint_id],
                         alpha=0.1, color=color_goal)
        plt.plot([1], [joint_goal], marker='o', markerfacecolor=self.colors[color_id],
                 markersize=7, label='Goal')
        # plt.plot([1], [obtained_traj[-1, joint_id]], marker='o',
        #          markerfacecolor=self.colors[color_id], markersize=4)
        plt.plot(self.x, obtained_traj[:, joint_id], color=self.colors[color_id],
                 label='Refined output traj')
        plt.legend(loc='upper left', scatterpoints=1)
        color_id = (color_id + 1) % len(self.colors)
        end_stamp = '_'.join([stamp, 'joint', str(joint_id)])
        plt.savefig(join(self.plots, end_stamp) + '.svg', dpi=100, transparent=False)
        plt.close('all')
def plot(self, x=None, legend='promp', color=None):
    mean = np.dot(self.Phi.T, self.meanW)
    x = self.x if x is None else x
    plt.plot(x, mean, color=color, label=legend)
    std = 2*np.sqrt(np.diag(np.dot(self.Phi.T, np.dot(self.sigmaW, self.Phi))))
    plt.fill_between(x, mean - std, mean + std, color=color, alpha=0.2)
    for viapoint_id, viapoint in enumerate(self.viapoints):
        x_index = x[int(round((len(x)-1)*viapoint['t'], 0))]
        plt.plot(x_index, viapoint['obsy'], marker="o", markersize=10,
                 label="Via {} {}".format(viapoint_id, legend), color=color)
def plot_learning_curve_helper(self, train_sizes, train_scores, test_scores, title, alpha=0.1):
    train_scores = -train_scores
    test_scores = -test_scores
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)
    plt.plot(train_sizes, train_mean, label='train score', color='blue', marker='o')
    plt.fill_between(train_sizes, train_mean + train_std, train_mean - train_std,
                     color='blue', alpha=alpha)
    plt.plot(train_sizes, test_mean, label='test score', color='red', marker='o')
    plt.fill_between(train_sizes, test_mean + test_std, test_mean - test_std,
                     color='red', alpha=alpha)
    plt.title(title)
    plt.xlabel('Number of training points')
    plt.ylabel(r'Mean Squared Error')
    plt.grid(ls='--')
    plt.legend(loc='best')
    plt.show()

# def feature_reduction(self, X_train, y_train, X_val):
#     thresh = 5 * 10 ** (-3)
#     model = XGBRegressor()
#     model.fit(X_train, y_train)
#     selection = SelectFromModel(model, threshold=thresh, prefit=True)
#     select_X_train = selection.transform(X_train)
#     select_X_val = selection.transform(X_val)
#     return select_X_train, select_X_val
def nTron_IQ_plot(iq_vals, desc, threshold=0.0):
    iq_vals = iq_vals.real < threshold
    iqr = iq_vals.reshape(desc['Integrated'].dims(), order='C')
    iqrm = np.mean(iqr, axis=0)
    extent = (0.18, 10, 0.14, 0.40)
    aspect = 9.84/0.34
    plt.imshow(iqrm, origin='lower', cmap='RdGy', extent=extent, aspect=aspect)

# def plot_BER(volts, multidata, **kwargs):
#     ber_dat = [switching_BER(data, **kwargs) for data in multidata]
#     mean = []; limit = []; ci68 = []; ci95 = []
#     for datum in ber_dat:
#         mean.append(datum[0])
#         limit.append(datum[1])
#         ci68.append(datum[2])
#         ci95.append(datum[3])
#     mean = np.array(mean)
#     limit = np.array(limit)
#     fig = plt.figure()
#     plt.semilogy(volts, 1-mean, '-o')
#     plt.semilogy(volts, 1-limit, linestyle="--")
#     plt.fill_between(volts, [1-ci[0] for ci in ci68], [1-ci[1] for ci in ci68],
#                      alpha=0.2, edgecolor="none")
#     plt.fill_between(volts, [1-ci[0] for ci in ci95], [1-ci[1] for ci in ci95],
#                      alpha=0.2, edgecolor="none")
#     plt.ylabel("Switching Error Rate", size=14)
#     plt.xlabel("Pulse Voltage (V)", size=14)
#     plt.title("Bit Error Rate", size=16)
#     return fig

# def load_BER_data_legacy(filename):
#     with h5py.File(filename, 'r') as f:
#         dsets = [f[k] for k in f.keys() if "data" in k]
#         data_mean = [np.mean(dset.value, axis=-1) for dset in dsets]
#         volts = [float(dset.attrs['pulse_voltage']) for dset in dsets]
#     return volts, data_mean
def distribution_1D(y, percentiles, x=None, color=None, alpha=0.60, distribution='68+95+99',
                    linewidth=4, linestyle='-', marker=None, markersize=10, markevery=0.1):
    """
    Plot a distribution

    :param y:
    :param percentiles:
    :param x:
    :param color: Color used for plotting the curve
    :param alpha: Transparency level used for plotting the distributions
    :param distribution: The percentiles of the data that are to be plotted
    :param linewidth:
    :param linestyle:
    :param marker:
    :param markersize:
    :param markevery: scalar [0-1]
    :return:
    """
    n_points = len(y)
    if x is None:
        x = np.arange(n_points)
    # assert len(y) == len(variance), 'Dimensions variance do not match dimensions y'
    assert len(y) == len(x), 'Dimensions x do not match dimensions y'
    if color is None:
        handle, = plt.plot(x, y, linewidth=linewidth, linestyle=linestyle, marker=marker,
                           markersize=markersize, markevery=markevery)
    else:
        handle, = plt.plot(x, y, linewidth=linewidth, linestyle=linestyle, marker=marker,
                           markersize=markersize, markevery=markevery, color=color)
    out_des = distribution.split("+")
    # assert out > len()
    out = len(percentiles)
    # alpha must be numeric; normalize w.r.t. the number of percentiles
    sub_alpha = alpha / out * 2
    for i in range(0, out, 2):
        plt.fill_between(x, percentiles[i], percentiles[i+1],
                         color=handle.get_color(), alpha=sub_alpha)
    return handle
def validation_crv(estimator, X, y, title, n_jobs=1):
    param_range = np.logspace(-6, -1, 5)
    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name="max_features", param_range=param_range,
        cv=10, scoring="accuracy", n_jobs=n_jobs)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.title(title)
    plt.xlabel(r"$\gamma$")
    plt.ylabel("Score")
    plt.ylim(0.0, 1.1)
    lw = 2
    plt.semilogx(param_range, train_scores_mean, label="Training score",
                 color="darkorange", lw=lw)
    plt.fill_between(param_range, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.2,
                     color="darkorange", lw=lw)
    plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
                 color="navy", lw=lw)
    plt.fill_between(param_range, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.2,
                     color="navy", lw=lw)
    plt.legend(loc="best")
    return plt
def plot_cdf(self, x, *args, **kwargs):
    fx = self.cdf(x, N=1000, compute_std=True)
    itv = 2*self._std
    line, = plt.plot(x, fx, *args, **kwargs)
    plt.fill_between(x, fx - itv, fx + itv, color=line.get_color(), alpha=0.2)
def plot_pdf(self, x, *args, **kwargs):
    log = kwargs.pop("log", False)
    # plot at centers for compatibility with hist
    x = .5*(x[1:] + x[:-1])
    fx = self.pdf(x, N=100, compute_std=True, log=log)
    itv = 2*self._std
    line, = plt.plot(x, fx, *args, **kwargs)
    plt.fill_between(x, fx - itv, fx + itv, color=line.get_color(), alpha=0.2)
def _BorderPlot(time, x, color, kind, alpha, legend, linewidth, axes):
    npts, dev = x.shape
    # Get the deviation/sem:
    xStd = np.std(x, axis=1)
    if kind == 'sem':
        xStd = xStd/np.sqrt(npts-1)
    xMean = np.mean(x, 1)
    xLow, xHigh = xMean-xStd, xMean+xStd

    # Plot:
    if axes is None:
        axes = plt.gca()
    plt.sca(axes)
    ax = plt.plot(time, xMean, color=color, label=legend, linewidth=linewidth)
    plt.fill_between(time, xLow, xHigh, alpha=alpha, color=ax[0].get_color())
def export_image(self, image_contents):
    """ Write curves into a PNG image. """
    if self.ydata:
        # calculate and apply max range
        # (map() is lazy in Python 3, so extend the list explicitly)
        all_ydata = []
        for ydata in self.ydata.values():
            all_ydata.extend(ydata)
        plt.ylim(self.get_range(all_ydata))
        # create plots for each series of data
        for i, ((title, unit), ydata) in enumerate(self.ydata.items()):
            # create X axis
            xdata = [x for x in range(len(ydata))]
            # get additional statistics
            avg, rate, (a, b), dev = get_stats(ydata)
            # plot the data
            dataLine, = plt.plot(xdata, ydata, label=title)
            plotColor = dataLine.get_color()
            # plot the mean line
            avg_data = [avg for _ in ydata]
            meanLine, = plt.plot(xdata, avg_data,
                                 label='Mean: {:.2f}{}'.format(avg, unit),
                                 linestyle='--', color=plotColor)
            if a is not None:
                # plot the linear regression
                plt.plot([xdata[0], xdata[-1]],
                         [a * xdata[0] + b, a * xdata[-1] + b],
                         linestyle=':', color=plotColor)
            if dev is not None:
                # plot the standard deviation
                plt.fill_between(xdata, avg-dev, avg+dev, facecolor=plotColor, alpha=.3)
            # create the legend
            legend = plt.legend(handles=[dataLine, meanLine], loc=i+1,
                                fontsize='small', fancybox=True, shadow=True)
            # add the legend to the current axes
            plt.gca().add_artist(legend)
        # save image to internal memory buffer
        plt.savefig(image_contents.new_image(), dpi=80, bbox_inches='tight', format='png')
        # reset yData
        self.ydata = {}
        # close plot
        plt.close()
def _line_plot(self):
    fig = plt.figure(figsize=figsize)
    ax = plt.axes(frameon=False)
    self.data = self.data.sort_values(by=self.x)
    for i, c in enumerate(self.categories):
        i = self.n - (i + 1)
        df = self.data[self.data[self.hue] == c].rolling(self.smooth).mean()
        x_d = df.loc[df[self.hue] == c, self.x].values
        y_d = df.loc[df[self.hue] == c, self.y].values
        y_d = (y_d - np.nanmin(y_d))/(np.nanmax(y_d) - np.nanmin(y_d))
        y_d += i * self.offset
        y_min = np.tile(np.nanmin(y_d), (len(y_d)))
        col = self.colours[i % self.n_c]
        plt.plot(x_d, y_d, color=col, label=c, alpha=0.8)
        plt.fill_between(x_d, y_d, y_min, alpha=0.6, color=col)
    x_min = self.data[self.x].min()
    x_max = self.data[self.x].max()
    plt.xlim(x_min, x_max)
    plt.xlabel(self.x)
    ax.set_yticks([self.offset*i for i in range(self.n)[::-1]])
    ax.set_yticklabels(self.categories)
    return ax
def highlight(energy_axis, counts, zero_offset, energy_per_channel, channel_list, facecolor):
    left_region = (energy_axis >= zero_offset + energy_per_channel * channel_list[0])
    right_region = (energy_axis <= zero_offset + energy_per_channel * channel_list[1])
    region = left_region & right_region
    plt.fill_between(energy_axis, counts, where=region, facecolor=facecolor)
def plot_tasks(data, x_label, smoothing_function=None, xmin=None, xmax=None,
               max_reward=None, legend=True, save_directory=None, show_plots=True):
    x_label_upper = x_label[0].upper() + x_label[1:]
    for scalar, tasks in data.items():
        fig = plt.figure()
        # min_y = np.inf
        # max_y = -np.inf
        for task, epochs_values in sorted(tasks.items(), key=operator.itemgetter(0)):
            mean = np.mean(epochs_values["values"], axis=0)
            if smoothing_function:
                mean = smoothing_function(mean)
            # percentiles = np.percentile(epochs_values["values"], [25, 75], axis=0)
            std = np.std(epochs_values["values"], axis=0)
            std = std[len(std) - len(mean):]
            # error_min, error_max = mean - std, mean + std
            plt.plot(epochs_values["epochs"], mean, label="Task " + str(task))
            # plt.fill_between(x, error_min, error_max, alpha=0.3)
            # min_y = min(min_y, min(error_min))
            # max_y = max(max_y, max(filter(lambda x: x != np.inf, error_max)))
        if legend:
            plt.legend()
        plt.xlim(xmin=xmin, xmax=xmax)
        # plt.ylim(ymin=min(0, min_y), ymax=max(0, max_y + 0.1 * max_y))
        if "reward" in scalar.lower() and max_reward is not None:
            ymax = max_reward * 1.1
        else:
            ymax = None
        plt.ylim(ymax=ymax)
        plt.xlabel(x_label_upper)
        plt.ylabel(scalar)
        plt.title("{} per {}".format(scalar, x_label))
        fig.canvas.set_window_title("{} per {}".format(scalar, x_label))
        if save_directory is not None:
            plt.savefig(os.path.join(save_directory,
                                     "{}_per_{}".format(scalar, x_label) + IMAGES_EXT))
        if show_plots:
            plt.show()
def plot_errorbands(x, y, llci, ulci, plot_kws=None, err_kws=None, *args, **kwargs):
    err_kws_final = kwargs.copy()
    err_kws_final.update(err_kws or {})  # tolerate the err_kws=None default
    err_kws_final.update({'label': ''})
    plot_kws_final = kwargs.copy()
    plot_kws_final.update(plot_kws or {})  # tolerate the plot_kws=None default
    plt.plot(x, y, **plot_kws_final)
    plt.fill_between(x, llci, ulci, **err_kws_final)
    return None
def drawValidationCurve(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    # np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(5, 700)
    train_scores, valid_scores = validation_curve(self.adaReg, X, y, "n_estimators",
                                                  train_sizes, cv=5,
                                                  scoring='mean_squared_error')
    train_scores = -1.0/5 * train_scores
    valid_scores = -1.0/5 * valid_scores
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training MSE")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation MSE")
    plt.legend(loc="best")
    plt.xlabel('Estimators')
    plt.ylabel('MSE')
    plt.title('Validation Curve with AdaBoost-DecisionTree Regression\n'
              'on the parameter of Estimators when the Decision Tree has max depth=10')
    plt.grid(True)
    plt.show()
def drawValidationCurve(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    # np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(2, 60)
    train_scores, valid_scores = validation_curve(DecisionTreeRegressor(max_features=None),
                                                  X, y, "max_depth", train_sizes, cv=5,
                                                  scoring='mean_squared_error')
    train_scores = -1.0/5 * train_scores
    valid_scores = -1.0/5 * valid_scores
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training MSE")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation MSE")
    plt.legend(loc="best")
    plt.xlabel('Max Depth')
    plt.ylabel('MSE')
    plt.title('Validation Curve with Decision \nTree Regression on the parameter of Max Depth')
    plt.grid(True)
    plt.show()
def drawValidationCurve(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    # np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(2, 100, 2)
    train_scores, valid_scores = validation_curve(self.regr, X, y, "n_neighbors",
                                                  train_sizes, cv=5,
                                                  scoring='mean_squared_error')
    train_scores = -1.0/5 * train_scores
    valid_scores = -1.0/5 * valid_scores
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training MSE")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation MSE")
    plt.legend(loc="best")
    plt.xlabel('K Neighbors')
    plt.ylabel('MSE')
    plt.title('Validation Curve with KNN Regression on the parameter of K Neighbors')
    plt.grid(True)
    plt.show()
def drawValidationCurve_maxdepth(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    # np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(1, 60)
    train_scores, valid_scores = validation_curve(self.model, X, y, "max_depth",
                                                  train_sizes, cv=5,
                                                  scoring='mean_squared_error')
    train_scores = -1.0/5 * train_scores
    valid_scores = -1.0/5 * valid_scores
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training MSE")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation MSE")
    plt.legend(loc="best")
    plt.xlabel('Max Depth')
    plt.ylabel('MSE')
    plt.title('Validation Curve with Random Forest Regression \n'
              'on the parameter of Max Depth when n_estimators=32')
    plt.grid(True)
    plt.show()
def drawValidationCurve_estimators(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    # np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(1, 40)
    train_scores, valid_scores = validation_curve(self.model, X, y, "n_estimators",
                                                  train_sizes, cv=5,
                                                  scoring='mean_squared_error')
    train_scores = -1.0/5 * train_scores
    valid_scores = -1.0/5 * valid_scores
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training MSE")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation MSE")
    plt.legend(loc="best")
    plt.xlabel('Estimators')
    plt.ylabel('MSE')
    plt.title('Validation Curve with Random Forest Regression \n'
              'on the parameter of estimators when max_depth=39')
    plt.grid(True)
    plt.show()
def drawValidationCurve(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(1, 60)
    train_scores, valid_scores = validation_curve(self.clf, X, y, "n_neighbors",
                                                  train_sizes, cv=5)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Precision")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation Precision")
    plt.legend(loc="best")
    plt.xlabel('K Neighbors')
    plt.ylabel('Precision')
    plt.title('Validation Curve with KNN on the parameter of K')
    plt.grid(True)
    plt.show()
def drawValidationCurve_maxdepth(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(2, 60)
    train_scores, valid_scores = validation_curve(self.clf, X, y, "max_depth",
                                                  train_sizes, cv=5)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Precision")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation Precision")
    plt.legend(loc="best")
    plt.xlabel('Max Depth')
    plt.ylabel('Precision')
    plt.title('Validation Curve with Random Forest Classification\n'
              'on the parameter of Max Depth when n_estimators=20')
    plt.grid(True)
    plt.show()
def drawValidationCurve_estimators(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(2, 40)
    train_scores, valid_scores = validation_curve(self.clf, X, y, "n_estimators",
                                                  train_sizes, cv=5)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Precision")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation Precision")
    plt.legend(loc="best")
    plt.xlabel('Estimators')
    plt.ylabel('Precision')
    plt.title('Validation Curve with Random Forest Classification\n'
              'on the parameter of Estimators when Max Depth=30')
    plt.grid(True)
    plt.show()
def drawValidationCurve(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(2, 75)
    train_scores, valid_scores = validation_curve(self.ada, X, y, "n_estimators",
                                                  train_sizes, cv=5)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Precision")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation Precision")
    plt.legend(loc="best")
    plt.xlabel('Estimators')
    plt.ylabel('Precision')
    plt.title('Validation Curve with AdaBoost-DecisionTree on the parameter of Estimators')
    plt.grid(True)
    plt.show()
def drawValidationCurve(self):
    """
    To draw the validation curve
    :return: NA
    """
    X, y = self.X_train, self.y_train.ravel()
    indices = np.arange(y.shape[0])
    np.random.shuffle(indices)
    X, y = X[indices], y[indices]

    train_sizes = range(2, 60)
    train_scores, valid_scores = validation_curve(self.clf, X, y, "max_depth",
                                                  train_sizes, cv=5)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    valid_scores_mean = np.mean(valid_scores, axis=1)
    valid_scores_std = np.std(valid_scores, axis=1)

    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, valid_scores_mean - valid_scores_std,
                     valid_scores_mean + valid_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Precision")
    plt.plot(train_sizes, valid_scores_mean, '*-', color="g", label="Cross-validation Precision")
    plt.legend(loc="best")
    plt.xlabel('Max Depth (log2(all features) to be considered)')
    plt.ylabel('Precision')
    plt.title('Validation Curve with Decision Tree on the parameter of Max Depth')
    plt.grid(True)
    plt.show()
def draw_community_median_distances(user_topics_dir, distance_file, df):
    y_axis = [row['avg_distance'] for idx, row in df.iterrows()]
    x_axis = np.arange(0, len(y_axis))
    plt.figure(figsize=(20, 10))
    plt.plot(x_axis, y_axis, 'r')
    plt.fill_between(x_axis, y_axis, color='red', alpha=0.5)
    plt.xlabel('Community ID')
    plt.ylabel('Divergence from Clique')
    plt.title('Community Users Divergence from Clique', fontsize=14, fontweight='bold')
    plt.xticks(rotation='vertical', fontsize=8)
    plt.subplots_adjust(bottom=0.2)
    plt.ylim([0, np.log(2) + 0.01])
    plt.xlim([0, len(x_axis) - 1])
    plt.savefig(distance_file)
    plt.close()
def plot_with_fill(x, y, axis=0, std_error=False, color='r'):
    plt.plot(x, np.mean(y, axis), '.-', linewidth=2, color=color)
    lb, ub = fill_bounds(y, axis=axis, std_error=std_error)
    plt.fill_between(x, lb, ub, linewidth=0, facecolor=color, alpha=0.1)
def plot_P_rf(self):
    print(len(self.time), len(self.prf))
    zeros = np.zeros((len(self.prf)))
    where = self.prf >= 0
    plt.plot(self.time, self.prf, 'k')
    plt.fill_between(self.time, zeros, self.prf, where, facecolor='k')
    plt.show()
def test_ensemble(ensemble, sess, dataLoader):
    test_xs, test_ys = dataLoader.get_test_data()
    mean, var = ensemble_mean_var(ensemble, test_xs, sess)
    std = np.sqrt(var)
    upper = mean + 3*std
    lower = mean - 3*std
    test_xs_scaled = dataLoader.input_mean + dataLoader.input_std*test_xs
    plt.plot(test_xs_scaled, test_ys, 'b-')
    plt.plot(test_xs_scaled, mean, 'r-')
    plt.fill_between(test_xs_scaled[:, 0], lower[:, 0], upper[:, 0], color='yellow', alpha=0.5)
    plt.show()
def test_dropout(model, sess, dataLoader, args):
    test_xs, test_ys = dataLoader.get_test_data()
    mean, var = dropout_mean_var(model, test_xs, sess, args)
    std = np.sqrt(var)
    upper = mean + 3*std
    lower = mean - 3*std
    test_xs_scaled = dataLoader.input_mean + dataLoader.input_std*test_xs
    plt.plot(test_xs_scaled, test_ys, 'b-')
    plt.plot(test_xs_scaled, mean, 'r-')
    plt.fill_between(test_xs_scaled[:, 0], lower[:, 0], upper[:, 0], color='yellow', alpha=0.5)
    plt.show()
def plot_trace(ax, pivots, freq, err, n_std_dev=1, err_smoothing=3,
               show_errorbars=True, c='r', ls='-', label=None):
    ax.plot(pivots, freq, c=c, ls=ls, label=label)
    if show_errorbars:
        smerr = 1.0/np.convolve(1.0/err,
                                np.ones(err_smoothing, dtype=float)/err_smoothing,
                                mode='same')
        ax.fill_between(pivots,
                        np.maximum(0, freq - n_std_dev*smerr),
                        np.minimum(1, freq + n_std_dev*smerr),
                        facecolor=c, linewidth=0, alpha=0.1)