Python scipy.stats.norm module, pdf() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use scipy.stats.norm.pdf().
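
Before the project excerpts, here is a minimal standalone sketch of the call itself; the loc and scale keywords below are SciPy's standard mean/standard-deviation parameterization.

import numpy as np
from scipy.stats import norm

x = np.linspace(-3, 3, 7)
# Density of the standard normal N(0, 1), evaluated element-wise over the array.
print(norm.pdf(x))
# Density of N(1, 0.5**2), using the loc (mean) and scale (standard deviation) keywords.
print(norm.pdf(x, loc=1.0, scale=0.5))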

Project: ChainConsumer    Author: Samreay
def test_megkde_2d_basic():
    # Draw from normal, fit KDE, see if sampling from kde's pdf recovers norm
    np.random.seed(1)
    data = np.random.multivariate_normal([0, 1], [[1.0, 0.], [0., 0.75 ** 2]], size=10000)
    xs, ys = np.linspace(-4, 4, 50), np.linspace(-4, 4, 50)
    xx, yy = np.meshgrid(xs, ys, indexing='ij')
    samps = np.vstack((xx.flatten(), yy.flatten())).T
    zs = MegKDE(data).evaluate(samps).reshape(xx.shape)
    zs_x = zs.sum(axis=1)
    zs_y = zs.sum(axis=0)
    cs_x = zs_x.cumsum()
    cs_x /= cs_x[-1]
    cs_x[0] = 0
    cs_y = zs_y.cumsum()
    cs_y /= cs_y[-1]
    cs_y[0] = 0
    samps_x = interp1d(cs_x, xs)(np.random.uniform(size=10000))
    samps_y = interp1d(cs_y, ys)(np.random.uniform(size=10000))
    mu_x, std_x = norm.fit(samps_x)
    mu_y, std_y = norm.fit(samps_y)
    assert np.isclose(mu_x, 0, atol=0.1)
    assert np.isclose(std_x, 1.0, atol=0.1)
    assert np.isclose(mu_y, 1, atol=0.1)
    assert np.isclose(std_y, 0.75, atol=0.1)
Project: ChainConsumer    Author: Samreay
def test_grid_data(self):
        x, y = np.linspace(-3, 3, 200), np.linspace(-5, 5, 200)
        xx, yy = np.meshgrid(x, y, indexing='ij')
        xs, ys = xx.flatten(), yy.flatten()
        chain = np.vstack((xs, ys)).T
        pdf = (1 / (2 * np.pi)) * np.exp(-0.5 * (xs * xs + ys * ys / 4))
        c = ChainConsumer()
        c.add_chain(chain, parameters=['x', 'y'], weights=pdf, grid=True)
        summary = c.analysis.get_summary()
        x_sum = summary['x']
        y_sum = summary['y']
        expected_x = np.array([-1.0, 0.0, 1.0])
        expected_y = np.array([-2.0, 0.0, 2.0])
        threshold = 0.1
        assert np.all(np.abs(expected_x - x_sum) < threshold)
        assert np.all(np.abs(expected_y - y_sum) < threshold)
Project: ChainConsumer    Author: Samreay
def test_summary_max_symmetric_2(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.6827
        c.configure(statistics="max_symmetric", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(0, 2, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        xmax = xs[pdf.argmax()]
        cdf_top = skewnorm.cdf(summary[2], 5, 1, 1.5)
        cdf_bottom = skewnorm.cdf(summary[0], 5, 1, 1.5)
        area = cdf_top - cdf_bottom

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(area, summary_area, atol=0.05)
        assert np.isclose(summary[2] - summary[1], summary[1] - summary[0])
Project: ChainConsumer    Author: Samreay
def test_summary_max_symmetric_3(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.95
        c.configure(statistics="max_symmetric", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(0, 2, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        xmax = xs[pdf.argmax()]
        cdf_top = skewnorm.cdf(summary[2], 5, 1, 1.5)
        cdf_bottom = skewnorm.cdf(summary[0], 5, 1, 1.5)
        area = cdf_top - cdf_bottom

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(area, summary_area, atol=0.05)
        assert np.isclose(summary[2] - summary[1], summary[1] - summary[0])
Project: ChainConsumer    Author: Samreay
def test_summary_max_shortest_2(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.6827
        c.configure(statistics="max_shortest", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        x2 = interp1d(cdf, xs, bounds_error=False, fill_value=np.inf)(cdf + summary_area)
        dist = x2 - xs
        ind = np.argmin(dist)
        x0 = xs[ind]
        x2 = x2[ind]
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(x0, summary[0], atol=0.05)
        assert np.isclose(x2, summary[2], atol=0.05)
Project: ChainConsumer    Author: Samreay
def test_summary_max_shortest_3(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.95
        c.configure(statistics="max_shortest", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        x2 = interp1d(cdf, xs, bounds_error=False, fill_value=np.inf)(cdf + summary_area)
        dist = x2 - xs
        ind = np.argmin(dist)
        x0 = xs[ind]
        x2 = x2[ind]
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(x0, summary[0], atol=0.05)
        assert np.isclose(x2, summary[2], atol=0.05)
Project: ChainConsumer    Author: Samreay
def test_summary_max_central_2(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.6827
        c.configure(statistics="max_central", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        xval = interp1d(cdf, xs)([0.5 - 0.5 * summary_area, 0.5 + 0.5 * summary_area])
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(xval[0], summary[0], atol=0.05)
        assert np.isclose(xval[1], summary[2], atol=0.05)
Project: pyglmnet    Author: glm-tools
def _logL(distr, y, y_hat):
    """The log likelihood."""
    if distr in ['softplus', 'poisson']:
        eps = np.spacing(1)
        logL = np.sum(y * np.log(y_hat + eps) - y_hat)
    elif distr == 'gaussian':
        logL = -0.5 * np.sum((y - y_hat)**2)
    elif distr == 'binomial':
        # analytical formula
        logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))

        # but this prevents underflow
        # z = beta0 + np.dot(X, beta)
        # logL = np.sum(y * z - np.log(1 + np.exp(z)))
    elif distr == 'probit':
        logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))
    elif distr == 'gamma':
        # see
        # https://www.statistics.ma.tum.de/fileadmin/w00bdb/www/czado/lec8.pdf
        nu = 1.  # shape parameter, exponential for now
        logL = np.sum(nu * (-y / y_hat - np.log(y_hat)))
    return logL
Project: DW-POSSUM    Author: marksgraham
def add_motion_spikes(motion_mat,frequency,severity,TR):
    time = motion_mat[:,0]
    max_translation = 5 * severity / 1000 * np.sqrt(2 * np.pi)  # max translation in m; the sqrt(2*pi) factor cancels the normalisation constant of norm.pdf below
    max_rotation = np.radians(5 * severity) * np.sqrt(2 * np.pi)  # max rotation in rad
    time_blocks = np.floor(time[-1]/TR).astype(np.int32)
    for i in range(time_blocks):
        if np.random.uniform(0,1) < frequency: #Decide whether to add spike
            for j in range(1,4):
                if np.random.uniform(0, 1) < 1.0 / 6:
                    motion_mat[:,j] = motion_mat[:,j] \
                    + max_translation * random.uniform(-1,1) \
                    * norm.pdf(time,loc = (i+0.5)*TR,scale = TR/5)
            for j in range(4,7):
                if np.random.uniform(0, 1) < 1.0 / 6:
                    motion_mat[:,j] = motion_mat[:,j] \
                    + max_rotation * random.uniform(-1,1) \
                    * norm.pdf(time, loc=(i + 0.5 + np.random.uniform(-0.25, 0.25)) * TR, scale=TR / 5)
    return motion_mat
Project: MarkovModels    Author: pmontalb
def calculate_vega(forward, strike, implied_sigma, annuity, days_to_maturity):
    """ This function returns Vega, which is the same for both payer/receiver
    :param forward:
    :param strike:
    :param implied_sigma:
    :param annuity:
    :param days_to_maturity:
    :return:
    """
    from scipy.stats import norm
    from math import sqrt
    d1 = calculate_d1(forward, strike, implied_sigma, days_to_maturity)
    year_fraction = float(days_to_maturity) / 365
    vega = annuity * forward * sqrt(year_fraction) * norm.pdf(d1)

    return vega
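
The excerpt above calls a calculate_d1 helper that is not shown. Assuming the standard Black-76 convention (forward F, strike K, volatility sigma, year fraction T), a hypothetical reconstruction might look like the sketch below; only the function name and the 365-day year fraction come from the excerpt, the body itself is an assumption.

def calculate_d1(forward, strike, implied_sigma, days_to_maturity):
    """Hypothetical helper, assuming Black-76:
    d1 = (ln(F/K) + sigma**2 * T / 2) / (sigma * sqrt(T))."""
    from math import log, sqrt
    year_fraction = float(days_to_maturity) / 365
    return (log(forward / strike) + 0.5 * implied_sigma ** 2 * year_fraction) \
        / (implied_sigma * sqrt(year_fraction))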
Project: wide-deep-cnn    Author: DaniUPC
def manual_loss(self, logits, targets, K):
        """ Computes the Gaussian Mixture Loss out of the graph computation """
        mixs, sigmas, means = logits[:, 0:K], logits[:, K:2*K], logits[:, 2*K:]
        mixs = np.exp(mixs) / np.sum(np.exp(mixs), axis=1, keepdims=True)  # row-wise softmax
        sigmas = np.exp(sigmas)
        # Compute over all instances
        logexps = []
        for i in range(self.batch_size):
            sumexp = np.sum(
                [
                    mixs[i, k] *
                    norm.pdf(targets[i], means[i, k], sigmas[i, k])
                    for k in range(K)
                ]
            )
            logexps.append(np.log(sumexp))
        return -np.mean(logexps)
Project: pyGPGO    Author: hawk31
def ExpectedImprovement(self, tau, mean, std):
        """
        Expected Improvement acquisition function.

        Parameters
        ----------
        tau: float
            Best observed function evaluation.
        mean: float
            Point mean of the posterior process.
        std: float
            Point std of the posterior process.

        Returns
        -------
        float
            Expected improvement.
        """
        z = (mean - tau - self.eps) / (std + self.eps)
        return (mean - tau) * norm.cdf(z) + std * norm.pdf(z)
Project: pyGPGO    Author: hawk31
def tExpectedImprovement(self, tau, mean, std, nu=3.0):
        """
        Expected Improvement acquisition function. Only to be used with `tStudentProcess` surrogate.

        Parameters
        ----------
        tau: float
            Best observed function evaluation.
        mean: float
            Point mean of the posterior process.
        std: float
            Point std of the posterior process.

        Returns
        -------
        float
            Expected improvement.
        """
        gamma = (mean - tau - self.eps) / (std + self.eps)
        return gamma * std * t.cdf(gamma, df=nu) + std * (1 + (gamma ** 2 - 1)/(nu - 1)) * t.pdf(gamma, df=nu)
Project: Gaussian_process    Author: happyjin
def EI(params, means, stand_devi, parms_done, y, n_iterations, k):
    """
    Expected Improvement acquisition function
    :param params: test data
    :param means: GP posterior mean
    :param stand_devi: standard deviation
    :param parms_done: training data
    :param y: training targets
    :return: next point that need to pick up
    """
    s = 0.0005  # small value
    max_mean = np.max(y)

    f_max = max_mean + s
    z = (means - f_max) / stand_devi
    EI_vector = (means - f_max) * norm.cdf(z) + stand_devi * norm.pdf(z)
    max_index = np.where(EI_vector == np.max(EI_vector))
    next_point = params[max_index]
    if k == n_iterations-1:
        plt.subplot(2, 1, 2)
        plt.plot(params, EI_vector, label='EI')
        plt.legend(loc=3)
    return next_point
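
The expression above is the standard closed-form expected improvement for a Gaussian posterior, EI = (mu - f_best) * Phi(z) + sigma * phi(z) with z = (mu - f_best) / sigma. A quick toy check under those assumptions:

from scipy.stats import norm

mu, sigma, f_best = 1.2, 0.5, 1.0  # hypothetical posterior mean/std and incumbent best
z = (mu - f_best) / sigma
ei = (mu - f_best) * norm.cdf(z) + sigma * norm.pdf(z)
print(ei)  # positive, so sampling here is expected to improve on f_best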
Project: the-neural-perspective    Author: GokuMohandas
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
Project: the-neural-perspective    Author: GokuMohandas
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
Project: the-neural-perspective    Author: GokuMohandas
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='D')

    plt.legend()
    plt.show()
Project: PyCS    Author: COSMOGRAIL
def combigauss(subtds, subtderrs, truetds, lensmodelsigma = 0.0):
    """
    Give me submission delays and error bars, as well as the corresponding true delays, in form of numpy arrays.
    I compute the mean and sigma of the combined posterior on the fractional time delay distance error.
    """

    from scipy.stats import norm

    subtdoffs = subtds - truetds
    centers = subtdoffs/truetds
    sigmas = subtderrs/np.fabs(truetds)

    # We convolve with the lensmodelsigma:
    sigmas = np.sqrt(sigmas**2 + lensmodelsigma**2)

    sigma_combi = 1.0 / np.sqrt(np.sum(1.0 / (sigmas**2)))
    center_combi = sigma_combi**2 * np.sum( centers/sigmas**2 )

    probazero = norm.pdf(0.0, center_combi, sigma_combi)

    return (center_combi, sigma_combi, probazero)

    # To plot this you could do:
    #plt.plot(x, norm.pdf(x, center_combi, sigma_combi), ls="--", color="black")
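
A hedged usage sketch for combigauss, with synthetic arrays chosen purely for illustration:

import numpy as np

truetds = np.array([10.0, -25.0, 40.0])        # true delays
subtds = truetds + np.array([0.4, -0.8, 1.0])  # submitted delays
subtderrs = np.array([1.0, 2.0, 1.5])          # submitted 1-sigma errors

center, sigma, p0 = combigauss(subtds, subtderrs, truetds, lensmodelsigma=0.02)
print(center, sigma, p0)  # combined posterior centre, width, and density at zero offset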
Project: slam-tutorial-code    Author: tuongngoc
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        sigma_d = self.measurement_distance_stddev
        sigma_alpha = self.measurement_angle_stddev

        z = np.array(measurement)
        pred_z = np.array(predicted_measurement)
        z[1] = atan2(sin(z[1]), cos(z[1]))
        pred_z[1] = atan2(sin(pred_z[1]), cos(pred_z[1]))

        delta = z - pred_z
        delta[1] = atan2(sin(delta[1]), cos(delta[1]))

        P_d = normal_dist.pdf(delta[0], 0, sigma_d)
        P_alpha = normal_dist.pdf(delta[1], 0, sigma_alpha)

        return P_d * P_alpha
Project: slam-tutorial-code    Author: tuongngoc
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        sigma_d = self.measurement_distance_stddev
        sigma_alpha = self.measurement_angle_stddev

        z = np.array(measurement)
        pred_z = np.array(predicted_measurement)
        z[1] = atan2(sin(z[1]), cos(z[1]))
        pred_z[1] = atan2(sin(pred_z[1]), cos(pred_z[1]))

        delta = z - pred_z
        delta[1] = atan2(sin(delta[1]), cos(delta[1]))

        P_d = normal_dist.pdf(delta[0], 0, sigma_d)
        P_alpha = normal_dist.pdf(delta[1], 0, sigma_alpha)

        return P_d * P_alpha
Project: slam-tutorial-code    Author: tuongngoc
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        sigma_d = self.measurement_distance_stddev
        sigma_alpha = self.measurement_angle_stddev

        z = np.array(measurement)
        pred_z = np.array(predicted_measurement)
        z[1] = atan2(sin(z[1]), cos(z[1]))
        pred_z[1] = atan2(sin(pred_z[1]), cos(pred_z[1]))

        delta = z - pred_z
        delta[1] = atan2(sin(delta[1]), cos(delta[1]))

        P_d = normal_dist.pdf(delta[0], 0, sigma_d)
        P_alpha = normal_dist.pdf(delta[1], 0, sigma_alpha)

        return P_d * P_alpha
Project: crnn_tf    Author: liuhu-bigeye
def initiate_estimations(self):
        self.estimations.close()
        self.estimations = h5py.File(self.estimate_path, 'a')

        scale = 0.5
        eps = 1e-3
        # estimation shape (n_sample, max_h_len, max_t_len + 1)
        for idx in range(self.n_samples):
            token = self.tokens[idx]
            t_len = len(token)
            h_len = int(np.ceil(float(self.db['end_index'][idx] - self.db['begin_index'][idx] - self.c3d_depth) / float(self.depth_stride))) + 1

            steps = np.linspace(0, h_len-1, t_len)
            esti = [np.ones(h_len)[:, None]*eps] + [norm.pdf(np.arange(h_len)[:, None], loc=round(l), scale=scale) for l in steps]
            esti = np.hstack(esti)

            self.estimations[self.estimate_key][idx, :h_len, :t_len + 1] = copy.deepcopy(esti / esti.sum(axis=-1)[:, None] * 0.9)
        self.estimations.close()
        self.estimations = h5py.File(self.estimate_path, 'a')
Project: Machine-Learning-Projects    Author: poke19962008
def featureMapHist(lang='py', normed=True):
    freq = getValue(lang, type='freq')
    raw = np.array([])

    for i in range(len(freq)):
        raw = np.append(raw, [i]*freq[i])
    print "[SUCCESS] Calculated raw for", lang

    (mu, sigma) = np.mean(raw), np.std(raw)
    pdf = norm.pdf(raw, mu, sigma)

    if not normed:
        plt.plot(raw, pdf, label="%s"%lang)
    else:
        ax = fig.add_subplot(3, 2, langs.index(lang)+1)
        plt.plot(raw, pdf)
        ax.hist(raw, density=True, alpha=0.75)
        ax.set_title(lang)
Project: Bayesian-Optimisation    Author: hyperc54
def updateInterface1D(self,solver,bbox,history,bounds):
        x = np.linspace(0, 1, 80)
        z = list(map(lambda y: bbox.queryAt([y]), x))
        xx=np.atleast_2d(x).T
        z_pred, sigma2_pred = solver.gp.predict(xx, eval_MSE=True)
        self.ax_scat.clear()
        self.ax_approx.clear()
        self.ax_eimap.clear()

        self.ax_scat.plot(x,np.array(z))
        self.ax_scat.scatter(np.array(history)[:,0],np.array(history)[:,1])
        self.ax_approx.plot(x,np.array(z_pred))
        self.ax_approx.fill_between(x,np.array(z_pred)+np.array(np.sqrt(sigma2_pred)),np.array(z_pred)-np.array(np.sqrt(sigma2_pred)),alpha=0.2)

        self.ax_approx.plot(x)
        target=min(np.array(history)[:,1])
        mean,variance=solver.gp.predict(xx,eval_MSE=True)
        z=(target-mean)/np.sqrt(variance)
        self.ax_approx.plot(x,np.sqrt(variance)*(z*norm.cdf(z)+norm.pdf(z)))
Project: Bayesian-Optimisation    Author: hyperc54
def update_interface_1D(ax,ax2,solver,bbox,history):
    x = np.linspace(0, 1, 80)
    z = list(map(lambda y: bbox.queryAt([y]), x))
    xx=np.atleast_2d(x).T
    z_pred, sigma2_pred = solver.gp.predict(xx, eval_MSE=True)
    ax.clear()
    ax2.clear()
    ax.plot(x,np.array(z))
    ax.scatter(np.array(history)[:,0],np.array(history)[:,1])
    ax2.plot(x,np.array(z_pred))
    ax2.fill_between(x,np.array(z_pred)+np.array(np.sqrt(sigma2_pred)),np.array(z_pred)-np.array(np.sqrt(sigma2_pred)),alpha=0.2)
    ax.set_xlim([0,1])
    ax2.set_xlim([0,1])
    ax2.plot(x)
    target=min(np.array(history)[:,1])
    mean,variance=solver.gp.predict(xx,eval_MSE=True)
    z=(target-mean)/np.sqrt(variance)
    ax2.plot(x,np.sqrt(variance)*(z*norm.cdf(z)+norm.pdf(z)))
Project: Bayesian-Optimisation    Author: hyperc54
def update_interface_2D(ax,ax2,ax3,solver,bbox,history):
    x = np.linspace(0, 1, 200)
    y = np.linspace(0, 1, 200)
    x,y=np.meshgrid(x, y)
    xx=x.ravel()
    yy=y.ravel()
    #z=map(lambda x:bbox.queryAt(x),[np.array(i) for i in zip(xx,yy)])
    points_pred = np.array([np.array(s) for s in zip(xx, yy)])
    z_pred, sigma2_pred = solver.gp.predict(points_pred, eval_MSE=True)
    #target=min(map(lambda x:bbox.queryAt(x),[np.array(i) for i in history]))
    target=min(list(np.array(history)[:,1]))
    u=(target-z_pred)/np.sqrt(sigma2_pred)
    ax.clear()
    ax2.clear()
    #c1=ax.contourf(x,y,np.array(z).reshape(-1,len(x[0])))
    tt = np.array([np.asarray(p) for p in np.array(history).reshape(-1, 2)[:, 0]])
    ax.scatter(tt[:,0],tt[:,1])

    c2=ax2.contourf(x,y,np.array(z_pred).reshape(-1,len(x[0])))
    c3=ax3.contourf(x,y,np.array(np.sqrt(sigma2_pred)*(u*norm.cdf(u)+norm.pdf(u))).reshape(-1,len(x[0])))
    ax2.scatter(tt[:,0],tt[:,1])
    ax3.scatter(tt[:,0],tt[:,1])
    #c1.set_clim(min(z),max(z))
    #c2.set_clim(min(z),max(z))
Project: the-neural-perspective    Author: johnsonc
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
Project: the-neural-perspective    Author: johnsonc
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='untrained_D')

    plt.legend()
    plt.show()
Project: the-neural-perspective    Author: johnsonc
def plot_data_and_D(sess, model, FLAGS):

    # True data distribution with untrained D
    f, ax = plt.subplots(1)

    # p_data
    X = np.linspace(int(FLAGS.mu-3.0*FLAGS.sigma),
                    int(FLAGS.mu+3.0*FLAGS.sigma),
                    FLAGS.num_points)
    y = norm.pdf(X, loc=FLAGS.mu, scale=FLAGS.sigma)
    ax.plot(X, y, label='p_data')

    # Untrained p_discriminator
    untrained_D = np.zeros((FLAGS.num_points,1))
    for i in range(FLAGS.num_points // FLAGS.batch_size):
        batch_X = np.reshape(
            X[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)],
            (FLAGS.batch_size,1))
        untrained_D[FLAGS.batch_size*i:FLAGS.batch_size*(i+1)] = \
            sess.run(model.D,
                feed_dict={model.pretrained_inputs: batch_X})
    ax.plot(X, untrained_D, label='D')

    plt.legend()
    plt.show()
Project: SLAM    Author: jfjensen
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        d_true, alpha_true = measurement
        d_pred, alpha_pred = predicted_measurement

        # --->>> Compute difference in distance and bearing angle.
        d_diff = abs(d_pred - d_true)
        alpha_diff = (alpha_pred - alpha_true + pi) % (2 * pi) - pi

        # Important: make sure the angle difference works correctly and does
        # not return values offset by 2 pi or - 2 pi.
        # You may use the following Gaussian PDF function:
        # scipy.stats.norm.pdf(x, mu, sigma). With the import in the header,
        # this is normal_dist.pdf(x, mu, sigma).
        p_d = normal_dist.pdf(d_diff, 0, self.measurement_distance_stddev)
        p_alpha = normal_dist.pdf(alpha_diff, 0, self.measurement_angle_stddev)

        # Note that the two parameters sigma_d and sigma_alpha discussed
        # in the lecture are self.measurement_distance_stddev and
        # self.measurement_angle_stddev.
        return p_d * p_alpha
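
The modulo expression above maps any bearing difference into [-pi, pi), which is exactly the 2*pi-offset pitfall the comment warns about. A quick standalone check:

from math import pi

def wrap(angle):
    # Map an arbitrary angle difference into [-pi, pi).
    return (angle + pi) % (2 * pi) - pi

print(wrap(3.5 * pi))   # -0.5*pi rather than 3.5*pi
print(wrap(-1.9 * pi))  # approximately 0.1*pi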
Project: SLAM    Author: jfjensen
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        d_true, alpha_true = measurement
        d_pred, alpha_pred = predicted_measurement

        # --->>> Compute difference in distance and bearing angle.
        d_diff = abs(d_pred - d_true)
        alpha_diff = (alpha_pred - alpha_true + pi) % (2 * pi) - pi

        # Important: make sure the angle difference works correctly and does
        # not return values offset by 2 pi or - 2 pi.
        # You may use the following Gaussian PDF function:
        # scipy.stats.norm.pdf(x, mu, sigma). With the import in the header,
        # this is normal_dist.pdf(x, mu, sigma).
        p_d = normal_dist.pdf(d_diff, 0, self.measurement_distance_stddev)
        p_alpha = normal_dist.pdf(alpha_diff, 0, self.measurement_angle_stddev)

        # Note that the two parameters sigma_d and sigma_alpha discussed
        # in the lecture are self.measurement_distance_stddev and
        # self.measurement_angle_stddev.
        return p_d * p_alpha
Project: SLAM    Author: jfjensen
def probability_of_measurement(self, measurement, predicted_measurement):
        """Given a measurement and a predicted measurement, computes
           probability."""
        # Compute differences to real measurements.
        d_true, alpha_true = measurement
        d_pred, alpha_pred = predicted_measurement

        # --->>> Compute difference in distance and bearing angle.
        d_diff = abs(d_pred - d_true)
        alpha_diff = (alpha_pred - alpha_true + pi) % (2 * pi) - pi

        # Important: make sure the angle difference works correctly and does
        # not return values offset by 2 pi or - 2 pi.
        # You may use the following Gaussian PDF function:
        # scipy.stats.norm.pdf(x, mu, sigma). With the import in the header,
        # this is normal_dist.pdf(x, mu, sigma).
        p_d = normal_dist.pdf(d_diff, 0, self.measurement_distance_stddev)
        p_alpha = normal_dist.pdf(alpha_diff, 0, self.measurement_angle_stddev)

        # Note that the two parameters sigma_d and sigma_alpha discussed
        # in the lecture are self.measurement_distance_stddev and
        # self.measurement_angle_stddev.
        return p_d * p_alpha
Project: patchmatch    Author: samyblusseau
def dissimilarity_weights(m):
    w = int((m-1)/2)
    v = range(-w, w+1)
    [x, y] = meshgrid(v, v)
    g = norm.pdf(x) * norm.pdf(y)
    return g
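
For example, m = 5 yields an unnormalized separable Gaussian kernel peaked at the patch centre (this assumes meshgrid and norm are imported from numpy and scipy.stats in the enclosing module):

w5 = dissimilarity_weights(5)
print(w5.shape)  # (5, 5)
print(w5[2, 2])  # centre weight: norm.pdf(0)**2, about 0.1592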
Project: kernel_goodness_of_fit    Author: karlnapf
def test_austerity(self):
        np.random.seed(SEED)
        X = gen_X(SAMPLE_SIZE)


        def vectorized_log_lik(X,theta):
            return _vector_of_log_likelihoods(theta[0],theta[1],X)

        def log_density_prior(theta):
            return np.log(norm.pdf(theta[0],0, SIGMA_1)) + np.log(norm.pdf(theta[1],0, SIGMA_2))


        sample,_ = austerity(vectorized_log_lik,log_density_prior, X,0.01,batch_size=50,chain_size=10, thinning=1, theta_t=np.random.randn(2))
        assert_almost_equal(np.array([-0.2554517,  1.3805683]),sample[-1])
Project: kernel_goodness_of_fit    Author: karlnapf
def _vector_of_log_likelihoods(theta_1, theta_2, x):
    lik = np.log(0.5 * norm.pdf(x, theta_1, SIGMA_x) + 0.5 * norm.pdf(x, theta_1 + theta_2, SIGMA_x))
    return lik
Project: kernel_goodness_of_fit    Author: karlnapf
def _log_probability(theta_1,theta_2,x):
    log_lik = _log_lik(theta_1, theta_2, x)

    log_prior = np.log(norm.pdf(theta_1,0, SIGMA_1)) + np.log(norm.pdf(theta_2,0, SIGMA_2))

    return log_lik+log_prior
Project: kernel_goodness_of_fit    Author: karlnapf
def log_density_prior(theta):
    return np.log(norm.pdf(theta[0],0, SIGMA_1)) + np.log(norm.pdf(theta[1],0, SIGMA_2))
Project: kernel_goodness_of_fit    Author: karlnapf
def log_density_prior(theta):
    return np.log(norm.pdf(theta[0],0, SIGMA_1)) + np.log(norm.pdf(theta[1],0, SIGMA_2))
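
For points far in the tails, np.log(norm.pdf(...)) underflows to -inf even though the log-density itself is finite; scipy's logpdf evaluates the same quantity stably:

from scipy.stats import norm

# Equivalent, numerically stable form of the prior above:
#   norm.logpdf(theta[0], 0, SIGMA_1) + norm.logpdf(theta[1], 0, SIGMA_2)
print(norm.logpdf(40.0, 0, 1))  # about -800.9, where np.log(norm.pdf(40.0)) is -inf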
Project: BISIP    Author: clberube
def plot_deviance(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4,3))
        deviance = sol.MDL.trace('deviance')[:]
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"]+1, sampler_state["_iter"]+1, sampler_state["_thin"])
        plt.plot(x, deviance, "-", color="C3", label="Model deviance\nDIC = %.2f\nBPIC = %.2f" %(sol.MDL.DIC,sol.MDL.BPIC))
        plt.xlabel("Iteration")
        plt.ylabel("Model deviance")
        plt.legend(numpoints=1, loc="best", fontsize=9)
        plt.grid('on')
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
    if save:
        save_where = '/Figures/ModelDeviance/'
        working_path = getcwd().replace("\\", "/")+"/"
        save_path = working_path+save_where
        print("\nSaving model deviance figure in:\n", save_path)
        if not path.exists(save_path):
            makedirs(save_path)
        fig.savefig(save_path+'ModelDeviance-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')
    try:    plt.close(fig)
    except: pass
    if draw:    return fig
    else:       return None
Project: BISIP    Author: clberube
def plot_logp(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4,3))
        logp = logp_trace(sol.MDL)
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"]+1, sampler_state["_iter"]+1, sampler_state["_thin"])
        plt.plot(x, logp, "-", color="C3")
        plt.xlabel("Iteration")
        plt.ylabel("Log-likelihood")
        plt.legend(numpoints=1, loc="best", fontsize=9)
        plt.grid('on')
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
    if save:
        save_where = '/Figures/LogLikelihood/'
        working_path = getcwd().replace("\\", "/")+"/"
        save_path = working_path+save_where
        print("\nSaving logp trace figure in:\n", save_path)
        if not path.exists(save_path):
            makedirs(save_path)
        fig.savefig(save_path+'LogLikelihood-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')
    try:    plt.close(fig)
    except: pass
    if draw:    return fig
    else:       return None
Project: BISIP    Author: clberube
def plot_deviance(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4,3))
        deviance = sol.MDL.trace('deviance')[:]
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"]+1, sampler_state["_iter"]+1, sampler_state["_thin"])
        plt.plot(x, deviance, "-", color="C3", label="Model deviance\nDIC = %.2f\nBPIC = %.2f" %(sol.MDL.DIC,sol.MDL.BPIC))
        plt.xlabel("Iteration")
        plt.ylabel("Model deviance")
        plt.legend(numpoints=1, loc="best")
        plt.grid('on')
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
    if save:
        save_where = '/Figures/ModelDeviance/'
        working_path = getcwd().replace("\\", "/")+"/"
        save_path = working_path+save_where
        print("\nSaving model deviance figure in:\n", save_path)
        if not path.exists(save_path):
            makedirs(save_path)
        fig.savefig(save_path+'ModelDeviance-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')
    try:    plt.close(fig)
    except: pass
    if draw:    return fig
    else:       return None
Project: BISIP    Author: clberube
def plot_logp(sol, save=False, draw=True, save_as_png=True, fig_dpi=144):
    if save_as_png:
        save_as = 'png'
    else:
        save_as = 'pdf'
    filename = sol.filename.replace("\\", "/").split("/")[-1].split(".")[0]
    model = get_model_type(sol)
    if draw or save:
        fig, ax = plt.subplots(figsize=(4,3))
        logp = logp_trace(sol.MDL)
        sampler_state = sol.MDL.get_state()["sampler"]
        x = np.arange(sampler_state["_burn"]+1, sampler_state["_iter"]+1, sampler_state["_thin"])
        plt.plot(x, logp, "-", color="C3")
        plt.xlabel("Iteration")
        plt.ylabel("Log-likelihood")
        plt.legend(numpoints=1, loc="best")
        plt.grid('on')
        if sampler_state["_burn"] == 0:
            plt.xscale('log')
        else:
            plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        fig.tight_layout()
    if save:
        save_where = '/Figures/LogLikelihood/'
        working_path = getcwd().replace("\\", "/")+"/"
        save_path = working_path+save_where
        print("\nSaving logp trace figure in:\n", save_path)
        if not path.exists(save_path):
            makedirs(save_path)
        fig.savefig(save_path+'LogLikelihood-%s-%s.%s'%(model,filename,save_as), dpi=fig_dpi, bbox_inches='tight')
    try:    plt.close(fig)
    except: pass
    if draw:    return fig
    else:       return None
Project: wallstreet    Author: mcdallas
def _fprime(self, sigma):
        logSoverK = log(self.S/self.K)
        n12 = ((self.r + sigma**2/2)*self.T)
        numerd1 = logSoverK + n12
        d1 = numerd1/(sigma*sqrt(self.T))
        return self.S*sqrt(self.T)*norm.pdf(d1)*exp(-self.r*self.T)
Project: CRN_ProbabilisticInversion    Author: elaloy
def CompPrior(X,MCPar):

    prior=np.ones((X.shape[0],1))
    log_prior=np.zeros((X.shape[0],1))
    for ii in range(0, X.shape[0]):
        if (MCPar.Prior[0:9]=='Prior_CRN'):
            # Uniform prior for Ninh
            prior[ii,0] = 1.0/(MCPar.ub[0,MCPar.idx_unif2]-MCPar.lb[0,MCPar.idx_unif2])
            log_prior[ii,0]=np.log(prior[ii,0])

            # Uniform prior for Age
            prior[ii,0] = 1.0/(MCPar.ub[0,MCPar.idx_unif1]-MCPar.lb[0,MCPar.idx_unif1])
            log_prior[ii,0]=np.log(prior[ii,0])

            if (MCPar.Prior[10]=='1'): # Gaussian prior for erosion rate
                prior[ii,0] = prior[ii,0]*norm.pdf(X[ii,MCPar.idx_norm],MCPar.pmu,MCPar.psd)
                log_prior[ii,0]+=np.log(prior[ii,0])
            else: # Uniform prior for erosion rate
                prior[ii,0] = prior[ii,0]*(1.0/(MCPar.ub[0,MCPar.idx_unif0]-MCPar.lb[0,MCPar.idx_unif0]))
                log_prior[ii,0]+=np.log(prior[ii,0])

            # Check log_p for inf
            if np.isinf(log_prior[ii,0])==True:
                log_prior[ii,0]=1e-200
        else: # Uniform prior for every variable
            for jj in range(0, MCPar.n):
                prior[ii,0] = prior[ii,0]*(1.0/(MCPar.ub[0,jj]-MCPar.lb[0,jj])) 
                log_prior[ii,0]+=np.log(prior[ii,0])
    return prior, log_prior
Project: Psi-staircase    Author: NNiehof
def __genprior(self, x, distr='uniform', mu=0, sig=1):
        """Generate prior probability distribution for variable.

        Arguments
        ---------
            x   :  1D numpy array (float64)
                    points to evaluate the density at.

            distr :  string
                    Distribution to use a prior :
                        'uniform'   (default) discrete uniform distribution

                        'normal'   normal distribution

                        'gamma'    gamma distribution

                        'beta'     beta distribution

            mu :  scalar float
                first parameter of distr distribution (check scipy for parameterization)

            sig : scalar float
                second parameter of distr distribution

        Returns
        -------
        1D numpy array of prior probabilities (unnormalized)
        """
        if distr == 'uniform':
            nx = len(x)
            p = np.ones(nx) / nx
        elif distr == 'normal':
            p = norm.pdf(x, mu, sig)
        elif distr == 'beta':
            p = beta.pdf(x, mu, sig)
        elif distr == 'gamma':
            p = gamma.pdf(x, mu, scale=sig)
        else:
            nx = len(x)
            p = np.ones(nx) / nx
        return p
Project: Psi-staircase    Author: NNiehof
def __entropy(self, pdf):
        """Calculate shannon entropy of posterior distribution.
        Arguments
        ---------
            pdf :   ndarray (float64)
                    posterior distribution of psychometric curve parameters for each stimuli


        Returns
        -------
        1D numpy array (float64) : Shannon entropy of posterior for each stimuli
        """
        # Marginalize out all nuisance parameters, i.e. all except alpha and sigma
        postDims = np.ndim(pdf)
        if self.marginalize == True:
            while postDims > 3:  # marginalize out second-to-last dimension, last dim is x
                pdf = np.sum(pdf, axis=-2)
                postDims -= 1
        # find expected entropy, suppress divide-by-zero and invalid value warnings
        # as this is handled by the NaN redefinition to 0
        with np.errstate(divide='ignore', invalid='ignore'):
            entropy = np.multiply(pdf, np.log(pdf))
        entropy[np.isnan(entropy)] = 0  # define 0*log(0) to equal 0
        dimSum = tuple(range(postDims - 1))  # dimensions to sum over. also a Chinese dish
        entropy = -(np.sum(entropy, axis=dimSum))
        return entropy
Project: Psi-staircase    Author: NNiehof
def minEntropyStim(self):
        """Find the stimulus intensity based on the expected information gain.

        Minimum Shannon entropy is used as selection criterion for the stimulus intensity in the upcoming trial.
        """
        self.nX = len(self.stimRange)
        self.nDims = np.ndim(self.pdf)

        # make pdf the same dims as conditional prob table likelihood
        self.pdfND = np.expand_dims(self.pdf, axis=self.nDims)  # append new axis
        self.pdfND = np.tile(self.pdfND, (self.nX))  # tile along new axis

        # Probabilities of response r (succes, failure) after presenting a stimulus
        # with stimulus intensity x at the next trial, multiplied with the prior (pdfND)
        self.pTplus1success = np.multiply(self.likelihood, self.pdfND)
        self.pTplus1failure = self.pdfND - self.pTplus1success

        # Probability of success or failure given stimulus intensity x, p(r|x)
        self.sumAxes = tuple(range(self.nDims))  # sum over all axes except the stimulus intensity axis
        self.pSuccessGivenx = np.sum(self.pTplus1success, axis=self.sumAxes)
        self.pFailureGivenx = np.sum(self.pTplus1failure, axis=self.sumAxes)

        # Posterior probability of parameter values given stimulus intensity x and response r
        # p(alpha, sigma | x, r)
        self.posteriorTplus1success = self.pTplus1success / self.pSuccessGivenx
        self.posteriorTplus1failure = self.pTplus1failure / self.pFailureGivenx

        # Expected entropy for the next trial at intensity x, producing response r
        self.entropySuccess = self.__entropy(self.posteriorTplus1success)
        self.entropyFailure = self.__entropy(self.posteriorTplus1failure)
        self.expectEntropy = np.multiply(self.entropySuccess, self.pSuccessGivenx) + np.multiply(self.entropyFailure,
                                                                                                 self.pFailureGivenx)
        self.minEntropyInd = np.argmin(self.expectEntropy)  # index of smallest expected entropy
        self.xCurrent = self.stimRange[self.minEntropyInd]  # stim intensity at minimum expected entropy

        self.iTrial += 1
        if self.iTrial == (self.nTrials - 1):
            self.stop = 1
Project: OptML    Author: johannespetrat
def expected_improvement(self, optimiser, x):
        mu,std = optimiser.predict([x], return_std=True)
        current_best = max([score for score, params in self.hyperparam_history])
        gamma = (current_best - mu[0])/std[0]
        exp_improv = std[0] * (gamma * norm.cdf(gamma) + norm.pdf(gamma))
        return -1 * exp_improv
Project: ChainConsumer    Author: Samreay
def test_extents_weighted():
    xs = np.random.uniform(low=-4, high=4, size=100000)
    weights = norm.pdf(xs)
    low, high = get_extents(xs, weights)
    threshold = 0.5
    assert np.abs(low + 4) < threshold
    assert np.abs(high - 4) < threshold
Project: ChainConsumer    Author: Samreay
def test_megkde_1d_basic():
    # Draw from normal, fit KDE, see if sampling from kde's pdf recovers norm
    np.random.seed(0)
    data = np.random.normal(loc=0, scale=1.0, size=2000)
    xs = np.linspace(-3, 3, 100)
    ys = MegKDE(data).evaluate(xs)
    cs = ys.cumsum()
    cs /= cs[-1]
    cs[0] = 0
    samps = interp1d(cs, xs)(np.random.uniform(size=10000))
    mu, std = norm.fit(samps)
    assert np.isclose(mu, 0, atol=0.1)
    assert np.isclose(std, 1.0, atol=0.1)