Python scipy.optimize module: fmin() code examples

We extracted the following 24 code examples from open-source Python projects to illustrate how to use scipy.optimize.fmin().
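Before the project examples, a minimal self-contained sketch of the basic call: fmin() implements the Nelder-Mead downhill simplex method and needs only the objective function. The Rosenbrock test function here is illustrative, not taken from any of the projects below.

from scipy.optimize import fmin

def rosenbrock(p):
    # Classic banana-shaped test function with its minimum at (1, 1)
    x, y = p
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2

xopt = fmin(rosenbrock, x0=[-1.0, 1.2], xtol=1e-6, ftol=1e-6, maxiter=1000, disp=False)
print(xopt)  # approximately [1. 1.]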

Project: trappist1 | Author: rodluger
def GetSigma():
  '''
  Get the standard deviation of the Gaussian PSF. I find `sigma` = 0.45 pixels.
  I also plotted the `x` and `y` obtained below and found that, on average, the
  maximum extent of the stellar motion is (x, y) ~ (2.5, 3.5) pixels.

  '''

  star = Everest(os.path.join(TRAPPIST_OUT, 'nPLDTrappist.fits'), TRAPPIST_EPIC)
  fpix = star.fpix.reshape(-1, 6, 6).swapaxes(1,2)
  guess = [3., 3., 1e4, 1e2, 0.5]
  niter = len(fpix) // 10
  x = np.zeros(niter)
  y = np.zeros(niter)
  a = np.zeros(niter)
  b = np.zeros(niter)
  sigma = np.zeros(niter)
  for n in prange(niter):
    x[n], y[n], a[n], b[n], sigma[n] = fmin(ChiSq, guess, args = (fpix[n * 10],), disp = 0)
  return np.nanmedian(sigma)
Project: scipy-lecture-notes-zh-CN | Author: jayleicn
def nelder_mead(x0, f, f_prime, hessian=None):
    all_x_i = [x0[0]]
    all_y_i = [x0[1]]
    all_f_i = [f(x0)]
    def store(X):
        x, y = X
        all_x_i.append(x)
        all_y_i.append(y)
        all_f_i.append(f(X))
    optimize.fmin(f, x0, callback=store, ftol=1e-12)
    return all_x_i, all_y_i, all_f_i




###############################################################################
# Run different optimizers on these problems
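A usage sketch (not part of the lecture notes) of the helper above on a simple quadratic bowl. The `f_prime` and `hessian` arguments are unused by nelder_mead itself and appear only so all optimizers in the notes share one signature, so passing None is fine:

import numpy as np

quadratic = lambda X: (X[0] - 1.0) ** 2 + 4.0 * (X[1] + 2.0) ** 2
all_x, all_y, all_f = nelder_mead(np.array([3.0, 3.0]), quadratic, f_prime=None)
print(all_x[-1], all_y[-1])  # converges toward (1, -2)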
Project: Python_SelfLearning | Author: fukuit
def main():
    a = math.sqrt(2)
    arg = (a, )
    [xopt, fopt, iter, funcalls, warnflag, allvecs] = fmin(
        banana,
        [-1, 1.2],
        args=arg,
        callback=cbf,
        xtol=1e-4,
        ftol=1e-4,
        maxiter=400,
        maxfun=400,
        disp=True,
        retall=True,
        full_output=True)
    for item in allvecs:
        print('%f, %f' % (item[0], item[1]))
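The `banana` objective and the `cbf` callback are defined elsewhere in the project; a plausible reconstruction consistent with the call above (a parameterized Rosenbrock whose minimum sits at (a, a**2)) might be:

def banana(x, a):
    # Hypothetical reconstruction -- the project's actual definition may differ
    return 100.0 * (x[1] - x[0] ** 2) ** 2 + (a - x[0]) ** 2

def cbf(xk):
    # fmin() calls this after each iteration with the current best vertex
    print('%f, %f' % (xk[0], xk[1]))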
Project: pwtools | Author: elcorto
def get_min(self, x0=None, **kwds):
        """Return [x,y] where z(x,y) = min(z) by minimizing z(x,y) w/
        scipy.optimize.fmin().

        Parameters
        ----------
        x0 : sequence, length (2,), optional
            Initial guess. If None then use the data grid point with the
            smallest `z` value.

        Returns
        -------
        [xmin, ymin]: 1d array (2,)
        """
        _kwds = dict(disp=0, xtol=1e-12, ftol=1e-8, maxfun=1e4, maxiter=1e4)
        _kwds.update(kwds)
        if x0 is None:
            idx0 = self.values.argmin()
            x0 = [self.xx[idx0], self.yy[idx0]]
        xopt = optimize.fmin(self, x0, **_kwds)
        return xopt
Project: pwtools | Author: elcorto
def get_min(self, x0=None, **kwds):
        """Minimize fit function by `scipy.optimize.fmin()`.

        Parameters
        ----------
        x0 : 1d array, optional
            Initial guess. If not given then `points[i,...]` at the min of
            `values` is used.
        **kwds : keywords to fmin()

        Returns
        -------
        1d array (ndim,)
        """            
        _kwds = dict(disp=0, xtol=1e-12, ftol=1e-8, maxfun=1e4, maxiter=1e4)
        _kwds.update(kwds)
        if x0 is None:
            idx = self.values.argmin()
            x0 = self.points[idx,...]
        xopt = optimize.fmin(self, x0, **_kwds)
        return xopt


# Need to inherit first Fit1D such that Fit1D.get_min() is used instead of
# PolyFit.get_min().
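In both pwtools methods, the object itself is handed to optimize.fmin() as the objective, so the fit/interpolator class must be callable. A stripped-down sketch of that pattern (not the actual pwtools class):

from scipy import optimize

class Paraboloid:
    # Minimal callable standing in for a fitted z(x, y) surface
    def __call__(self, p):
        x, y = p
        return (x - 0.5) ** 2 + (y + 1.0) ** 2

xopt = optimize.fmin(Paraboloid(), [0.0, 0.0], disp=0)  # ~ [0.5, -1.0]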
Project: crnpy | Author: etonello
def estimate_params(crn, start_t, end_t, incr, sample_times, data, start_params, **kw):
    """Find the values of the parameters that minimize the distance
    to the points at sample_times. Use fmin from scipy."""
    # find indices for comparison
    t = np.arange(start_t, end_t, incr)
    indices = [np.where(t >= x)[0][0] for x in sample_times]
    param_names = list(start_params.keys())
    param_values = list(start_params.values())

    # function to minimize
    def score(par):
        scores = []
        params = dict(zip(param_names, par))
        for dataset in data:
            initial_cond = dict((species, dataset[species][0]) for species in dataset)
            sol, _ = simulate(crn, params, initial_cond, start_t, end_t, incr)
            scores.append(score_distance(sol, dataset, indices))
        return sum(scores)

    return fmin(score, param_values, full_output=True, **kw)
Project: gamtools | Author: pombo-lab
def squared_difference(params, my_function, x, y):
    """
    Get the squared difference between the output of an arbitrary function
    called with arbitrary parameters, and the expected output.

    This function is useful for fitting probability distributions (e.g.
    using scipy's :func:`~scipy.optimize.fmin` function).

    :param tuple params: Tuple of parameters to be passed to my_function
    :param func my_function: Arbitrary function. Must accept exactly two \
            parameters, a tuple of parameters params and a numpy array x \
            of positions, and must itself return a numpy array.
    :param x: Positions at which to calculate the value of the function
    :type x: :class:`~numpy.ndarray`
    :param y: Expected value of the function at each position x.
    :type y: :class:`~numpy.ndarray`
    """

    z = my_function(params, x)
    return ((z - y) ** 2).sum()
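A usage sketch (not from gamtools): least-squares fitting of a straight line by handing squared_difference to fmin. The `line` model and the noisy data here are illustrative:

import numpy as np
from scipy.optimize import fmin

def line(params, x):
    slope, intercept = params
    return slope * x + intercept

x = np.linspace(0, 10, 50)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)
best = fmin(squared_difference, (1.0, 0.0), args=(line, x, y), disp=0)
print(best)  # roughly [2.0, 1.0]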
Project: PyFusionGUI | Author: SyntaxVoid
def EM_VMM_calc_best_fit_optimise(z,lookup=None,N=None):
    '''Calculates MLE approximate parameters for mean and kappa for
    the von Mises distribution. Can use a lookup table for the two Bessel
    functions, or a scipy optimiser if lookup=None

    SH: 23May2013 '''
    if N is None:
        N = len(z)
    z_bar = np.sum(z)/float(N)
    mean_theta = np.angle(z_bar)
    R_squared = np.real(z_bar * z_bar.conj())
    tmp = (float(N)/(float(N)-1))*(R_squared-1./float(N))
    # This catches problems with the sqrt below, which happen for very low
    # kappa values, i.e. terrible clusters; why they occur still needs to be tracked down.
    if tmp < 0:
        tmp = 0.
    R_e = np.sqrt(tmp)
    if lookup is None:
        tmp1 = opt.fmin(kappa_guess_func,3,args=(R_e,),disp=0)
        kappa = tmp1[0]
    else:
        min_arg = np.argmin(np.abs(lookup[0]-R_e))
        kappa = lookup[1][min_arg]
    return kappa, mean_theta, 1
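The `kappa_guess_func` minimized by opt.fmin() above is not shown in this snippet, but the same project defines it inside test_von_mises_fits further down this page. For reference, a standalone version:

import scipy.special as spec

def kappa_guess_func(kappa, R_e):
    # Squared residual of the von Mises MLE condition R_e = I1(kappa)/I0(kappa)
    return (R_e - spec.iv(1, kappa) / spec.iv(0, kappa)) ** 2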
Project: sequana | Author: sequana
def get_max_gc_correlation(self, reference):
        """Plot correlation between coverage and GC content by varying the GC window

         The GC content uses a moving window of size W. This parameter affects
         the correlation bewteen coverage and GC. This function find the
         *optimal* window length.

        """
        pylab.clf()
        corrs = []
        wss = []

        def func(params):
            ws = int(round(params[0]))
            if ws < 10:
                return 0
            self.bed.compute_gc_content(reference, ws)
            corr = self.get_gc_correlation()
            corrs.append(corr)
            wss.append(ws)
            return corr

        from scipy.optimize import fmin
        res = fmin(func, 100, xtol=1, disp=False)  # initial guess for the window size
        pylab.plot(wss, corrs, "o")
        pylab.xlabel("GC window size")
        pylab.ylabel("Correlation")
        pylab.grid()
        return res[0]
Project: EZClimate | Author: Litterman
def _minimize_node(self, node, m):
        from scipy.optimize import fmin
        return fmin(self._min_func, x0=m[node], args=(m, node), disp=False)
Project: HackCU-Machine-Learning | Author: spencer-hanson
def gradDescent(theta, x, y):
    result = optimize.fmin(costFunc, x0=theta, args=(x, y), maxiter=1000, full_output=True)
    return result[0], result[1]
Project: trump-weather | Author: JulianNorton
def optimizeTheta(mytheta, myX, myy , mylambda=.05):
    result = optimize.fmin(computeCost, x0=mytheta, args=(myX, myy, mylambda), maxiter=400, full_output=True)
    return result[0], result[1]
Project: trump-weather | Author: JulianNorton
def computeCost(mytheta, myX, myy, mylambda=0.):
    """
    mytheta is an n-dimensional vector of initial theta guesses
    myX is a matrix with n columns and m rows
    myy is a matrix with m rows and 1 column
    Note this includes regularization if you set mylambda to nonzero
    For the first part of the homework, the default 0. is used for mylambda
    """
    #note to self: *.shape is (rows, columns)
    term1 = np.dot(-np.array(myy).T,np.log(h(mytheta,myX)))
    term2 = np.dot((1-np.array(myy)).T,np.log(1-h(mytheta,myX)))
    regterm = (mylambda/2) * np.sum(np.dot(mytheta[1:].T,mytheta[1:])) #Skip theta0
    return float( (1./m) * ( np.sum(term1 - term2) + regterm ) )


# As an alternative to OCTAVE's 'fminunc', we'll use a scipy.optimize function, "fmin"
# Note "fmin" does not need to be told the derivative terms explicitly
# It only needs the cost function, and it minimizes with the "downhill simplex algorithm"
#http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.optimize.fmin.html
Project: trump-weather | Author: JulianNorton
def optimizeTheta(mytheta,myX,myy,mylambda=0.):
    result = optimize.fmin(computeCost, x0=mytheta, args=(myX, myy, mylambda), maxiter=400, full_output=True)
    return result[0], result[1]


# The cost function is the same as the one implemented above, since regularization
# is toggled off by the default argument (lambda = 0)
# I do not need a separate implementation of the cost function's derivative term,
# because the scipy optimization function I'm using only needs the cost function itself
#Let's check that the cost function returns a cost of 0.693 with zeros for initial theta,
#and regularized x values
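That check is a one-liner once the pieces are in place: with theta = 0 the sigmoid outputs 0.5 for every row, so the unregularized cost collapses to -ln(0.5) = ln(2) = 0.693... regardless of y. A self-contained sketch, where the sigmoid `h` and the data shapes are assumptions standing in for the homework's real data:

import numpy as np

def h(theta, X):
    # Sigmoid hypothesis, assumed to match the project's helper of the same name
    return 1.0 / (1.0 + np.exp(-np.dot(X, theta)))

m, n = 100, 3  # hypothetical number of samples and features (computeCost reads the global m)
myX = np.hstack([np.ones((m, 1)), np.random.randn(m, n - 1)])
myy = np.random.randint(0, 2, size=(m, 1))
print(computeCost(np.zeros((n, 1)), myX, myy))  # -> 0.6931... = ln(2)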
Project: gullikson-scripts | Author: kgullikson88
def guess_fit_parameters(self, fitorder=1):
        """
        Do a normal (non-bayesian) fit to the data.
        The result will be saved for use as initial guess parameters in the full MCMC fit.
        If you use a custom model, you will probably have to override this method as well.
        """
        pars = np.zeros(fitorder + 1)
        pars[-2] = 1.0
        min_func = lambda p, xi, yi, yerri: np.sum((yi - self.model(p, xi)) ** 2 / yerri ** 2)

        best_pars = fmin(min_func, x0=pars, args=(self.x, self.y, self.yerr))
        self.guess_pars = best_pars
        return best_pars
Project: gullikson-scripts | Author: kgullikson88
def guess_fit_parameters(self, fitorder=1):
        polypars = np.zeros(fitorder + 1)
        polypars[-2] = 1.0
        pars = [-7, 3.5]
        pars.extend(polypars)
        min_func = lambda p, xi, yi, yerri: np.sum((yi - self.model(p, xi)) ** 2 / yerri ** 2)

        best_pars = fmin(min_func, x0=pars, args=(self.x, self.y, self.yerr))
        self.guess_pars = best_pars
        return best_pars
Project: augur | Author: nextstrain
def minimize_clade_error(self):
        from scipy.optimize import fmin as minimizer
        if self.verbose:
            print("initial function value:", self.clade_fit(self.model_params))
            print("initial parameters:", self.model_params)
        self.model_params = minimizer(self.clade_fit, self.model_params, disp=self.verbose > 1)
        if self.verbose:
            print("final function value:", self.clade_fit(self.model_params))
            print("final parameters:", self.model_params, '\n')
Project: augur | Author: nextstrain
def minimize_af_error(self):
        from scipy.optimize import fmin as minimizer
        if self.verbose:
            print("initial function value:", self.af_fit(self.model_params))
            print("initial parameters:", self.model_params)
        self.model_params = minimizer(self.af_fit, self.model_params, disp=self.verbose > 1)
        if self.verbose:
            print("final function value:", self.af_fit(self.model_params))
            print("final parameters:", self.model_params, '\n')
Project: astronomy-utilities | Author: astronomeralex
def mle(data, guess=None):
    """
    this will do the minimization
    guess is the array of initial parameters
    if None or empty, the code will set it
    """

    # Avoid a mutable default argument: the original `guess=[]` kept
    # accumulating entries across calls.
    if guess is None or len(guess) == 0:
        guess = np.array([1., 2., np.median(data)])

    return fmin(mloglike, guess, args=(data,))
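`mloglike` itself is not shown. A hypothetical stand-in consistent with the three-element guess vector (something shape-like, something scale-like, and the data median as a location) could be a Student-t negative log-likelihood:

import numpy as np
from scipy import stats

def mloglike(params, data):
    # Hypothetical negative log-likelihood with the same calling convention
    df, scale, loc = params
    if df <= 0 or scale <= 0:
        return np.inf  # keep fmin away from invalid parameter regions
    return -np.sum(stats.t.logpdf(data, df, loc=loc, scale=scale))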
Project: gamtools | Author: pombo-lab
def fit_histogram(breaks, counts, initial_params=None):
    """
    Fit a composite negative binomial and lognormal distribution
    to a histogram of read coverage per genomic window by
    least-squares minimization using the nelder-mead simplex
    algorithm.

    :param breaks: Array containing histogram bin edges.
    :type breaks: :class:`~numpy.ndarray`
    :param counts: Array containing the number of windows \
            falling into each histogram bin.
    :type counts: :class:`~numpy.ndarray`
    :param tuple initial_params: Initial guess for parameters \
            of the composite distribution (see \
            :func:`~n_binom_plus_log_normal`).
    :returns: Tuple of parameters representing the best-fit \
            distribution.
    """

    if initial_params is None:
        initial_params = (6.89799811e-01, 5.08503431e-01,
                          2.69945316, 0.23982432,
                          0.15)

    # Temporarily silence fmin's convergence printout
    old_stdout = sys.stdout
    with open(os.devnull, "w") as sys.stdout:
        params = fmin(squared_difference,
                      initial_params,
                      (n_binom_plus_log_normal, breaks,
                       counts / float(sum(counts))),
                      xtol=0.00001, ftol=0.00001)
    sys.stdout = old_stdout
    return params
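A usage sketch with hypothetical data: histogram the per-window read counts with numpy, then hand the bin edges and counts to fit_histogram (n_binom_plus_log_normal is defined elsewhere in gamtools):

import numpy as np

window_coverage = np.random.negative_binomial(5, 0.5, size=10000)
counts, breaks = np.histogram(window_coverage, bins=50)
params = fit_histogram(breaks, counts)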
Project: PyFusionGUI | Author: SyntaxVoid
def test_von_mises_fits():
    N = 20000
    mu = 2.9
    kappa = 5
    mu_list = np.linspace(-np.pi,np.pi,20)
    kappa_list = range(1,50)
    mu_best_fit = []
    mu_record = []
    fig, ax = pt.subplots(nrows=2)
    def kappa_guess_func(kappa,R_e):
        return (R_e - spec.iv(1,kappa)/spec.iv(0,kappa))**2

    def calc_best_fit(theta):
        z = np.exp(1j*theta)
        z_bar = np.average(z)
        mean_theta = np.angle(z_bar)
        R_squared = np.real(z_bar * z_bar.conj())
        R_e = np.sqrt((float(N)/(float(N)-1))*(R_squared-1./float(N)))
        tmp1 = opt.fmin(kappa_guess_func,3,args=(R_e,))
        return mean_theta, tmp1[0]

    for mu in mu_list:
        kappa_best_fit = []
        for kappa in kappa_list:
            mu_record.append(mu)
            theta = np.random.vonmises(mu,kappa,N)
            mean_theta, kappa_best = calc_best_fit(theta)
            mu_best_fit.append(mean_theta)
            kappa_best_fit.append(kappa_best)
        ax[0].plot(kappa_list, kappa_best_fit,'.')
        ax[0].plot(kappa_list, kappa_list,'-')
    ax[1].plot(mu_record,mu_best_fit,'.')
    ax[1].plot(mu_record,mu_record,'-')
    fig.canvas.draw()
    fig.show()
Project: MatchingMarkets.py | Author: QuantEcon
def brute_search(self, weights, metaParamNames=list(),
                     objective=lambda x: x.loss,
                     negative_objective=False,
                     stochastic_objective=True,
                     stochastic_samples=25,
                     stochastic_precision=0.01,
                     ):
        """
        Uses brute-force grid search (scipy.optimize.brute) to find optimal simulation hyperparameters
        Note:
            You want runs passed in the solver to have __no randomness__
            Solving a stochastic simulation will cause problems with solver

        Arguments
        ---------
        weights: list<float>
                 weights to be optimized on
        metaParamNames: list<string>
                 list of names of arguments to be optimized on
                 the index respects the weights argument above
                 the strings should be the same as in a simulation run input
                 weights and metaParamNames together would form the
                 algoParams dict in a normal simulation
        objective: function(Market) -> float
            objective function
            by default number of matches
            can be changed to loss, for example, by
                "objective=lambda x: x.loss"
        Returns
        -------
        np.array of weights where function is minimized
                if negative_objective=True, where maximized
        """
        def this_run(w):
            # note - sign here
            # TODO fix negative sign
            sign = 1 if negative_objective else -1
            if stochastic_objective:
                result = 0
                # If objective stochastic, make montecarlo draws & average
                for i in range(stochastic_samples):
                    result += sign * \
                            self.single_run(w,
                                            metaParamNames=metaParamNames,
                                            objective=objective)
                # Average montecarlo draws
                result = result/stochastic_samples
                # Tune precision for convergence
                result = int(result/stochastic_precision)*stochastic_precision
                return result
            else:
                return sign*self.single_run(w,
                                            metaParamNames=metaParamNames,
                                            objective=objective)
        res = optimize.brute(this_run, weights, full_output=True, disp=True,
                             finish=optimize.fmin
                             )
        return res[0]
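The finish=optimize.fmin idiom above hands the best grid point to the simplex optimizer for local polishing. A self-contained sketch of that pattern with a toy loss (names here are illustrative):

from scipy import optimize

def toy_loss(w):
    return (w[0] - 0.3) ** 2 + (w[1] + 1.7) ** 2

# Coarse grid over each weight's range, then local refinement with fmin
ranges = (slice(-2, 2, 0.5), slice(-2, 2, 0.5))
best_w, best_val, grid, grid_vals = optimize.brute(
    toy_loss, ranges, full_output=True, finish=optimize.fmin)
print(best_w)  # approximately [0.3, -1.7]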
Project: PyCS | Author: COSMOGRAIL
def opt_magshift(lcs, sourcespline=None, verbose=False, trace=False):
    """
    If you don't give any sourcespline, this is a dirty rough magshift optimization,
    using the median mag level (without microlensing), once and for all.
    We don't touch the magshift of the first curve.

    If you do give a sourcespline, I'll optimize the magshift of each curve one by one to match the spline.
    New : I even touch the magshift of the first curve.

    We use the l1-norm of the residuals, not the usual r2, to avoid local minima. Important!
    This is done with the nosquare=True option in the call to r2!
    """

    if sourcespline is None:

        reflevel = np.median(lcs[0].getmags(noml = True))
        for l in lcs[1:]:
            level = np.median(l.getmags(noml = True))
            l.shiftmag(reflevel - level)
            if trace:
                pycs.gen.util.trace(lcs)
        if verbose:
            print "Magshift optimization done."

    else:

        #for l in lcs[1:]: # We don't touch the first one.
        for l in lcs:

            if verbose:
                print "Magshift optimization on %s ..." % (l)
            inip = l.magshift

            def setp(p):
                l.magshift = p

            def errorfct(p):
                setp(p)
                return pycs.gen.spl.r2([l], sourcespline, nosquare=True)

            minout = spopt.fmin(errorfct, inip, full_output=1, xtol=0.001, disp=verbose)
            popt = minout[0]
            if verbose:
                print "Optimal magshift: %.4f" % (popt) 
            setp(popt)
            if verbose:
                print "Magshift optimization of %s done." % (l)
        # We do not return anything
Project: pulse2percept | Author: uwescience
def test_Horsager2009():
    """Make sure the model can reproduce data from Horsager et al. (2009)

    Single-pulse data is taken from Fig.3, where the threshold current is
    reported for a given pulse duration and amplitude of a single pulse.
    We don't fit every data point to save some computation time, but make sure
    the model produces roughly the same values as reported in the paper for
    some data points.
    """

    def forward_pass(model, pdurs, amps):
        """Calculate model output based on a list of pulse durs and amps"""
        pdurs = np.array([pdurs]).ravel()
        amps = np.array([amps]).ravel()
        for pdur, amp in zip(pdurs, amps):
            in_arr = np.ones((2, 1))
            pt = p2p.stimuli.PulseTrain(model.tsample, amp=amp, freq=0.1,
                                        pulsetype='cathodicfirst',
                                        pulse_dur=pdur / 1000.0,
                                        interphase_dur=pdur / 1000.0)
            percept = model.model_cascade(in_arr, [pt.data], 'GCL', False)
            yield percept.data.max()

    def calc_error_amp(amp_pred, pdur, model):
        """Calculates the error in threshold current

        For a given data `pdur`, what is the `amp` needed for output `theta`?
        We're trying to find the current that produces output `theta`. Thus
        we calculate the error between output produced with `amp_pred` and
        `theta`.
        """
        theta_pred = list(forward_pass(model, pdur, amp_pred))[0]
        return np.log(np.maximum(1e-10, (theta_pred - model.theta) ** 2))

    def yield_fits(model, pdurs, amps):
        """Yields a threshold current by fitting the model to the data"""
        for pdur, amp in zip(pdurs, amps):
            yield scpo.fmin(calc_error_amp, amp, disp=0, args=(pdur, model))[0]

    # Data from Fig.3 in Horsager et al. (2009)
    pdurs = [0.07335, 0.21985, 0.52707, 3.96939]  # pulse duration in ms
    amps_true = [181.6, 64.7, 33.1, 14.7]  # threshold current in uA
    amps_expected = [201.9, 61.0, 29.2, 10.4]

    # Make sure our implementation comes close to ground-truth `amps`:
    # - Do the forward pass
    model = p2p.retina.Horsager2009(tsample=0.01 / 1000, tau1=0.42 / 1000,
                                    tau2=45.25 / 1000, tau3=26.25 / 1000,
                                    beta=3.43, epsilon=2.25, theta=110.3)
    amps_predicted = np.array(list(yield_fits(model, pdurs, amps_true)))

    # - Make sure the predicted values are still the same
    npt.assert_almost_equal(amps_expected, amps_predicted, decimal=0)