Python scipy.optimize module: fmin_cg() example source code

The following 18 code examples, extracted from open-source Python projects, illustrate how to use scipy.optimize.fmin_cg().
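
Before the project snippets, here is a minimal, self-contained sketch of the calling pattern most of these examples share: an objective function, an optional analytic gradient passed via fprime, extra data forwarded through args, and an iteration cap via maxiter. The quadratic objective and all variable names below are illustrative only and are not taken from any of the listed projects.

import numpy as np
from scipy import optimize

def cost(params, *args):
    """Illustrative least-squares objective ||A @ params - b||^2."""
    A, b = args
    r = A.dot(params) - b
    return float(r.dot(r))

def cost_grad(params, *args):
    """Analytic gradient of the objective: 2 * A^T (A @ params - b)."""
    A, b = args
    return 2.0 * A.T.dot(A.dot(params) - b)

rng = np.random.RandomState(0)
A = rng.rand(20, 5)
b = rng.rand(20)
x0 = np.zeros(5)

# fprime supplies the analytic gradient, so fmin_cg does not fall back to
# finite differences; args is forwarded unchanged to both f and fprime.
x_opt = optimize.fmin_cg(cost, x0, fprime=cost_grad, args=(A, b), maxiter=200, disp=0)
print(x_opt)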

Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        args = (self._input, self._indicatorFunction)
        initialParams = self.initialise()

        params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                  args=args, maxiter = self._maxiter)

        self._trainedParams = params
Project: hco-experiments    Author: zooniverse    | project source | file source
def fit(self, X):
        from scipy import optimize

        def objective(W, *args):
            X, pad = args
            Obj, DeltaW = self.objective(X, W)
            return Obj

        def objectiveG(W, *args):
            X, pad = args
            #print type(X)
            Obj, DeltaW = self.objective(X, W)
            return DeltaW

        n, m = np.shape(X)
        self.n = n
        args = (X, 1)
        initW = np.ravel(np.random.rand(self.k, n), order="F")
        optW = optimize.fmin_cg(objective, x0=initW, fprime=objectiveG, \
                                args=args, maxiter=self.maxiter, \
                                callback=self.callback)
        self.trainedW = optW
Project: hco-experiments    Author: zooniverse    | project source | file source
def fit(self, X):
        from scipy import optimize

        def objective(W, *args):
            X, pad = args
            Obj, DeltaW = self.objective(X, W)
            return Obj

        def objectiveG(W, *args):
            X, pad = args
            #print type(X)
            Obj, DeltaW = self.objective(X, W)
            return DeltaW

        n, m = np.shape(X)
        self.n = n
        args = (X, 1)
        initW = np.ravel(np.random.rand(self.k, n), order="F")
        optW = optimize.fmin_cg(objective, x0=initW, fprime=objectiveG, \
                                args=args, maxiter=self.maxiter, \
                                callback=self.callback)
        self.trainedW = optW
Project: scipy-lecture-notes-zh-CN    Author: jayleicn    | project source | file source
def conjugate_gradient(x0, f, f_prime, hessian=None):
    all_x_i = [x0[0]]
    all_y_i = [x0[1]]
    all_f_i = [f(x0)]
    def store(X):
        x, y = X
        all_x_i.append(x)
        all_y_i.append(y)
        all_f_i.append(f(X))
    optimize.fmin_cg(f, x0, f_prime, callback=store, gtol=1e-12)
    return all_x_i, all_y_i, all_f_i
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        args = (self._input, self._indicatorFunction)
        initialParams = self.initialise()

        params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                  args=args, maxiter = self._maxiter)

        self._trainedParams = params
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        args = (self._input, self._indicatorFunction)
        initialParams = self.initialise()

        params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                  args=args, maxiter = self._maxiter)

        self._trainedParams = params
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        args = (self._input, self._indicatorFunction)
        initialParams = self.initialise()

        params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                  args=args, maxiter = self._maxiter)

        self._trainedParams = params
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        args = (self._input, self._indicatorFunction)
        initialParams = self.initialise()

        params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                  args=args, maxiter = self._maxiter)

        self._trainedParams = params
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        args = (self._input, self._indicatorFunction)
        initialParams = self.initialise()

        params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                  args=args, maxiter = self._maxiter)

        self._trainedParams = params
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        args = (self._input, self._indicatorFunction)
        initialParams = self.initialise()

        params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                  args=args, maxiter = self._maxiter)

        self._trainedParams = params
Project: EvoloPy-NN    Author: 7ossam81    | project source | file source
def __call__(self, net, input, target):
        from scipy.optimize import fmin_cg
        if 'disp' not in self.kwargs:
            self.kwargs['disp'] = 0
        x = fmin_cg(self.fcn, self.x.copy(), fprime=self.grad, callback=self.step, **self.kwargs)
        self.x[:] = x
        return None
Project: SPIND    Author: LiuLab-CSRC    | project source | file source
def refine(solution, qs, refine_cycle):
  A_refined = solution.A.copy()
  def _fun(x, *argv):
    asx, bsx, csx, asy, bsy, csy, asz, bsz, csz = x
    h, k, l, qx, qy, qz = argv
    r1 = (asx*h + bsx*k + csx*l - qx)
    r2 = (asy*h + bsy*k + csy*l - qy)
    r3 = (asz*h + bsz*k + csz*l - qz)
    return r1**2. + r2**2. + r3**2.

  def _gradient(x, *argv):
    asx, bsx, csx, asy, bsy, csy, asz, bsz, csz = x
    h, k, l, qx, qy, qz = argv
    r1 = (asx*h + bsx*k + csx*l - qx)
    r2 = (asy*h + bsy*k + csy*l - qy)
    r3 = (asz*h + bsz*k + csz*l - qz)
    g_asx, g_bsx, g_csx = 2.*h*r1, 2.*k*r1, 2.*l*r1
    g_asy, g_bsy, g_csy = 2.*h*r2, 2.*k*r2, 2.*l*r2
    g_asz, g_bsz, g_csz = 2.*h*r3, 2.*k*r3, 2.*l*r3
    return np.asarray((g_asx, g_bsx, g_csx,
               g_asy, g_bsy, g_csy,
               g_asz, g_bsz, g_csz))
  rhkls = solution.rhkls
  pair_ids = solution.pair_ids
  for i in range(refine_cycle):
    for j in range(len(pair_ids)):  # refine by each reflection
      pair_id = pair_ids[j]
      x0 = A_refined.reshape((-1))
      rhkl = rhkls[pair_id,:]
      q = qs[pair_id,:]
      args = (rhkl[0], rhkl[1], rhkl[2], q[0], q[1], q[2])
      res = fmin_cg(_fun, x0, fprime=_gradient, args=args, disp=0)
      A_refined = res.reshape((3,3))
  eXYZs = np.abs(A_refined.dot(rhkls.T) - qs.T).T
  dists = norm(eXYZs, axis=1)
  pair_dist = dists[pair_ids].mean()

  if pair_dist < solution.pair_dist:
    solution.A_refined = A_refined
    solution.pair_dist_refined = pair_dist
    solution.hkls_refined = np.linalg.inv(A_refined).dot(qs.T).T
    solution.rhkls_refined = np.rint(solution.hkls_refined)
    solution.ehkls_refined = np.abs(solution.hkls_refined - solution.rhkls_refined)
  else:
    solution.A_refined = solution.A.copy()
    solution.pair_dist_refined = solution.pair_dist
    solution.hkls_refined = solution.hkls.copy()
    solution.rhkls_refined = solution.rhkls.copy()
    solution.ehkls_refined = solution.ehkls.copy()
  return solution
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self, retry=3):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        try:
            targets = self._indicatorFunction
        except AttributeError:
            targets = self._targets

        # Attempt to handle the case where the optimization raises the following warning:
        #        Warning: Desired accuracy not necessarily achieved due to precision loss.
        #
        # This seems to mean that the function was unable to calculate a gradient (?), which could mean we
        # have randomly initialised the parameters in a bad (flat) region of the parameter space. This
        # seems to be supported by the fact that it usually happens after 1 iteration. Try to solve
        # it by catching the warning and allowing the network to retry with a new randomly initialised set
        # of parameters a number of times.
        # The warning is just a print statement, not a real warning, so we have to capture the printed
        # output, search it for the string "Warning", and branch on that.
        # (A sketch of a stdout-capturing helper in the spirit of Capturing is shown after this function.)

        args = (self._input, targets)
        counter = 0
        output = [] # list to pass to Capturing to store print statements
        while counter <= retry:
            if counter > 0:
                print()
                print("Training %s (Attempt: %d)..." % (self.search(), counter+1))
                print()
            initialParams = self.initialise()
            with Capturing(output) as output:
                params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                              args=args, maxiter = self._maxiter)
            print(output[counter])
            if "Warning" in output[counter]:
                if counter == retry:
                    print("Optimisation has failed %d times. Aborting training!" % (counter+1))
                print("Optimisation failed on attempt number %d!" % (counter+1))
                counter += 1
                continue
            self._trainedParams = params
            break
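
The train(retry=...) variants above and below rely on a Capturing context manager, defined elsewhere in the hco-experiments project, to redirect whatever fmin_cg prints into a list so that output can be searched for the string "Warning". The project's own implementation is not reproduced on this page; the recipe below is a common stdout-capturing idiom that matches how Capturing is used here, included purely as an assumed illustration.

from io import StringIO
import sys

class Capturing(list):
    """Capture everything printed to stdout inside a `with` block into this list.

    A generic recipe, assumed for illustration; the hco-experiments project's
    own Capturing class may differ.
    """
    def __enter__(self):
        # Swap stdout for an in-memory buffer for the duration of the block.
        self._stdout = sys.stdout
        sys.stdout = self._stringio = StringIO()
        return self

    def __exit__(self, *exc):
        # Append the captured lines to the list and restore the real stdout.
        self.extend(self._stringio.getvalue().splitlines())
        del self._stringio
        sys.stdout = self._stdout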
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self, retry=3):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        try:
            targets = self._indicatorFunction
        except AttributeError:
            targets = self._targets

        # Attempt to handle the case where the optimization raises the following warning:
        #        Warning: Desired accuracy not necessarily achieved due to precision loss.
        #
        # This seems to mean that the function was unable to calculate a gradient (?), which could mean we
        # have randomly initialised the parameters in a bad (flat) region of the parameter space. This
        # seems to be supported by the fact that it usually happens after 1 iteration. Try to solve
        # it by catching the warning and allowing the network to retry with a new randomly initialised set
        # of parameters a number of times.
        # The warning is just a print statement, not a real warning, so we have to capture the printed
        # output, search it for the string "Warning", and branch on that.

        args = (self._input, targets)
        counter = 0
        output = [] # list to pass to Capturing to store print statements
        while counter <= retry:
            if counter > 0:
                print()
                print("Training %s (Attempt: %d)..." % (self.search(), counter+1))
                print()
            initialParams = self.initialise()
            with Capturing(output) as output:
                params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                              args=args, maxiter = self._maxiter)
            print(output[counter])
            if "Warning" in output[counter]:
                if counter == retry:
                    print("Optimisation has failed %d times. Aborting training!" % (counter+1))
                print("Optimisation failed on attempt number %d!" % (counter+1))
                counter += 1
                continue
            self._trainedParams = params
            break
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self, retry=3):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        try:
            targets = self._indicatorFunction
        except AttributeError:
            targets = self._targets

        # Attempt to handle the case where the optimization raises the following warning:
        #        Warning: Desired accuracy not necessarily achieved due to precision loss.
        #
        # This seems to mean that the function was unable to calculate a gradient (?), which could mean we
        # have randomly initialised the parameters in a bad (flat) region of the parameter space. This
        # seems to be supported by the fact that it usually happens after 1 iteration. Try to solve
        # it by catching the warning and allowing the network to retry with a new randomly initialised set
        # of parameters a number of times.
        # The warning is just a print statement, not a real warning, so we have to capture the printed
        # output, search it for the string "Warning", and branch on that.

        args = (self._input, targets)
        counter = 0
        output = [] # list to pass to Capturing to store print statements
        while counter <= retry:
            if counter > 0:
                print()
                print("Training %s (Attempt: %d)..." % (self.search(), counter+1))
                print()
            initialParams = self.initialise()
            with Capturing(output) as output:
                params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                              args=args, maxiter = self._maxiter)
            print(output[counter])
            if "Warning" in output[counter]:
                if counter == retry:
                    print("Optimisation has failed %d times. Aborting training!" % (counter+1))
                print("Optimisation failed on attempt number %d!" % (counter+1))
                counter += 1
                continue
            self._trainedParams = params
            break
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self, retry=3):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        try:
            targets = self._indicatorFunction
        except AttributeError:
            targets = self._targets

        # Attempt to handle the case where the optimization raises the following warning:
        #        Warning: Desired accuracy not necessarily achieved due to precision loss.
        #
        # This seems to mean that the function was unable to calculate a gradient (?), which could mean we
        # have randomly initialised the parameters in a bad (flat) region of the parameter space. This
        # seems to be supported by the fact that it usually happens after 1 iteration. Try to solve
        # it by catching the warning and allowing the network to retry with a new randomly initialised set
        # of parameters a number of times.
        # The warning is just a print statement, not a real warning, so we have to capture the printed
        # output, search it for the string "Warning", and branch on that.

        args = (self._input, targets)
        counter = 0
        output = [] # list to pass to Capturing to store print statements
        while counter <= retry:
            if counter > 0:
                print()
                print("Training %s (Attempt: %d)..." % (self.search(), counter+1))
                print()
            initialParams = self.initialise()
            with Capturing(output) as output:
                params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                              args=args, maxiter = self._maxiter)
            print(output[counter])
            if "Warning" in output[counter]:
                if counter == retry:
                    print("Optimisation has failed %d times. Aborting training!" % (counter+1))
                print("Optimisation failed on attempt number %d!" % (counter+1))
                counter += 1
                continue
            self._trainedParams = params
            break
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self, retry=3):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        try:
            targets = self._indicatorFunction
        except AttributeError:
            targets = self._targets

        # Attempt to handle the case where the optimization raises the following warning:
        #        Warning: Desired accuracy not necessarily achieved due to precision loss.
        #
        # This seems to mean that the function was unable to calculate a gradient (?), which could mean we
        # have randomly initialised the parameters in a bad (flat) region of the parameter space. This
        # seems to be supported by the fact that it usually happens after 1 iteration. Try to solve
        # it by catching the warning and allowing the network to retry with a new randomly initialised set
        # of parameters a number of times.
        # The warning is just a print statement, not a real warning, so we have to capture the printed
        # output, search it for the string "Warning", and branch on that.

        args = (self._input, targets)
        counter = 0
        output = [] # list to pass to Capturing to store print statements
        while counter <= retry:
            if counter > 0:
                print()
                print("Training %s (Attempt: %d)..." % (self.search(), counter+1))
                print()
            initialParams = self.initialise()
            with Capturing(output) as output:
                params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                              args=args, maxiter = self._maxiter)
            print(output[counter])
            if "Warning" in output[counter]:
                if counter == retry:
                    print("Optimisation has failed %d times. Aborting training!" % (counter+1))
                print("Optimisation failed on attempt number %d!" % (counter+1))
                counter += 1
                continue
            self._trainedParams = params
            break
Project: hco-experiments    Author: zooniverse    | project source | file source
def train(self, retry=3):
        """
            train the network
        """
        from scipy import optimize

        def costFunction(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return cost

        def costFunctionGradient(params, *args):
            input, targets = args
            cost, grad = self.costFunction(params, input, targets)
            return grad

        print()
        print("Training %s..." % self.search())
        print()

        try:
            targets = self._indicatorFunction
        except AttributeError:
            targets = self._targets

        # Attempt to handle the case where the optimization raises the following warning:
        #        Warning: Desired accuracy not necessarily achieved due to precision loss.
        #
        # This seems to mean that the function was unable to calculate a gradient (?), which could mean we
        # have randomly initialised the parameters in a bad (flat) region of the parameter space. This
        # seems to be supported by the fact that it usually happens after 1 iteration. Try to solve
        # it by catching the warning and allowing the network to retry with a new randomly initialised set
        # of parameters a number of times.
        # The warning is just a print statement, not a real warning, so we have to capture the printed
        # output, search it for the string "Warning", and branch on that.

        args = (self._input, targets)
        counter = 0
        output = [] # list to pass to Capturing to store print statements
        while counter <= retry:
            if counter > 0:
                print()
                print("Training %s (Attempt: %d)..." % (self.search(), counter+1))
                print()
            initialParams = self.initialise()
            with Capturing(output) as output:
                params = optimize.fmin_cg(costFunction, x0=initialParams, fprime=costFunctionGradient, \
                                              args=args, maxiter = self._maxiter)
            print(output[counter])
            if "Warning" in output[counter]:
                if counter == retry:
                    print("Optimisation has failed %d times. Aborting training!" % (counter+1))
                print("Optimisation failed on attempt number %d!" % (counter+1))
                counter += 1
                continue
            self._trainedParams = params
            break