import warnings

from numpy import (atleast_1d, dot, take, triu, shape, eye,
                   transpose, zeros, prod, product, greater, array,
                   all, where, isscalar, asarray, inf, abs)

from scipy.optimize import _minpack
# Exception type raised by the MINPACK extension wrappers on bad input.
error = _minpack.error

# Public API of this module.
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
                output_shape=None):
    """Call `thefunc` once and verify the shape of its output.

    Evaluates ``thefunc(x0[:numinputs], *args)`` and, when `output_shape`
    is given, checks that the result's shape matches it (a leading or
    trailing dimension of 1 is tolerated).

    Parameters
    ----------
    checker : str
        Name of the calling routine (e.g. 'fsolve'), used in messages.
    argname : str
        Name of the argument being checked (e.g. 'func'), used in messages.
    thefunc : callable
        The user-supplied function to probe.
    x0 : ndarray
        Starting point; only the first `numinputs` entries are passed.
    args : tuple
        Extra arguments forwarded to `thefunc`.
    numinputs : int
        Number of leading elements of `x0` to pass to `thefunc`.
    output_shape : tuple, optional
        Expected shape of the output; no check is made when None.

    Returns
    -------
    tuple
        The shape of `thefunc`'s output (at least 1-d).

    Raises
    ------
    TypeError
        If the output shape is incompatible with `output_shape`.
    """
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
    if (output_shape is not None) and (shape(res) != output_shape):
        if output_shape[0] != 1:
            if len(output_shape) > 1:
                if output_shape[1] == 1:
                    # A trailing singleton dimension is acceptable.
                    return shape(res)
            msg = "%s: there is a mismatch between the input and output " \
                  "shape of the '%s' argument" % (checker, argname)
            # 'func_name' was the Python 2 attribute; '__name__' is the
            # Python 3 equivalent and exists on plain functions.
            func_name = getattr(thefunc, '__name__', None)
            if func_name:
                msg += " '%s'." % func_name
            else:
                msg += "."
            raise TypeError(msg)
    return shape(res)
def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=0.0, factor=100, diag=None):
    """Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable f(x, *args)
        A function that takes at least one (possibly vector) argument.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple
        Any extra arguments to `func`.
    fprime : callable(x), optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys::

          * 'nfev': number of function calls
          * 'njev': number of Jacobian calls
          * 'fvec': function evaluated at the output
          * 'fjac': the orthogonal matrix, q, produced by the QR
                    factorization of the final approximate Jacobian
                    matrix, stored column wise
          * 'r': upper triangular matrix produced by QR factorization of same
                 matrix
          * 'qtf': the vector (transpose(q) * fvec)

    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    _check_func('fsolve', 'func', func, x0, args, n, (n,))
    Dfun = fprime
    if Dfun is None:
        # No analytic Jacobian: use hybrd with a forward-difference
        # approximation; -10 tells MINPACK the band is the full matrix.
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, full_output, xtol,
                                 maxfev, ml, mu, epsfcn, factor, diag)
    else:
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, full_output,
                                 col_deriv, xtol, maxfev, factor, diag)

    # Map MINPACK's integer status to (message, exception-or-None).
    errors = {0: ["Improper input parameters were entered.", TypeError],
              1: ["The solution converged.", None],
              2: ["The number of calls to function has "
                  "reached maxfev = %d." % maxfev, ValueError],
              3: ["xtol=%f is too small, no further improvement "
                  "in the approximate\n solution "
                  "is possible." % xtol, ValueError],
              4: ["The iteration is not making good progress, as measured "
                  "by the \n improvement from the last five "
                  "Jacobian evaluations.", ValueError],
              5: ["The iteration is not making good progress, "
                  "as measured by the \n improvement from the last "
                  "ten iterations.", ValueError],
              'unknown': ["An error occurred.", TypeError]}

    info = retval[-1]    # The FORTRAN return value
    if info != 1 and not full_output:
        # Non-fatal stagnation codes warn; everything else raises.
        if info in [2, 3, 4, 5]:
            msg = errors[info][0]
            warnings.warn(msg, RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    if full_output:
        try:
            return retval + (errors[info][0],)  # Return all + the message
        except KeyError:
            return retval + (errors['unknown'][0],)
    else:
        return retval[0]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None):
    """Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1)
        is the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution. ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction). This matrix must be multiplied by the
        residual standard deviation to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys::

            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                       factorization of the final approximate
                       Jacobian matrix, stored column wise.
                       Together with ipvt, the covariance of the
                       estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                       a permutation matrix, p, such that
                       fjac*p = q*r, where r is upper triangular
                       with diagonal elements of nonincreasing
                       magnitude. Column j of p is column ipvt(j)
                       of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

        func(params) = ydata - f(xdata, params)

    so that the objective function is ::

        min sum((ydata - f(xdata, params))**2, axis=0)
             params
    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    m = _check_func('leastsq', 'func', func, x0, args, n)[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        # Expected Jacobian shape depends on the storage orientation.
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    # Map MINPACK's integer status to (message, exception-or-None);
    # codes 1-4 are success, 5-8 are tolerable stagnation.
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]

    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            # numpy.dual was removed from NumPy; import the linear-algebra
            # routines directly from numpy.linalg instead.
            from numpy.linalg import inv, LinAlgError
            # Undo MINPACK's column pivoting to recover R, then form
            # (R^T R)^-1 as the covariance scaling matrix.
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                # Singular: very flat curvature in some direction.
                cov_x = None
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
def _general_function(params, xdata, ydata, function):
    """Residual ``function(xdata, *params) - ydata`` used by `curve_fit`."""
    return function(xdata, *params) - ydata
def _weighted_general_function(params, xdata, ydata, function, weights):
    """Weighted residual ``weights * (function(xdata, *params) - ydata)``
    used by `curve_fit` when `sigma` is given."""
    return weights * (function(xdata, *params) - ydata)
def curve_fit(f, xdata, ydata, p0=None, sigma=None, **kw):
    """Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An N-length sequence or an (k,N)-shaped array
        for functions with k predictors.
        The independent variable where the data is measured.
    ydata : N-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or M-length sequence
        Initial guess for the parameters. If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or N-length sequence
        If not None, it represents the standard-deviation of ydata.
        This vector, if given, will be used as weights in the
        least-squares problem.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared error
        of ``f(xdata, *popt) - ydata`` is minimized
    pcov : 2d array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate.

    Notes
    -----
    The algorithm uses the Levenburg-Marquardt algorithm through `leastsq`.
    Additional keyword arguments are passed directly to that algorithm.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import curve_fit
    >>> def func(x, a, b, c):
    ...     return a*np.exp(-b*x) + c

    >>> x = np.linspace(0,4,50)
    >>> y = func(x, 2.5, 1.3, 0.5)
    >>> yn = y + 0.2*np.random.normal(size=len(x))

    >>> popt, pcov = curve_fit(func, x, yn)
    """
    if p0 is None:
        # Determine the number of parameters by inspecting the function.
        # inspect.getargspec was removed in Python 3.11; getfullargspec
        # provides the same leading fields.
        import inspect
        args, varargs, varkw, defaults = inspect.getfullargspec(f)[:4]
        if len(args) < 2:
            msg = "Unable to determine number of fit parameters."
            raise ValueError(msg)
        if 'self' in args:
            # Bound method: skip both 'self' and the independent variable.
            p0 = [1.0] * (len(args) - 2)
        else:
            p0 = [1.0] * (len(args) - 1)

    if isscalar(p0):
        p0 = array([p0])

    args = (xdata, ydata, f)
    if sigma is None:
        func = _general_function
    else:
        func = _weighted_general_function
        args += (1.0 / asarray(sigma),)

    return_full = kw.pop('full_output', False)
    res = leastsq(func, p0, args=args, full_output=1, **kw)
    (popt, pcov, infodict, errmsg, ier) = res

    if ier not in [1, 2, 3, 4]:
        msg = "Optimal parameters not found: " + errmsg
        raise RuntimeError(msg)

    if (len(ydata) > len(p0)) and pcov is not None:
        # Scale the covariance by the residual variance to get the
        # covariance of the parameter estimates.
        s_sq = (func(popt, *args) ** 2).sum() / (len(ydata) - len(p0))
        pcov = pcov * s_sq
    else:
        pcov = inf

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.

    Compares the Jacobian returned by `Dfcn` against a finite-difference
    probe using MINPACK's ``chkder`` routine.

    Parameters
    ----------
    fcn : callable
        Function of a length-n vector returning a length-m vector.
    Dfcn : callable
        Function returning the (m, n) Jacobian of `fcn`.
    x0 : array_like
        Point at which to check the gradient.
    args : tuple, optional
        Extra arguments passed to `fcn` and `Dfcn`.
    col_deriv : int, optional
        Non-zero if `Dfcn` computes derivatives down the columns
        (i.e. returns the transposed Jacobian).

    Returns
    -------
    good : int
        1 if every component passed the check, 0 otherwise.
    err : ndarray
        Per-component correctness measure from ``chkder`` (values above
        0.5 count as correct).
    """
    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        # MINPACK expects the Jacobian stored column-wise.
        fjac = transpose(fjac)

    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    # Mode 1: compute the perturbed point xp.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)

    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    # Mode 2: fill err with the per-component gradient check.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)

    # numpy.product was removed in NumPy 2.0; prod is the equivalent.
    good = prod(greater(err, 0.5), axis=0)

    return (good, err)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500):
    """Find the point where func(x) == x.

    Given a function of one or more variables and a starting point, find a
    fixed-point of the function: i.e. where func(x)=x.

    Uses Steffensen's Method using Aitken's Del^2 convergence acceleration.
    See Burden, Faires, "Numerical Analysis", 5th edition, pg. 80.

    Parameters
    ----------
    func : callable
        Function to find the fixed point of.
    x0 : scalar or array_like
        Starting estimate.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Relative-error convergence tolerance.
    maxiter : int, optional
        Maximum number of iterations.

    Raises
    ------
    RuntimeError
        If convergence is not achieved within `maxiter` iterations.

    Examples
    --------
    >>> from numpy import sqrt, array
    >>> from scipy.optimize import fixed_point
    >>> def func(x, c1, c2):
    ...     return sqrt(c1/(x+c2))
    >>> c1 = array([10,12.])
    >>> c2 = array([3, 5.])
    >>> fixed_point(func, [1.2, 1.3], args=(c1,c2))
    array([ 1.4920333 ,  1.37228132])
    """
    if not isscalar(x0):
        # Vector case: accelerate all components at once; where() guards
        # the Aitken update against division by zero.
        p0 = asarray(x0)
        for _ in range(maxiter):
            p1 = func(p0, *args)
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            p = where(d == 0, p2, p0 - (p1 - p0) * (p1 - p0) / d)
            relerr = where(p0 == 0, p, (p - p0) / p0)
            if all(abs(relerr) < xtol):
                return p
            p0 = p
    else:
        p0 = x0
        for _ in range(maxiter):
            p1 = func(p0, *args)
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            if d == 0.0:
                # Aitken denominator vanished; p2 is the best estimate.
                return p2
            p = p0 - (p1 - p0) * (p1 - p0) / d
            relerr = p if p0 == 0 else (p - p0) / p0
            if abs(relerr) < xtol:
                return p
            p0 = p
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)