summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPicca Frédéric-Emmanuel <picca@debian.org>2017-08-18 07:57:35 +0200
committerPicca Frédéric-Emmanuel <picca@debian.org>2017-08-18 07:57:35 +0200
commit3687bde755d68caba93ada43bd5565dad916b88e (patch)
tree1f1b4f6533b7b299bc7f195a62e16c7ff4b4581b
parente1a98b942a00c8f8cc663535ca1e1c050a922ce3 (diff)
New upstream version 0.9.7+dfsg
-rw-r--r--PKG-INFO2
-rw-r--r--README.rst87
-rw-r--r--THANKS.txt64
-rw-r--r--doc/.DS_Storebin0 -> 8196 bytes
-rw-r--r--doc/__pycache__/extensions.cpython-35.pycbin0 -> 358 bytes
-rw-r--r--doc/__pycache__/extensions.cpython-36.pycbin0 -> 314 bytes
-rw-r--r--doc/_images/model_fit4.pngbin0 -> 39784 bytes
-rw-r--r--doc/_templates/indexsidebar.html12
-rw-r--r--doc/bounds.rst6
-rw-r--r--doc/builtin_models.rst680
-rw-r--r--doc/conf.py62
-rw-r--r--doc/confidence.rst38
-rw-r--r--doc/constraints.rst32
-rw-r--r--doc/extensions.py9
-rw-r--r--doc/extensions.pycbin398 -> 354 bytes
-rw-r--r--doc/faq.rst23
-rw-r--r--doc/fitting.rst480
-rw-r--r--doc/index.rst39
-rw-r--r--doc/installation.rst51
-rw-r--r--doc/intro.rst90
-rw-r--r--doc/model.rst832
-rw-r--r--doc/parameters.rst214
-rw-r--r--doc/sphinx/ext_mathjax.py9
-rw-r--r--doc/sphinx/ext_pngmath.py9
-rw-r--r--doc/sphinx/theme/lmfitdoc/layout.html2
-rw-r--r--doc/support.rst6
-rw-r--r--doc/testlinks.py9
-rw-r--r--doc/testlinks.py.~1~0
-rw-r--r--doc/whatsnew.rst52
-rw-r--r--lmfit.egg-info/PKG-INFO33
-rw-r--r--lmfit.egg-info/SOURCES.txt148
-rw-r--r--lmfit.egg-info/dependency_links.txt1
-rw-r--r--lmfit.egg-info/requires.txt3
-rw-r--r--lmfit.egg-info/top_level.txt1
-rw-r--r--lmfit/__init__.py76
-rw-r--r--lmfit/_differentialevolution.py750
-rw-r--r--lmfit/_version.py20
-rw-r--r--lmfit/asteval.py195
-rw-r--r--lmfit/astutils.py39
-rw-r--r--lmfit/confidence.py177
-rw-r--r--lmfit/lineshapes.py250
-rw-r--r--lmfit/minimizer.py1129
-rw-r--r--lmfit/model.py1054
-rw-r--r--lmfit/models.py826
-rw-r--r--lmfit/ordereddict.py128
-rw-r--r--lmfit/parameter.py594
-rw-r--r--lmfit/printfuncs.py115
-rw-r--r--lmfit/ui/basefitter.py5
-rw-r--r--lmfit/ui/ipy_fitter.py8
-rw-r--r--lmfit/uncertainties/__init__.py6
-rw-r--r--lmfit/uncertainties/umath.py7
-rw-r--r--requirements.txt5
-rw-r--r--setup.cfg13
-rw-r--r--setup.py20
-rw-r--r--tests/test_NIST_Strd.py2
-rw-r--r--tests/test_algebraic_constraint2.py2
-rw-r--r--tests/test_brute_method.py235
-rw-r--r--tests/test_confidence.py4
-rw-r--r--tests/test_minimizer.py20
-rw-r--r--tests/test_model.py79
-rw-r--r--tests/test_model_uncertainties.py97
-rw-r--r--tests/test_nose.py98
-rw-r--r--tests/test_parameters.py56
-rw-r--r--tests/test_params_set.py141
-rw-r--r--versioneer.py1901
65 files changed, 6431 insertions, 4615 deletions
diff --git a/PKG-INFO b/PKG-INFO
index ad03cb0..e5b9d4f 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: lmfit
-Version: 0.9.5
+Version: 0.9.7
Summary: Least-Squares Minimization with Bounds and Constraints
Home-page: http://lmfit.github.io/lmfit-py/
Author: LMFit Development Team
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..e35e80c
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,87 @@
+LMfit-py
+========
+
+.. image:: https://travis-ci.org/lmfit/lmfit-py.png
+ :target: https://travis-ci.org/lmfit/lmfit-py
+
+.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg
+ :target: https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py
+
+
+Overview
+---------
+
+LMfit-py provides a Least-Squares Minimization routine and class with a
+simple, flexible approach to parameterizing a model for fitting to data.
+
+LMfit is a pure python package, and so easy to install from source or with
+``pip install lmfit``.
+
+For questions, comments, and suggestions, please use the LMfit mailing
+list, https://groups.google.com/group/lmfit-py. Using the bug tracking
+software in GitHub Issues is encouraged for known problems and bug reports.
+Please read `Contributing.md <.github/CONTRIBUTING.md>`_ before creating an Issue.
+
+
+Parameters and Fitting
+-------------------------
+
+LMfit-py provides a Least-Squares Minimization routine and class
+with a simple, flexible approach to parameterizing a model for
+fitting to data. Named Parameters can be held fixed or freely
+adjusted in the fit, or held between lower and upper bounds. In
+addition, parameters can be constrained as a simple mathematical
+expression of other Parameters.
+
+To do this, the programmer defines a Parameters object, an enhanced
+dictionary, containing named parameters::
+
+ fit_params = Parameters()
+ fit_params['amp'] = Parameter(value=1.2, min=0.1, max=1000)
+ fit_params['cen'] = Parameter(value=40.0, vary=False)
+ fit_params['wid'] = Parameter(value=4, min=0)
+
+or using the equivalent::
+
+ fit_params = Parameters()
+ fit_params.add('amp', value=1.2, min=0.1, max=1000)
+ fit_params.add('cen', value=40.0, vary=False)
+ fit_params.add('wid', value=4, min=0)
+
+The programmer will also write a function to be minimized (in the
+least-squares sense) with its first argument being this Parameters object,
+and additional positional and keyword arguments as desired::
+
+ def myfunc(params, x, data, someflag=True):
+ amp = params['amp'].value
+ cen = params['cen'].value
+ wid = params['wid'].value
+ ...
+ return residual_array
+
+For each call of this function, the values for the params may have changed,
+subject to the bounds and constraint settings for each Parameter. The function
+should return the residual (ie, data-model) array to be minimized.
+
+The advantage here is that the function to be minimized does not have to be
+changed if different bounds or constraints are placed on the fitting
+Parameters. The fitting model (as described in myfunc) is instead written
+in terms of physical parameters of the system, and remains
+independent of what is actually varied in the fit. In addition, which
+parameters are adjusted and which are fixed happens at run-time, so that
+changing what is varied and what constraints are placed on the parameters
+can easily be modified by the consumer in real-time data analysis.
+
+To perform the fit, the user calls::
+
+ result = minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
+
+After the fit, each real variable in the ``fit_params`` dictionary is updated
+to have best-fit values, estimated standard deviations, and correlations
+with other variables in the fit, while the results dictionary holds fit
+statistics and information.
+
+By default, the underlying fit algorithm is the Levenberg-Marquardt
+algorithm with numerically-calculated derivatives from MINPACK's lmdif
+function, as used by ``scipy.optimize.leastsq``. Other solvers (Nelder-Mead,
+etc) are also available, though slightly less well-tested and supported.
diff --git a/THANKS.txt b/THANKS.txt
index b208054..a1e3bb0 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -1,24 +1,40 @@
-Many people have contributed to lmfit.
-
-Matthew Newville wrote the original version and maintains the project.
-Till Stensitzki wrote the improved estimates of confidence intervals, and
- contributed many tests, bug fixes, and documentation.
-Daniel B. Allan wrote much of the high level Model code, and many
- improvements to the testing and documentation.
-Antonino Ingargiola wrote much of the high level Model code and provided
- many bug fixes.
-J. J. Helmus wrote the MINUT bounds for leastsq, originally in
- leastsqbounds.py, and ported to lmfit.
-E. O. Le Bigot wrote the uncertainties package, a version of which is used
- by lmfit.
-Michal Rawlik added plotting capabilities for Models.
-A. R. J. Nelson added differential_evolution, emcee, and greatly improved the
- code in the docstrings.
-
-Additional patches, bug fixes, and suggestions have come from Christoph
- Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau, nmearl,
- Gustavo Pasquevich, Clemens Prescher, LiCode, and Ben Gamari.
-
-The lmfit code obviously depends on, and owes a very large debt to the code
-in scipy.optimize. Several discussions on the scipy-user and lmfit mailing
-lists have also led to improvements in this code.
+Many people have contributed to lmfit. The attribution of credit in a project such as
+this is very difficult to get perfect, and there are no doubt important contributions
+missing or under-represented here. Please consider this file as part of the documentation
+that may have bugs that need fixing.
+
+Some of the largest and most important contributions (approximately in order of
+contribution in size to the existing code) are from:
+
+ Matthew Newville wrote the original version and maintains the project.
+
+ Till Stensitzki wrote the improved estimates of confidence intervals, and contributed
+ many tests, bug fixes, and documentation.
+
+ A. R. J. Nelson added differential_evolution, emcee, and greatly improved the code,
+ docstrings, and overall project.
+
+ Daniel B. Allan wrote much of the high level Model code, and many improvements to the
+ testing and documentation.
+
+ Antonino Ingargiola wrote much of the high level Model code and has provided many bug
+ fixes and improvements.
+
+ Renee Otten wrote the brute force method, and has improved the code and documentation
+ in many places.
+
+ Michal Rawlik added plotting capabilities for Models.
+
+  J. J. Helmus wrote the MINUIT bounds for leastsq, originally in leastsqbounds.py, and
+ ported to lmfit.
+
+ E. O. Le Bigot wrote the uncertainties package, a version of which is used by lmfit.
+
+
+Additional patches, bug fixes, and suggestions have come from Christoph Deil, Francois
+Boulogne, Thomas Caswell, Colin Brosseau, nmearl, Gustavo Pasquevich, Clemens Prescher,
+LiCode, Ben Gamari, Yoav Roam, Alexander Stark, Alexandre Beelen, and many others.
+
+The lmfit code obviously depends on, and owes a very large debt to the code in
+scipy.optimize. Several discussions on the scipy-user and lmfit mailing lists have also
+led to improvements in this code.
diff --git a/doc/.DS_Store b/doc/.DS_Store
new file mode 100644
index 0000000..e6bd8bf
--- /dev/null
+++ b/doc/.DS_Store
Binary files differ
diff --git a/doc/__pycache__/extensions.cpython-35.pyc b/doc/__pycache__/extensions.cpython-35.pyc
new file mode 100644
index 0000000..e4812dd
--- /dev/null
+++ b/doc/__pycache__/extensions.cpython-35.pyc
Binary files differ
diff --git a/doc/__pycache__/extensions.cpython-36.pyc b/doc/__pycache__/extensions.cpython-36.pyc
new file mode 100644
index 0000000..c1e8ebd
--- /dev/null
+++ b/doc/__pycache__/extensions.cpython-36.pyc
Binary files differ
diff --git a/doc/_images/model_fit4.png b/doc/_images/model_fit4.png
new file mode 100644
index 0000000..b045f89
--- /dev/null
+++ b/doc/_images/model_fit4.png
Binary files differ
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index ceb1a92..1979506 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -1,10 +1,8 @@
<h3>Getting LMFIT</h3>
<p>Current version: <b>{{ release }}</b></p>
-<p>Download: &nbsp; <a href="http://pypi.python.org/pypi/lmfit/">PyPI (Python.org)</a>
<p>Install: &nbsp; <tt>pip install lmfit</tt>
-<p>
-<p>Development version: <br>
-&nbsp; &nbsp; <a href="https://github.com/lmfit/lmfit-py/">github.com</a> <br>
+<p>Download: &nbsp; <a href="http://pypi.python.org/pypi/lmfit/">PyPI</a>
+<p>Develop: &nbsp; <a href="https://github.com/lmfit/lmfit-py/">GitHub</a> <br>
<h3>Questions?</h3>
@@ -12,13 +10,11 @@
&nbsp; <a href="https://groups.google.com/group/lmfit-py">Mailing List</a><br>
&nbsp; <a href="support.html">Getting Help</a><br>
-<h3>Off-line Documentation</h3>
+<h3>Static, off-line docs</h3>
[<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.pdf">PDF</a>
|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.epub">EPUB</a>
-|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit_doc.zip">HTML(zip)</a>
-]
-
+|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit_doc.zip">HTML(zip)</a>]
<hr>
<p>
diff --git a/doc/bounds.rst b/doc/bounds.rst
index 40f8390..1840152 100644
--- a/doc/bounds.rst
+++ b/doc/bounds.rst
@@ -23,14 +23,14 @@ for `MINUIT`_. This is implemented following (and borrowing heavily from)
the `leastsqbound`_ from J. J. Helmus. Parameter values are mapped from
internally used, freely variable values :math:`P_{\rm internal}` to bounded
parameters :math:`P_{\rm bounded}`. When both ``min`` and ``max`` bounds
-are specified, the mapping is
+are specified, the mapping is:
.. math::
:nowrap:
\begin{eqnarray*}
P_{\rm internal} &=& \arcsin\big(\frac{2 (P_{\rm bounded} - {\rm min})}{({\rm max} - {\rm min})} - 1\big) \\
- P_{\rm bounded} &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2}
+ P_{\rm bounded} &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2}
\end{eqnarray*}
With only an upper limit ``max`` supplied, but ``min`` left unbounded, the
@@ -59,7 +59,7 @@ With these mappings, the value for the bounded Parameter cannot exceed the
specified bounds, though the internally varied value can be freely varied.
It bears repeating that code from `leastsqbound`_ was adopted to implement
-the transformation described above. The challenging part (Thanks again to
+the transformation described above. The challenging part (thanks again to
Jonathan J. Helmus!) here is to re-transform the covariance matrix so that
the uncertainties can be estimated for bounded Parameters. This is
included by using the derivate :math:`dP_{\rm internal}/dP_{\rm bounded}`
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index 7c1e06b..06c79bb 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -11,7 +11,7 @@ These pre-defined models each subclass from the :class:`model.Model` class of th
previous chapter and wrap relatively well-known functional forms, such as
Gaussians, Lorentzian, and Exponentials that are used in a wide range of
scientific domains. In fact, all the models are all based on simple, plain
-python functions defined in the :mod:`lineshapes` module. In addition to
+Python functions defined in the :mod:`lineshapes` module. In addition to
wrapping a function into a :class:`model.Model`, these models also provide a
:meth:`guess` method that is intended to give a reasonable
set of starting values from a data array that closely approximates the
@@ -19,10 +19,10 @@ data to be fit.
As shown in the previous chapter, a key feature of the :class:`mode.Model` class
is that models can easily be combined to give a composite
-:class:`model.Model`. Thus while some of the models listed here may seem pretty
-trivial (notably, :class:`ConstantModel` and :class:`LinearModel`), the
-main point of having these is to be able to used in composite models. For
-example, a Lorentzian plus a linear background might be represented as::
+:class:`model.CompositeModel`. Thus, while some of the models listed here may
+seem pretty trivial (notably, :class:`ConstantModel` and :class:`LinearModel`),
+the main point of having these is to be able to use them in composite models. For
+example, a Lorentzian plus a linear background might be represented as::
>>> from lmfit.models import LinearModel, LorentzianModel
>>> peak = LorentzianModel()
@@ -55,263 +55,82 @@ methods for all of these make a fairly crude guess for the value of
:class:`GaussianModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: GaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Gaussian or normal distribution lineshape
-<http://en.wikipedia.org/wiki/Normal_distribution>`_. Parameter names:
-``amplitude``, ``center``, and ``sigma``.
-In addition, parameters ``fwhm`` and ``height`` are included as constraints
-to report full width at half maximum and maximum peak height, respectively.
-
-.. math::
-
- f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}
-
-where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`. The full width at
-half maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
-:math:`2.3548\sigma`
-
+.. autoclass:: GaussianModel
:class:`LorentzianModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: LorentzianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Lorentzian or Cauchy-Lorentz distribution function
-<http://en.wikipedia.org/wiki/Cauchy_distribution>`_. Parameter names:
-``amplitude``, ``center``, and ``sigma``.
-In addition, parameters ``fwhm`` and ``height`` are included as constraints
-to report full width at half maximum and maximum peak height, respectively.
-
-.. math::
-
- f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
-
-where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`. The full width at
-half maximum is :math:`2\sigma`.
+.. autoclass:: LorentzianModel
:class:`VoigtModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: VoigtModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Voigt distribution function
-<http://en.wikipedia.org/wiki/Voigt_profile>`_. Parameter names:
-``amplitude``, ``center``, and ``sigma``. A ``gamma`` parameter is also
-available. By default, it is constrained to have value equal to ``sigma``,
-though this can be varied independently. In addition, parameters ``fwhm``
-and ``height`` are included as constraints to report full width at half
-maximum and maximum peak height, respectively. The definition for the
-Voigt function used here is
-
-.. math::
-
- f(x; A, \mu, \sigma, \gamma) = \frac{A \textrm{Re}[w(z)]}{\sigma\sqrt{2 \pi}}
-
-where
-
-.. math::
- :nowrap:
-
- \begin{eqnarray*}
- z &=& \frac{x-\mu +i\gamma}{\sigma\sqrt{2}} \\
- w(z) &=& e^{-z^2}{\operatorname{erfc}}(-iz)
- \end{eqnarray*}
-
-and :func:`erfc` is the complimentary error function. As above,
-``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`. The parameter ``gamma``
-corresponds to :math:`\gamma`.
-If ``gamma`` is kept at the default value (constrained to ``sigma``),
-the full width at half maximum is approximately :math:`3.6013\sigma`.
+.. autoclass:: VoigtModel
:class:`PseudoVoigtModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: PseudoVoigtModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-a model based on a `pseudo-Voigt distribution function
-<http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation>`_,
-which is a weighted sum of a Gaussian and Lorentzian distribution functions
-with that share values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
-and full width at half maximum (and so have constrained values of
-``sigma`` (:math:`\sigma`). A parameter ``fraction`` (:math:`\alpha`)
-controls the relative weight of the Gaussian and Lorentzian components,
-giving the full definition of
-
-.. math::
-
- f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma_g\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma_g}^2}}]}
- + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
-
-where :math:`\sigma_g = {\sigma}/{\sqrt{2\ln{2}}}` so that the full width
-at half maximum of each component and of the sum is :math:`2\sigma`. The
-:meth:`guess` function always sets the starting value for ``fraction`` at 0.5.
+.. autoclass:: PseudoVoigtModel
:class:`MoffatModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: MoffatModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-a model based on a `Moffat distribution function
-<https://en.wikipedia.org/wiki/Moffat_distribution>`_, the parameters are
-``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
-a width parameter ``sigma`` (:math:`\sigma`) and an exponent ``beta`` (:math:`\beta`).
-For (:math:`\beta=1`) the Moffat has a Lorentzian shape.
-
-.. math::
-
- f(x; A, \mu, \sigma, \beta) = A \big[(\frac{x-\mu}{\sigma})^2+1\big]^{-\beta}
-
-the full width have maximum is :math:`2\sigma\sqrt{2^{1/\beta}-1}`.
-:meth:`guess` function always sets the starting value for ``beta`` to 1.
+.. autoclass:: MoffatModel
:class:`Pearson7Model`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: Pearson7Model(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Pearson VII distribution
-<http://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution>`_.
-This is a Lorenztian-like distribution function. It has the usual
-parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also an ``exponent`` (:math:`m`) in
-
-.. math::
-
- f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2} \bigr]^{-m}
-
-where :math:`\beta` is the beta function (see :scipydoc:`special.beta` in
-:mod:`scipy.special`). The :meth:`guess` function always
-gives a starting value for ``exponent`` of 1.5.
+.. autoclass:: Pearson7Model
:class:`StudentsTModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: StudentsTModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Student's t distribution function
-<http://en.wikipedia.org/wiki/Student%27s_t-distribution>`_, with the usual
-parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`) in
-
-.. math::
-
- f(x; A, \mu, \sigma) = \frac{A \Gamma(\frac{\sigma+1}{2})} {\sqrt{\sigma\pi}\,\Gamma(\frac{\sigma}{2})} \Bigl[1+\frac{(x-\mu)^2}{\sigma}\Bigr]^{-\frac{\sigma+1}{2}}
-
-
-where :math:`\Gamma(x)` is the gamma function.
+.. autoclass:: StudentsTModel
:class:`BreitWignerModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: BreitWignerModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Breit-Wigner-Fano function
-<http://en.wikipedia.org/wiki/Fano_resonance>`_. It has the usual
-parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), plus ``q`` (:math:`q`) in
-
-.. math::
-
- f(x; A, \mu, \sigma, q) = \frac{A (q\sigma/2 + x - \mu)^2}{(\sigma/2)^2 + (x - \mu)^2}
+.. autoclass:: BreitWignerModel
:class:`LognormalModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: LognormalModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on the `Log-normal distribution function
-<http://en.wikipedia.org/wiki/Lognormal>`_.
-It has the usual parameters
-``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma``
-(:math:`\sigma`) in
-
-.. math::
-
- f(x; A, \mu, \sigma) = \frac{A e^{-(\ln(x) - \mu)/ 2\sigma^2}}{x}
+.. autoclass:: LognormalModel
:class:`DampedOcsillatorModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: DampedOcsillatorModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on the `Damped Harmonic Oscillator Amplitude
-<http://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part>`_.
-It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`) in
+.. autoclass:: DampedOscillatorModel
-.. math::
+:class:`DampedHarmonicOscillatorModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- f(x; A, \mu, \sigma) = \frac{A}{\sqrt{ [1 - (x/\mu)^2]^2 + (2\sigma x/\mu)^2}}
+.. autoclass:: DampedHarmonicOscillatorModel
:class:`ExponentialGaussianModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: ExponentialGaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model of an `Exponentially modified Gaussian distribution
-<http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_.
-It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
-
-.. math::
+.. autoclass:: ExponentialGaussianModel
- f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
- \exp\bigl[\gamma({\mu - x + \gamma\sigma^2/2})\bigr]
- {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
-
-
-where :func:`erfc` is the complimentary error function.
:class:`SkewedGaussianModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: SkewedGaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A variation of the above model, this is a `Skewed normal distribution
-<http://en.wikipedia.org/wiki/Skew_normal_distribution>`_.
-It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
-
-.. math::
-
- f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
- e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
- {\operatorname{erf}}\bigl[
- \frac{\gamma(x-\mu)}{\sigma\sqrt{2}}
- \bigr] \Bigr\}
-
-
-where :func:`erf` is the error function.
+.. autoclass:: SkewedGaussianModel
:class:`DonaichModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: DonaichModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model of an `Doniach Sunjic asymmetric lineshape
-<http://www.casaxps.com/help_manual/line_shapes.htm>`_, used in
-photo-emission. With the usual parameters ``amplitude`` (:math:`A`),
-``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`), and also ``gamma``
-(:math:`\gamma`) in
-
-.. math::
-
- f(x; A, \mu, \sigma, \gamma) = A\frac{\cos\bigl[\pi\gamma/2 + (1-\gamma)
- \arctan{(x - \mu)}/\sigma\bigr]} {\bigr[1 + (x-\mu)/\sigma\bigl]^{(1-\gamma)/2}}
-
+.. autoclass:: DonaichModel
Linear and Polynomial Models
------------------------------------
@@ -324,67 +143,22 @@ of many components of composite model.
:class:`ConstantModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: ConstantModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
- a class that consists of a single value, ``c``. This is constant in the
- sense of having no dependence on the independent variable ``x``, not in
- the sense of being non-varying. To be clear, ``c`` will be a variable
- Parameter.
+.. autoclass:: ConstantModel
:class:`LinearModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: LinearModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
- a class that gives a linear model:
-
-.. math::
-
- f(x; m, b) = m x + b
-
-with parameters ``slope`` for :math:`m` and ``intercept`` for :math:`b`.
-
+.. autoclass:: LinearModel
:class:`QuadraticModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: QuadraticModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-
- a class that gives a quadratic model:
-
-.. math::
-
- f(x; a, b, c) = a x^2 + b x + c
-
-with parameters ``a``, ``b``, and ``c``.
-
-
-:class:`ParabolicModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: ParabolicModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
- same as :class:`QuadraticModel`.
-
+.. autoclass:: QuadraticModel
:class:`PolynomialModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: PolynomialModel(degree, missing=None[, prefix=''[, name=None[, **kws]]])
-
- a class that gives a polynomial model up to ``degree`` (with maximum
- value of 7).
-
-.. math::
-
- f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0, 7} c_i x^i
-
-with parameters ``c0``, ``c1``, ..., ``c7``. The supplied ``degree``
-will specify how many of these are actual variable parameters. This uses
-:numpydoc:`polyval` for its calculation of the polynomial.
-
+.. autoclass:: PolynomialModel
Step-like models
@@ -395,55 +169,13 @@ Two models represent step-like functions, and share many characteristics.
:class:`StepModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: StepModel(form='linear'[, missing=None[, prefix=''[, name=None[, **kws]]]])
-
-A model based on a Step function, with four choices for functional form.
-The step function starts with a value 0, and ends with a value of :math:`A`
-(``amplitude``), rising to :math:`A/2` at :math:`\mu` (``center``),
-with :math:`\sigma` (``sigma``) setting the characteristic width. The
-supported functional forms are ``linear`` (the default), ``atan`` or
-``arctan`` for an arc-tangent function, ``erf`` for an error function, or
-``logistic`` for a `logistic function <http://en.wikipedia.org/wiki/Logistic_function>`_.
-The forms are
-
-.. math::
- :nowrap:
-
- \begin{eqnarray*}
- & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) & = A \min{[1, \max{(0, \alpha)}]} \\
- & f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
- & f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
- & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 + e^{\alpha}} ]
- \end{eqnarray*}
+.. autoclass:: StepModel
-where :math:`\alpha = (x - \mu)/{\sigma}`.
:class:`RectangleModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: RectangleModel(form='linear'[, missing=None[, prefix=''[, name=None[, **kws]]]])
-
-A model based on a Step-up and Step-down function of the same form. The
-same choices for functional form as for :class:`StepModel` are supported,
-with ``linear`` as the default. The function starts with a value 0, and
-ends with a value of :math:`A` (``amplitude``), rising to :math:`A/2` at
-:math:`\mu_1` (``center1``), with :math:`\sigma_1` (``sigma1``) setting the
-characteristic width. It drops to rising to :math:`A/2` at :math:`\mu_2`
-(``center2``), with characteristic width :math:`\sigma_2` (``sigma2``).
-
-.. math::
- :nowrap:
-
- \begin{eqnarray*}
- &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \min{[-1, \max{(0, \alpha_2)}]} \} \\
- &f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
- &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\
- &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} ]
- \end{eqnarray*}
-
-
-where :math:`\alpha_1 = (x - \mu_1)/{\sigma_1}` and :math:`\alpha_2 = -(x - \mu_2)/{\sigma_2}`.
+.. autoclass:: RectangleModel
Exponential and Power law models
@@ -452,31 +184,12 @@ Exponential and Power law models
:class:`ExponentialModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: ExponentialModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on an `exponential decay function
-<http://en.wikipedia.org/wiki/Exponential_decay>`_. With parameters named
-``amplitude`` (:math:`A`), and ``decay`` (:math:`\tau`), this has the form:
-
-.. math::
-
- f(x; A, \tau) = A e^{-x/\tau}
-
+.. autoclass:: ExponentialModel
:class:`PowerLawModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: PowerLawModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Power Law <http://en.wikipedia.org/wiki/Power_law>`_.
-With parameters
-named ``amplitude`` (:math:`A`), and ``exponent`` (:math:`k`), this has the
-form:
-
-.. math::
-
- f(x; A, k) = A x^k
-
+.. autoclass:: PowerLawModel
User-defined Models
----------------------------
@@ -484,13 +197,13 @@ User-defined Models
.. _asteval: http://newville.github.io/asteval/
As shown in the previous chapter (:ref:`model_chapter`), it is fairly
-straightforward to build fitting models from parametrized python functions.
+straightforward to build fitting models from parametrized Python functions.
The number of model classes listed so far in the present chapter should
make it clear that this process is not too difficult. Still, it is
sometimes desirable to build models from a user-supplied function. This
may be especially true if model-building is built-in to some larger library
or application for fitting in which the user may not be able to easily
-build and use a new model from python code.
+build and use a new model from Python code.
The :class:`ExpressionModel` allows a model to be built from a
@@ -501,30 +214,18 @@ mathematical constraints as discussed in :ref:`constraints_chapter`.
:class:`ExpressionModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: ExpressionModel(expr, independent_vars=None, init_script=None, **kws)
-
- A model using the user-supplied mathematical expression, which can be nearly any valid Python expresion.
-
- :param expr: expression use to build model
- :type expr: string
- :param independent_vars: list of argument names in expression that are independent variables.
- :type independent_vars: ``None`` (default) or list of strings for independent variables.
- :param init_script: python script to run before parsing and evaluating expression.
- :type init_script: ``None`` (default) or string
-
-with other parameters passed to :class:`model.Model`, with the notable
-exception that :class:`ExpressionModel` does **not** support the `prefix` argument.
+.. autoclass:: ExpressionModel
Since the point of this model is that an arbitrary expression will be
supplied, the determination of what are the parameter names for the model
happens when the model is created. To do this, the expression is parsed,
and all symbol names are found. Names that are already known (there are
over 500 function and value names in the asteval namespace, including most
-python builtins, more than 200 functions inherited from numpy, and more
+Python builtins, more than 200 functions inherited from NumPy, and more
than 20 common lineshapes defined in the :mod:`lineshapes` module) are not
converted to parameters. Unrecognized names are expected to be names either
of parameters or independent variables. If `independent_vars` is the
-default value of ``None``, and if the expression contains a variable named
+default value of None, and if the expression contains a variable named
`x`, that will be used as the independent variable. Otherwise,
`independent_vars` must be given.
@@ -544,10 +245,9 @@ To evaluate this model, you might do the following::
>>> params = mod.make_params(off=0.25, amp=1.0, x0=2.0, phase=0.04)
>>> y = mod.eval(params, x=x)
-
While many custom models can be built with a single line expression
(especially since the names of the lineshapes like `gaussian`, `lorentzian`
-and so on, as well as many numpy functions, are available), more complex
+and so on, as well as many NumPy functions, are available), more complex
models will inevitably require multiple line functions. You can include
such Python code with the `init_script` argument. The text of this script
is evaluated when the model is initialized (and before the actual
@@ -560,16 +260,16 @@ could to define this in a script::
>>> script = """
def mycurve(x, amp, cen, sig):
- loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
- gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
- return log(loren)*gradient(gauss)/gradient(x)
+ loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
+ gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
+ return log(loren)*gradient(gauss)/gradient(x)
"""
and then use this with :class:`ExpressionModel` as::
>>> mod = ExpressionModel('mycurve(x, height, mid, wid)',
- init_script=script,
- independent_vars=['x'])
+ init_script=script,
+ independent_vars=['x'])
As above, this will interpret the parameter names to be `height`, `mid`,
and `wid`, and build a model that can be used to fit data.
@@ -582,9 +282,9 @@ Example 1: Fit Peaked data to Gaussian, Lorentzian, and Voigt profiles
Here, we will fit data to three similar line shapes, in order to decide which
might be the better model. We will start with a Gaussian profile, as in
the previous chapter, but use the built-in :class:`GaussianModel` instead
-of writing one ourselves. This is a slightly different version rom the
+of writing one ourselves. This is a slightly different version from the
one in previous example in that the parameter names are different, and have
-built-in default values. We'll simply use::
+built-in default values. We will simply use::
from numpy import loadtxt
from lmfit.models import GaussianModel
@@ -603,23 +303,23 @@ built-in default values. We'll simply use::
which prints out the results::
[[Model]]
- Model(gaussian)
+ Model(gaussian)
[[Fit Statistics]]
- # function evals = 23
- # data points = 401
- # variables = 3
- chi-square = 29.994
- reduced chi-square = 0.075
- Akaike info crit = -1033.774
- Bayesian info crit = -1021.792
+ # function evals = 23
+ # data points = 401
+ # variables = 3
+ chi-square = 29.994
+ reduced chi-square = 0.075
+ Akaike info crit = -1033.774
+ Bayesian info crit = -1021.792
[[Variables]]
- sigma: 1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
- center: 9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
- amplitude: 30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
- fwhm: 2.90156963 +/- 0.017366 (0.60%) == '2.3548200*sigma'
- height: 9.81457973 +/- 0.050872 (0.52%) == '0.3989423*amplitude/max(1.e-15, sigma)'
+ sigma: 1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
+ center: 9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
+ amplitude: 30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
+ fwhm: 2.90156963 +/- 0.017366 (0.60%) == '2.3548200*sigma'
+ height: 9.81457973 +/- 0.050872 (0.52%) == '0.3989423*amplitude/max(1.e-15, sigma)'
[[Correlations]] (unreported correlations are < 0.250)
- C(sigma, amplitude) = 0.577
+ C(sigma, amplitude) = 0.577
We see a few interesting differences from the results of the previous
chapter. First, the parameter names are longer. Second, there are ``fwhm``
@@ -652,23 +352,23 @@ with the rest of the script as above. Perhaps predictably, the first thing
we try gives results that are worse::
[[Model]]
- Model(lorentzian)
+ Model(lorentzian)
[[Fit Statistics]]
- # function evals = 27
- # data points = 401
- # variables = 3
- chi-square = 53.754
- reduced chi-square = 0.135
- Akaike info crit = -799.830
- Bayesian info crit = -787.848
+ # function evals = 27
+ # data points = 401
+ # variables = 3
+ chi-square = 53.754
+ reduced chi-square = 0.135
+ Akaike info crit = -799.830
+ Bayesian info crit = -787.848
[[Variables]]
- sigma: 1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
- center: 9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
- amplitude: 38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
- fwhm: 2.30969034 +/- 0.026312 (1.14%) == '2.0000000*sigma'
- height: 10.7420881 +/- 0.086336 (0.80%) == '0.3183099*amplitude/max(1.e-15, sigma)'
+ sigma: 1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
+ center: 9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
+ amplitude: 38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
+ fwhm: 2.30969034 +/- 0.026312 (1.14%) == '2.0000000*sigma'
+ height: 10.7420881 +/- 0.086336 (0.80%) == '0.3183099*amplitude/max(1.e-15, sigma)'
[[Correlations]] (unreported correlations are < 0.250)
- C(sigma, amplitude) = 0.709
+ C(sigma, amplitude) = 0.709
with the plot shown on the right in the figure above. The tails are now
@@ -681,24 +381,24 @@ does a better job. Using :class:`VoigtModel`, this is as simple as using::
with all the rest of the script as above. This gives::
[[Model]]
- Model(voigt)
+ Model(voigt)
[[Fit Statistics]]
- # function evals = 19
- # data points = 401
- # variables = 3
- chi-square = 14.545
- reduced chi-square = 0.037
- Akaike info crit = -1324.006
- Bayesian info crit = -1312.024
+ # function evals = 19
+ # data points = 401
+ # variables = 3
+ chi-square = 14.545
+ reduced chi-square = 0.037
+ Akaike info crit = -1324.006
+ Bayesian info crit = -1312.024
[[Variables]]
- amplitude: 35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
- sigma: 0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
- center: 9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
- gamma: 0.73015574 +/- 0.003684 (0.50%) == 'sigma'
- fwhm: 2.62951718 +/- 0.013269 (0.50%) == '3.6013100*sigma'
- height: 19.5360268 +/- 0.075691 (0.39%) == '0.3989423*amplitude/max(1.e-15, sigma)'
+ amplitude: 35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
+ sigma: 0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
+ center: 9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
+ gamma: 0.73015574 +/- 0.003684 (0.50%) == 'sigma'
+ fwhm: 2.62951718 +/- 0.013269 (0.50%) == '3.6013100*sigma'
+ height: 19.5360268 +/- 0.075691 (0.39%) == '0.3989423*amplitude/max(1.e-15, sigma)'
[[Correlations]] (unreported correlations are < 0.250)
- C(sigma, amplitude) = 0.651
+ C(sigma, amplitude) = 0.651
which has a much better value for :math:`\chi^2` and an obviously better
@@ -731,26 +431,26 @@ give it a starting value using something like::
which gives::
[[Model]]
- Model(voigt)
+ Model(voigt)
[[Fit Statistics]]
- # function evals = 23
- # data points = 401
- # variables = 4
- chi-square = 10.930
- reduced chi-square = 0.028
- Akaike info crit = -1436.576
- Bayesian info crit = -1420.600
+ # function evals = 23
+ # data points = 401
+ # variables = 4
+ chi-square = 10.930
+ reduced chi-square = 0.028
+ Akaike info crit = -1436.576
+ Bayesian info crit = -1420.600
[[Variables]]
- amplitude: 34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
- sigma: 0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
- center: 9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
- gamma: 0.52540156 +/- 0.018579 (3.54%) (init= 0.7)
- fwhm: 3.22385492 +/- 0.050974 (1.58%) == '3.6013100*sigma'
- height: 15.2374711 +/- 0.299235 (1.96%) == '0.3989423*amplitude/max(1.e-15, sigma)'
+ amplitude: 34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
+ sigma: 0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
+ center: 9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
+ gamma: 0.52540156 +/- 0.018579 (3.54%) (init= 0.7)
+ fwhm: 3.22385492 +/- 0.050974 (1.58%) == '3.6013100*sigma'
+ height: 15.2374711 +/- 0.299235 (1.96%) == '0.3989423*amplitude/max(1.e-15, sigma)'
[[Correlations]] (unreported correlations are < 0.250)
- C(sigma, gamma) = -0.928
- C(gamma, amplitude) = 0.821
- C(sigma, amplitude) = -0.651
+ C(sigma, gamma) = -0.928
+ C(gamma, amplitude) = 0.821
+ C(sigma, amplitude) = -0.651
and the fit shown on the right above.
@@ -765,12 +465,12 @@ Akaike or Bayesian Information Criteria (see
:ref:`information_criteria_label`) to assess how much more likely the model with
variable ``gamma`` is to explain the data than the model with ``gamma``
fixed to the value of ``sigma``. According to theory,
-:math:`\exp(-(\rm{AIC1}-\rm{AIC0})/2)` gives the probably that a model with
-AIC` is more likely than a model with AIC0. For the two models here, with
+:math:`\exp(-(\rm{AIC1}-\rm{AIC0})/2)` gives the probability that a model with
+AIC1 is more likely than a model with AIC0. For the two models here, with
AIC values of -1432 and -1321 (Note: if we had more carefully set the value
for ``weights`` based on the noise in the data, these values might be
positive, but their difference would be roughly the same), this says that
-the model with ``gamma`` fixed to ``sigma`` has a probably less than 1.e-25
+the model with ``gamma`` fixed to ``sigma`` has a probability less than 1.e-25
of being the better model.
@@ -795,31 +495,31 @@ After making a composite model, we run :meth:`fit` and report the
results, which gives::
[[Model]]
- (Model(step, prefix='step_', form='erf') + Model(linear, prefix='line_'))
+ (Model(step, prefix='step_', form='erf') + Model(linear, prefix='line_'))
[[Fit Statistics]]
- # function evals = 51
- # data points = 201
- # variables = 5
- chi-square = 584.829
- reduced chi-square = 2.984
- Akaike info crit = 224.671
- Bayesian info crit = 241.187
+ # function evals = 51
+ # data points = 201
+ # variables = 5
+ chi-square = 584.829
+ reduced chi-square = 2.984
+ Akaike info crit = 224.671
+ Bayesian info crit = 241.187
[[Variables]]
- line_slope: 2.03039786 +/- 0.092221 (4.54%) (init= 0)
- line_intercept: 11.7234542 +/- 0.274094 (2.34%) (init= 10.7816)
- step_amplitude: 112.071629 +/- 0.647316 (0.58%) (init= 134.0885)
- step_sigma: 0.67132341 +/- 0.010873 (1.62%) (init= 1.428571)
- step_center: 3.12697699 +/- 0.005151 (0.16%) (init= 2.5)
+ line_slope: 2.03039786 +/- 0.092221 (4.54%) (init= 0)
+ line_intercept: 11.7234542 +/- 0.274094 (2.34%) (init= 10.7816)
+ step_amplitude: 112.071629 +/- 0.647316 (0.58%) (init= 134.0885)
+ step_sigma: 0.67132341 +/- 0.010873 (1.62%) (init= 1.428571)
+ step_center: 3.12697699 +/- 0.005151 (0.16%) (init= 2.5)
[[Correlations]] (unreported correlations are < 0.100)
- C(line_slope, step_amplitude) = -0.878
- C(step_amplitude, step_sigma) = 0.563
- C(line_slope, step_sigma) = -0.455
- C(line_intercept, step_center) = 0.427
- C(line_slope, line_intercept) = -0.308
- C(line_slope, step_center) = -0.234
- C(line_intercept, step_sigma) = -0.139
- C(line_intercept, step_amplitude) = -0.121
- C(step_amplitude, step_center) = 0.109
+ C(line_slope, step_amplitude) = -0.878
+ C(step_amplitude, step_sigma) = 0.563
+ C(line_slope, step_sigma) = -0.455
+ C(line_intercept, step_center) = 0.427
+ C(line_slope, line_intercept) = -0.308
+ C(line_slope, step_center) = -0.234
+ C(line_intercept, step_sigma) = -0.139
+ C(line_intercept, step_amplitude) = -0.121
+ C(step_amplitude, step_center) = 0.109
with a plot of
@@ -857,39 +557,39 @@ parameter values.
The fit results printed out are::
[[Model]]
- ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
+ ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
[[Fit Statistics]]
- # function evals = 66
- # data points = 250
- # variables = 8
- chi-square = 1247.528
- reduced chi-square = 5.155
- Akaike info crit = 417.865
- Bayesian info crit = 446.036
+ # function evals = 66
+ # data points = 250
+ # variables = 8
+ chi-square = 1247.528
+ reduced chi-square = 5.155
+ Akaike info crit = 417.865
+ Bayesian info crit = 446.036
[[Variables]]
- exp_amplitude: 99.0183282 +/- 0.537487 (0.54%) (init= 162.2102)
- exp_decay: 90.9508859 +/- 1.103105 (1.21%) (init= 93.24905)
- g1_sigma: 16.6725753 +/- 0.160481 (0.96%) (init= 15)
- g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 105)
- g1_amplitude: 4257.77319 +/- 42.38336 (1.00%) (init= 2000)
- g1_fwhm: 39.2609139 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma'
- g1_height: 101.880231 +/- 0.592170 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
- g2_sigma: 13.8069484 +/- 0.186794 (1.35%) (init= 15)
- g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 155)
- g2_amplitude: 2493.41770 +/- 36.16947 (1.45%) (init= 2000)
- g2_fwhm: 32.5128782 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma'
- g2_height: 72.0455934 +/- 0.617220 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
+ exp_amplitude: 99.0183282 +/- 0.537487 (0.54%) (init= 162.2102)
+ exp_decay: 90.9508859 +/- 1.103105 (1.21%) (init= 93.24905)
+ g1_sigma: 16.6725753 +/- 0.160481 (0.96%) (init= 15)
+ g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 105)
+ g1_amplitude: 4257.77319 +/- 42.38336 (1.00%) (init= 2000)
+ g1_fwhm: 39.2609139 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma'
+ g1_height: 101.880231 +/- 0.592170 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
+ g2_sigma: 13.8069484 +/- 0.186794 (1.35%) (init= 15)
+ g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 155)
+ g2_amplitude: 2493.41770 +/- 36.16947 (1.45%) (init= 2000)
+ g2_fwhm: 32.5128782 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma'
+ g2_height: 72.0455934 +/- 0.617220 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
[[Correlations]] (unreported correlations are < 0.500)
- C(g1_sigma, g1_amplitude) = 0.824
- C(g2_sigma, g2_amplitude) = 0.815
- C(exp_amplitude, exp_decay) = -0.695
- C(g1_sigma, g2_center) = 0.684
- C(g1_center, g2_amplitude) = -0.669
- C(g1_center, g2_sigma) = -0.652
- C(g1_amplitude, g2_center) = 0.648
- C(g1_center, g2_center) = 0.621
- C(g1_sigma, g1_center) = 0.507
- C(exp_decay, g1_amplitude) = -0.507
+ C(g1_sigma, g1_amplitude) = 0.824
+ C(g2_sigma, g2_amplitude) = 0.815
+ C(exp_amplitude, exp_decay) = -0.695
+ C(g1_sigma, g2_center) = 0.684
+ C(g1_center, g2_amplitude) = -0.669
+ C(g1_center, g2_sigma) = -0.652
+ C(g1_amplitude, g2_center) = 0.648
+ C(g1_center, g2_center) = 0.621
+ C(g1_sigma, g1_center) = 0.507
+ C(exp_decay, g1_amplitude) = -0.507
We get a very good fit to this problem (described at the NIST site as of
average difficulty, but the tests there are generally deliberately challenging) by
@@ -915,9 +615,9 @@ this, and by defining an :func:`index_of` function to limit the data range.
That is, with::
def index_of(arrval, value):
- "return index of array *at or below* value "
- if value < min(arrval): return 0
- return max(np.where(arrval<=value)[0])
+ "return index of array *at or below* value "
+ if value < min(arrval): return 0
+ return max(np.where(arrval<=value)[0])
ix1 = index_of(x, 75)
ix2 = index_of(x, 135)
@@ -932,39 +632,39 @@ giving identical values (to the precision printed out in the report),
but in fewer steps, and without any bounds on parameters at all::
[[Model]]
- ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
+ ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
[[Fit Statistics]]
- # function evals = 48
- # data points = 250
- # variables = 8
- chi-square = 1247.528
- reduced chi-square = 5.155
- Akaike info crit = 417.865
- Bayesian info crit = 446.036
+ # function evals = 48
+ # data points = 250
+ # variables = 8
+ chi-square = 1247.528
+ reduced chi-square = 5.155
+ Akaike info crit = 417.865
+ Bayesian info crit = 446.036
[[Variables]]
- exp_amplitude: 99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
- exp_decay: 90.9508862 +/- 1.103105 (1.21%) (init= 111.1985)
- g1_sigma: 16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
- g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 106.5)
- g1_amplitude: 4257.77322 +/- 42.38338 (1.00%) (init= 2126.432)
- g1_fwhm: 39.2609141 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma'
- g1_height: 101.880231 +/- 0.592171 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
- g2_sigma: 13.8069481 +/- 0.186794 (1.35%) (init= 15)
- g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 150)
- g2_amplitude: 2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
- g2_fwhm: 32.5128777 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma'
- g2_height: 72.0455935 +/- 0.617221 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
+ exp_amplitude: 99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
+ exp_decay: 90.9508862 +/- 1.103105 (1.21%) (init= 111.1985)
+ g1_sigma: 16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
+ g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 106.5)
+ g1_amplitude: 4257.77322 +/- 42.38338 (1.00%) (init= 2126.432)
+ g1_fwhm: 39.2609141 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma'
+ g1_height: 101.880231 +/- 0.592171 (0.58%) == '0.3989423*g1_amplitude/max(1.e-15, g1_sigma)'
+ g2_sigma: 13.8069481 +/- 0.186794 (1.35%) (init= 15)
+ g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 150)
+ g2_amplitude: 2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
+ g2_fwhm: 32.5128777 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma'
+ g2_height: 72.0455935 +/- 0.617221 (0.86%) == '0.3989423*g2_amplitude/max(1.e-15, g2_sigma)'
[[Correlations]] (unreported correlations are < 0.500)
- C(g1_sigma, g1_amplitude) = 0.824
- C(g2_sigma, g2_amplitude) = 0.815
- C(exp_amplitude, exp_decay) = -0.695
- C(g1_sigma, g2_center) = 0.684
- C(g1_center, g2_amplitude) = -0.669
- C(g1_center, g2_sigma) = -0.652
- C(g1_amplitude, g2_center) = 0.648
- C(g1_center, g2_center) = 0.621
- C(g1_sigma, g1_center) = 0.507
- C(exp_decay, g1_amplitude) = -0.507
+ C(g1_sigma, g1_amplitude) = 0.824
+ C(g2_sigma, g2_amplitude) = 0.815
+ C(exp_amplitude, exp_decay) = -0.695
+ C(g1_sigma, g2_center) = 0.684
+ C(g1_center, g2_amplitude) = -0.669
+ C(g1_center, g2_sigma) = -0.652
+ C(g1_amplitude, g2_center) = 0.648
+ C(g1_center, g2_center) = 0.621
+ C(g1_sigma, g1_center) = 0.507
+ C(exp_decay, g1_amplitude) = -0.507
This script is in the file ``doc_nistgauss2.py`` in the examples folder,
and the fit result shown on the right above shows an improved initial
diff --git a/doc/conf.py b/doc/conf.py
index 490a01d..0f3e012 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -15,7 +15,8 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
+sys.path.insert(0, os.path.abspath('../'))
+# sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
sys.path.append(os.path.abspath(os.path.join('.')))
# -- General configuration -----------------------------------------------------
@@ -24,30 +25,23 @@ sys.path.append(os.path.abspath(os.path.join('.')))
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
from extensions import extensions
-extensions = [
- 'sphinx.ext.extlinks',
- 'sphinx.ext.autodoc',
- 'sphinx.ext.napoleon',
- 'sphinx.ext.mathjax',
- ]
-
-try:
- import IPython.sphinxext.ipython_directive
- extensions.extend(['IPython.sphinxext.ipython_directive',
- 'IPython.sphinxext.ipython_console_highlighting'])
-except ImportError:
- pass
-
-intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
- 'numpy': ('http://docs.scipy.org/doc/numpy/', None),
- 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
+ssoextensions = ['sphinx.ext.extlinks',
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.mathjax']
+
+autoclass_content = 'both'
+
+intersphinx_mapping = {'py': ('https://docs.python.org/2', None),
+ 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
}
## intersphinx_cache_limit = 10
extlinks = {
- 'scipydoc' : ('http://docs.scipy.org/doc/scipy/reference/generated/%s.html', ''),
- 'numpydoc' : ('http://docs.scipy.org/doc/numpy/reference/generated/numpy.%s.html', ''),
+ 'scipydoc' : ('http://docs.scipy.org/doc/scipy/reference/generated/scipy.%s.html', 'scipy.'),
+ 'numpydoc' : ('http://docs.scipy.org/doc/numpy/reference/generated/numpy.%s.html', 'numpy.'),
}
# Add any paths that contain templates here, relative to this directory.
@@ -64,33 +58,13 @@ master_doc = 'index'
# General information about the project.
project = u'lmfit'
-copyright = u'2014, Matthew Newville, The University of Chicago, Till Stensitzki, Freie Universitat Berlin'
+copyright = u'2017, Matthew Newville, The University of Chicago, Till Stensitzki, Freie Universitat Berlin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-sys.path.insert(0, os.path.abspath('../'))
-try:
- import lmfit
- release = lmfit.__version__
-# The full version, including alpha/beta/rc tags.
-except ImportError:
- release = 'latest'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
+
+import lmfit
+release = lmfit.__version__
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 54d2970..ef0ac39 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -7,10 +7,10 @@ Calculation of confidence intervals
The lmfit :mod:`confidence` module allows you to explicitly calculate
confidence intervals for variable parameters. For most models, it is not
-necessary: the estimation of the standard error from the estimated
+necessary since the estimation of the standard error from the estimated
covariance matrix is normally quite good.
-But for some models, e.g. a sum of two exponentials, the approximation
+But for some models, the sum of two exponentials for example, the approximation
begins to fail. For this case, lmfit has the function :func:`conf_interval`
to calculate confidence intervals directly. This is substantially slower
than using the errors estimated from the covariance matrix, but the results
@@ -30,7 +30,7 @@ within a certain confidence.
F(P_{fix},N-P) = \left(\frac{\chi^2_f}{\chi^2_{0}}-1\right)\frac{N-P}{P_{fix}}
-N is the number of data-points, P the number of parameter of the null model.
+`N` is the number of data points, `P` the number of parameters of the null model.
:math:`P_{fix}` is the number of fixed parameters (or to be more clear, the
difference of number of parameters between our null model and the alternate
model).
@@ -63,17 +63,17 @@ starting point::
>>> result = mini.minimize()
>>> print(lmfit.fit_report(result.params))
[Variables]]
- a: 0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
- b: 1.98476945 +/- 0.012226 (0.62%) (init= 1)
+ a: 0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
+ b: 1.98476945 +/- 0.012226 (0.62%) (init= 1)
[[Correlations]] (unreported correlations are < 0.100)
- C(a, b) = 0.601
+ C(a, b) = 0.601
Now it is just a simple function call to calculate the confidence
intervals::
>>> ci = lmfit.conf_interval(mini, result)
>>> lmfit.printfuncs.report_ci(ci)
- 99.70% 95.00% 67.40% 0.00% 67.40% 95.00% 99.70%
+ 99.70% 95.00% 67.40% 0.00% 67.40% 95.00% 99.70%
a 0.09886 0.09905 0.09925 0.09944 0.09963 0.09982 0.10003
b 1.94751 1.96049 1.97274 1.97741 1.99680 2.00905 2.02203
@@ -103,16 +103,16 @@ uncertainties and correlations
which will report::
[[Variables]]
- a1: 2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
- a2: -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
- t1: 1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
- t2: 11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
+ a1: 2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
+ a2: -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
+ t1: 1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
+ t2: 11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
[[Correlations]] (unreported correlations are < 0.500)
- C(a2, t2) = 0.987
- C(a2, t1) = -0.925
- C(t1, t2) = -0.881
- C(a1, t1) = -0.599
- 95.00% 68.00% 0.00% 68.00% 95.00%
+ C(a2, t2) = 0.987
+ C(a2, t1) = -0.925
+ C(t1, t2) = -0.881
+ C(a1, t1) = -0.599
+ 95.00% 68.00% 0.00% 68.00% 95.00%
a1 2.71850 2.84525 2.98622 3.14874 3.34076
a2 -4.63180 -4.46663 -4.33526 -4.22883 -4.14178
t2 10.82699 11.33865 11.82404 12.28195 12.71094
@@ -120,14 +120,14 @@ which will report::
Again we called :func:`conf_interval`, this time with tracing and only for
-1- and 2 :math:`\sigma`. Comparing these two different estimates, we see
+1- and 2-:math:`\sigma`. Comparing these two different estimates, we see
that the estimate for `a1` is reasonably well approximated from the
covariance matrix, but the estimates for `a2` and especially for `t1`, and
`t2` are very asymmetric and that going from 1 :math:`\sigma` (68%
confidence) to 2 :math:`\sigma` (95% confidence) is not very predictable.
Plots made of the confidence region are shown in the figure on the left
-below for ``a1`` and ``t2``, and for ``a2`` and ``t2`` on the right:
+below for `a1` and `t2`, and for `a2` and `t2` on the right:
.. _figC1:
@@ -174,7 +174,7 @@ by using :meth:`Minimizer.emcee` on the same problem.
Credible intervals (the Bayesian equivalent of the frequentist confidence
interval) can be obtained with this method. MCMC can be used for model
-selection, to determine outliers, to marginalise over nuisance parameters, etc.
+selection, to determine outliers, to marginalise over nuisance parameters, etcetera.
For example, you may have fractionally underestimated the uncertainties on a
dataset. MCMC can be used to estimate the true level of uncertainty on each
datapoint. A tutorial on the possibilities offered by MCMC can be found at [1]_.
diff --git a/doc/constraints.rst b/doc/constraints.rst
index 7d06b8d..33b1451 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -7,27 +7,27 @@ Using Mathematical Constraints
.. _asteval: http://newville.github.io/asteval/
Being able to fix variables to a constant value or place upper and lower
-bounds on their values can greatly simplify modeling real data. These
-capabilities are key to lmfit's Parameters. In addition, it is sometimes
-highly desirable to place mathematical constraints on parameter values.
-For example, one might want to require that two Gaussian peaks have the
-same width, or have amplitudes that are constrained to add to some value.
-Of course, one could rewrite the objective or model function to place such
-requirements, but this is somewhat error prone, and limits the flexibility
+bounds on their values can greatly simplify modeling real data. These
+capabilities are key to lmfit's Parameters. In addition, it is sometimes
+highly desirable to place mathematical constraints on parameter values.
+For example, one might want to require that two Gaussian peaks have the
+same width, or have amplitudes that are constrained to add to some value.
+Of course, one could rewrite the objective or model function to place such
+requirements, but this is somewhat error prone, and limits the flexibility
so that exploring constraints becomes laborious.
-To simplify the setting of constraints, Parameters can be assigned a
-mathematical expression of other Parameters, builtin constants, and builtin
-mathematical functions that will be used to determine its value. The
-expressions used for constraints are evaluated using the `asteval`_ module,
-which uses Python syntax, and evaluates the constraint expressions in a safe
+To simplify the setting of constraints, Parameters can be assigned a
+mathematical expression of other Parameters, builtin constants, and builtin
+mathematical functions that will be used to determine its value. The
+expressions used for constraints are evaluated using the `asteval`_ module,
+which uses Python syntax, and evaluates the constraint expressions in a safe
and isolated namespace.
-This approach to mathematical constraints allows one to not have to write a
-separate model function for two Gaussians where the two ``sigma`` values are
+This approach to mathematical constraints allows one to not have to write a
+separate model function for two Gaussians where the two ``sigma`` values are
forced to be equal, or where amplitudes are related. Instead, one can write a
-more general two Gaussian model (perhaps using :class:`GaussianModel`) and
-impose such constraints on the Parameters for a particular fit.
+more general two Gaussian model (perhaps using :class:`GaussianModel`) and
+impose such constraints on the Parameters for a particular fit.
Overview
diff --git a/doc/extensions.py b/doc/extensions.py
index 40de659..1bc8b9f 100644
--- a/doc/extensions.py
+++ b/doc/extensions.py
@@ -1,10 +1,9 @@
# sphinx extensions for mathjax
+
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
- 'numpydoc']
-mathjax = 'sphinx.ext.mathjax'
-pngmath = 'sphinx.ext.pngmath'
-
-extensions.append(mathjax)
+ 'sphinx.ext.extlinks',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.mathjax']
diff --git a/doc/extensions.pyc b/doc/extensions.pyc
index 38be3f3..ad73ffa 100644
--- a/doc/extensions.pyc
+++ b/doc/extensions.pyc
Binary files differ
diff --git a/doc/faq.rst b/doc/faq.rst
index f21a7a2..6ff85f1 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -23,11 +23,11 @@ I get import errors from IPython
If you see something like::
- from IPython.html.widgets import Dropdown
+ from IPython.html.widgets import Dropdown
ImportError: No module named 'widgets'
-then you need to install the ipywidgets package. Try 'pip install ipywidgets'.
+then you need to install the ``ipywidgets`` package, try: ``pip install ipywidgets``.
@@ -35,10 +35,10 @@ then you need to install the ipywidgets package. Try 'pip install ipywidgets'.
How can I fit multi-dimensional data?
========================================
-The fitting routines accept data arrays that are 1 dimensional and double
+The fitting routines accept data arrays that are one dimensional and double
precision. So you need to convert the data and model (or the value
returned by the objective function) to be one dimensional. A simple way to
-do this is to use numpy's :numpydoc:`ndarray.flatten`, for example::
+do this is to use :numpydoc:`ndarray.flatten`, for example::
def residual(params, x, data=None):
....
@@ -48,30 +48,31 @@ do this is to use numpy's :numpydoc:`ndarray.flatten`, for example::
How can I fit multiple data sets?
========================================
-As above, the fitting routines accept data arrays that are 1 dimensional
+As above, the fitting routines accept data arrays that are one dimensional
and double precision. So you need to convert the sets of data and models
(or the value returned by the objective function) to be one dimensional. A
-simple way to do this is to use numpy's :numpydoc:`concatenate`. As an
+simple way to do this is to use :numpydoc:`concatenate`. As an
example, here is a residual function to simultaneously fit two lines to two
different arrays. As a bonus, the two lines share the 'offset' parameter::
+ import numpy as np
def fit_function(params, x=None, dat1=None, dat2=None):
model1 = params['offset'] + x * params['slope1']
model2 = params['offset'] + x * params['slope2']
- resid1 = dat1 - model1
+ resid1 = dat1 - model1
resid2 = dat2 - model2
- return numpy.concatenate((resid1, resid2))
+ return np.concatenate((resid1, resid2))
How can I fit complex data?
===================================
-As with working with multidimensional data, you need to convert your data
+As with working with multi-dimensional data, you need to convert your data
and model (or the value returned by the objective function) to be double
-precision floating point numbers. The simplest approach is to use numpy's
-:numpydoc:`ndarray.view` method, perhaps like::
+precision floating point numbers. The simplest approach is to use
+:numpydoc:`ndarray.view`, perhaps like::
import numpy as np
def residual(params, x, data=None):
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 0783ade..96f2fd0 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -2,8 +2,9 @@
.. module:: lmfit.minimizer
+
=======================================
-Performing Fits, Analyzing Outputs
+Performing Fits and Analyzing Outputs
=======================================
As shown in the previous chapter, a simple fit can be performed with the
@@ -11,10 +12,6 @@ As shown in the previous chapter, a simple fit can be performed with the
:class:`Minimizer` class can be used to gain a bit more control, especially
when using complicated constraints or comparing results from related fits.
-.. warning::
-
- Upgrading scripts from version 0.8.3 to 0.9.0? See :ref:`whatsnew_090_label`
-
The :func:`minimize` function
=============================
@@ -27,7 +24,6 @@ details on writing the objective.
.. autofunction:: minimize
-
.. _fit-func-label:
Writing a Fitting Function
@@ -36,23 +32,23 @@ Writing a Fitting Function
An important component of a fit is writing a function to be minimized --
the *objective function*. Since this function will be called by other
routines, there are fairly stringent requirements for its call signature
-and return value. In principle, your function can be any python callable,
+and return value. In principle, your function can be any Python callable,
but it must look like this:
.. function:: func(params, *args, **kws):
- calculate objective residual to be minimized from parameters.
+ Calculate objective residual to be minimized from parameters.
- :param params: parameters.
- :type params: :class:`Parameters`.
- :param args: positional arguments. Must match ``args`` argument to :func:`minimize`
- :param kws: keyword arguments. Must match ``kws`` argument to :func:`minimize`
- :return: residual array (generally data-model) to be minimized in the least-squares sense.
- :rtype: numpy array. The length of this array cannot change between calls.
+ :param params: Parameters.
+ :type params: :class:`~lmfit.parameter.Parameters`
+ :param args: Positional arguments. Must match ``args`` argument to :func:`minimize`.
+ :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`.
+ :return: Residual array (generally data-model) to be minimized in the least-squares sense.
+ :rtype: numpy.ndarray. The length of this array cannot change between calls.
A common use for the positional and keyword arguments would be to pass in other
-data needed to calculate the residual, including such things as the data array,
+data needed to calculate the residual, including things as the data array,
dependent variable, uncertainties in the data, and other data structures for the
model calculation.
@@ -63,38 +59,37 @@ model. For the other methods, the return value can either be a scalar or an arr
array is returned, the sum of squares of the array will be sent to the underlying fitting
method, effectively doing a least-squares optimization of the return values.
-
Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable
to unpack these to get numerical values at the top of the function. A
-simple way to do this is with :meth:`Parameters.valuesdict`, as with::
+simple way to do this is with :meth:`Parameters.valuesdict`, as shown below::
def residual(pars, x, data=None, eps=None):
- # unpack parameters:
- # extract .value attribute for each parameter
- parvals = pars.valuesdict()
- period = parvals['period']
- shift = parvals['shift']
- decay = parvals['decay']
+ # unpack parameters:
+ # extract .value attribute for each parameter
+ parvals = pars.valuesdict()
+ period = parvals['period']
+ shift = parvals['shift']
+ decay = parvals['decay']
- if abs(shift) > pi/2:
- shift = shift - sign(shift)*pi
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
- if abs(period) < 1.e-10:
- period = sign(period)*1.e-10
+ if abs(period) < 1.e-10:
+ period = sign(period)*1.e-10
- model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
+ model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
- if data is None:
- return model
- if eps is None:
- return (model - data)
- return (model - data)/eps
+ if data is None:
+ return model
+ if eps is None:
+ return (model - data)
+ return (model - data)/eps
-In this example, ``x`` is a positional (required) argument, while the
-``data`` array is actually optional (so that the function returns the model
+In this example, `x` is a positional (required) argument, while the
+`data` array is actually optional (so that the function returns the model
calculation if the data is neglected). Also note that the model
-calculation will divide ``x`` by the value of the 'period' Parameter. It
+calculation will divide `x` by the value of the ``period`` Parameter. It
might be wise to ensure this parameter cannot be 0. It would be possible
to use the bounds on the :class:`Parameter` to do this::
@@ -102,8 +97,8 @@ to use the bounds on the :class:`Parameter` to do this::
but putting this directly in the function with::
- if abs(period) < 1.e-10:
- period = sign(period)*1.e-10
+ if abs(period) < 1.e-10:
+ period = sign(period)*1.e-10
is also a reasonable approach. Similarly, one could place bounds on the
``decay`` parameter to take values only between ``-pi/2`` and ``pi/2``.
@@ -157,6 +152,8 @@ class as listed in the :ref:`Table of Supported Fitting Methods
| Differential | ``differential_evolution`` |
| Evolution | |
+-----------------------+------------------------------------------------------------------+
+ | Brute force method | ``brute`` |
+ +-----------------------+------------------------------------------------------------------+
.. note::
@@ -169,9 +166,9 @@ class as listed in the :ref:`Table of Supported Fitting Methods
.. warning::
Much of this documentation assumes that the Levenberg-Marquardt method is
- the method used. Many of the fit statistics and estimates for
- uncertainties in parameters discussed in :ref:`fit-results-label` are
- done only for this method.
+ used. Many of the fit statistics and estimates for uncertainties in
+ parameters discussed in :ref:`fit-results-label` are done only for this
+ method.
.. _fit-results-label:
@@ -190,7 +187,15 @@ messages, fit statistics, and the updated parameters themselves.
Importantly, the parameters passed in to :meth:`Minimizer.minimize`
will be not be changed. To to find the best-fit values, uncertainties
and so on for each parameter, one must use the
-:attr:`MinimizerResult.params` attribute.
+:attr:`MinimizerResult.params` attribute. For example, to print the
+fitted values, bounds and other parameters attributes in a
+well formatted text tables you can execute::
+
+ result.params.pretty_print()
+
+with `results` being a `MinimizerResult` object. Note that the method
+:meth:`~lmfit.parameter.Parameters.pretty_print` accepts several arguments
+for customizing the output (e.g., column width, numeric format, etcetera).
.. autoclass:: MinimizerResult
@@ -228,7 +233,7 @@ Goodness-of-Fit Statistics
+----------------------+----------------------------------------------------------------------------+
| var_names | ordered list of variable parameter names used for init_vals and covar |
+----------------------+----------------------------------------------------------------------------+
-| covar | covariance matrix (with rows/columns using var_names |
+| covar | covariance matrix (with rows/columns using var_names) |
+----------------------+----------------------------------------------------------------------------+
| init_vals | list of initial values for variable parameters |
+----------------------+----------------------------------------------------------------------------+
@@ -236,13 +241,13 @@ Goodness-of-Fit Statistics
Note that the calculation of chi-square and reduced chi-square assume
that the returned residual function is scaled properly to the
uncertainties in the data. For these statistics to be meaningful, the
-person writing the function to be minimized must scale them properly.
+person writing the function to be minimized **must** scale them properly.
After a fit using using the :meth:`leastsq` method has completed
successfully, standard errors for the fitted variables and correlations
between pairs of fitted variables are automatically calculated from the
covariance matrix. The standard error (estimated :math:`1\sigma`
-error-bar) go into the :attr:`stderr` attribute of the Parameter. The
+error-bar) goes into the :attr:`stderr` attribute of the Parameter. The
correlations with all other variables will be put into the
:attr:`correl` attribute of the Parameter -- a dictionary with keys for
all other Parameters and values of the corresponding correlation.
@@ -268,8 +273,8 @@ The :class:`MinimizerResult` includes the traditional chi-square and reduced chi
:nowrap:
\begin{eqnarray*}
- \chi^2 &=& \sum_i^N r_i^2 \\
- \chi^2_\nu &=& = \chi^2 / (N-N_{\rm varys})
+ \chi^2 &=& \sum_i^N r_i^2 \\
+ \chi^2_\nu &=& = \chi^2 / (N-N_{\rm varys})
\end{eqnarray*}
where :math:`r` is the residual array returned by the objective function
@@ -284,14 +289,14 @@ Also included are the `Akaike Information Criterion
held in the ``aic`` and ``bic`` attributes, respectively. These give slightly
different measures of the relative quality for a fit, trying to balance
quality of fit with the number of variable parameters used in the fit.
-These are calculated as
+These are calculated as:
.. math::
:nowrap:
\begin{eqnarray*}
{\rm aic} &=& N \ln(\chi^2/N) + 2 N_{\rm varys} \\
- {\rm bic} &=& N \ln(\chi^2/N) + \ln(N) *N_{\rm varys} \\
+ {\rm bic} &=& N \ln(\chi^2/N) + \ln(N) N_{\rm varys} \\
\end{eqnarray*}
@@ -314,18 +319,18 @@ used to abort a fit.
.. function:: iter_cb(params, iter, resid, *args, **kws):
- user-supplied function to be run at each iteration
+ User-supplied function to be run at each iteration.
- :param params: parameters.
- :type params: :class:`Parameters`.
- :param iter: iteration number
- :type iter: integer
- :param resid: residual array.
- :type resid: ndarray
- :param args: positional arguments. Must match ``args`` argument to :func:`minimize`
- :param kws: keyword arguments. Must match ``kws`` argument to :func:`minimize`
- :return: residual array (generally data-model) to be minimized in the least-squares sense.
- :rtype: ``None`` for normal behavior, any value like ``True`` to abort fit.
+ :param params: Parameters.
+ :type params: :class:`~lmfit.parameter.Parameters`
+ :param iter: Iteration number.
+ :type iter: int
+ :param resid: Residual array.
+ :type resid: numpy.ndarray
+ :param args: Positional arguments. Must match ``args`` argument to :func:`minimize`
+ :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`
+ :return: Residual array (generally data-model) to be minimized in the least-squares sense.
+ :rtype: None for normal behavior, any value like True to abort the fit.
Normally, the iteration callback would have no return value or return
@@ -340,265 +345,35 @@ statistics are not likely to be meaningful, and uncertainties will not be comput
Using the :class:`Minimizer` class
=======================================
-For full control of the fitting process, you'll want to create a
+For full control of the fitting process, you will want to create a
:class:`Minimizer` object.
-.. class:: Minimizer(function, params, fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, mask_non_finite=False, **kws)
-
- creates a Minimizer, for more detailed access to fitting methods and attributes.
-
- :param function: objective function to return fit residual. See :ref:`fit-func-label` for details.
- :type function: callable.
- :param params: a dictionary of Parameters. Keywords must be strings
- that match ``[a-z_][a-z0-9_]*`` and is not a python
- reserved word. Each value must be :class:`Parameter`.
- :type params: dict
- :param fcn_args: arguments tuple to pass to the residual function as positional arguments.
- :type fcn_args: tuple
- :param fcn_kws: dictionary to pass to the residual function as keyword arguments.
- :type fcn_kws: dict
- :param iter_cb: function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
- :type iter_cb: callable or ``None``
- :param scale_covar: flag for automatically scaling covariance matrix and uncertainties to reduced chi-square (``leastsq`` only)
- :type scale_covar: bool (default ``True``).
- :param nan_policy: Specifies action if `userfcn` (or a Jacobian) returns nan
- values. One of:
-
- 'raise' - a `ValueError` is raised
- 'propagate' - the values returned from `userfcn` are un-altered
- 'omit' - the non-finite values are filtered.
-
- :type nan_policy: str (default 'raise')
- :param kws: dictionary to pass as keywords to the underlying :mod:`scipy.optimize` method.
- :type kws: dict
+.. autoclass :: Minimizer
The Minimizer object has a few public methods:
.. automethod:: Minimizer.minimize
-.. method:: leastsq(params=None, scale_covar=True, **kws)
-
- perform fit with Levenberg-Marquardt algorithm. Keywords will be
- passed directly to :scipydoc:`optimize.leastsq`. By default,
- numerical derivatives are used, and the following arguments are set:
-
-
- +------------------+----------------+------------------------------------------------------------+
- | :meth:`leastsq` | Default Value | Description |
- | arg | | |
- +==================+================+============================================================+
- | xtol | 1.e-7 | Relative error in the approximate solution |
- +------------------+----------------+------------------------------------------------------------+
- | ftol | 1.e-7 | Relative error in the desired sum of squares |
- +------------------+----------------+------------------------------------------------------------+
- | maxfev | 2000*(nvar+1) | maximum number of function calls (nvar= # of variables) |
- +------------------+----------------+------------------------------------------------------------+
- | Dfun | ``None`` | function to call for Jacobian calculation |
- +------------------+----------------+------------------------------------------------------------+
-
-
-.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`
-
-.. method:: scalar_minimize(method='Nelder-Mead', params=None, hess=None, tol=None, **kws)
-
- perform fit with any of the scalar minimization algorithms supported by
- :scipydoc:`optimize.minimize`.
-
- +-------------------------+-----------------+-----------------------------------------------------+
- | :meth:`scalar_minimize` | Default Value | Description |
- | arg | | |
- +=========================+=================+=====================================================+
- | method | ``Nelder-Mead`` | fitting method |
- +-------------------------+-----------------+-----------------------------------------------------+
- | tol | 1.e-7 | fitting and parameter tolerance |
- +-------------------------+-----------------+-----------------------------------------------------+
- | hess | None | Hessian of objective function |
- +-------------------------+-----------------+-----------------------------------------------------+
-
-.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`
-
-.. method:: prepare_fit(**kws)
-
- prepares and initializes model and Parameters for subsequent
- fitting. This routine prepares the conversion of :class:`Parameters`
- into fit variables, organizes parameter bounds, and parses, "compiles"
- and checks constrain expressions. The method also creates and returns
- a new instance of a :class:`MinimizerResult` object that contains the
- copy of the Parameters that will actually be varied in the fit.
-
- This method is called directly by the fitting methods, and it is
- generally not necessary to call this function explicitly.
-
-.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`
-
-
-
-.. method:: emcee(params=None, steps=1000, nwalkers=100, burn=0, thin=1, ntemps=1, pos=None, reuse_sampler=False, workers=1, float_behavior='posterior', is_weighted=True, seed=None)
-
- Bayesian sampling of the posterior distribution for the parameters using the `emcee`
- Markov Chain Monte Carlo package. The method assumes that the prior is Uniform. You need
- to have `emcee` installed to use this method.
-
- :param params: a :class:`Parameters` dictionary for starting values
- :type params: :class:`Parameters` or `None`
- :param steps: How many samples you would like to draw from the posterior
- distribution for each of the walkers?
- :type steps: int
- :param nwalkers: Should be set so :math:`nwalkers >> nvarys`, where `nvarys`
- are the number of parameters being varied during the fit.
- "Walkers are the members of the ensemble. They are almost
- like separate Metropolis-Hastings chains but, of course,
- the proposal distribution for a given walker depends on the
- positions of all the other walkers in the ensemble." - from
- [1]_.
- :type nwalkers: int
- :param burn: Discard this many samples from the start of the sampling regime.
- :type burn: int
- :param thin: Only accept 1 in every `thin` samples.
- :type thin: int
- :param ntemps: If `ntemps > 1` perform a Parallel Tempering.
- :type ntemps: int
- :param pos: Specify the initial positions for the sampler. If `ntemps == 1`
- then `pos.shape` should be `(nwalkers, nvarys)`. Otherwise,
- `(ntemps, nwalkers, nvarys)`. You can also initialise using a
- previous chain that had the same `ntemps`, `nwalkers` and `nvarys`.
- :type pos: np.ndarray
- :param reuse_sampler: If you have already run :meth:`emcee` on a given
- :class:`Minimizer` object then it possesses an internal sampler
- attribute. You can continue to draw from the same sampler (retaining
- the chain history) if you set this option to `True`. Otherwise a new
- sampler is created. The `nwalkers`, `ntemps` and `params` keywords
- are ignored with this option.
- **Important**: the :class:`Parameters` used to create the sampler
- must not change in-between calls to :meth:`emcee`. Alteration of
- :class:`Parameters` would include changed ``min``, ``max``,
- ``vary`` and ``expr`` attributes. This may happen, for example, if
- you use an altered :class:`Parameters` object and call the
- :meth:`minimize` method in-between calls to :meth:`emcee` .
- :type reuse_sampler: bool
- :param workers: For parallelization of sampling. It can be any Pool-like object
- with a map method that follows the same calling sequence as the
- built-in map function. If int is given as the argument, then a
- multiprocessing-based pool is spawned internally with the
- corresponding number of parallel processes. 'mpi4py'-based
- parallelization and 'joblib'-based parallelization pools can also
- be used here. **Note**: because of multiprocessing overhead it may
- only be worth parallelising if the objective function is expensive
- to calculate, or if there are a large number of objective
- evaluations per step (`ntemps * nwalkers * nvarys`).
- :type workers: int or Pool-like
- :type float_behavior: str
- :param float_behavior: Specifies the meaning of the objective function if it
- returns a float. One of:
-
- 'posterior' - the objective function returns a log-posterior probability
-
- 'chi2' - the objective function is returning :math:`\chi^2`.
-
- See Notes for further details.
- :param is_weighted: Has your objective function been weighted by measurement
- uncertainties? If `is_weighted is True` then your objective
- function is assumed to return residuals that have been divided by
- the true measurement uncertainty `(data - model) / sigma`. If
- `is_weighted is False` then the objective function is assumed to
- return unweighted residuals, `data - model`. In this case `emcee`
- will employ a positive measurement uncertainty during the sampling.
- This measurement uncertainty will be present in the output params
- and output chain with the name `__lnsigma`. A side effect of this
- is that you cannot use this parameter name yourself.
- **Important** this parameter only has any effect if your objective
- function returns an array. If your objective function returns a
- float, then this parameter is ignored. See Notes for more details.
- :type is_weighted: bool
- :param seed: If `seed` is an int, a new `np.random.RandomState` instance is used,
- seeded with `seed`.
- If `seed` is already a `np.random.RandomState` instance, then that
- `np.random.RandomState` instance is used.
- Specify `seed` for repeatable sampling.
- :type seed: int or np.random.RandomState
-
- :return: :class:`MinimizerResult` object containing updated params, statistics,
- etc. The :class:`MinimizerResult` also contains the ``chain``,
- ``flatchain`` and ``lnprob`` attributes. The ``chain``
- and ``flatchain`` attributes contain the samples and have the shape
- `(nwalkers, (steps - burn) // thin, nvarys)` or
- `(ntemps, nwalkers, (steps - burn) // thin, nvarys)`,
- depending on whether Parallel tempering was used or not.
- `nvarys` is the number of parameters that are allowed to vary.
- The ``flatchain`` attribute is a :class:`pandas.DataFrame` of the
- flattened chain, `chain.reshape(-1, nvarys)`. To access flattened
- chain values for a particular parameter use
- `result.flatchain[parname]`. The ``lnprob`` attribute contains the
- log probability for each sample in ``chain``. The sample with the
- highest probability corresponds to the maximum likelihood estimate.
-
- This method samples the posterior distribution of the parameters using
- Markov Chain Monte Carlo. To do so it needs to calculate the
- log-posterior probability of the model parameters, `F`, given the data,
- `D`, :math:`\ln p(F_{true} | D)`. This 'posterior probability' is
- calculated as:
-
- .. math::
-
- \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
-
- where :math:`\ln p(D | F_{true})` is the 'log-likelihood' and
- :math:`\ln p(F_{true})` is the 'log-prior'. The default log-prior
- encodes prior information already known about the model. This method
- assumes that the log-prior probability is `-np.inf` (impossible) if the
- one of the parameters is outside its limits. The log-prior probability
- term is zero if all the parameters are inside their bounds (known as a
- uniform prior). The log-likelihood function is given by [1]_:
-
- .. math::
-
- \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{\left(g_n(F_{true}) - D_n \right)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
-
- The first summand in the square brackets represents the residual for a
- given datapoint (:math:`g` being the generative model) . This term
- represents :math:`\chi^2` when summed over all datapoints.
- Ideally the objective function used to create :class:`lmfit.Minimizer` should
- return the log-posterior probability, :math:`\ln p(F_{true} | D)`.
- However, since the in-built log-prior term is zero, the objective
- function can also just return the log-likelihood, unless you wish to
- create a non-uniform prior.
-
- If a float value is returned by the objective function then this value
- is assumed by default to be the log-posterior probability, i.e.
- `float_behavior is 'posterior'`. If your objective function returns
- :math:`\chi^2`, then you should use a value of `'chi2'` for
- `float_behavior`. `emcee` will then multiply your :math:`\chi^2` value
- by -0.5 to obtain the posterior probability.
-
- However, the default behaviour of many objective functions is to return
- a vector of (possibly weighted) residuals. Therefore, if your objective
- function returns a vector, `res`, then the vector is assumed to contain
- the residuals. If `is_weighted is True` then your residuals are assumed
- to be correctly weighted by the standard deviation of the data points
- (`res = (data - model) / sigma`) and the log-likelihood (and
- log-posterior probability) is calculated as: `-0.5 * np.sum(res **2)`.
- This ignores the second summand in the square brackets. Consequently, in
- order to calculate a fully correct log-posterior probability value your
- objective function should return a single value. If `is_weighted is False`
- then the data uncertainty, :math:`s_n`, will be treated as a nuisance
- parameter and will be marginalised out. This is achieved by employing a
- strictly positive uncertainty (homoscedasticity) for each data point,
- :math:`s_n=exp(\_\_lnsigma)`. `__lnsigma` will be present in
- `MinimizerResult.params`, as well as `Minimizer.chain`, `nvarys` will also be
- increased by one.
-
- .. [1] http://dan.iel.fm/emcee/current/user/line/
+.. automethod:: Minimizer.leastsq
+
+.. automethod:: Minimizer.least_squares
+.. automethod:: Minimizer.scalar_minimize
+
+.. automethod:: Minimizer.prepare_fit
+
+.. automethod:: Minimizer.brute
+
+For more information, check the examples in ``examples/lmfit_brute.py``.
+
+.. automethod:: Minimizer.emcee
.. _label-emcee:
-:meth:`emcee` - calculating the posterior probability distribution of parameters
+:meth:`Minimizer.emcee` - calculating the posterior probability distribution of parameters
==============================================================================================
-:meth:`emcee` can be used to obtain the posterior probability distribution of
+:meth:`Minimizer.emcee` can be used to obtain the posterior probability distribution of
parameters, given a set of experimental data. An example problem is a double
exponential decay. A small amount of Gaussian noise is also added in::
@@ -627,10 +402,10 @@ Solving with :func:`minimize` gives the Maximum Likelihood solution.::
>>> mi = lmfit.minimize(residual, p, method='Nelder')
>>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
[[Variables]]
- a1: 2.98623688 (init= 4)
- a2: -4.33525596 (init= 4)
- t1: 1.30993185 (init= 3)
- t2: 11.8240752 (init= 3)
+ a1: 2.98623688 (init= 4)
+ a2: -4.33525596 (init= 4)
+ t1: 1.30993185 (init= 3)
+ t2: 11.8240752 (init= 3)
[[Correlations]] (unreported correlations are < 0.500)
>>> plt.plot(x, y)
>>> plt.plot(x, residual(mi.params) + y, 'r')
@@ -640,11 +415,11 @@ Solving with :func:`minimize` gives the Maximum Likelihood solution.::
However, this doesn't give a probability distribution for the parameters.
Furthermore, we wish to deal with the data uncertainty. This is called
-marginalisation of a nuisance parameter. emcee requires a function that returns
+marginalisation of a nuisance parameter. ``emcee`` requires a function that returns
the log-posterior probability. The log-posterior probability is a sum of the
log-prior probability and log-likelihood functions. The log-prior probability is
-assumed to be zero if all the parameters are within their bounds and `-np.inf`
-if any of the parameters are outside their bounds.::
+assumed to be zero if all the parameters are within their bounds and ``-np.inf``
+if any of the parameters are outside their bounds.
>>> # add a noise parameter
>>> mi.params.add('f', value=1, min=0.001, max=2)
@@ -659,13 +434,13 @@ if any of the parameters are outside their bounds.::
... resid += np.log(2 * np.pi * s**2)
... return -0.5 * np.sum(resid)
-Now we have to set up the minimizer and do the sampling.::
+Now we have to set up the minimizer and do the sampling::
>>> mini = lmfit.Minimizer(lnprob, mi.params)
>>> res = mini.emcee(burn=300, steps=600, thin=10, params=mi.params)
Lets have a look at those posterior distributions for the parameters. This requires
-installation of the `corner` package.::
+installation of the `corner` package::
>>> import corner
>>> corner.corner(res.flatchain, labels=res.var_names, truths=list(res.params.valuesdict().values()))
@@ -684,18 +459,18 @@ You can see that we recovered the right uncertainty level on the data.::
median of posterior probability distribution
------------------------------------------
[[Variables]]
- a1: 3.00975345 +/- 0.151034 (5.02%) (init= 2.986237)
- a2: -4.35419204 +/- 0.127505 (2.93%) (init=-4.335256)
- t1: 1.32726415 +/- 0.142995 (10.77%) (init= 1.309932)
- t2: 11.7911935 +/- 0.495583 (4.20%) (init= 11.82408)
- f: 0.09805494 +/- 0.004256 (4.34%) (init= 1)
+ a1: 3.00975345 +/- 0.151034 (5.02%) (init= 2.986237)
+ a2: -4.35419204 +/- 0.127505 (2.93%) (init=-4.335256)
+ t1: 1.32726415 +/- 0.142995 (10.77%) (init= 1.309932)
+ t2: 11.7911935 +/- 0.495583 (4.20%) (init= 11.82408)
+ f: 0.09805494 +/- 0.004256 (4.34%) (init= 1)
[[Correlations]] (unreported correlations are < 0.100)
- C(a2, t2) = 0.981
- C(a2, t1) = -0.927
- C(t1, t2) = -0.880
- C(a1, t1) = -0.519
- C(a1, a2) = 0.195
- C(a1, t2) = 0.146
+ C(a2, t2) = 0.981
+ C(a2, t1) = -0.927
+ C(t1, t2) = -0.880
+ C(a1, t1) = -0.519
+ C(a1, a2) = 0.195
+ C(a1, t2) = 0.146
>>> # find the maximum likelihood solution
>>> highest_prob = np.argmax(res.lnprob)
@@ -724,46 +499,31 @@ Getting and Printing Fit Reports
.. currentmodule:: lmfit.printfuncs
-.. function:: fit_report(result, modelpars=None, show_correl=True, min_correl=0.1)
-
- generate and return text of report of best-fit values, uncertainties,
- and correlations from fit.
-
- :param result: :class:`MinimizerResult` object as returned by :func:`minimize`.
- :param modelpars: Parameters with "Known Values" (optional, default None)
- :param show_correl: whether to show list of sorted correlations [``True``]
- :param min_correl: smallest correlation absolute value to show [0.1]
-
- If the first argument is a :class:`Parameters` object,
- goodness-of-fit statistics will not be included.
-
-.. function:: report_fit(result, modelpars=None, show_correl=True, min_correl=0.1)
-
- print text of report from :func:`fit_report`.
+.. autofunction:: fit_report
-An example fit with report would be
+An example using this to write out a fit report would be:
.. literalinclude:: ../examples/doc_withreport.py
which would write out::
[[Fit Statistics]]
- # function evals = 85
- # data points = 1001
- # variables = 4
- chi-square = 498.812
- reduced chi-square = 0.500
- Akaike info crit = -689.223
- Bayesian info crit = -669.587
+ # function evals = 85
+ # data points = 1001
+ # variables = 4
+ chi-square = 498.812
+ reduced chi-square = 0.500
+ Akaike info crit = -689.223
+ Bayesian info crit = -669.587
[[Variables]]
- amp: 13.9121944 +/- 0.141202 (1.01%) (init= 13)
- period: 5.48507044 +/- 0.026664 (0.49%) (init= 2)
- shift: 0.16203676 +/- 0.014056 (8.67%) (init= 0)
- decay: 0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
+ amp: 13.9121944 +/- 0.141202 (1.01%) (init= 13)
+ period: 5.48507044 +/- 0.026664 (0.49%) (init= 2)
+ shift: 0.16203676 +/- 0.014056 (8.67%) (init= 0)
+ decay: 0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
[[Correlations]] (unreported correlations are < 0.100)
- C(period, shift) = 0.797
- C(amp, decay) = 0.582
- C(amp, shift) = -0.297
- C(amp, period) = -0.243
- C(shift, decay) = -0.182
- C(period, decay) = -0.150
+ C(period, shift) = 0.797
+ C(amp, decay) = 0.582
+ C(amp, shift) = -0.297
+ C(amp, period) = -0.243
+ C(shift, decay) = -0.182
+ C(period, decay) = -0.150
diff --git a/doc/index.rst b/doc/index.rst
index 0469958..69d4271 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -1,24 +1,27 @@
.. lmfit documentation master file,
-Non-Linear Least-Square Minimization and Curve-Fitting for Python
+Non-Linear Least-Squares Minimization and Curve-Fitting for Python
===========================================================================
.. _Levenberg-Marquardt: http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
.. _MINPACK-1: http://en.wikipedia.org/wiki/MINPACK
+.. _scipy.optimize: http://docs.scipy.org/doc/scipy/reference/optimize.html
+.. _lmfit github repository: http://github.com/lmfit/lmfit-py
Lmfit provides a high-level interface to non-linear optimization and curve
-fitting problems for Python. Lmfit builds on and extends many of the
-optimization algorithm of :mod:`scipy.optimize`, especially the
-`Levenberg-Marquardt`_ method from :scipydoc:`optimize.leastsq`.
-
-Lmfit provides a number of useful enhancements to optimization and data
-fitting problems, including:
+fitting problems for Python. It builds on and extends many of the
+optimization methods of `scipy.optimize`_. Initially inspired by (and
+named for) extending the `Levenberg-Marquardt`_ method from
+:scipydoc:`optimize.leastsq`, lmfit now provides a number of useful
+enhancements to optimization and data fitting problems, including:
- * Using :class:`Parameter` objects instead of plain floats as variables.
- A :class:`Parameter` has a value that can be varied in the fit, have a
- fixed value, or have upper and/or lower bounds. A Parameter can even
- have a value that is constrained by an algebraic expression of other
- Parameter values.
+ * Using :class:`~lmfit.parameter.Parameter` objects instead of plain
+ floats as variables. A :class:`~lmfit.parameter.Parameter` has a value
+ that can be varied during the fit or kept at a fixed value. It can
+ have upper and/or lower bounds. A Parameter can even have a value that
+ is constrained by an algebraic expression of other Parameter values.
+ As a Python object, a Parameter can also have attributes such as a
+ standard error, after a fit that can estimate uncertainties.
* Ease of changing fitting algorithms. Once a fitting model is set up,
one can change the fitting algorithm used to find the optimal solution
@@ -27,20 +30,18 @@ fitting problems, including:
* Improved estimation of confidence intervals. While
:scipydoc:`optimize.leastsq` will automatically calculate
uncertainties and correlations from the covariance matrix, the accuracy
- of these estimates are often questionable. To help address this, lmfit
- has functions to explicitly explore parameter space to determine
+ of these estimates is sometimes questionable. To help address this,
+ lmfit has functions to explicitly explore parameter space and determine
confidence levels even for the most difficult cases.
- * Improved curve-fitting with the :class:`Model` class. This
+ * Improved curve-fitting with the :class:`~lmfit.model.Model` class. This
extends the capabilities of :scipydoc:`optimize.curve_fit`, allowing
- you to turn a function that models for your data into a python class
+ you to turn a function that models your data into a Python class
that helps you parametrize and fit data with that model.
- * Many :ref:`pre-built models <builtin_models_chapter>` for common
+ * Many :ref:`built-in models <builtin_models_chapter>` for common
lineshapes are included and ready to use.
-.. _lmfit github repository: http://github.com/lmfit/lmfit-py
-
The lmfit package is Free software, using an Open Source license. The
software and this document are works in progress. If you are interested in
participating in this effort please use the `lmfit github repository`_.
diff --git a/doc/installation.rst b/doc/installation.rst
index 61e488e..99d776a 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -3,24 +3,31 @@ Downloading and Installation
====================================
.. _lmfit github repository: http://github.com/lmfit/lmfit-py
-.. _Python Setup Tools: http://pypi.python.org/pypi/setuptools
-.. _pip: https://pip.pypa.io/
-.. _nose: http://nose.readthedocs.org/
+.. _python: http://python.org
+.. _scipy: http://scipy.org/scipylib/index.html
+.. _numpy: http://numpy.org/
+.. _nose: http://nose.readthedocs.org/
+.. _pytest: http://pytest.org/
+.. _emcee: http://dan.iel.fm/emcee/
+.. _pandas: http://pandas.pydata.org/
+.. _jupyter: http://jupyter.org/
+.. _matplotlib: http://matplotlib.org/
Prerequisites
~~~~~~~~~~~~~~~
-The lmfit package requires Python, Numpy, and Scipy.
+The lmfit package requires `Python`_, `NumPy`_, and `SciPy`_.
-Lmfit works with Python 2.7, 3.3, 3.4, and 3.5. Support for Python 2.6
-ended with Lmfit version 0.9.4. Scipy version 0.14 or higher is required,
+Lmfit works with Python versions 2.7, 3.3, 3.4, 3.5, and 3.6. Support for Python 2.6
+ended with lmfit version 0.9.4. Scipy version 0.15 or higher is required,
with 0.17 or higher recommended to be able to use the latest optimization
-features from scipy. Numpy version 1.5 or higher is required.
+features. NumPy version 1.5.1 or higher is required.
-In order to run the test suite, the `nose`_ framework is required. Some
-parts of lmfit will be able to make use of IPython (version 4 or higher),
-matplotlib, and pandas if those libraries are installed, but no core
-functionality of lmfit requires these.
+In order to run the test suite, either the `nose`_ or `pytest`_ package is
+required. Some functionality of lmfit requires the `emcee`_ package; some
+functionality will make use of the `pandas`_, `Jupyter`_ or `matplotlib`_
+packages if available. We highly recommend each of these
+packages.
Downloads
@@ -32,20 +39,18 @@ The latest stable version of lmfit is |release| is available from `PyPi
Installation
~~~~~~~~~~~~~~~~~
-If you have `pip`_ installed, you can install lmfit with::
+With ``pip`` now widely available, you can install lmfit with::
pip install lmfit
-or you can download the source kit, unpack it and install with::
+Alternatively, you can download the source kit, unpack it and install with::
python setup.py install
-For Anaconda Python, lmfit is not an official packages, but several
+For Anaconda Python, lmfit is not an official package, but several
Anaconda channels provide it, allowing installation with (for example)::
conda install -c conda-forge lmfit
- conda install -c newville lmfit
-
Development Version
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -62,14 +67,16 @@ and install using::
Testing
~~~~~~~~~~
-A battery of tests scripts that can be run with the `nose`_ testing
-framework is distributed with lmfit in the ``tests`` folder. These are
-routinely run on the development version. Running ``nosetests`` should run
-all of these tests to completion without errors or failures.
+A battery of test scripts that can be run with either the `nose`_ or
+`pytest`_ testing framework is distributed with lmfit in the ``tests``
+folder. These are automatically run as part of the development process.
+For any release or any master branch from the git repository, running
+``pytest`` or ``nosetests`` should run all of these tests to completion
+without errors or failures.
Many of the examples in this documentation are distributed with lmfit in
-the ``examples`` folder, and should also run for you. Many of these require
-
+the ``examples`` folder, and should also run for you. Some of these
+examples assume `matplotlib`_ has been installed and is working correctly.
Acknowledgements
~~~~~~~~~~~~~~~~~~
diff --git a/doc/intro.rst b/doc/intro.rst
index 1c6271d..3800a2f 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -4,18 +4,20 @@
Getting started with Non-Linear Least-Squares Fitting
===========================================================
-The lmfit package is designed to provide simple tools to help you build
-complex fitting models for non-linear least-squares problems and apply
-these models to real data. This section gives an overview of the concepts
-and describes how to set up and perform simple fits. Some basic knowledge
-of Python, numpy, and modeling data are assumed.
-
-To do a non-linear least-squares fit of a model to data or for a variety of other
-optimization problems, the main task is to write an *objective function*
-that takes the values of the fitting variables and calculates either a
-scalar value to be minimized or an array of values that is to be minimized
-in the least-squares sense. For many data fitting processes, the
-least-squares approach is used, and the objective function should
+The lmfit package provides simple tools to help you build complex fitting
+models for non-linear least-squares problems and apply these models to real
+data. This section gives an overview of the concepts and describes how to
+set up and perform simple fits. Some basic knowledge of Python, NumPy, and
+modeling data is assumed -- this is not a tutorial on why or how to
+perform a minimization or fit data, but is rather aimed at explaining how
+to use lmfit to do these things.
+
+In order to do a non-linear least-squares fit of a model to data or for any
+other optimization problem, the main task is to write an *objective
+function* that takes the values of the fitting variables and calculates
+either a scalar value to be minimized or an array of values that are to be
+minimized, typically in the least-squares sense. For many data fitting
+processes, the latter approach is used, and the objective function should
return an array of (data-model), perhaps scaled by some weighting factor
such as the inverse of the uncertainty in the data. For such a problem,
the chi-square (:math:`\chi^2`) statistic is often defined as:
@@ -29,30 +31,32 @@ model}({\bf{v}})` is the model calculation, :math:`{\bf{v}}` is the set of
variables in the model to be optimized in the fit, and :math:`\epsilon_i`
is the estimated uncertainty in the data.
-In a traditional non-linear fit, one writes an objective function that takes the
-variable values and calculates the residual :math:`y^{\rm meas}_i -
-y_i^{\rm model}({\bf{v}})`, or the residual scaled by the data
-uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm
-model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor. As a
-simple example, one might write an objective function like this::
+In a traditional non-linear fit, one writes an objective function that
+takes the variable values and calculates the residual array :math:`y^{\rm
+meas}_i - y_i^{\rm model}({\bf{v}})`, or the residual array scaled by the
+data uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm
+model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor.
+
+As a simple concrete example, one might want to model data with a decaying
+sine wave, and so write an objective function like this::
def residual(vars, x, data, eps_data):
amp = vars[0]
phaseshift = vars[1]
- freq = vars[2]
+ freq = vars[2]
decay = vars[3]
- model = amp * sin(x * freq + phaseshift) * exp(-x*x*decay)
+ model = amp * sin(x * freq + phaseshift) * exp(-x*x*decay)
return (data-model)/eps_data
-To perform the minimization with :mod:`scipy.optimize`, one would do::
+To perform the minimization with :mod:`scipy.optimize`, one would do this::
from scipy.optimize import leastsq
vars = [10.0, 0.2, 3.0, 0.007]
out = leastsq(residual, vars, args=(x, data, eps_data))
-Though it is wonderful to be able to use python for such optimization
+Though it is wonderful to be able to use Python for such optimization
problems, and the scipy library is robust and easy to use, the approach
here is not terribly different from how one would do the same fit in C or
Fortran. There are several practical challenges to using this approach,
@@ -71,38 +75,40 @@ including:
c) There is no simple, robust way to put bounds on values for the
variables, or enforce mathematical relationships between the
- variables. In fact, those optimization methods that do provide
+ variables. In fact, the optimization methods that do provide
bounds, require bounds to be set for all variables with separate
arrays that are in the same arbitrary order as variable values.
Again, this is acceptable for small or one-off cases, but becomes
painful if the fitting model needs to change.
-These shortcomings are really do solely to the use of traditional arrays of
-variables, as matches closely the implementation of the Fortran code. The
-lmfit module overcomes these shortcomings by using objects -- a core reason for working with
-Python. The key concept for lmfit is to use :class:`Parameter`
-objects instead of plain floating point numbers as the variables for the
-fit. By using :class:`Parameter` objects (or the closely related
-:class:`Parameters` -- a dictionary of :class:`Parameter` objects), one can
+These shortcomings are due to the use of traditional arrays to hold the
+variables, which matches closely the implementation of the underlying
+Fortran code, but does not fit very well with Python's rich selection of
+objects and data structures. The key concept in lmfit is to define and use
+:class:`Parameter` objects instead of plain floating point numbers as the
+variables for the fit. Using :class:`Parameter` objects (or the closely
+related :class:`Parameters` -- a dictionary of :class:`Parameter` objects),
+allows one to:
a) forget about the order of variables and refer to Parameters
by meaningful names.
- b) place bounds on Parameters as attributes, without worrying about order.
+ b) place bounds on Parameters as attributes, without worrying about
+ preserving the order of arrays for variables and boundaries.
c) fix Parameters, without having to rewrite the objective function.
d) place algebraic constraints on Parameters.
To illustrate the value of this approach, we can rewrite the above example
-as::
+for the decaying sine wave as::
from lmfit import minimize, Parameters
def residual(params, x, data, eps_data):
amp = params['amp']
pshift = params['phase']
- freq = params['frequency']
+ freq = params['frequency']
decay = params['decay']
- model = amp * sin(x * freq + pshift) * exp(-x*x*decay)
+ model = amp * sin(x * freq + pshift) * exp(-x*x*decay)
return (data-model)/eps_data
@@ -128,23 +134,27 @@ be fixed or bounded. This can be done during definition::
params.add('frequency', value=3.0, max=10)
where ``vary=False`` will prevent the value from changing in the fit, and
-``min=0.0`` will set a lower bound on that parameters value). It can also be done
+``min=0.0`` will set a lower bound on that parameter's value. It can also be done
later by setting the corresponding attributes after they have been
created::
params['amp'].vary = False
params['decay'].min = 0.10
-Importantly, our objective function remains unchanged.
+Importantly, our objective function remains unchanged. This means the
+objective function can simply express the parameterized phenomenon to be
+modeled, and is separate from the choice of parameters to be varied in the
+fit.
+
The `params` object can be copied and modified to make many user-level
changes to the model and fitting process. Of course, most of the
information about how your data is modeled goes into the objective
-function, but the approach here allows some external control; that is, control by
-the **user** performing the fit, instead of by the author of the
+function, but the approach here allows some external control; that is,
+control by the **user** performing the fit, instead of by the author of the
objective function.
Finally, in addition to the :class:`Parameters` approach to fitting data,
lmfit allows switching optimization methods without changing
-the objective function, provides tools for writing fitting reports, and
-provides better determination of Parameters confidence levels.
+the objective function, provides tools for generating fitting reports, and
+provides a better determination of Parameters confidence levels.
diff --git a/doc/model.rst b/doc/model.rst
index 08a3876..6696986 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -8,99 +8,94 @@ Modeling Data and Curve Fitting
A common use of least-squares minimization is *curve fitting*, where one
has a parametrized model function meant to explain some phenomena and wants
-to adjust the numerical values for the model to most closely match some
-data. With :mod:`scipy`, such problems are commonly solved with
-:scipydoc:`scipy.optimize.curve_fit`, which is a wrapper around
-:scipydoc:`scipy.optimize.leastsq`. Since Lmfit's :func:`minimize` is also
-a high-level wrapper around :scipydoc:`scipy.optimize.leastsq` it can be used
-for curve-fitting problems, but requires more effort than using
-:scipydoc:`scipy.optimize.curve_fit`.
-
-
-Here we discuss lmfit's :class:`Model` class. This takes a model function
--- a function that calculates a model for some data -- and provides methods
-to create parameters for that model and to fit data using that model
-function. This is closer in spirit to :scipydoc:`scipy.optimize.curve_fit`,
-but with the advantages of using :class:`Parameters` and lmfit.
-
-In addition to allowing you turn any model function into a curve-fitting
-method, Lmfit also provides canonical definitions for many known line shapes
+to adjust the numerical values for the model so that it most closely
+matches some data. With :mod:`scipy`, such problems are typically solved
+with :scipydoc:`optimize.curve_fit`, which is a wrapper around
+:scipydoc:`optimize.leastsq`. Since lmfit's
+:func:`~lmfit.minimizer.minimize` is also a high-level wrapper around
+:scipydoc:`optimize.leastsq` it can be used for curve-fitting problems.
+While it offers many benefits over :scipydoc:`optimize.leastsq`, using
+:func:`~lmfit.minimizer.minimize` for many curve-fitting problems still
+requires more effort than using :scipydoc:`optimize.curve_fit`.
+
+The :class:`Model` class in lmfit provides a simple and flexible approach
+to curve-fitting problems. Like :scipydoc:`optimize.curve_fit`, a
+:class:`Model` uses a *model function* -- a function that is meant to
+calculate a model for some phenomenon -- and then uses that to best match
+an array of supplied data. Beyond that similarity, its interface is rather
+different from :scipydoc:`optimize.curve_fit`, for example in that it uses
+:class:`~lmfit.parameter.Parameters`, but also offers several other
+important advantages.
+
+In addition to allowing you to turn any model function into a curve-fitting
+method, lmfit also provides canonical definitions for many known line shapes
such as Gaussian or Lorentzian peaks and Exponential decays that are widely
used in many scientific domains. These are available in the :mod:`models`
module that will be discussed in more detail in the next chapter
(:ref:`builtin_models_chapter`). We mention it here as you may want to
consult that list before writing your own model. For now, we focus on
-turning python function into high-level fitting models with the
+turning Python functions into high-level fitting models with the
:class:`Model` class, and using these to fit data.
-Example: Fit data to Gaussian profile
-================================================
+Motivation and simple example: Fit data to Gaussian profile
+=============================================================
Let's start with a simple and common example of fitting data to a Gaussian
peak. As we will see, there is a buit-in :class:`GaussianModel` class that
-provides a model function for a Gaussian profile, but here we'll build our
-own. We start with a simple definition of the model function:
+can help do this, but here we'll build our own. We start with a simple
+definition of the model function:
- >>> from numpy import sqrt, pi, exp, linspace
+ >>> from numpy import sqrt, pi, exp, linspace, random
>>>
>>> def gaussian(x, amp, cen, wid):
... return amp * exp(-(x-cen)**2 /wid)
- ...
-We want to fit this objective function to data :math:`y(x)` represented by the
-arrays ``y`` and ``x``. This can be done easily with :scipydoc:`optimize.curve_fit`::
+We want to use this function to fit to data :math:`y(x)` represented by the
+arrays `y` and `x`. With :scipydoc:`optimize.curve_fit`, this would be::
>>> from scipy.optimize import curve_fit
>>>
- >>> x = linspace(-10,10)
- >>> y = y = gaussian(x, 2.33, 0.21, 1.51) + np.random.normal(0, 0.2, len(x))
+ >>> x = linspace(-10,10, 101)
+ >>> y = gaussian(x, 2.33, 0.21, 1.51) + random.normal(0, 0.2, len(x))
>>>
>>> init_vals = [1, 0, 1] # for [amp, cen, wid]
>>> best_vals, covar = curve_fit(gaussian, x, y, p0=init_vals)
>>> print best_vals
-We sample random data point, make an initial guess of the model
-values, and run :scipydoc:`optimize.curve_fit` with the model function,
-data arrays, and initial guesses. The results returned are the optimal
-values for the parameters and the covariance matrix. It's simple and very
-useful. But it misses the benefits of lmfit.
-
-
-To solve this with lmfit we would have to write an objective function. But
-such a function would be fairly simple (essentially, ``data - model``,
-possibly with some weighting), and we would need to define and use
-appropriately named parameters. Though convenient, it is somewhat of a
-burden to keep the named parameter straight (on the other hand, with
-:scipydoc:`optimize.curve_fit` you are required to remember the parameter
-order). After doing this a few times it appears as a recurring pattern,
-and we can imagine automating this process. That's where the
-:class:`Model` class comes in.
+That is, we create data, make an initial guess of the model values, and run
+:scipydoc:`optimize.curve_fit` with the model function, data arrays, and
+initial guesses. The results returned are the optimal values for the
+parameters and the covariance matrix. It's simple and useful, but it
+misses the benefits of lmfit.
-:class:`Model` allows us to easily wrap a model function such as the
-``gaussian`` function. This automatically generate the appropriate
-residual function, and determines the corresponding parameter names from
-the function signature itself::
+With lmfit, we create a :class:`Model` that wraps the `gaussian` model
+function, which automatically generates the appropriate residual function,
+and determines the corresponding parameter names from the function
+signature itself::
>>> from lmfit import Model
- >>> gmod = Model(gaussian)
- >>> gmod.param_names
+ >>> gmodel = Model(gaussian)
+ >>> gmodel.param_names
set(['amp', 'wid', 'cen'])
- >>> gmod.independent_vars)
+ >>> gmodel.independent_vars
['x']
-The Model ``gmod`` knows the names of the parameters and the independent
-variables. By default, the first argument of the function is taken as the
-independent variable, held in :attr:`independent_vars`, and the rest of the
-functions positional arguments (and, in certain cases, keyword arguments --
-see below) are used for Parameter names. Thus, for the ``gaussian``
-function above, the parameters are named ``amp``, ``cen``, and ``wid``, and
-``x`` is the independent variable -- all taken directly from the signature
-of the model function. As we will see below, you can specify what the
-independent variable is, and you can add or alter parameters, too.
-
-The parameters are *not* created when the model is created. The model knows
+As you can see, the Model `gmodel` determined the names of the parameters
+and the independent variables. By default, the first argument of the
+function is taken as the independent variable, held in
+:attr:`independent_vars`, and the rest of the function's positional
+arguments (and, in certain cases, keyword arguments -- see below) are used
+for Parameter names. Thus, for the `gaussian` function above, the
+independent variable is `x`, and the parameters are named `amp`,
+`cen`, and `wid` -- all taken directly from the signature of the
+model function. As we will see below, you can modify the default
+assignment of independent variable / arguments and specify yourself what
+the independent variable is and which function arguments should be identified
+as parameter names.
+
+The Parameters are *not* created when the model is created. The model knows
what the parameters should be named, but not anything about the scale and
range of your data. You will normally have to make these parameters and
assign initial values and other attributes. To help you do this, each
@@ -109,16 +104,15 @@ the expected names:
>>> params = gmod.make_params()
-This creates the :class:`Parameters` but doesn't necessarily give them
-initial values -- again, the model has no idea what the scale should be.
-You can set initial values for parameters with keyword arguments to
-:meth:`make_params`:
-
+This creates the :class:`~lmfit.parameter.Parameters` but does not
+automatically give them initial values since it has no idea what the scale
+should be. You can set initial values for parameters with keyword
+arguments to :meth:`make_params`:
>>> params = gmod.make_params(cen=5, amp=200, wid=1)
or assign them (and other parameter properties) after the
-:class:`Parameters` has been created.
+:class:`~lmfit.parameter.Parameters` class has been created.
A :class:`Model` has several methods associated with it. For example, one
can use the :meth:`eval` method to evaluate the model or the :meth:`fit`
@@ -128,44 +122,54 @@ For example, one could use :meth:`eval` to calculate the predicted
function::
>>> x = linspace(0, 10, 201)
- >>> y = gmod.eval(x=x, amp=10, cen=6.2, wid=0.75)
+ >>> y = gmod.eval(params, x=x)
+
+or with::
+
+ >>> y = gmod.eval(x=x, cen=6.5, amp=100, wid=2.0)
Admittedly, this a slightly long-winded way to calculate a Gaussian
-function. But now that the model is set up, we can also use its
+function, given that you could have called your `gaussian` function
+directly. But now that the model is set up, we can use its
:meth:`fit` method to fit this model to data, as with::
- >>> result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+ >>> result = gmod.fit(y, params)
+
+or with::
-Putting everything together, the script to do such a fit (included in the
+ >>> result = gmod.fit(y, cen=6.5, amp=100, wid=2.0)
+
+Putting everything together, the script (included in the
``examples`` folder with the source code) is:
.. literalinclude:: ../examples/doc_model1.py
-which is pretty compact and to the point. The returned ``result`` will be
+which is pretty compact and to the point. The returned `result` will be
a :class:`ModelResult` object. As we will see below, this has many
components, including a :meth:`fit_report` method, which will show::
[[Model]]
- gaussian
+ Model(gaussian)
[[Fit Statistics]]
- # function evals = 33
- # data points = 101
- # variables = 3
- chi-square = 3.409
- reduced chi-square = 0.035
- Akaike info crit = -336.264
- Bayesian info crit = -328.418
+ # function evals = 31
+ # data points = 101
+ # variables = 3
+ chi-square = 3.409
+ reduced chi-square = 0.035
+ Akaike info crit = -336.264
+ Bayesian info crit = -328.418
[[Variables]]
- amp: 8.88021829 +/- 0.113594 (1.28%) (init= 5)
- cen: 5.65866102 +/- 0.010304 (0.18%) (init= 5)
- wid: 0.69765468 +/- 0.010304 (1.48%) (init= 1)
+ amp: 5.07800631 +/- 0.064957 (1.28%) (init= 5)
+ cen: 5.65866112 +/- 0.010304 (0.18%) (init= 5)
+ wid: 0.97344373 +/- 0.028756 (2.95%) (init= 1)
[[Correlations]] (unreported correlations are < 0.100)
- C(amp, wid) = 0.577
+ C(amp, wid) = -0.577
-The result will also have :attr:`init_fit` for the fit with the initial
-parameter values and a :attr:`best_fit` for the fit with the best fit
-parameter values. These can be used to generate the following plot:
+As the script shows, the result will also have :attr:`init_fit` for the fit
+with the initial parameter values and a :attr:`best_fit` for the fit with
+the best fit parameter values. These can be used to generate the following
+plot:
.. image:: _images/model_fit1.png
:target: _images/model_fit1.png
@@ -174,21 +178,18 @@ parameter values. These can be used to generate the following plot:
which shows the data in blue dots, the best fit as a solid red line, and
the initial fit as a dashed black line.
-Note that the model fitting was really performed with 2 lines of code::
+Note that the model fitting was really performed with::
- gmod = Model(gaussian)
- result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+ gmodel = Model(gaussian)
+ result = gmodel.fit(y, params, x=x, amp=5, cen=5, wid=1)
-These lines clearly express that we want to turn the ``gaussian`` function
+These lines clearly express that we want to turn the `gaussian` function
into a fitting model, and then fit the :math:`y(x)` data to this model,
-starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``.
-This is much more expressive than :scipydoc:`optimize.curve_fit`::
-
- best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1])
-
-In addition, all the other features of lmfit are included:
-:class:`Parameters` can have bounds and constraints and the result is a
-rich object that can be reused to explore the model fit in detail.
+starting with values of 5 for `amp`, 5 for `cen` and 1 for `wid`. In
+addition, all the other features of lmfit are included:
+:class:`~lmfit.parameter.Parameters` can have bounds and constraints and
+the result is a rich object that can be reused to explore the model fit in
+detail.
The :class:`Model` class
@@ -197,137 +198,27 @@ The :class:`Model` class
The :class:`Model` class provides a general way to wrap a pre-defined
function as a fitting model.
-.. class:: Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix=''[, name=None[, **kws]]]]]])
-
- Create a model based on the user-supplied function. This uses
- introspection to automatically converting argument names of the
- function to Parameter names.
-
- :param func: model function to be wrapped
- :type func: callable
- :param independent_vars: list of argument names to ``func`` that are independent variables.
- :type independent_vars: ``None`` (default) or list of strings.
- :param param_names: list of argument names to ``func`` that should be made into Parameters.
- :type param_names: ``None`` (default) or list of strings
- :param missing: how to handle missing values.
- :type missing: one of ``None`` (default), 'none', 'drop', or 'raise'.
- :param prefix: prefix to add to all parameter names to distinguish components in a :class:`CompositeModel`.
- :type prefix: string
- :param name: name for the model. When ``None`` (default) the name is the same as the model function (``func``).
- :type name: ``None`` or string.
- :param kws: additional keyword arguments to pass to model function.
-
-
-Of course, the model function will have to return an array that will be the
-same size as the data being modeled. Generally this is handled by also
-specifying one or more independent variables.
+.. autoclass:: Model
:class:`Model` class Methods
---------------------------------
-.. method:: Model.eval(params=None[, **kws])
-
- evaluate the model function for a set of parameters and inputs.
-
- :param params: parameters to use for fit.
- :type params: ``None`` (default) or Parameters
- :param kws: additional keyword arguments to pass to model function.
- :return: ndarray for model given the parameters and other arguments.
-
- If ``params`` is ``None``, the values for all parameters are expected to
- be provided as keyword arguments. If ``params`` is given, and a keyword
- argument for a parameter value is also given, the keyword argument will
- be used.
-
- Note that all non-parameter arguments for the model function --
- **including all the independent variables!** -- will need to be passed
- in using keyword arguments.
-
-
-.. method:: Model.fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]])
-
- perform a fit of the model to the ``data`` array with a set of
- parameters.
-
- :param data: array of data to be fitted.
- :type data: ndarray-like
- :param params: parameters to use for fit.
- :type params: ``None`` (default) or Parameters
- :param weights: weights to use for residual calculation in fit.
- :type weights: ``None`` (default) or ndarray-like.
- :param method: name of fitting method to use. See :ref:`fit-methods-label` for details
- :type method: string (default ``leastsq``)
- :param scale_covar: whether to automatically scale covariance matrix (``leastsq`` only)
- :type scale_covar: bool (default ``True``)
- :param iter_cb: function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
- :type iter_cb: callable or ``None``
- :param verbose: print a message when a new parameter is created due to a *hint*
- :type verbose: bool (default ``True``)
- :param kws: additional keyword arguments to pass to model function.
- :return: :class:`ModelResult` object.
-
- If ``params`` is ``None``, the internal ``params`` will be used. If it
- is supplied, these will replace the internal ones. If supplied,
- ``weights`` will be used to weight the calculated residual so that the
- quantity minimized in the least-squares sense is ``weights*(data -
- fit)``. ``weights`` must be an ndarray-like object of same size and
- shape as ``data``.
-
- Note that other arguments for the model function (including all the
- independent variables!) will need to be passed in using keyword
- arguments.
-
-
-.. method:: Model.guess(data, **kws)
-
- Guess starting values for model parameters.
-
- :param data: data array used to guess parameter values
- :type func: ndarray
- :param kws: additional options to pass to model function.
- :return: :class:`Parameters` with guessed initial values for each parameter.
-
- by default this is left to raise a ``NotImplementedError``, but may be
- overwritten by subclasses. Generally, this method should take some
- values for ``data`` and use it to construct reasonable starting values for
- the parameters.
-
-
-.. method:: Model.make_params(**kws)
-
- Create a set of parameters for model.
-
- :param kws: optional keyword/value pairs to set initial values for parameters.
- :return: :class:`Parameters`.
+.. automethod:: Model.eval
- The parameters may or may not have decent initial values for each
- parameter.
+.. automethod:: Model.fit
+.. automethod:: Model.guess
-.. method:: Model.set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]])
+.. automethod:: Model.make_params
- set *hints* to use when creating parameters with :meth:`Model.make_param` for
- the named parameter. This is especially convenient for setting initial
- values. The ``name`` can include the models ``prefix`` or not.
- :param name: parameter name.
- :type name: string
- :param value: value for parameter
- :type value: float
- :param min: lower bound for parameter value
- :type min: ``None`` or float
- :param max: upper bound for parameter value
- :type max: ``None`` or float
- :param vary: whether to vary parameter in fit.
- :type vary: boolean
- :param expr: mathematical expression for constraint
- :type expr: string
+.. automethod:: Model.set_param_hint
See :ref:`model_param_hints_section`.
-.. automethod:: lmfit.model.Model.print_param_hints
+.. automethod:: Model.print_param_hints
:class:`Model` class Attributes
@@ -339,27 +230,25 @@ specifying one or more independent variables.
.. attribute:: independent_vars
- list of strings for names of the independent variables.
+ List of strings for names of the independent variables.
.. attribute:: missing
- describes what to do for missing values. The choices are
+ Describes what to do for missing values. The choices are:
- * ``None``: Do not check for null or missing values (default)
- * ``'none'``: Do not check for null or missing values.
- * ``'drop'``: Drop null or missing observations in data. If pandas is
- installed, ``pandas.isnull`` is used, otherwise :attr:`numpy.isnan` is used.
- * ``'raise'``: Raise a (more helpful) exception when data contains null
- or missing values.
+ * None: Do not check for null or missing values (default).
+ * 'none': Do not check for null or missing values.
+ * 'drop': Drop null or missing observations in data. If pandas is installed, :func:`pandas.isnull` is used, otherwise :func:`numpy.isnan` is used.
+ * 'raise': Raise a (more helpful) exception when data contains null or missing values.
.. attribute:: name
- name of the model, used only in the string representation of the
+ Name of the model, used only in the string representation of the
model. By default this will be taken from the model function.
.. attribute:: opts
- extra keyword arguments to pass to model function. Normally this will
+ Extra keyword arguments to pass to model function. Normally this will
be determined internally and should not be changed.
.. attribute:: param_hints
@@ -368,46 +257,47 @@ specifying one or more independent variables.
.. attribute:: param_names
- list of strings of parameter names.
+ List of strings of parameter names.
.. attribute:: prefix
- prefix used for name-mangling of parameter names. The default is ''.
- If a particular :class:`Model` has arguments ``amplitude``,
- ``center``, and ``sigma``, these would become the parameter names.
- Using a prefix of ``g1_`` would convert these parameter names to
- ``g1_amplitude``, ``g1_center``, and ``g1_sigma``. This can be
+ Prefix used for name-mangling of parameter names. The default is ''.
+ If a particular :class:`Model` has arguments `amplitude`,
+ `center`, and `sigma`, these would become the parameter names.
+ Using a prefix of `'g1_'` would convert these parameter names to
+ `g1_amplitude`, `g1_center`, and `g1_sigma`. This can be
essential to avoid name collision in composite models.
Determining parameter names and independent variables for a function
-----------------------------------------------------------------------
-The :class:`Model` created from the supplied function ``func`` will create
-a :class:`Parameters` object, and names are inferred from the function
+The :class:`Model` created from the supplied function `func` will create
+a :class:`~lmfit.parameter.Parameters` object, and names are inferred from the function
arguments, and a residual function is automatically constructed.
By default, the independent variable is taken as the first argument to the
-function. You can explicitly set this, of course, and will need to if the
-independent variable is not first in the list, or if there are actually more
-than one independent variables.
+function. You can, of course, explicitly set this, and will need to do so
+if the independent variable is not first in the list, or if there is
+actually more than one independent variable.
If not specified, Parameters are constructed from all positional arguments
and all keyword arguments that have a default value that is numerical, except
the independent variable, of course. Importantly, the Parameters can be
-modified after creation. In fact, you'll have to do this because none of the
-parameters have valid initial values. You can place bounds and constraints
-on Parameters, or fix their values.
-
+modified after creation. In fact, you will have to do this because none of the
+parameters have valid initial values. In addition, one can place bounds and
+constraints on Parameters, or fix their values.
Explicitly specifying ``independent_vars``
-------------------------------------------------
As we saw for the Gaussian example above, creating a :class:`Model` from a
-function is fairly easy. Let's try another::
+function is fairly easy. Let's try another one::
+ >>> from lmfit import Model
+ >>> import numpy as np
>>> def decay(t, tau, N):
... return N*np.exp(-t/tau)
...
@@ -420,11 +310,11 @@ function is fairly easy. Let's try another::
tau <Parameter 'tau', None, bounds=[None:None]>
N <Parameter 'N', None, bounds=[None:None]>
-Here, ``t`` is assumed to be the independent variable because it is the
+Here, `t` is assumed to be the independent variable because it is the
first argument to the function. The other function arguments are used to
create parameters for the model.
-If you want ``tau`` to be the independent variable in the above example,
+If you want `tau` to be the independent variable in the above example,
you can say so::
>>> decay_model = Model(decay, independent_vars=['tau'])
@@ -443,7 +333,7 @@ variable* here is simple, and based on how it treats arguments of the
function you are modeling:
independent variable
- a function argument that is not a parameter or otherwise part of the
+ A function argument that is not a parameter or otherwise part of the
model, and that will be required to be explicitly provided as a
keyword argument for each fit with :meth:`Model.fit` or evaluation
with :meth:`Model.eval`.
@@ -457,7 +347,7 @@ Functions with keyword arguments
If the model function had keyword parameters, these would be turned into
Parameters if the supplied default value was a valid number (but not
-``None``, ``True``, or ``False``).
+None, True, or False).
>>> def decay2(t, tau, N=10, check_positive=False):
 ... if check_positive:
@@ -473,17 +363,17 @@ Parameters if the supplied default value was a valid number (but not
t <Parameter 't', None, bounds=[None:None]>
N <Parameter 'N', 10, bounds=[None:None]>
-Here, even though ``N`` is a keyword argument to the function, it is turned
+Here, even though `N` is a keyword argument to the function, it is turned
into a parameter, with the default numerical value as its initial value.
By default, it is permitted to be varied in the fit -- the 10 is taken as
an initial value, not a fixed value. On the other hand, the
-``check_positive`` keyword argument, was not converted to a parameter
+`check_positive` keyword argument, was not converted to a parameter
because it has a boolean default value. In some sense,
-``check_positive`` becomes like an independent variable to the model.
+`check_positive` becomes like an independent variable to the model.
However, because it has a default value it is not required to be given for
each model evaluation or fit, as independent variables are.
-Defining a ``prefix`` for the Parameters
+Defining a `prefix` for the Parameters
--------------------------------------------
As we will see in the next chapter when combining models, it is sometimes
@@ -491,7 +381,7 @@ necessary to decorate the parameter names in the model, but still have them
be correctly used in the underlying model function. This would be
necessary, for example, if two parameters in a composite model (see
:ref:`composite_models_section` or examples in the next chapter) would have
-the same name. To avoid this, we can add a ``prefix`` to the
+the same name. To avoid this, we can add a `prefix` to the
:class:`Model` which will automatically do this mapping for us.
>>> def myfunc(x, amplitude=1, center=0, sigma=1):
@@ -505,15 +395,15 @@ the same name. To avoid this, we can add a ``prefix`` to the
f1_center <Parameter 'f1_center', None, bounds=[None:None]>
f1_sigma <Parameter 'f1_sigma', None, bounds=[None:None]>
-You would refer to these parameters as ``f1_amplitude`` and so forth, and
-the model will know to map these to the ``amplitude`` argument of ``myfunc``.
+You would refer to these parameters as `f1_amplitude` and so forth, and
+the model will know to map these to the `amplitude` argument of `myfunc`.
Initializing model parameters
------------------------------------------
+--------------------------------
As mentioned above, the parameters created by :meth:`Model.make_params` are
-generally created with invalid initial values of ``None``. These values
+generally created with invalid initial values of None. These values
**must** be initialized in order for the model to be evaluated or used in a
fit. There are four different ways to do this initialization that can be
used in any combination:
@@ -597,11 +487,12 @@ can set parameter hints but then change the initial value explicitly with
Using parameter hints
--------------------------------
-
After a model has been created, you can give it hints for how to create
parameters with :meth:`Model.make_params`. This allows you to set not only a
default initial value but also to set other parameter attributes
controlling bounds, whether it is varied in the fit, or a constraint
+
+
expression. To set a parameter hint, you can use :meth:`Model.set_param_hint`,
as with::
@@ -629,17 +520,17 @@ at half maximum of a Gaussian model, one could use a parameter hint of::
>>> mod.set_param_hint('fwhm', expr='2.3548*sigma')
-
The :class:`ModelResult` class
=======================================
A :class:`ModelResult` (which had been called `ModelFit` prior to version
0.9) is the object returned by :meth:`Model.fit`. It is a subclass of
-:class:`Minimizer`, and so contains many of the fit results. Of course, it
-knows the :class:`Model` and the set of :class:`Parameters` used in the
-fit, and it has methods to evaluate the model, to fit the data (or re-fit
-the data with changes to the parameters, or fit with different or modified
-data) and to print out a report for that fit.
+:class:`~lmfit.minimizer.Minimizer`, and so contains many of the fit results.
+Of course, it knows the :class:`Model` and the set of
+:class:`~lmfit.parameter.Parameters` used in the fit, and it has methods to
+evaluate the model, to fit the data (or re-fit the data with changes to
+the parameters, or fit with different or modified data) and to print out a
+report for that fit.
While a :class:`Model` encapsulates your model function, it is fairly
abstract and does not contain the parameters or data used in a particular
@@ -650,181 +541,38 @@ more useful) object that represents a fit with a set of parameters to data
with a model.
-A :class:`ModelResult` has several attributes holding values for fit results,
-and several methods for working with fits. These include statistics
-inherited from :class:`Minimizer` useful for comparing different models,
-including `chisqr`, `redchi`, `aic`, and `bic`.
-
-.. class:: ModelResult()
-
- Model fit is intended to be created and returned by :meth:`Model.fit`.
+A :class:`ModelResult` has several attributes holding values for fit
+results, and several methods for working with fits. These include
+statistics inherited from :class:`~lmfit.minimizer.Minimizer` useful for
+comparing different models, including `chisqr`, `redchi`, `aic`, and `bic`.
+.. autoclass:: ModelResult
:class:`ModelResult` methods
---------------------------------
-These methods are all inherited from :class:`Minimize` or from
-:class:`Model`.
+.. automethod:: ModelResult.eval
+
+
+.. automethod:: ModelResult.eval_components
+
+.. automethod:: ModelResult.fit
+
+
+.. automethod:: ModelResult.fit_report
+
+.. automethod:: ModelResult.conf_interval
-.. method:: ModelResult.eval(params=None, **kwargs)
+.. automethod:: ModelResult.ci_report
- evaluate the model using parameters supplied (or the best-fit parameters
- if not specified) and supplied independent variables. The ``**kwargs``
- arguments can be used to update parameter values and/or independent
- variables.
-
-
-.. method:: ModelResult.eval_components(**kwargs)
-
- evaluate each component of a :class:`CompositeModel`, returning an
- ordered dictionary of with the values for each component model. The
- returned dictionary will have keys of the model prefix or (if no prefix
- is given), the model name. The ``**kwargs`` arguments can be used to
- update parameter values and/or independent variables.
-
-.. method:: ModelResult.fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])
-
- fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
- or ``method``, or changing the independent variable(s) with the
- ``**kwargs`` argument. See :meth:`Model.fit` for argument
- descriptions, and note that any value of ``None`` defaults to the last
- used value.
-
-.. method:: ModelResult.fit_report(modelpars=None[, show_correl=True[,`< min_correl=0.1]])
-
- return a printable fit report for the fit with fit statistics, best-fit
- values with uncertainties and correlations. As with :func:`fit_report`.
-
- :param modelpars: Parameters with "Known Values" (optional, default None)
- :param show_correl: whether to show list of sorted correlations [``True``]
- :param min_correl: smallest correlation absolute value to show [0.1]
-
-
-.. method:: ModelResult.conf_interval(**kwargs)
-
- calculate the confidence intervals for the variable parameters using
- :func:`confidence.conf_interval() <lmfit.conf_interval>`. All keyword
- arguments are passed to that function. The result is stored in
- :attr:`ci_out`, and so can be accessed without recalculating them.
-
-.. method:: ModelResult.ci_report(with_offset=True)
-
- return a nicely formatted text report of the confidence intervals, as
- from :func:`ci_report() <lmfit.ci_report>`.
-
-
-.. method:: ModelResult.plot(datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None, fig=None, data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None, fig_kws=None)
-
- Plot the fit results and residuals using matplotlib, if available. The
- plot will include two panels, one showing the fit residual, and the
- other with the data points, the initial fit curve, and the best-fit
- curve. If the fit model included weights or if ``yerr`` is specified,
- errorbars will also be plotted.
-
- :param datafmt: matplotlib format string for data curve.
- :type datafmt: ``None`` or string.
- :param fitfmt: matplotlib format string for best-fit curve.
- :type fitfmt: ``None`` or string.
- :param initfmt: matplotlib format string for initial curve.
- :type intfmt: ``None`` or string.
- :param yerr: array of uncertainties for data array.
- :type yerr: ``None`` or ndarray.
- :param numpoints: number of points to display
- :type numpoints: ``None`` or integer
- :param fig: matplotlib Figure to plot on.
- :type fig: ``None`` or matplotlib.figure.Figure
- :param data_kws: keyword arguments passed to plot for data curve.
- :type data_kws: ``None`` or dictionary
- :param fit_kws: keyword arguments passed to plot for best-fit curve.
- :type fit_kws: ``None`` or dictionary
- :param init_kws: keyword arguments passed to plot for initial curve.
- :type init_kws: ``None`` or dictionary
- :param ax_res_kws: keyword arguments passed to creation of matplotlib axes for the residual plot.
- :type ax_res_kws: ``None`` or dictionary
- :param ax_fit_kws: keyword arguments passed to creation of matplotlib axes for the fit plot.
- :type ax_fit_kws: ``None`` or dictionary
- :param fig_kws: keyword arguments passed to creation of matplotlib figure.
- :type fig_kws: ``None`` or dictionary
- :returns: matplotlib.figure.Figure
-
- This combines :meth:`ModelResult.plot_fit` and :meth:`ModelResult.plot_residual`.
-
- If ``yerr`` is specified or if the fit model included weights, then
- matplotlib.axes.Axes.errorbar is used to plot the data. If ``yerr`` is
- not specified and the fit includes weights, ``yerr`` set to ``1/self.weights``
-
- If ``fig`` is None then ``matplotlib.pyplot.figure(**fig_kws)`` is called.
-
-.. method:: ModelResult.plot_fit(ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None, data_kws=None, fit_kws=None, init_kws=None, ax_kws=None)
-
- Plot the fit results using matplotlib, if available. The plot will include
- the data points, the initial fit curve, and the best-fit curve. If the fit
- model included weights or if ``yerr`` is specified, errorbars will also
- be plotted.
-
- :param ax: matplotlib axes to plot on.
- :type ax: ``None`` or matplotlib.axes.Axes.
- :param datafmt: matplotlib format string for data curve.
- :type datafmt: ``None`` or string.
- :param fitfmt: matplotlib format string for best-fit curve.
- :type fitfmt: ``None`` or string.
- :param initfmt: matplotlib format string for initial curve.
- :type intfmt: ``None`` or string.
- :param yerr: array of uncertainties for data array.
- :type yerr: ``None`` or ndarray.
- :param numpoints: number of points to display
- :type numpoints: ``None`` or integer
- :param data_kws: keyword arguments passed to plot for data curve.
- :type data_kws: ``None`` or dictionary
- :param fit_kws: keyword arguments passed to plot for best-fit curve.
- :type fit_kws: ``None`` or dictionary
- :param init_kws: keyword arguments passed to plot for initial curve.
- :type init_kws: ``None`` or dictionary
- :param ax_kws: keyword arguments passed to creation of matplotlib axes.
- :type ax_kws: ``None`` or dictionary
- :returns: matplotlib.axes.Axes
-
- For details about plot format strings and keyword arguments see
- documentation of :func:`matplotlib.axes.Axes.plot`.
-
- If ``yerr`` is specified or if the fit model included weights, then
- matplotlib.axes.Axes.errorbar is used to plot the data. If ``yerr`` is
- not specified and the fit includes weights, ``yerr`` set to ``1/self.weights``
-
- If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
-
-.. method:: ModelResult.plot_residuals(ax=None, datafmt='o', yerr=None, data_kws=None, fit_kws=None, ax_kws=None)
-
- Plot the fit residuals (data - fit) using matplotlib. If ``yerr`` is
- supplied or if the model included weights, errorbars will also be plotted.
-
- :param ax: matplotlib axes to plot on.
- :type ax: ``None`` or matplotlib.axes.Axes.
- :param datafmt: matplotlib format string for data curve.
- :type datafmt: ``None`` or string.
- :param yerr: array of uncertainties for data array.
- :type yerr: ``None`` or ndarray.
- :param numpoints: number of points to display
- :type numpoints: ``None`` or integer
- :param data_kws: keyword arguments passed to plot for data curve.
- :type data_kws: ``None`` or dictionary
- :param fit_kws: keyword arguments passed to plot for best-fit curve.
- :type fit_kws: ``None`` or dictionary
- :param ax_kws: keyword arguments passed to creation of matplotlib axes.
- :type ax_kws: ``None`` or dictionary
- :returns: matplotlib.axes.Axes
-
- For details about plot format strings and keyword arguments see
- documentation of :func:`matplotlib.axes.Axes.plot`.
-
- If ``yerr`` is specified or if the fit model included weights, then
- matplotlib.axes.Axes.errorbar is used to plot the data. If ``yerr`` is
- not specified and the fit includes weights, ``yerr`` set to ``1/self.weights``
-
- If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
+.. automethod:: ModelResult.eval_uncertainty
+.. automethod:: ModelResult.plot
+.. automethod:: ModelResult.plot_fit
+
+.. automethod:: ModelResult.plot_residuals
:class:`ModelResult` attributes
@@ -832,130 +580,158 @@ These methods are all inherited from :class:`Minimize` or from
.. attribute:: aic
- floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`).
+ Floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`).
.. attribute:: best_fit
- ndarray result of model function, evaluated at provided
+ numpy.ndarray result of model function, evaluated at provided
independent variables and with best-fit parameters.
.. attribute:: best_values
- dictionary with parameter names as keys, and best-fit values as values.
+ Dictionary with parameter names as keys, and best-fit values as values.
.. attribute:: bic
- floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`).
+ Floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`).
.. attribute:: chisqr
- floating point best-fit chi-square statistic (see :ref:`fit-results-label`).
+ Floating point best-fit chi-square statistic (see :ref:`fit-results-label`).
.. attribute:: ci_out
- confidence interval data (see :ref:`confidence_chapter`) or `None` if
+ Confidence interval data (see :ref:`confidence_chapter`) or None if
the confidence intervals have not been calculated.
.. attribute:: covar
- ndarray (square) covariance matrix returned from fit.
+ numpy.ndarray (square) covariance matrix returned from fit.
.. attribute:: data
- ndarray of data to compare to model.
+ numpy.ndarray of data to compare to model.
.. attribute:: errorbars
- boolean for whether error bars were estimated by fit.
+ Boolean for whether error bars were estimated by fit.
.. attribute:: ier
- integer returned code from :scipydoc:`optimize.leastsq`.
+ Integer returned code from :scipydoc:`optimize.leastsq`.
.. attribute:: init_fit
- ndarray result of model function, evaluated at provided
+ numpy.ndarray result of model function, evaluated at provided
independent variables and with initial parameters.
.. attribute:: init_params
- initial parameters.
+ Initial parameters.
.. attribute:: init_values
- dictionary with parameter names as keys, and initial values as values.
+ Dictionary with parameter names as keys, and initial values as values.
.. attribute:: iter_cb
- optional callable function, to be called at each fit iteration. This
- must take take arguments of ``params, iter, resid, *args, **kws``, where
- ``params`` will have the current parameter values, ``iter`` the
- iteration, ``resid`` the current residual array, and ``*args`` and
- ``**kws`` as passed to the objective function. See :ref:`fit-itercb-label`.
+ Optional callable function, to be called at each fit iteration. This
+ must take arguments of ``(params, iter, resid, *args, **kws)``, where
+ `params` will have the current parameter values, `iter` the
+ iteration, `resid` the current residual array, and `*args` and
+ `**kws` as passed to the objective function. See :ref:`fit-itercb-label`.
.. attribute:: jacfcn
- optional callable function, to be called to calculate jacobian array.
+ Optional callable function, to be called to calculate Jacobian array.
.. attribute:: lmdif_message
- string message returned from :scipydoc:`optimize.leastsq`.
+ String message returned from :scipydoc:`optimize.leastsq`.
.. attribute:: message
- string message returned from :func:`minimize`.
+ String message returned from :func:`~lmfit.minimizer.minimize`.
.. attribute:: method
- string naming fitting method for :func:`minimize`.
+ String naming fitting method for :func:`~lmfit.minimizer.minimize`.
.. attribute:: model
- instance of :class:`Model` used for model.
+ Instance of :class:`Model` used for model.
.. attribute:: ndata
- integer number of data points.
+ Integer number of data points.
.. attribute:: nfev
- integer number of function evaluations used for fit.
+ Integer number of function evaluations used for fit.
.. attribute:: nfree
- integer number of free parameters in fit.
+ Integer number of free parameters in fit.
.. attribute:: nvarys
- integer number of independent, freely varying variables in fit.
+ Integer number of independent, freely varying variables in fit.
.. attribute:: params
- Parameters used in fit. Will have best-fit values.
+ Parameters used in fit. Will have best-fit values.
.. attribute:: redchi
- floating point reduced chi-square statistic (see :ref:`fit-results-label`).
+ Floating point reduced chi-square statistic (see :ref:`fit-results-label`).
.. attribute:: residual
- ndarray for residual.
+ numpy.ndarray for residual.
.. attribute:: scale_covar
- boolean flag for whether to automatically scale covariance matrix.
+ Boolean flag for whether to automatically scale covariance matrix.
.. attribute:: success
- boolean value of whether fit succeeded.
+ Boolean value of whether fit succeeded.
.. attribute:: weights
- ndarray (or ``None``) of weighting values to be used in fit. If not
- ``None``, it will be used as a multiplicative factor of the residual
+ numpy.ndarray (or None) of weighting values to be used in fit. If not
+ None, it will be used as a multiplicative factor of the residual
array, so that ``weights*(data - fit)`` is minimized in the
least-squares sense.
+
+Calculating uncertainties in the model function
+-------------------------------------------------
+
+We return to the first example above and ask not only for the
+uncertainties in the fitted parameters but for the range of values that
+those uncertainties mean for the model function itself. We can use the
+:meth:`ModelResult.eval_uncertainty` method of the model result object to
+evaluate the uncertainty in the model with a specified level for
+:math:`\sigma`.
+
+That is, adding::
+
+ dely = result.eval_uncertainty(sigma=3)
+ plt.fill_between(x, result.best_fit-dely, result.best_fit+dely, color="#ABABAB")
+
+to the example fit to the Gaussian at the beginning of this chapter will
+give :math:`3\sigma` bands for the best-fit Gaussian, and produce the
+figure below.
+
+.. _figModel4:
+
+ .. image:: _images/model_fit4.png
+ :target: _images/model_fit4.png
+ :width: 50%
+
+
+
.. index:: Composite models
.. _composite_models_section:
@@ -976,11 +752,11 @@ to model a peak with a background. For such a simple problem, we could just
build a model that included both components::
def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
- "line + 1-d gaussian"
+ "line + 1-d gaussian"
- gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
- line = slope * x + intercept
- return gauss + line
+ gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+ line = slope * x + intercept
+ return gauss + line
and use that with::
@@ -988,12 +764,15 @@ and use that with::
But we already had a function for a gaussian function, and maybe we'll
discover that a linear background isn't sufficient which would mean the
-model function would have to be changed. As an alternative we could define
-a linear function::
+model function would have to be changed.
+
+Instead, lmfit allows models to be combined into a :class:`CompositeModel`.
+As an alternative to including a linear background in our model function,
+we could define a linear function::
def line(x, slope, intercept):
- "a line"
- return slope * x + intercept
+ "a line"
+ return slope * x + intercept
and build a composite model with just::
@@ -1006,25 +785,24 @@ This model has parameters for both component models, and can be used as:
which prints out the results::
[[Model]]
- (Model(gaussian) + Model(line))
+ (Model(gaussian) + Model(line))
[[Fit Statistics]]
- # function evals = 44
- # data points = 101
- # variables = 5
- chi-square = 2.579
- reduced chi-square = 0.027
- Akaike info crit = -360.457
- Bayesian info crit = -347.381
+ # function evals = 44
+ # data points = 101
+ # variables = 5
+ chi-square = 2.579
+ reduced chi-square = 0.027
+ Akaike info crit = -360.457
+ Bayesian info crit = -347.381
[[Variables]]
- amp: 8.45931061 +/- 0.124145 (1.47%) (init= 5)
- cen: 5.65547872 +/- 0.009176 (0.16%) (init= 5)
- intercept: -0.96860201 +/- 0.033522 (3.46%) (init= 1)
- slope: 0.26484403 +/- 0.005748 (2.17%) (init= 0)
- wid: 0.67545523 +/- 0.009916 (1.47%) (init= 1)
+ amp: 8.45931061 +/- 0.124145 (1.47%) (init= 5)
+ cen: 5.65547872 +/- 0.009176 (0.16%) (init= 5)
+ intercept: -0.96860201 +/- 0.033522 (3.46%) (init= 1)
+ slope: 0.26484403 +/- 0.005748 (2.17%) (init= 0)
+ wid: 0.67545523 +/- 0.009916 (1.47%) (init= 1)
[[Correlations]] (unreported correlations are < 0.100)
- C(amp, wid) = 0.666
- C(cen, intercept) = 0.129
-
+ C(amp, wid) = 0.666
+ C(cen, intercept) = 0.129
and shows the plot on the left.
@@ -1049,13 +827,13 @@ Models :meth:`ModelResult.eval_components` method of the `result`::
which returns a dictionary of the components, using keys of the model name
(or `prefix` if that is set). This will use the parameter values in
-``result.params`` and the independent variables (``x``) used during the
+`result.params` and the independent variables (`x`) used during the
fit. Note that while the :class:`ModelResult` held in `result` does store the
-best parameters and the best estimate of the model in ``result.best_fit``,
-the original model and parameters in ``pars`` are left unaltered.
+best parameters and the best estimate of the model in `result.best_fit`,
+the original model and parameters in `pars` are left unaltered.
You can apply this composite model to other data sets, or evaluate the
-model at other values of ``x``. You may want to do this to give a finer or
+model at other values of `x`. You may want to do this to give a finer or
coarser spacing of data point, or to extrapolate the model outside the
fitting range. This can be done with::
@@ -1063,50 +841,38 @@ fitting range. This can be done with::
predicted = mod.eval(x=xwide)
In this example, the argument names for the model functions do not overlap.
-If they had, the ``prefix`` argument to :class:`Model` would have allowed
+If they had, the `prefix` argument to :class:`Model` would have allowed
us to identify which parameter went with which component model. As we will
see in the next chapter, using composite models with the built-in models
provides a simple way to build up complex models.
-.. class:: CompositeModel(left, right, op[, **kws])
-
- Create a composite model from two models (`left` and `right` and an
- binary operator (`op`). Additional keywords are passed to
- :class:`Model`.
-
- :param left: left-hand side Model
- :type left: :class:`Model`
- :param right: right-hand side Model
- :type right: :class:`Model`
- :param op: binary operator
- :type op: callable, and taking 2 arguments (`left` and `right`).
+.. autoclass:: CompositeModel(left, right, op[, **kws])
-Normally, one does not have to explicitly create a :class:`CompositeModel`,
-as doing::
+Note that when using builtin Python binary operators, a
+:class:`CompositeModel` will automatically be constructed for you. That is,
+doing::
mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
-will automatically create a :class:`CompositeModel`. In this example,
-`mod.left` will be `Model(fcn1)`, `mod.op` will be :meth:`operator.add`,
-and `mod.right` will be another CompositeModel that has a `left` attribute
-of `Model(fcn2)`, an `op` of :meth:`operator.mul`, and a `right` of
-`Model(fcn3)`.
+will create a :class:`CompositeModel`. Here, `left` will be `Model(fcn1)`,
+`op` will be :meth:`operator.add`, and `right` will be another
+CompositeModel that has a `left` attribute of `Model(fcn2)`, an `op` of
+:meth:`operator.mul`, and a `right` of `Model(fcn3)`.
-If you want to use a binary operator other than add, subtract, multiply, or
-divide that are supported through normal Python syntax, you'll need to
+To use a binary operator other than '+', '-', '*', or '/' you can
explicitly create a :class:`CompositeModel` with the appropriate binary
operator. For example, to convolve two models, you could define a simple
convolution function, perhaps as::
import numpy as np
def convolve(dat, kernel):
- # simple convolution
- npts = min(len(dat), len(kernel))
- pad = np.ones(npts)
- tmp = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
- out = np.convolve(tmp, kernel, mode='valid')
- noff = int((len(out) - npts)/2)
- return (out[noff:])[:npts]
+ # simple convolution
+ npts = min(len(dat), len(kernel))
+ pad = np.ones(npts)
+ tmp = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
+ out = np.convolve(tmp, kernel, mode='valid')
+ noff = int((len(out) - npts)/2)
+ return (out[noff:])[:npts]
which extends the data in both directions so that the convolving kernel
function gives a valid result over the data range. Because this function
@@ -1118,23 +884,23 @@ binary operator. A full script using this technique is here:
which prints out the results::
[[Model]]
- (Model(jump) <function convolve at 0x109ee4488> Model(gaussian))
+ (Model(jump) <function convolve at 0x109ee4488> Model(gaussian))
[[Fit Statistics]]
- # function evals = 27
- # data points = 201
- # variables = 3
- chi-square = 22.091
- reduced chi-square = 0.112
- Akaike info crit = -437.837
- Bayesian info crit = -427.927
+ # function evals = 27
+ # data points = 201
+ # variables = 3
+ chi-square = 22.091
+ reduced chi-square = 0.112
+ Akaike info crit = -437.837
+ Bayesian info crit = -427.927
[[Variables]]
- mid: 5 (fixed)
- sigma: 0.64118585 +/- 0.013233 (2.06%) (init= 1.5)
- center: 4.51633608 +/- 0.009567 (0.21%) (init= 3.5)
- amplitude: 0.62654849 +/- 0.001813 (0.29%) (init= 1)
+ mid: 5 (fixed)
+ sigma: 0.64118585 +/- 0.013233 (2.06%) (init= 1.5)
+ center: 4.51633608 +/- 0.009567 (0.21%) (init= 3.5)
+ amplitude: 0.62654849 +/- 0.001813 (0.29%) (init= 1)
[[Correlations]] (unreported correlations are < 0.100)
- C(center, amplitude) = 0.344
- C(sigma, amplitude) = 0.280
+ C(center, amplitude) = 0.344
+ C(sigma, amplitude) = 0.280
and shows the plots:
diff --git a/doc/parameters.rst b/doc/parameters.rst
index 419ff61..a31d7d5 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -6,70 +6,43 @@
:class:`Parameter` and :class:`Parameters`
================================================
-This chapter describes :class:`Parameter` objects which is the key concept
-of lmfit.
+This chapter describes the :class:`Parameter` object, which is a key concept of
+lmfit.
A :class:`Parameter` is the quantity to be optimized in all minimization
problems, replacing the plain floating point number used in the
optimization routines from :mod:`scipy.optimize`. A :class:`Parameter` has
-a value that can be varied in the fit or have a fixed value, have upper
-and/or lower bounds. It can even have a value that is constrained by an
-algebraic expression of other Parameter values. Since :class:`Parameters`
-live outside the core optimization routines, they can be used in **all**
-optimization routines from :mod:`scipy.optimize`. By using
-:class:`Parameter` objects instead of plain variables, the objective
-function does not have to be modified to reflect every change of what is
-varied in the fit. This simplifies the writing of models, allowing general
-models that describe the phenomenon to be written, and gives the user more
-flexibility in using and testing variations of that model.
+a value that can either be varied in the fit or held at a fixed value, and
+can have upper and/or lower bounds placed on the value. It can even have a
+value that is constrained by an algebraic expression of other Parameter
+values. Since :class:`Parameter` objects live outside the core
+optimization routines, they can be used in **all** optimization routines
+from :mod:`scipy.optimize`. By using :class:`Parameter` objects instead of
+plain variables, the objective function does not have to be modified to
+reflect every change of what is varied in the fit, or whether bounds can be
+applied. This simplifies the writing of models, allowing general models
+that describe the phenomenon and gives the user more flexibility in using
+and testing variations of that model.
Whereas a :class:`Parameter` expands on an individual floating point
-variable, the optimization methods need an ordered group of floating point
-variables. In the :mod:`scipy.optimize` routines this is required to be a
-1-dimensional numpy ndarray. For lmfit, where each :class:`Parameter` has
-a name, this is replaced by a :class:`Parameters` class, which works as an
-ordered dictionary of :class:`Parameter` objects, with a few additional
+variable, the optimization methods actually still need an ordered group of
+floating point variables. In the :mod:`scipy.optimize` routines this is
+required to be a one-dimensional :numpydoc:`ndarray`. In lmfit, this one-dimensional
+array is replaced by a :class:`Parameters` object, which works as an
+ordered dictionary of :class:`Parameter` objects with a few additional
features and methods. That is, while the concept of a :class:`Parameter`
is central to lmfit, one normally creates and interacts with a
:class:`Parameters` instance that contains many :class:`Parameter` objects.
-The objective functions you write for lmfit will take an instance of
-:class:`Parameters` as its first argument.
+For example, the objective functions you write for lmfit will take an
+instance of :class:`Parameters` as its first argument. A table of
+parameter values, bounds and other attributes can be printed using
+:meth:`Parameters.pretty_print`.
The :class:`Parameter` class
========================================
-.. class:: Parameter(name=None[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
-
- create a Parameter object.
-
- :param name: parameter name
- :type name: ``None`` or string -- will be overwritten during fit if ``None``.
- :param value: the numerical value for the parameter
- :param vary: whether to vary the parameter or not.
- :type vary: boolean (``True``/``False``) [default ``True``]
- :param min: lower bound for value (``None`` = no lower bound).
- :param max: upper bound for value (``None`` = no upper bound).
- :param expr: mathematical expression to use to evaluate value during fit.
- :type expr: ``None`` or string
-
- Each of these inputs is turned into an attribute of the same name.
-
- After a fit, a Parameter for a fitted variable (that is with ``vary =
- True``) may have its :attr:`value` attribute to hold the best-fit value.
- Depending on the success of the fit and fitting algorithm used, it may also
- have attributes :attr:`stderr` and :attr:`correl`.
-
- .. attribute:: stderr
-
- the estimated standard error for the best-fit value.
-
- .. attribute:: correl
-
- a dictionary of the correlation with the other fitted variables in the
- fit, of the form::
-
- {'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
+.. autoclass:: Parameter
See :ref:`bounds_chapter` for details on the math used to implement the
bounds with :attr:`min` and :attr:`max`.
@@ -81,144 +54,37 @@ The :class:`Parameter` class
.. index:: Removing a Constraint Expression
- .. method:: set(value=None[, vary=None[, min=None[, max=None[, expr=None]]]])
-
- set or update a Parameters value or other attributes.
-
- :param name: parameter name
- :param value: the numerical value for the parameter
- :param vary: whether to vary the parameter or not.
- :param min: lower bound for value
- :param max: upper bound for value
- :param expr: mathematical expression to use to evaluate value during fit.
-
- Each argument of :meth:`set` has a default value of ``None``, and will
- be set only if the provided value is not ``None``. You can use this to
- update some Parameter attribute without affecting others, for example::
-
- p1 = Parameter('a', value=2.0)
- p2 = Parameter('b', value=0.0)
- p1.set(min=0)
- p2.set(vary=False)
-
- to set a lower bound, or to set a Parameter as have a fixed value.
-
- Note that to use this approach to lift a lower or upper bound, doing::
-
- p1.set(min=0)
- .....
- # now lift the lower bound
- p1.set(min=None) # won't work! lower bound NOT changed
-
- won't work -- this will not change the current lower bound. Instead
- you'll have to use ``np.inf`` to remove a lower or upper bound::
-
- # now lift the lower bound
- p1.set(min=-np.inf) # will work!
-
- Similarly, to clear an expression of a parameter, you need to pass an
- empty string, not ``None``. You also need to give a value and
- explicitly tell it to vary::
-
- p3 = Parameter('c', expr='(a+b)/2')
- p3.set(expr=None) # won't work! expression NOT changed
-
- # remove constraint expression
- p3.set(value=1.0, vary=True, expr='') # will work! parameter now unconstrained
+ .. automethod:: set
The :class:`Parameters` class
========================================
-.. class:: Parameters()
-
- create a Parameters object. This is little more than a fancy ordered
- dictionary, with the restrictions that:
-
- 1. keys must be valid Python symbol names, so that they can be used in
- expressions of mathematical constraints. This means the names must
- match ``[a-z_][a-z0-9_]*`` and cannot be a Python reserved word.
-
- 2. values must be valid :class:`Parameter` objects.
-
- Two methods are provided for convenient initialization of a :class:`Parameters`,
- and one for extracting :class:`Parameter` values into a plain dictionary.
-
- .. method:: add(name[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
-
- add a named parameter. This creates a :class:`Parameter`
- object associated with the key `name`, with optional arguments
- passed to :class:`Parameter`::
-
- p = Parameters()
- p.add('myvar', value=1, vary=True)
-
- .. method:: add_many(self, paramlist)
-
- add a list of named parameters. Each entry must be a tuple
- with the following entries::
-
- name, value, vary, min, max, expr
-
- This method is somewhat rigid and verbose (no default values), but can
- be useful when initially defining a parameter list so that it looks
- table-like::
-
- p = Parameters()
- # (Name, Value, Vary, Min, Max, Expr)
- p.add_many(('amp1', 10, True, None, None, None),
- ('cen1', 1.2, True, 0.5, 2.0, None),
- ('wid1', 0.8, True, 0.1, None, None),
- ('amp2', 7.5, True, None, None, None),
- ('cen2', 1.9, True, 1.0, 3.0, None),
- ('wid2', None, False, None, None, '2*wid1/3'))
-
-
- .. automethod:: Parameters.pretty_print
-
- .. method:: valuesdict()
-
- return an ordered dictionary of name:value pairs with the
- Paramater name as the key and Parameter value as value.
-
- This is distinct from the :class:`Parameters` itself, as the dictionary
- values are not :class:`Parameter` objects, just the :attr:`value`.
- Using :meth:`valuesdict` can be a very convenient way to get updated
- values in a objective function.
-
- .. method:: dumps(**kws)
+.. autoclass:: Parameters
- return a JSON string representation of the :class:`Parameter` object.
- This can be saved or used to re-create or re-set parameters, using the
- :meth:`loads` method.
+ .. automethod:: add
- Optional keywords are sent :py:func:`json.dumps`.
+ .. automethod:: add_many
- .. method:: dump(file, **kws)
+ .. automethod:: pretty_print
- write a JSON representation of the :class:`Parameter` object to a file
- or file-like object in `file` -- really any object with a :meth:`write`
- method. Optional keywords are sent :py:func:`json.dumps`.
+ .. automethod:: valuesdict
- .. method:: loads(sval, **kws)
+ .. automethod:: dumps
- use a JSON string representation of the :class:`Parameter` object in
- `sval` to set all parameter settings. Optional keywords are sent
- :py:func:`json.loads`.
+ .. automethod:: dump
- .. method:: load(file, **kws)
+ .. automethod:: loads
- read and use a JSON string representation of the :class:`Parameter`
- object from a file or file-like object in `file` -- really any object
- with a :meth:`read` method. Optional keywords are sent
- :py:func:`json.loads`.
+ .. automethod:: load
Simple Example
==================
-Using :class:`Parameters`` and :func:`minimize` function (discussed in the
-next chapter) might look like this:
+A basic example making use of :class:`~lmfit.parameter.Parameters` and the
+:func:`~lmfit.minimizer.minimize` function (discussed in the next chapter)
+might look like this:
.. literalinclude:: ../examples/doc_basic.py
@@ -228,10 +94,10 @@ can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
which would make the objective function ``fcn2min`` above look like::
def fcn2min(params, x, data):
- """ model decaying sine wave, subtract data"""
- v = params.valuesdict()
+ """ model decaying sine wave, subtract data"""
+ v = params.valuesdict()
- model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
- return model - data
+ model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+ return model - data
The results are identical, and the difference is a stylistic choice.
diff --git a/doc/sphinx/ext_mathjax.py b/doc/sphinx/ext_mathjax.py
index 40de659..1bc8b9f 100644
--- a/doc/sphinx/ext_mathjax.py
+++ b/doc/sphinx/ext_mathjax.py
@@ -1,10 +1,9 @@
# sphinx extensions for mathjax
+
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
- 'numpydoc']
-mathjax = 'sphinx.ext.mathjax'
-pngmath = 'sphinx.ext.pngmath'
-
-extensions.append(mathjax)
+ 'sphinx.ext.extlinks',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.mathjax']
diff --git a/doc/sphinx/ext_pngmath.py b/doc/sphinx/ext_pngmath.py
index cf153fe..8cf169a 100644
--- a/doc/sphinx/ext_pngmath.py
+++ b/doc/sphinx/ext_pngmath.py
@@ -1,10 +1,9 @@
# sphinx extensions for pngmath
+
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
- 'numpydoc']
-mathjax = 'sphinx.ext.mathjax'
-pngmath = 'sphinx.ext.pngmath'
-
-extensions.append(pngmath)
+ 'sphinx.ext.extlinks',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.pngmath']
diff --git a/doc/sphinx/theme/lmfitdoc/layout.html b/doc/sphinx/theme/lmfitdoc/layout.html
index 6a31b9d..499eaef 100644
--- a/doc/sphinx/theme/lmfitdoc/layout.html
+++ b/doc/sphinx/theme/lmfitdoc/layout.html
@@ -24,7 +24,7 @@
<li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
<li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
<li><a href="{{ pathto('model') }}"> model</a>|</li>
- <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
+ <li><a href="{{ pathto('builtin_models') }}"> built-in models</a>|</li>
<li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
<li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
<li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
diff --git a/doc/support.rst b/doc/support.rst
index 4fe2c3e..b6a39a8 100644
--- a/doc/support.rst
+++ b/doc/support.rst
@@ -10,9 +10,9 @@ Getting Help
If you have questions, comments, or suggestions for LMFIT, please use the
`mailing list`_. This provides an on-line conversation that is
archived and can be searched with standard web searches. If you
-find a bug with the code or documentation, use the `github issues`_ Issue
-tracker to submit a report. If you have an idea for how to solve the
-problem and are familiar with python and github, submitting a github Pull
+find a bug in the code or documentation, use `GitHub Issues`_
+to submit a report. If you have an idea for how to solve the
+problem and are familiar with Python and GitHub, submitting a GitHub Pull
Request would be greatly appreciated.
If you are unsure whether to use the mailing list or the Issue tracker,
diff --git a/doc/testlinks.py b/doc/testlinks.py
deleted file mode 100644
index a35a685..0000000
--- a/doc/testlinks.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from sphinx.ext.intersphinx import fetch_inventory
-import warnings
-uri = 'file:///Users/Newville/Codes/lmfit-py/doc/_build/html/'
-inv = fetch_inventory(warnings, uri, uri + 'objects.inv')
-print (" INV : ", inv)
-
-for key in inv.keys():
- for key2 in inv[key]:
- print(key2)
diff --git a/doc/testlinks.py.~1~ b/doc/testlinks.py.~1~
deleted file mode 100644
index e69de29..0000000
--- a/doc/testlinks.py.~1~
+++ /dev/null
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
index 063c6c9..1dca429 100644
--- a/doc/whatsnew.rst
+++ b/doc/whatsnew.rst
@@ -6,24 +6,62 @@ Release Notes
.. _lmfit github repository: http://github.com/lmfit/lmfit-py
-This section discusses changes between versions, especially significant
-changes to the use and behavior of the library. This is not meant to be a
-comprehensive list of changes. For such a complete record, consult the
-`lmfit github repository`_.
+This section discusses changes between versions, especially changes
+significant to the use and behavior of the library. This is not meant
+to be a comprehensive list of changes. For such a complete record,
+consult the `lmfit github repository`_.
+
+.. _whatsnew_096_label:
+
+Version 0.9.6 Release Notes
+==========================================
+
+Support for SciPy 0.14 has been dropped: SciPy 0.15 is now required. This
+is especially important for lmfit maintenance, as it means we can now rely
+on SciPy having code for differential evolution and do not need to keep a
+local copy.
+
+A brute force method was added, which can be used either with
+:meth:`Minimizer.brute` or using the `method='brute'` option to
+:meth:`Minimizer.minimize`. This method requires finite bounds on
+all varying parameters, or that parameters have a finite
+`brute_step` attribute set to specify the step size.
+
+Custom cost functions can now be used for the scalar minimizers using the
+`reduce_fcn` option.
+
+Many improvements to documentation and docstrings in the code were made.
+As part of that effort, all API documentation in this main Sphinx
+documentation now derives from the docstrings.
+
+Uncertainties in the resulting best-fit for a model can now be calculated
+from the uncertainties in the model parameters.
+
+Parameters have two new attributes: `brute_step`, to specify the step
+size when using the `brute` method, and `user_data`, which is unused but
+can be used to hold additional information the user may desire. This will
+be preserved on copy and pickling.
+
+Several bug fixes and cleanups.
+
+Versioneer was updated to 0.18.
+
+Tests can now be run either with nose or pytest.
+
.. _whatsnew_095_label:
Version 0.9.5 Release Notes
==========================================
-Support for Python 2.6 and scipy 0.13 has been dropped.
+Support for Python 2.6 and SciPy 0.13 has been dropped.
.. _whatsnew_094_label:
Version 0.9.4 Release Notes
==========================================
-Some support for the new `least_squares` routine from scipy 0.17 has been
+Some support for the new `least_squares` routine from SciPy 0.17 has been
added.
@@ -32,7 +70,7 @@ so that the Parameter value does not need `sigma = params['sigma'].value`.
The older, explicit usage still works, but the docs, samples, and tests
have been updated to use the simpler usage.
-Support for Python 2.6 and scipy 0.13 is now explicitly deprecated and wil
+Support for Python 2.6 and SciPy 0.13 is now explicitly deprecated and will
be dropped in version 0.9.5.
.. _whatsnew_093_label:
diff --git a/lmfit.egg-info/PKG-INFO b/lmfit.egg-info/PKG-INFO
new file mode 100644
index 0000000..e5b9d4f
--- /dev/null
+++ b/lmfit.egg-info/PKG-INFO
@@ -0,0 +1,33 @@
+Metadata-Version: 1.1
+Name: lmfit
+Version: 0.9.7
+Summary: Least-Squares Minimization with Bounds and Constraints
+Home-page: http://lmfit.github.io/lmfit-py/
+Author: LMFit Development Team
+Author-email: matt.newville@gmail.com
+License: BSD
+Download-URL: http://lmfit.github.io//lmfit-py/
+Description: A library for least-squares minimization and data fitting in
+ Python. Built on top of scipy.optimize, lmfit provides a Parameter object
+ which can be set as fixed or free, can have upper and/or lower bounds, or
+ can be written in terms of algebraic constraints of other Parameters. The
+ user writes a function to be minimized as a function of these Parameters,
+ and the scipy.optimize methods are used to find the optimal values for the
+ Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
+ algorithm, and provides estimated standard errors and correlations between
+ varied Parameters. Other minimization methods, including Nelder-Mead's
+ downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
+        others are also supported. Bounds and constraints can be placed on
+ Parameters for all of these methods.
+
+ In addition, methods for explicitly calculating confidence intervals are
+        provided for exploring minimization problems where the approximation of
+ estimating Parameter uncertainties from the covariance matrix is
+ questionable.
+Platform: Windows
+Platform: Linux
+Platform: Mac OS X
+Classifier: Intended Audience :: Science/Research
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Scientific/Engineering
diff --git a/lmfit.egg-info/SOURCES.txt b/lmfit.egg-info/SOURCES.txt
new file mode 100644
index 0000000..a72da67
--- /dev/null
+++ b/lmfit.egg-info/SOURCES.txt
@@ -0,0 +1,148 @@
+INSTALL
+LICENSE
+MANIFEST.in
+README.rst
+THANKS.txt
+publish_docs.sh
+requirements.txt
+setup.cfg
+setup.py
+versioneer.py
+NIST_STRD/Bennett5.dat
+NIST_STRD/BoxBOD.dat
+NIST_STRD/Chwirut1.dat
+NIST_STRD/Chwirut2.dat
+NIST_STRD/DanWood.dat
+NIST_STRD/ENSO.dat
+NIST_STRD/Eckerle4.dat
+NIST_STRD/Gauss1.dat
+NIST_STRD/Gauss2.dat
+NIST_STRD/Gauss3.dat
+NIST_STRD/Hahn1.dat
+NIST_STRD/Kirby2.dat
+NIST_STRD/Lanczos1.dat
+NIST_STRD/Lanczos2.dat
+NIST_STRD/Lanczos3.dat
+NIST_STRD/MGH09.dat
+NIST_STRD/MGH10.dat
+NIST_STRD/MGH17.dat
+NIST_STRD/Misra1a.dat
+NIST_STRD/Misra1b.dat
+NIST_STRD/Misra1c.dat
+NIST_STRD/Misra1d.dat
+NIST_STRD/Nelson.dat
+NIST_STRD/Rat42.dat
+NIST_STRD/Rat43.dat
+NIST_STRD/Roszman1.dat
+NIST_STRD/Thurber.dat
+doc/.DS_Store
+doc/Makefile
+doc/bounds.rst
+doc/builtin_models.rst
+doc/conf.py
+doc/confidence.rst
+doc/constraints.rst
+doc/contents.rst
+doc/extensions.py
+doc/extensions.pyc
+doc/faq.rst
+doc/fitting.rst
+doc/index.rst
+doc/installation.rst
+doc/intro.rst
+doc/model.rst
+doc/parameters.rst
+doc/support.rst
+doc/test_ci2_result.png
+doc/whatsnew.rst
+doc/__pycache__/extensions.cpython-35.pyc
+doc/__pycache__/extensions.cpython-36.pyc
+doc/_images/conf_interval1.png
+doc/_images/conf_interval1a.png
+doc/_images/conf_interval2.png
+doc/_images/emcee_dbl_exp.png
+doc/_images/emcee_dbl_exp2.png
+doc/_images/emcee_triangle.png
+doc/_images/model_eval.png
+doc/_images/model_fit1.png
+doc/_images/model_fit2.png
+doc/_images/model_fit2a.png
+doc/_images/model_fit3a.png
+doc/_images/model_fit3b.png
+doc/_images/model_fit4.png
+doc/_images/models_doc1.png
+doc/_images/models_doc2.png
+doc/_images/models_nistgauss.png
+doc/_images/models_nistgauss2.png
+doc/_images/models_peak1.png
+doc/_images/models_peak2.png
+doc/_images/models_peak3.png
+doc/_images/models_peak4.png
+doc/_images/models_stepfit.png
+doc/_static/empty
+doc/_templates/indexsidebar.html
+doc/sphinx/apigen.py
+doc/sphinx/ext_mathjax.py
+doc/sphinx/ext_pngmath.py
+doc/sphinx/github.py
+doc/sphinx/numpydoc/__init__.py
+doc/sphinx/numpydoc/comment_eater.py
+doc/sphinx/numpydoc/compiler_unparse.py
+doc/sphinx/numpydoc/docscrape.py
+doc/sphinx/numpydoc/docscrape_sphinx.py
+doc/sphinx/numpydoc/numpydoc.py
+doc/sphinx/numpydoc/phantom_import.py
+doc/sphinx/numpydoc/plot_directive.py
+doc/sphinx/numpydoc/traitsdoc.py
+doc/sphinx/theme/lmfitdoc/layout.html
+doc/sphinx/theme/lmfitdoc/theme.conf
+doc/sphinx/theme/lmfitdoc/static/contents.png
+doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t
+doc/sphinx/theme/lmfitdoc/static/navigation.png
+lmfit/__init__.py
+lmfit/_version.py
+lmfit/asteval.py
+lmfit/astutils.py
+lmfit/confidence.py
+lmfit/lineshapes.py
+lmfit/minimizer.py
+lmfit/model.py
+lmfit/models.py
+lmfit/parameter.py
+lmfit/printfuncs.py
+lmfit.egg-info/PKG-INFO
+lmfit.egg-info/SOURCES.txt
+lmfit.egg-info/dependency_links.txt
+lmfit.egg-info/requires.txt
+lmfit.egg-info/top_level.txt
+lmfit/ui/__init__.py
+lmfit/ui/basefitter.py
+lmfit/ui/ipy_fitter.py
+lmfit/uncertainties/__init__.py
+lmfit/uncertainties/umath.py
+tests/NISTModels.py
+tests/_test_ci.py
+tests/_test_make_paras_and_func.py
+tests/lmfit_testutils.py
+tests/test_1variable.py
+tests/test_NIST_Strd.py
+tests/test_algebraic_constraint.py
+tests/test_algebraic_constraint2.py
+tests/test_basicfit.py
+tests/test_bounded_jacobian.py
+tests/test_bounds.py
+tests/test_brute_method.py
+tests/test_confidence.py
+tests/test_copy_params.py
+tests/test_default_kws.py
+tests/test_itercb.py
+tests/test_least_squares.py
+tests/test_manypeaks_speed.py
+tests/test_minimizer.py
+tests/test_model.py
+tests/test_model_uncertainties.py
+tests/test_multidatasets.py
+tests/test_nose.py
+tests/test_parameters.py
+tests/test_params_set.py
+tests/test_stepmodel.py \ No newline at end of file
diff --git a/lmfit.egg-info/dependency_links.txt b/lmfit.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lmfit.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/lmfit.egg-info/requires.txt b/lmfit.egg-info/requires.txt
new file mode 100644
index 0000000..c50d3dc
--- /dev/null
+++ b/lmfit.egg-info/requires.txt
@@ -0,0 +1,3 @@
+numpy
+scipy
+six
diff --git a/lmfit.egg-info/top_level.txt b/lmfit.egg-info/top_level.txt
new file mode 100644
index 0000000..536bcc8
--- /dev/null
+++ b/lmfit.egg-info/top_level.txt
@@ -0,0 +1 @@
+lmfit
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index ccfd898..e1fd8a5 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -1,44 +1,48 @@
-"""
-Lmfit provides a high-level interface to non-linear optimization and curve
-fitting problems for Python. Lmfit builds on Levenberg-Marquardt algorithm of
-scipy.optimize.leastsq(), but also supports most of the optimization methods
-from scipy.optimize. It has a number of useful enhancements, including:
+"""Lmfit provides a high-level interface to non-linear optimization and
+curve-fitting problems for Python.
+
+Lmfit builds on the Levenberg-Marquardt algorithm of
+scipy.optimize.leastsq(), but also supports most of the optimization
+methods from scipy.optimize. It has a number of useful enhancements,
+including:
- * Using Parameter objects instead of plain floats as variables. A Parameter
- has a value that can be varied in the fit, fixed, have upper and/or lower
- bounds. It can even have a value that is constrained by an algebraic
- expression of other Parameter values.
+ * Using Parameter objects instead of plain floats as variables. A
+ Parameter has a value that can be varied in the fit, fixed, have
+ upper and/or lower bounds. It can even have a value that is
+ constrained by an algebraic expression of other Parameter values.
- * Ease of changing fitting algorithms. Once a fitting model is set up, one
- can change the fitting algorithm without changing the objective function.
+ * Ease of changing fitting algorithms. Once a fitting model is set
+ up, one can change the fitting algorithm without changing the
+ objective function.
* Improved estimation of confidence intervals. While
- scipy.optimize.leastsq() will automatically calculate uncertainties and
- correlations from the covariance matrix, lmfit also has functions to
- explicitly explore parameter space to determine confidence levels even for
- the most difficult cases.
+ scipy.optimize.leastsq() will automatically calculate uncertainties
+ and correlations from the covariance matrix, lmfit also has functions
+ to explicitly explore parameter space to determine confidence levels
+ even for the most difficult cases.
* Improved curve-fitting with the Model class. This which extends the
- capabilities of scipy.optimize.curve_fit(), allowing you to turn a function
- that models for your data into a python class that helps you parametrize
- and fit data with that model.
+ capabilities of scipy.optimize.curve_fit(), allowing you to turn a
+ function that models for your data into a python class that helps you
+ parametrize and fit data with that model.
+
+ * Many pre-built models for common lineshapes are included and ready
+ to use.
- * Many pre-built models for common lineshapes are included and ready to use.
+version: 0.9.5
+last update: 2016-Jul-26
+License: BSD
+Authors: Matthew Newville, The University of Chicago
+ Till Stensitzki, Freie Universitat Berlin
+ Daniel B. Allen, Johns Hopkins University
+ Antonino Ingargiola, University of California, Los Angeles
- version: 0.9.5
- last update: 2016-Jul-26
- License: BSD
- Authors: Matthew Newville, The University of Chicago
- Till Stensitzki, Freie Universitat Berlin
- Daniel B. Allen, Johns Hopkins University
- Antonino Ingargiola, University of California, Los Angeles
"""
-import warnings
import sys
-from .minimizer import minimize, Minimizer, MinimizerException
-from .parameter import Parameter, Parameters
from .confidence import conf_interval, conf_interval2d
+from .minimizer import Minimizer, MinimizerException, minimize
+from .parameter import Parameter, Parameters
from .printfuncs import (fit_report, ci_report,
report_fit, report_ci, report_errors)
@@ -48,20 +52,8 @@ from . import models
from . import uncertainties
from .uncertainties import ufloat, correlated_values
-
-## versioneer code
+# versioneer code
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
-
-# PY26 Depreciation Warning
-if sys.version_info[:2] == (2, 6):
- warnings.warn('Support for Python 2.6.x was dropped with lmfit 0.9.5')
-
-# SCIPY 0.13 Depreciation Warning
-import scipy
-scipy_major, scipy_minor, scipy_other = scipy.__version__.split('.', 2)
-
-if int(scipy_major) == 0 and int(scipy_minor) < 15:
- warnings.warn('Support for Scipy 0.14 was dropped with lmfit 0.9.5')
diff --git a/lmfit/_differentialevolution.py b/lmfit/_differentialevolution.py
deleted file mode 100644
index 1e1fb66..0000000
--- a/lmfit/_differentialevolution.py
+++ /dev/null
@@ -1,750 +0,0 @@
-"""
-differential_evolution: The differential evolution global optimization algorithm
-Added by Andrew Nelson 2014
-"""
-from __future__ import division, print_function, absolute_import
-import numpy as np
-from scipy.optimize import minimize
-from scipy.optimize.optimize import _status_message
-import numbers
-
-__all__ = ['differential_evolution']
-
-_MACHEPS = np.finfo(np.float64).eps
-
-
-#------------------------------------------------------------------------------
-# scipy.optimize does not contain OptimizeResult until 0.14. Include here as a
-# fix for scipy < 0.14.
-
-class OptimizeResult(dict):
- """ Represents the optimization result.
- Attributes
- ----------
- x : ndarray
- The solution of the optimization.
- success : bool
- Whether or not the optimizer exited successfully.
- status : int
- Termination status of the optimizer. Its value depends on the
- underlying solver. Refer to `message` for details.
- message : str
- Description of the cause of the termination.
- fun, jac, hess, hess_inv : ndarray
- Values of objective function, Jacobian, Hessian or its inverse (if
- available). The Hessians may be approximations, see the documentation
- of the function in question.
- nfev, njev, nhev : int
- Number of evaluations of the objective functions and of its
- Jacobian and Hessian.
- nit : int
- Number of iterations performed by the optimizer.
- maxcv : float
- The maximum constraint violation.
- Notes
- -----
- There may be additional attributes not listed above depending of the
- specific solver. Since this class is essentially a subclass of dict
- with attribute accessors, one can see which attributes are available
- using the `keys()` method.
- """
- def __getattr__(self, name):
- try:
- return self[name]
- except KeyError:
- raise AttributeError(name)
-
- __setattr__ = dict.__setitem__
- __delattr__ = dict.__delitem__
-
- def __repr__(self):
- if self.keys():
- m = max(map(len, list(self.keys()))) + 1
- return '\n'.join([k.rjust(m) + ': ' + repr(v)
- for k, v in self.items()])
- else:
- return self.__class__.__name__ + "()"
-#------------------------------------------------------------------------------
-
-
-def differential_evolution(func, bounds, args=(), strategy='best1bin',
- maxiter=None, popsize=15, tol=0.01,
- mutation=(0.5, 1), recombination=0.7, seed=None,
- callback=None, disp=False, polish=True,
- init='latinhypercube'):
- """Finds the global minimum of a multivariate function.
- Differential Evolution is stochastic in nature (does not use gradient
- methods) to find the minimium, and can search large areas of candidate
- space, but often requires larger numbers of function evaluations than
- conventional gradient based techniques.
-
- The algorithm is due to Storn and Price [1]_.
-
- Parameters
- ----------
- func : callable
- The objective function to be minimized. Must be in the form
- ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
- and ``args`` is a tuple of any additional fixed parameters needed to
- completely specify the function.
- bounds : sequence
- Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
- defining the lower and upper bounds for the optimizing argument of
- `func`. It is required to have ``len(bounds) == len(x)``.
- ``len(bounds)`` is used to determine the number of parameters in ``x``.
- args : tuple, optional
- Any additional fixed parameters needed to
- completely specify the objective function.
- strategy : str, optional
- The differential evolution strategy to use. Should be one of:
-
- - 'best1bin'
- - 'best1exp'
- - 'rand1exp'
- - 'randtobest1exp'
- - 'best2exp'
- - 'rand2exp'
- - 'randtobest1bin'
- - 'best2bin'
- - 'rand2bin'
- - 'rand1bin'
-
- The default is 'best1bin'.
- maxiter : int, optional
- The maximum number of times the entire population is evolved.
- The maximum number of function evaluations is:
- ``maxiter * popsize * len(x)``
- popsize : int, optional
- A multiplier for setting the total population size. The population has
- ``popsize * len(x)`` individuals.
- tol : float, optional
- When the mean of the population energies, multiplied by tol,
- divided by the standard deviation of the population energies
- is greater than 1 the solving process terminates:
- ``convergence = mean(pop) * tol / stdev(pop) > 1``
- mutation : float or tuple(float, float), optional
- The mutation constant.
- If specified as a float it should be in the range [0, 2].
- If specified as a tuple ``(min, max)`` dithering is employed. Dithering
- randomly changes the mutation constant on a generation by generation
- basis. The mutation constant for that generation is taken from
- ``U[min, max)``. Dithering can help speed convergence significantly.
- Increasing the mutation constant increases the search radius, but will
- slow down convergence.
- recombination : float, optional
- The recombination constant, should be in the range [0, 1]. Increasing
- this value allows a larger number of mutants to progress into the next
- generation, but at the risk of population stability.
- seed : int or `np.random.RandomState`, optional
- If `seed` is not specified the `np.RandomState` singleton is used.
- If `seed` is an int, a new `np.random.RandomState` instance is used,
- seeded with seed.
- If `seed` is already a `np.random.RandomState instance`, then that
- `np.random.RandomState` instance is used.
- Specify `seed` for repeatable minimizations.
- disp : bool, optional
- Display status messages
- callback : callable, `callback(xk, convergence=val)`, optional:
- A function to follow the progress of the minimization. ``xk`` is
- the current value of ``x0``. ``val`` represents the fractional
- value of the population convergence. When ``val`` is greater than one
- the function halts. If callback returns `True`, then the minimization
- is halted (any polishing is still carried out).
- polish : bool, optional
- If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
- method is used to polish the best population member at the end, which
- can improve the minimization slightly.
- init : string, optional
- Specify how the population initialization is performed. Should be
- one of:
-
- - 'latinhypercube'
- - 'random'
-
- The default is 'latinhypercube'. Latin Hypercube sampling tries to
- maximize coverage of the available parameter space. 'random' initializes
- the population randomly - this has the drawback that clustering can
- occur, preventing the whole of parameter space being covered.
-
- Returns
- -------
- res : OptimizeResult
- The optimization result represented as a `OptimizeResult` object.
- Important attributes are: ``x`` the solution array, ``success`` a
- Boolean flag indicating if the optimizer exited successfully and
- ``message`` which describes the cause of the termination. See
- `OptimizeResult` for a description of other attributes. If `polish`
- was employed, then OptimizeResult also contains the `jac` attribute.
-
- Notes
- -----
- Differential evolution is a stochastic population based method that is
- useful for global optimization problems. At each pass through the population
- the algorithm mutates each candidate solution by mixing with other candidate
- solutions to create a trial candidate. There are several strategies [2]_ for
- creating trial candidates, which suit some problems more than others. The
- 'best1bin' strategy is a good starting point for many systems. In this
- strategy two members of the population are randomly chosen. Their difference
- is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
- so far:
-
- .. math::
-
- b' = b_0 + mutation * (population[rand0] - population[rand1])
-
- A trial vector is then constructed. Starting with a randomly chosen 'i'th
- parameter the trial is sequentially filled (in modulo) with parameters from
- `b'` or the original candidate. The choice of whether to use `b'` or the
- original candidate is made with a binomial distribution (the 'bin' in
- 'best1bin') - a random number in [0, 1) is generated. If this number is
- less than the `recombination` constant then the parameter is loaded from
- `b'`, otherwise it is loaded from the original candidate. The final
- parameter is always loaded from `b'`. Once the trial candidate is built
- its fitness is assessed. If the trial is better than the original candidate
- then it takes its place. If it is also better than the best overall
- candidate it also replaces that.
- To improve your chances of finding a global minimum use higher `popsize`
- values, with higher `mutation` and (dithering), but lower `recombination`
- values. This has the effect of widening the search radius, but slowing
- convergence.
-
- .. versionadded:: 0.15.0
-
- Examples
- --------
- Let us consider the problem of minimizing the Rosenbrock function. This
- function is implemented in `rosen` in `scipy.optimize`.
-
- >>> from scipy.optimize import rosen, differential_evolution
- >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
- >>> result = differential_evolution(rosen, bounds)
- >>> result.x, result.fun
- (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
-
- Next find the minimum of the Ackley function
- (http://en.wikipedia.org/wiki/Test_functions_for_optimization).
-
- >>> from scipy.optimize import differential_evolution
- >>> import numpy as np
- >>> def ackley(x):
- ... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
- ... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
- ... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
- >>> bounds = [(-5, 5), (-5, 5)]
- >>> result = differential_evolution(ackley, bounds)
- >>> result.x, result.fun
- (array([ 0., 0.]), 4.4408920985006262e-16)
-
- References
- ----------
- .. [1] Storn, R and Price, K, Differential Evolution - a Simple and
- Efficient Heuristic for Global Optimization over Continuous Spaces,
- Journal of Global Optimization, 1997, 11, 341 - 359.
- .. [2] http://www1.icsi.berkeley.edu/~storn/code.html
- .. [3] http://en.wikipedia.org/wiki/Differential_evolution
- """
-
- solver = DifferentialEvolutionSolver(func, bounds, args=args,
- strategy=strategy, maxiter=maxiter,
- popsize=popsize, tol=tol,
- mutation=mutation,
- recombination=recombination,
- seed=seed, polish=polish,
- callback=callback,
- disp=disp,
- init=init)
- return solver.solve()
-
-
-class DifferentialEvolutionSolver(object):
-
- """This class implements the differential evolution solver
-
- Parameters
- ----------
- func : callable
- The objective function to be minimized. Must be in the form
- ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
- and ``args`` is a tuple of any additional fixed parameters needed to
- completely specify the function.
- bounds : sequence
- Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
- defining the lower and upper bounds for the optimizing argument of
- `func`. It is required to have ``len(bounds) == len(x)``.
- ``len(bounds)`` is used to determine the number of parameters in ``x``.
- args : tuple, optional
- Any additional fixed parameters needed to
- completely specify the objective function.
- strategy : str, optional
- The differential evolution strategy to use. Should be one of:
-
- - 'best1bin'
- - 'best1exp'
- - 'rand1exp'
- - 'randtobest1exp'
- - 'best2exp'
- - 'rand2exp'
- - 'randtobest1bin'
- - 'best2bin'
- - 'rand2bin'
- - 'rand1bin'
-
- The default is 'best1bin'
-
- maxiter : int, optional
- The maximum number of times the entire population is evolved. The
- maximum number of function evaluations is:
- ``maxiter * popsize * len(x)``
- popsize : int, optional
- A multiplier for setting the total population size. The population has
- ``popsize * len(x)`` individuals.
- tol : float, optional
- When the mean of the population energies, multiplied by tol,
- divided by the standard deviation of the population energies
- is greater than 1 the solving process terminates:
- ``convergence = mean(pop) * tol / stdev(pop) > 1``
- mutation : float or tuple(float, float), optional
- The mutation constant.
- If specified as a float it should be in the range [0, 2].
- If specified as a tuple ``(min, max)`` dithering is employed. Dithering
- randomly changes the mutation constant on a generation by generation
- basis. The mutation constant for that generation is taken from
- U[min, max). Dithering can help speed convergence significantly.
- Increasing the mutation constant increases the search radius, but will
- slow down convergence.
- recombination : float, optional
- The recombination constant, should be in the range [0, 1]. Increasing
- this value allows a larger number of mutants to progress into the next
- generation, but at the risk of population stability.
- seed : int or `np.random.RandomState`, optional
- If `seed` is not specified the `np.random.RandomState` singleton is
- used.
- If `seed` is an int, a new `np.random.RandomState` instance is used,
- seeded with `seed`.
- If `seed` is already a `np.random.RandomState` instance, then that
- `np.random.RandomState` instance is used.
- Specify `seed` for repeatable minimizations.
- disp : bool, optional
- Display status messages
- callback : callable, `callback(xk, convergence=val)`, optional
- A function to follow the progress of the minimization. ``xk`` is
- the current value of ``x0``. ``val`` represents the fractional
- value of the population convergence. When ``val`` is greater than one
- the function halts. If callback returns `True`, then the minimization
- is halted (any polishing is still carried out).
- polish : bool, optional
- If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
- is used to polish the best population member at the end. This requires
- a few more function evaluations.
- maxfun : int, optional
- Set the maximum number of function evaluations. However, it probably
- makes more sense to set `maxiter` instead.
- init : string, optional
- Specify which type of population initialization is performed. Should be
- one of:
-
- - 'latinhypercube'
- - 'random'
- """
-
- # Dispatch of mutation strategy method (binomial or exponential).
- _binomial = {'best1bin': '_best1',
- 'randtobest1bin': '_randtobest1',
- 'best2bin': '_best2',
- 'rand2bin': '_rand2',
- 'rand1bin': '_rand1'}
- _exponential = {'best1exp': '_best1',
- 'rand1exp': '_rand1',
- 'randtobest1exp': '_randtobest1',
- 'best2exp': '_best2',
- 'rand2exp': '_rand2'}
-
- def __init__(self, func, bounds, args=(),
- strategy='best1bin', maxiter=None, popsize=15,
- tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
- maxfun=None, callback=None, disp=False, polish=True,
- init='latinhypercube'):
-
- if strategy in self._binomial:
- self.mutation_func = getattr(self, self._binomial[strategy])
- elif strategy in self._exponential:
- self.mutation_func = getattr(self, self._exponential[strategy])
- else:
- raise ValueError("Please select a valid mutation strategy")
- self.strategy = strategy
-
- self.callback = callback
- self.polish = polish
- self.tol = tol
-
- #Mutation constant should be in [0, 2). If specified as a sequence
- #then dithering is performed.
- self.scale = mutation
- if (not np.all(np.isfinite(mutation)) or
- np.any(np.array(mutation) >= 2) or
- np.any(np.array(mutation) < 0)):
- raise ValueError('The mutation constant must be a float in '
- 'U[0, 2), or specified as a tuple(min, max)'
- ' where min < max and min, max are in U[0, 2).')
-
- self.dither = None
- if hasattr(mutation, '__iter__') and len(mutation) > 1:
- self.dither = [mutation[0], mutation[1]]
- self.dither.sort()
-
- self.cross_over_probability = recombination
-
- self.func = func
- self.args = args
-
- # convert tuple of lower and upper bounds to limits
- # [(low_0, high_0), ..., (low_n, high_n]
- # -> [[low_0, ..., low_n], [high_0, ..., high_n]]
- self.limits = np.array(bounds, dtype='float').T
- if (np.size(self.limits, 0) != 2
- or not np.all(np.isfinite(self.limits))):
- raise ValueError('bounds should be a sequence containing '
- 'real valued (min, max) pairs for each value'
- ' in x')
-
- self.maxiter = maxiter or 1000
- self.maxfun = (maxfun or ((self.maxiter + 1) * popsize *
- np.size(self.limits, 1)))
-
- # population is scaled to between [0, 1].
- # We have to scale between parameter <-> population
- # save these arguments for _scale_parameter and
- # _unscale_parameter. This is an optimization
- self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
- self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
-
- parameter_count = np.size(self.limits, 1)
- self.random_number_generator = _make_random_gen(seed)
-
- #default initialization is a latin hypercube design, but there
- #are other population initializations possible.
- self.population = np.zeros((popsize * parameter_count,
- parameter_count))
- if init == 'latinhypercube':
- self.init_population_lhs()
- elif init == 'random':
- self.init_population_random()
- else:
- raise ValueError("The population initialization method must be one"
- "of 'latinhypercube' or 'random'")
-
- self.population_energies = np.ones(
- popsize * parameter_count) * np.inf
-
- self.disp = disp
-
- def init_population_lhs(self):
- """
- Initializes the population with Latin Hypercube Sampling
- Latin Hypercube Sampling ensures that the sampling of parameter space
- is maximised.
- """
- samples = np.size(self.population, 0)
- N = np.size(self.population, 1)
- rng = self.random_number_generator
-
- # Generate the intervals
- segsize = 1.0 / samples
-
- # Fill points uniformly in each interval
- rdrange = rng.rand(samples, N) * segsize
- rdrange += np.atleast_2d(np.arange(0., 1., segsize)).T
-
- # Make the random pairings
- self.population = np.zeros_like(rdrange)
-
- for j in range(N):
- order = rng.permutation(range(samples))
- self.population[:, j] = rdrange[order, j]
-
- def init_population_random(self):
- """
- Initialises the population at random. This type of initialization
- can possess clustering, Latin Hypercube sampling is generally better.
- """
- rng = self.random_number_generator
- self.population = rng.random_sample(self.population.shape)
-
- @property
- def x(self):
- """
- The best solution from the solver
-
- Returns
- -------
- x - ndarray
- The best solution from the solver.
- """
- return self._scale_parameters(self.population[0])
-
- def solve(self):
- """
- Runs the DifferentialEvolutionSolver.
-
- Returns
- -------
- res : OptimizeResult
- The optimization result represented as a ``OptimizeResult`` object.
- Important attributes are: ``x`` the solution array, ``success`` a
- Boolean flag indicating if the optimizer exited successfully and
- ``message`` which describes the cause of the termination. See
- `OptimizeResult` for a description of other attributes. If polish
- was employed, then OptimizeResult also contains the ``hess_inv`` and
- ``jac`` attributes.
- """
-
- nfev, nit, warning_flag = 0, 0, False
- status_message = _status_message['success']
-
- # calculate energies to start with
- for index, candidate in enumerate(self.population):
- parameters = self._scale_parameters(candidate)
- self.population_energies[index] = self.func(parameters,
- *self.args)
- nfev += 1
-
- if nfev > self.maxfun:
- warning_flag = True
- status_message = _status_message['maxfev']
- break
-
- minval = np.argmin(self.population_energies)
-
- # put the lowest energy into the best solution position.
- lowest_energy = self.population_energies[minval]
- self.population_energies[minval] = self.population_energies[0]
- self.population_energies[0] = lowest_energy
-
- self.population[[0, minval], :] = self.population[[minval, 0], :]
-
- if warning_flag:
- return OptimizeResult(
- x=self.x,
- fun=self.population_energies[0],
- nfev=nfev,
- nit=nit,
- message=status_message,
- success=(warning_flag != True))
-
- # do the optimisation.
- for nit in range(1, self.maxiter + 1):
- if self.dither is not None:
- self.scale = self.random_number_generator.rand(
- ) * (self.dither[1] - self.dither[0]) + self.dither[0]
- for candidate in range(np.size(self.population, 0)):
- if nfev > self.maxfun:
- warning_flag = True
- status_message = _status_message['maxfev']
- break
-
- trial = self._mutate(candidate)
- self._ensure_constraint(trial)
- parameters = self._scale_parameters(trial)
-
- energy = self.func(parameters, *self.args)
- nfev += 1
-
- if energy < self.population_energies[candidate]:
- self.population[candidate] = trial
- self.population_energies[candidate] = energy
-
- if energy < self.population_energies[0]:
- self.population_energies[0] = energy
- self.population[0] = trial
-
- # stop when the fractional s.d. of the population is less than tol
- # of the mean energy
- convergence = (np.std(self.population_energies) /
- np.abs(np.mean(self.population_energies) +
- _MACHEPS))
-
- if self.disp:
- print("differential_evolution step %d: f(x)= %g"
- % (nit,
- self.population_energies[0]))
-
- if (self.callback and
- self.callback(self._scale_parameters(self.population[0]),
- convergence=self.tol / convergence) is True):
-
- warning_flag = True
- status_message = ('callback function requested stop early '
- 'by returning True')
- break
-
- if convergence < self.tol or warning_flag:
- break
-
- else:
- status_message = _status_message['maxiter']
- warning_flag = True
-
- DE_result = OptimizeResult(
- x=self.x,
- fun=self.population_energies[0],
- nfev=nfev,
- nit=nit,
- message=status_message,
- success=(warning_flag != True))
-
- if self.polish:
- result = minimize(self.func,
- np.copy(DE_result.x),
- method='L-BFGS-B',
- bounds=self.limits.T,
- args=self.args)
-
- nfev += result.nfev
- DE_result.nfev = nfev
-
- if result.fun < DE_result.fun:
- DE_result.fun = result.fun
- DE_result.x = result.x
- DE_result.jac = result.jac
- # to keep internal state consistent
- self.population_energies[0] = result.fun
- self.population[0] = self._unscale_parameters(result.x)
-
- return DE_result
-
- def _scale_parameters(self, trial):
- """
- scale from a number between 0 and 1 to parameters
- """
- return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
-
- def _unscale_parameters(self, parameters):
- """
- scale from parameters to a number between 0 and 1.
- """
- return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
-
- def _ensure_constraint(self, trial):
- """
- make sure the parameters lie between the limits
- """
- for index, param in enumerate(trial):
- if param > 1 or param < 0:
- trial[index] = self.random_number_generator.rand()
-
- def _mutate(self, candidate):
- """
- create a trial vector based on a mutation strategy
- """
- trial = np.copy(self.population[candidate])
- parameter_count = np.size(trial, 0)
-
- fill_point = self.random_number_generator.randint(0, parameter_count)
-
- if (self.strategy == 'randtobest1exp'
- or self.strategy == 'randtobest1bin'):
- bprime = self.mutation_func(candidate,
- self._select_samples(candidate, 5))
- else:
- bprime = self.mutation_func(self._select_samples(candidate, 5))
-
- if self.strategy in self._binomial:
- crossovers = self.random_number_generator.rand(parameter_count)
- crossovers = crossovers < self.cross_over_probability
- # the last one is always from the bprime vector for binomial
- # If you fill in modulo with a loop you have to set the last one to
- # true. If you don't use a loop then you can have any random entry
- # be True.
- crossovers[fill_point] = True
- trial = np.where(crossovers, bprime, trial)
- return trial
-
- elif self.strategy in self._exponential:
- i = 0
- while (i < parameter_count and
- self.random_number_generator.rand() <
- self.cross_over_probability):
-
- trial[fill_point] = bprime[fill_point]
- fill_point = (fill_point + 1) % parameter_count
- i += 1
-
- return trial
-
- def _best1(self, samples):
- """
- best1bin, best1exp
- """
- r0, r1 = samples[:2]
- return (self.population[0] + self.scale *
- (self.population[r0] - self.population[r1]))
-
- def _rand1(self, samples):
- """
- rand1bin, rand1exp
- """
- r0, r1, r2 = samples[:3]
- return (self.population[r0] + self.scale *
- (self.population[r1] - self.population[r2]))
-
- def _randtobest1(self, candidate, samples):
- """
- randtobest1bin, randtobest1exp
- """
- r0, r1 = samples[:2]
- bprime = np.copy(self.population[candidate])
- bprime += self.scale * (self.population[0] - bprime)
- bprime += self.scale * (self.population[r0] -
- self.population[r1])
- return bprime
-
- def _best2(self, samples):
- """
- best2bin, best2exp
- """
- r0, r1, r2, r3 = samples[:4]
- bprime = (self.population[0] + self.scale *
- (self.population[r0] + self.population[r1]
- - self.population[r2] - self.population[r3]))
-
- return bprime
-
- def _rand2(self, samples):
- """
- rand2bin, rand2exp
- """
- r0, r1, r2, r3, r4 = samples
- bprime = (self.population[r0] + self.scale *
- (self.population[r1] + self.population[r2] -
- self.population[r3] - self.population[r4]))
-
- return bprime
-
- def _select_samples(self, candidate, number_samples):
- """
- obtain random integers from range(np.size(self.population, 0)),
- without replacement. You can't have the original candidate either.
- """
- idxs = list(range(np.size(self.population, 0)))
- idxs.remove(candidate)
- self.random_number_generator.shuffle(idxs)
- idxs = idxs[:number_samples]
- return idxs
-
-
-def _make_random_gen(seed):
- """Turn seed into a np.random.RandomState instance
-
- If seed is None, return the RandomState singleton used by np.random.
- If seed is an int, return a new RandomState instance seeded with seed.
- If seed is already a RandomState instance, return it.
- Otherwise raise ValueError.
- """
- if seed is None or seed is np.random:
- return np.random.mtrand._rand
- if isinstance(seed, (numbers.Integral, np.integer)):
- return np.random.RandomState(seed)
- if isinstance(seed, np.random.RandomState):
- return seed
- raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
- ' instance' % seed)
diff --git a/lmfit/_version.py b/lmfit/_version.py
index b5205c9..f1e8992 100644
--- a/lmfit/_version.py
+++ b/lmfit/_version.py
@@ -1,11 +1,21 @@
-# This file was generated by 'versioneer.py' (0.12) from
+# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
-version_version = '0.9.5'
-version_full = 'b8f4c6f4dac8e7ae395dbf2dd67b4212b34e304a'
-def get_versions(default={}, verbose=False):
- return {'version': version_version, 'full': version_full}
+import json
+version_json = '''
+{
+ "date": "2017-05-31T14:11:22-0500",
+ "dirty": false,
+ "error": null,
+ "full-revisionid": "c2b7ea15507baf2032f497441de917e505c6b784",
+ "version": "0.9.7"
+}
+''' # END VERSION_JSON
+
+
+def get_versions():
+ return json.loads(version_json)
diff --git a/lmfit/asteval.py b/lmfit/asteval.py
index cb839e8..48c411e 100644
--- a/lmfit/asteval.py
+++ b/lmfit/asteval.py
@@ -1,23 +1,25 @@
-"""
-Safe(ish) evaluator of python expressions, using ast module.
-The emphasis here is on mathematical expressions, and so
-numpy functions are imported if available and used.
+"""Safe(ish) evaluator of python expressions, using ast module.
+
+The emphasis here is on mathematical expressions, and so numpy functions
+are imported if available and used.
+
+Symbols are held in the Interpreter symtable -- a simple dictionary
+supporting a simple, flat namespace.
-Symbols are held in the Interpreter symtable -- a simple
-dictionary supporting a simple, flat namespace.
+Expressions can be compiled into ast node and then evaluated later, using
+the current values in the ???.
-Expressions can be compiled into ast node and then evaluated
-later, using the current values in the
"""
from __future__ import division, print_function
-from sys import exc_info, stdout, version_info
+
import ast
import math
+from sys import exc_info, stdout, version_info
-from .astutils import (FROM_PY, FROM_MATH, FROM_NUMPY, UNSAFE_ATTRS,
- LOCALFUNCS, NUMPY_RENAMES, op2func,
- ExceptionHolder, ReturnedNone, valid_symbol_name)
+from .astutils import (FROM_MATH, FROM_NUMPY, FROM_PY, LOCALFUNCS,
+ NUMPY_RENAMES, UNSAFE_ATTRS, ExceptionHolder,
+ ReturnedNone, op2func, valid_symbol_name)
HAS_NUMPY = False
try:
@@ -28,17 +30,17 @@ except ImportError:
class Interpreter:
- """mathematical expression compiler and interpreter.
+ """Mathematical expression compiler and interpreter.
This module compiles expressions and statements to AST representation,
using python's ast module, and then executes the AST representation
using a dictionary of named object (variable, functions).
The result is a restricted, simplified version of Python meant for
- numerical caclulations that is somewhat safer than 'eval' because some
- operations (such as 'import' and 'eval') are simply not allowed. The
- resulting language uses a flat namespace that works on Python objects,
- but does not allow new classes to be defined.
+ numerical calculations that is somewhat safer than 'eval' because
+ some operations (such as 'import' and 'eval') are simply not allowed.
+ The resulting language uses a flat namespace that works on Python
+ objects, but does not allow new classes to be defined.
Many parts of Python syntax are supported, including:
for loops, while loops, if-then-elif-else conditionals
@@ -63,13 +65,15 @@ class Interpreter:
supported_nodes = ('arg', 'assert', 'assign', 'attribute', 'augassign',
'binop', 'boolop', 'break', 'call', 'compare',
'continue', 'delete', 'dict', 'ellipsis',
- 'excepthandler', 'expr', 'extslice', 'for',
- 'functiondef', 'if', 'ifexp', 'index', 'interrupt',
- 'list', 'listcomp', 'module', 'name', 'num', 'pass',
- 'print', 'raise', 'repr', 'return', 'slice', 'str',
- 'subscript', 'try', 'tuple', 'unaryop', 'while')
+ 'excepthandler', 'expr', 'expression', 'extslice',
+ 'for', 'functiondef', 'if', 'ifexp', 'index',
+ 'interrupt', 'list', 'listcomp', 'module', 'name',
+ 'nameconstant', 'num', 'pass', 'print', 'raise',
+ 'repr', 'return', 'slice', 'str', 'subscript',
+ 'try', 'tuple', 'unaryop', 'while')
def __init__(self, symtable=None, writer=None, use_numpy=True):
+ """TODO: docstring in public method."""
self.writer = writer or stdout
if symtable is None:
@@ -87,7 +91,7 @@ class Interpreter:
# add python symbols
py_symtable = dict((sym, __builtins__[sym]) for sym in FROM_PY
- if sym in __builtins__)
+ if sym in __builtins__)
symtable.update(py_symtable)
# add local symbols
@@ -96,18 +100,18 @@ class Interpreter:
# add math symbols
math_symtable = dict((sym, getattr(math, sym)) for sym in FROM_MATH
- if hasattr(math, sym))
+ if hasattr(math, sym))
symtable.update(math_symtable)
# add numpy symbols
if self.use_numpy:
numpy_symtable = dict((sym, getattr(numpy, sym)) for sym in FROM_NUMPY
- if hasattr(numpy, sym))
+ if hasattr(numpy, sym))
symtable.update(numpy_symtable)
npy_rename_symtable = dict((name, getattr(numpy, sym)) for name, sym
- in NUMPY_RENAMES.items()
- if hasattr(numpy, sym))
+ in NUMPY_RENAMES.items()
+ if hasattr(numpy, sym))
symtable.update(npy_rename_symtable)
self.node_handlers = dict(((node, getattr(self, "on_%s" % node))
@@ -122,15 +126,17 @@ class Interpreter:
or 'numpy.lib.index_tricks' in repr(val))]
def user_defined_symbols(self):
- """
- Return a set of symbols that have been added to symtable after
- construction. I.e. the symbols from self.symtable that are not in
+ """Return a set of symbols that have been added to symtable after
+ construction.
+
+ I.e., the symbols from self.symtable that are not in
self.no_deepcopy.
Returns
-------
unique_symbols : set
symbols in symtable that are not in self.no_deepcopy
+
"""
sym_in_current = set(self.symtable.keys())
sym_from_construction = set(self.no_deepcopy)
@@ -138,14 +144,14 @@ class Interpreter:
return unique_symbols
def unimplemented(self, node):
- "unimplemented nodes"
+ """Unimplemented nodes."""
self.raise_exception(node, exc=NotImplementedError,
msg="'%s' not supported" %
(node.__class__.__name__))
def raise_exception(self, node, exc=None, msg='', expr=None,
lineno=None):
- "add an exception"
+ """Add an exception."""
if self.error is None:
self.error = []
if expr is None:
@@ -166,13 +172,12 @@ class Interpreter:
exc = RuntimeError
raise exc(self.error_msg)
-
# main entry point for Ast node evaluation
# parse: text of statements -> ast
# run: ast -> result
# eval: string statement -> result = run(parse(statement))
def parse(self, text):
- """parse statement/expression to Ast representation"""
+ """Parse statement/expression to Ast representation."""
self.expr = text
try:
return ast.parse(text)
@@ -182,7 +187,7 @@ class Interpreter:
self.raise_exception(None, msg='Runtime Error', expr=text)
def run(self, node, expr=None, lineno=None, with_raise=True):
- """executes parsed Ast representation for an expression"""
+ """Execute parsed Ast representation for an expression."""
# Note: keep the 'node is None' test: internal code here may run
# run(None) and expect a None in return.
if len(self.error) > 0:
@@ -201,6 +206,7 @@ class Interpreter:
try:
handler = self.node_handlers[node.__class__.__name__.lower()]
except KeyError:
+ print(" lmfit asteval node handler error ", node)
return self.unimplemented(node)
# run the handler: this will likely generate
@@ -215,10 +221,11 @@ class Interpreter:
self.raise_exception(node, expr=expr)
def __call__(self, expr, **kw):
+ """TODO: docstring in public method."""
return self.eval(expr, **kw)
def eval(self, expr, lineno=0, show_errors=True):
- """evaluates a single statement"""
+ """Evaluate a single statement."""
self.lineno = lineno
self.error = []
try:
@@ -251,87 +258,95 @@ class Interpreter:
return
def dump(self, node, **kw):
- "simple ast dumper"
+ """Simple ast dumper."""
return ast.dump(node, **kw)
# handlers for ast components
def on_expr(self, node):
- "expression"
+ """Expression."""
return self.run(node.value) # ('value',)
def on_index(self, node):
- "index"
+ """Index."""
return self.run(node.value) # ('value',)
def on_return(self, node): # ('value',)
- "return statement: look for None, return special sentinal"
+ """Return statement: look for None, return special sentinel."""
self.retval = self.run(node.value)
if self.retval is None:
self.retval = ReturnedNone
return
def on_repr(self, node):
- "repr "
+ """Repr."""
return repr(self.run(node.value)) # ('value',)
def on_module(self, node): # ():('body',)
- "module def"
+ """Module def."""
out = None
for tnode in node.body:
out = self.run(tnode)
return out
+ def on_expression(self, node):
+ """Basic expression."""
+ return self.on_module(node) # ():('body',)
+
def on_pass(self, node):
- "pass statement"
+ """Pass statement."""
return None # ()
def on_ellipsis(self, node):
- "ellipses"
+ """Ellipses."""
return Ellipsis
# for break and continue: set the instance variable _interrupt
def on_interrupt(self, node): # ()
- "interrupt handler"
+ """Interrupt handler."""
self._interrupt = node
return node
def on_break(self, node):
- "break"
+ """Break."""
return self.on_interrupt(node)
def on_continue(self, node):
- "continue"
+ """Continue."""
return self.on_interrupt(node)
def on_assert(self, node): # ('test', 'msg')
- "assert statement"
+ """Assert statement."""
if not self.run(node.test):
self.raise_exception(node, exc=AssertionError, msg=node.msg)
return True
def on_list(self, node): # ('elt', 'ctx')
- "list"
+ """List."""
return [self.run(e) for e in node.elts]
def on_tuple(self, node): # ('elts', 'ctx')
- "tuple"
+ """Tuple."""
return tuple(self.on_list(node))
def on_dict(self, node): # ('keys', 'values')
- "dictionary"
+ """Dictionary."""
return dict([(self.run(k), self.run(v)) for k, v in
zip(node.keys, node.values)])
def on_num(self, node): # ('n',)
- 'return number'
+ """Return number."""
return node.n
def on_str(self, node): # ('s',)
- 'return string'
+ """Return string."""
return node.s
+ def on_nameconstant(self, node): # ('value',)
+ """Named constant."""
+ return node.value
+
def on_name(self, node): # ('id', 'ctx')
- """ Name node """
+ """Name node."""
ctx = node.ctx.__class__
if ctx in (ast.Param, ast.Del):
return str(node.id)
@@ -343,8 +358,11 @@ class Interpreter:
self.raise_exception(node, exc=NameError, msg=msg)
def node_assign(self, node, val):
- """here we assign a value (not the node.value object) to a node
- this is used by on_assign, but also by for, list comprehension, etc.
+ """Assign a value (not the node.value object) to a node.
+
+ This is used by on_assign, but also by for, list comprehension,
+ etc.
+
"""
if node.__class__ == ast.Name:
if not valid_symbol_name(node.id):
@@ -378,7 +396,7 @@ class Interpreter:
raise ValueError('too many values to unpack')
def on_attribute(self, node): # ('value', 'attr', 'ctx')
- "extract attribute"
+ """Extract attribute."""
ctx = node.ctx.__class__
if ctx == ast.Store:
msg = "attribute for storage: shouldn't be here!"
@@ -403,31 +421,31 @@ class Interpreter:
self.raise_exception(node, exc=AttributeError, msg=msg)
def on_assign(self, node): # ('targets', 'value')
- "simple assignment"
+ """Simple assignment."""
val = self.run(node.value)
for tnode in node.targets:
self.node_assign(tnode, val)
return
def on_augassign(self, node): # ('target', 'op', 'value')
- "augmented assign"
+ """Augmented assign."""
return self.on_assign(ast.Assign(targets=[node.target],
value=ast.BinOp(left=node.target,
op=node.op,
right=node.value)))
def on_slice(self, node): # ():('lower', 'upper', 'step')
- "simple slice"
+ """Simple slice."""
return slice(self.run(node.lower),
self.run(node.upper),
self.run(node.step))
def on_extslice(self, node): # ():('dims',)
- "extended slice"
+ """Extended slice."""
return tuple([self.run(tnode) for tnode in node.dims])
def on_subscript(self, node): # ('value', 'slice', 'ctx')
- "subscript handling -- one of the tricky parts"
+ """Subscript handling -- one of the tricky parts."""
val = self.run(node.value)
nslice = self.run(node.slice)
ctx = node.ctx.__class__
@@ -441,7 +459,7 @@ class Interpreter:
self.raise_exception(node, msg=msg)
def on_delete(self, node): # ('targets',)
- "delete statement"
+ """Delete statement."""
for tnode in node.targets:
if tnode.ctx.__class__ != ast.Del:
break
@@ -459,16 +477,16 @@ class Interpreter:
self.raise_exception(node, msg=msg)
def on_unaryop(self, node): # ('op', 'operand')
- "unary operator"
+ """Unary operator."""
return op2func(node.op)(self.run(node.operand))
def on_binop(self, node): # ('left', 'op', 'right')
- "binary operator"
+ """Binary operator."""
return op2func(node.op)(self.run(node.left),
self.run(node.right))
def on_boolop(self, node): # ('op', 'values')
- "boolean operator"
+ """Boolean operator."""
val = self.run(node.values[0])
is_and = ast.And == node.op.__class__
if (is_and and val) or (not is_and and not val):
@@ -479,7 +497,7 @@ class Interpreter:
return val
def on_compare(self, node): # ('left', 'ops', 'comparators')
- "comparison operators"
+ """Comparison operators."""
lval = self.run(node.left)
out = True
for op, rnode in zip(node.ops, node.comparators):
@@ -493,8 +511,12 @@ class Interpreter:
return out
def on_print(self, node): # ('dest', 'values', 'nl')
- """ note: implements Python2 style print statement, not
- print() function. May need improvement...."""
+ """Note: implements Python2 style print statement, not print()
+ function.
+
+ May need improvement....
+
+ """
dest = self.run(node.dest) or self.writer
end = ''
if node.nl:
@@ -504,7 +526,7 @@ class Interpreter:
self._printer(*out, file=dest, end=end)
def _printer(self, *out, **kws):
- "generic print function"
+ """Generic print function."""
flush = kws.pop('flush', True)
fileh = kws.pop('file', self.writer)
sep = kws.pop('sep', ' ')
@@ -515,7 +537,7 @@ class Interpreter:
fileh.flush()
def on_if(self, node): # ('test', 'body', 'orelse')
- "regular if-then-else statement"
+ """Regular if-then-else statement."""
block = node.body
if not self.run(node.test):
block = node.orelse
@@ -523,14 +545,14 @@ class Interpreter:
self.run(tnode)
def on_ifexp(self, node): # ('test', 'body', 'orelse')
- "if expressions"
+ """If expressions."""
expr = node.orelse
if self.run(node.test):
expr = node.body
return self.run(expr)
def on_while(self, node): # ('test', 'body', 'orelse')
- "while blocks"
+ """While blocks."""
while self.run(node.test):
self._interrupt = None
for tnode in node.body:
@@ -545,7 +567,7 @@ class Interpreter:
self._interrupt = None
def on_for(self, node): # ('target', 'iter', 'body', 'orelse')
- "for blocks"
+ """For blocks."""
for val in self.run(node.iter):
self.node_assign(node.target, val)
self._interrupt = None
@@ -561,7 +583,7 @@ class Interpreter:
self._interrupt = None
def on_listcomp(self, node): # ('elt', 'generators')
- "list comprehension"
+ """List comprehension."""
out = []
for tnode in node.generators:
if tnode.__class__ == ast.comprehension:
@@ -575,11 +597,11 @@ class Interpreter:
return out
def on_excepthandler(self, node): # ('type', 'name', 'body')
- "exception handler..."
+ """Exception handler..."""
return (self.run(node.type), node.name, node.body)
def on_try(self, node): # ('body', 'handlers', 'orelse', 'finalbody')
- "try/except/else/finally blocks"
+ """Try/except/else/finally blocks."""
no_errors = True
for tnode in node.body:
self.run(tnode, with_raise=False)
@@ -606,7 +628,7 @@ class Interpreter:
self.run(tnode)
def on_raise(self, node): # ('type', 'inst', 'tback')
- "raise statement: note difference for python 2 and 3"
+ """Raise statement: note difference for python 2 and 3."""
if version_info[0] == 3:
excnode = node.exc
msgnode = node.cause
@@ -621,7 +643,7 @@ class Interpreter:
self.raise_exception(None, exc=out.__class__, msg=msg, expr='')
def on_call(self, node):
- "function execution"
+ """Function execution."""
# ('func', 'args', 'keywords'. Py<3.5 has 'starargs' and 'kwargs' too)
func = self.run(node.func)
if not hasattr(func, '__call__') and not isinstance(func, type):
@@ -650,12 +672,12 @@ class Interpreter:
self.raise_exception(node, msg="Error running %s" % (func))
def on_arg(self, node): # ('test', 'msg')
- "arg for function definitions"
+ """Arg for function definitions."""
# print(" ON ARG ! ", node, node.arg)
return node.arg
def on_functiondef(self, node):
- "define procedures"
+ """Define procedures."""
# ('name', 'args', 'body', 'decorator_list')
if node.decorator_list != []:
raise Warning("decorated procedures not supported!")
@@ -688,14 +710,17 @@ class Interpreter:
class Procedure(object):
- """Procedure: user-defined function for asteval
+ """Procedure: user-defined function for asteval.
+
+ This stores the parsed ast nodes as from the 'functiondef' ast node
+ for later evaluation.
- This stores the parsed ast nodes as from the
- 'functiondef' ast node for later evaluation.
"""
+
def __init__(self, name, interp, doc=None, lineno=0,
body=None, args=None, kwargs=None,
vararg=None, varkws=None):
+ """TODO: docstring in public method."""
self.name = name
self.__asteval__ = interp
self.raise_exc = self.__asteval__.raise_exception
@@ -708,6 +733,7 @@ class Procedure(object):
self.lineno = lineno
def __repr__(self):
+ """TODO: docstring in magic method."""
sig = ""
if len(self.argnames) > 0:
sig = "%s%s" % (sig, ', '.join(self.argnames))
@@ -727,6 +753,7 @@ class Procedure(object):
return sig
def __call__(self, *args, **kwargs):
+ """TODO: docstring in public method."""
symlocals = {}
args = list(args)
n_args = len(args)
diff --git a/lmfit/astutils.py b/lmfit/astutils.py
index 4906c9c..df1b23e 100644
--- a/lmfit/astutils.py
+++ b/lmfit/astutils.py
@@ -1,12 +1,12 @@
-"""
-utility functions for asteval
+"""Utility functions for asteval.
+
+Matthew Newville <newville@cars.uchicago.edu>, The University of Chicago
- Matthew Newville <newville@cars.uchicago.edu>,
- The University of Chicago
"""
from __future__ import division, print_function
-import re
+
import ast
+import re
from sys import exc_info
RESERVED_WORDS = ('and', 'as', 'assert', 'break', 'class', 'continue',
@@ -137,13 +137,15 @@ NUMPY_RENAMES = {'ln': 'log', 'asin': 'arcsin', 'acos': 'arccos',
'atan': 'arctan', 'atan2': 'arctan2', 'atanh':
'arctanh', 'acosh': 'arccosh', 'asinh': 'arcsinh'}
+
def _open(filename, mode='r', buffering=0):
- """read only version of open()"""
+ """Read-only version of open()."""
umode = 'r'
if mode == 'rb':
umode = 'rb'
return open(filename, umode, buffering)
+
LOCALFUNCS = {'open': _open}
OPERATORS = {ast.Is: lambda a, b: a is b,
@@ -177,10 +179,11 @@ OPERATORS = {ast.Is: lambda a, b: a is b,
def valid_symbol_name(name):
- """determines whether the input symbol name is a valid name
+ """Determine whether the input symbol name is a valid name.
This checks for reserved words, and that the name matches the
regular expression ``[a-zA-Z_][a-zA-Z0-9_]``
+
"""
if name in RESERVED_WORDS:
return False
@@ -188,24 +191,30 @@ def valid_symbol_name(name):
def op2func(op):
- "return function for operator nodes"
+ """Return function for operator nodes."""
return OPERATORS[op.__class__]
class Empty:
- """empty class"""
+ """Empty class."""
+
def __init__(self):
+ """TODO: docstring in public method."""
pass
def __nonzero__(self):
+ """TODO: docstring in magic method."""
return False
+
ReturnedNone = Empty()
class ExceptionHolder(object):
- "basic exception handler"
+ """Basic exception handler."""
+
def __init__(self, node, exc=None, msg='', expr=None, lineno=None):
+ """TODO: docstring in public method."""
self.node = node
self.expr = expr
self.msg = msg
@@ -218,7 +227,7 @@ class ExceptionHolder(object):
self.msg = self.exc_info[1]
def get_error(self):
- "retrieve error data"
+ """Retrieve error data."""
col_offset = -1
if self.node is not None:
try:
@@ -240,19 +249,23 @@ class ExceptionHolder(object):
class NameFinder(ast.NodeVisitor):
- """find all symbol names used by a parsed node"""
+ """Find all symbol names used by a parsed node."""
+
def __init__(self):
+ """TODO: docstring in public method."""
self.names = []
ast.NodeVisitor.__init__(self)
def generic_visit(self, node):
+ """TODO: docstring in public method."""
if node.__class__.__name__ == 'Name':
if node.ctx.__class__ == ast.Load and node.id not in self.names:
self.names.append(node.id)
ast.NodeVisitor.generic_visit(self, node)
+
def get_ast_names(astnode):
- "returns symbol Names from an AST node"
+ """Return symbol Names from an AST node."""
finder = NameFinder()
finder.generic_visit(astnode)
return finder.names
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index b1bce11..f593dbe 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -1,28 +1,26 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-Contains functions to calculate confidence intervals.
-"""
+"""Contains functions to calculate confidence intervals."""
from __future__ import print_function
+
+from collections import OrderedDict
from warnings import warn
+
import numpy as np
-from scipy.stats import f
from scipy.optimize import brentq
-from .minimizer import MinimizerException
+from scipy.special import erf
+from scipy.stats import f
-try:
- from collections import OrderedDict
-except ImportError:
- from ordereddict import OrderedDict
+from .minimizer import MinimizerException
-CONF_ERR_GEN = 'Cannot determine Confidence Intervals'
+CONF_ERR_GEN = 'Cannot determine Confidence Intervals'
CONF_ERR_STDERR = '%s without sensible uncertainty estimates' % CONF_ERR_GEN
-CONF_ERR_NVARS = '%s with < 2 variables' % CONF_ERR_GEN
+CONF_ERR_NVARS = '%s with < 2 variables' % CONF_ERR_GEN
+
def f_compare(ndata, nparas, new_chi, best_chi, nfix=1.):
- """
- Returns the probalitiy for two given parameter sets.
+ """Return the probability for two given parameter sets.
+
nfix is the number of fixed parameters.
+
"""
nparas = nparas + nfix
nfree = ndata - nparas
@@ -32,7 +30,7 @@ def f_compare(ndata, nparas, new_chi, best_chi, nfix=1.):
def copy_vals(params):
- """Saves the values and stderrs of params in temporay dict"""
+ """Save the values and stderrs of params in temporary dict."""
tmp_params = {}
for para_key in params:
tmp_params[para_key] = (params[para_key].value,
@@ -41,21 +39,20 @@ def copy_vals(params):
def restore_vals(tmp_params, params):
- """Restores values and stderrs of params in temporay dict"""
+ """Restore values and stderrs of params from a temporary dict."""
for para_key in params:
params[para_key].value, params[para_key].stderr = tmp_params[para_key]
-def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
+def conf_interval(minimizer, result, p_names=None, sigmas=(1, 2, 3),
trace=False, maxiter=200, verbose=False, prob_func=None):
- """Calculates the confidence interval for parameters
- from the given a MinimizerResult, output from minimize.
+ """Calculate the confidence interval for parameters.
The parameter for which the ci is calculated will be varied, while
the remaining parameters are re-optimized for minimizing chi-square.
The resulting chi-square is used to calculate the probability with
- a given statistic e.g. F-statistic. This function uses a 1d-rootfinder
- from scipy to find the values resulting in the searched confidence
+ a given statistic (e.g., F-test). This function uses a 1d-rootfinder
+ from SciPy to find the values resulting in the searched confidence
region.
Parameters
@@ -68,37 +65,44 @@ def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
Names of the parameters for which the ci is calculated. If None,
the ci is calculated for every parameter.
sigmas : list, optional
- The probabilities (1-alpha) to find. Default is 1,2 and 3-sigma.
+ The sigma-levels to find. Default is [1, 2, 3]. See Note below.
trace : bool, optional
- Defaults to False, if true, each result of a probability calculation
- is saved along with the parameter. This can be used to plot so
- called "profile traces".
- maxiter : int
+ Defaults to False, if True, each result of a probability calculation
+ is saved along with the parameter. This can be used to plot so-called
+ "profile traces".
+ maxiter : int, optional
Maximum of iteration to find an upper limit. Default is 200.
- prob_func : ``None`` or callable
+ verbose: bool, optional
+ Print extra debugging information. Default is False.
+ prob_func : None or callable, optional
Function to calculate the probability from the optimized chi-square.
- Default (``None``) uses built-in f_compare (F test).
- verbose: bool
- print extra debuging information. Default is ``False``.
-
+ Default is None and uses built-in f_compare (F-test).
Returns
-------
output : dict
- A dict, which contains a list of (sigma, vals)-tuples for each name.
- trace_dict : dict
- Only if trace is set true. Is a dict, the key is the parameter which
+ A dictionary that contains a list of (sigma, vals)-tuples for each name.
+ trace_dict : dict, optional
+ Only if trace is True. Is a dict, the key is the parameter which
was fixed. The values are again a dict with the names as keys, but with
an additional key 'prob'. Each contains an array of the corresponding
values.
+ Note
+ -----
+ The values for `sigma` are taken as the number of standard deviations for
+ a normal distribution and converted to probabilities. That is, the default
+ ``sigma=(1, 2, 3)`` will use probabilities of 0.6827, 0.9545, and 0.9973.
+ If any of the sigma values is less than 1, that will be interpreted as a
+ probability. That is, a value of 1 and 0.6827 will give the same results,
+ within precision.
+
See also
--------
conf_interval2d
Examples
--------
-
>>> from lmfit.printfuncs import *
>>> mini = minimize(some_func, params)
>>> mini.leastsq()
@@ -111,12 +115,13 @@ def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
Now with quantiles for the sigmas and using the trace.
- >>> ci, trace = conf_interval(mini, sigmas=(0.25, 0.5, 0.75, 0.999), trace=True)
+ >>> ci, trace = conf_interval(mini, sigmas=(0.5, 1, 2, 3), trace=True)
>>> fixed = trace['para1']['para1']
>>> free = trace['para1']['not_para1']
>>> prob = trace['para1']['prob']
- This makes it possible to plot the dependence between free and fixed.
+ This makes it possible to plot the dependence between free and fixed
+ parameters.
"""
ci = ConfidenceInterval(minimizer, result, p_names, prob_func, sigmas,
@@ -128,7 +133,7 @@ def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
def map_trace_to_names(trace, params):
- "maps trace to param names"
+ """Map trace to parameter names."""
out = {}
allnames = list(params.keys()) + ['prob']
for name in trace.keys():
@@ -141,15 +146,12 @@ def map_trace_to_names(trace, params):
class ConfidenceInterval(object):
- """
- Class used to calculate the confidence interval.
- """
+ """Class used to calculate the confidence interval."""
+
def __init__(self, minimizer, result, p_names=None, prob_func=None,
- sigmas=(0.674, 0.95, 0.997), trace=False, verbose=False,
+ sigmas=(1, 2, 3), trace=False, verbose=False,
maxiter=50):
- """
-
- """
+ """TODO: docstring in public method."""
self.verbose = verbose
self.minimizer = minimizer
self.result = result
@@ -188,11 +190,16 @@ class ConfidenceInterval(object):
self.sigmas = list(sigmas)
self.sigmas.sort()
+ self.probs = []
+ for sigma in self.sigmas:
+ if sigma < 1:
+ prob = sigma
+ else:
+ prob = erf(sigma/np.sqrt(2))
+ self.probs.append(prob)
def calc_all_ci(self):
- """
- Calculates all cis.
- """
+ """Calculate all confidence intervals."""
out = OrderedDict()
for p in self.p_names:
@@ -206,15 +213,15 @@ class ConfidenceInterval(object):
return out
def calc_ci(self, para, direction):
- """
- Calculate the ci for a single parameter for a single direction.
+ """Calculate the ci for a single parameter for a single direction.
+
Direction is either positive or negative 1.
- """
+ """
if isinstance(para, str):
para = self.params[para]
- #function used to calculate the pro
+ # function used to calculate the pro
calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
if self.trace:
x = [i.value for i in self.params.values()]
@@ -226,7 +233,7 @@ class ConfidenceInterval(object):
ret = []
orig_warn_settings = np.geterr()
np.seterr(all='ignore')
- for prob in self.sigmas:
+ for prob in self.probs:
if prob > max_prob:
ret.append((prob, direction*np.inf))
continue
@@ -252,17 +259,16 @@ class ConfidenceInterval(object):
return ret
def reset_vals(self):
+ """TODO: add method docstring."""
restore_vals(self.org, self.params)
def find_limit(self, para, direction):
- """
- For given para, search a value so that prob(val) > sigmas.
- """
+ """For given para, search a value so that prob(val) > sigmas."""
if self.verbose:
print('Calculating CI for ' + para.name)
self.reset_vals()
- #starting steps:
+ # starting steps:
if para.stderr > 0 and para.stderr < abs(para.value):
step = para.stderr
else:
@@ -274,7 +280,7 @@ class ConfidenceInterval(object):
limit = start_val
i = 0
- while old_prob < max(self.sigmas):
+ while old_prob < max(self.probs):
i = i + 1
limit += step * direction
@@ -285,13 +291,15 @@ class ConfidenceInterval(object):
# Check convergence.
if i > self.maxiter:
errmsg = "Warning, maxiter={0} reached".format(self.maxiter)
- errmsg += "and prob({0}={1}) = {2} < max(sigmas).".format(para.name, limit, new_prob)
+ errmsg += ("and prob({0}={1}) = {2} < "
+ "max(sigmas).".format(para.name, limit, new_prob))
warn(errmsg)
break
if rel_change < self.min_rel_change:
errmsg = "Warning, rel_change={0} < 0.01 ".format(rel_change)
- errmsg += " at iteration {3} and prob({0}={1}) = {2} < max(sigmas).".format(para.name, limit, new_prob, i)
+ errmsg += (" at iteration {3} and prob({0}={1}) = {2} < max"
+ "(sigmas).".format(para.name, limit, new_prob, i))
warn(errmsg)
break
@@ -300,7 +308,7 @@ class ConfidenceInterval(object):
return limit, new_prob
def calc_prob(self, para, val, offset=0., restore=False):
- """Returns the probability for given Value."""
+ """Calculate the probability for given value."""
if restore:
restore_vals(self.org, self.params)
para.value = val
@@ -317,11 +325,12 @@ class ConfidenceInterval(object):
self.params[para.name] = save_para
return prob - offset
+
def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
limits=None, prob_func=None):
- r"""Calculates confidence regions for two fixed parameters.
+ r"""Calculate confidence regions for two fixed parameters.
- The method is explained in *conf_interval*: here we are fixing
+ The method itself is explained in *conf_interval*: here we are fixing
two parameters.
Parameters
@@ -330,38 +339,37 @@ def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
The minimizer to use, holding objective function.
result : MinimizerResult
The result of running minimize().
- x_name : string
+ x_name : str
The name of the parameter which will be the x direction.
- y_name : string
+ y_name : str
The name of the parameter which will be the y direction.
- nx, ny : ints, optional
- Number of points.
- limits : tuple: optional
+ nx : int, optional
+ Number of points in the x direction.
+ ny : int, optional
+ Number of points in the y direction.
+ limits : tuple, optional
Should have the form ((x_upper, x_lower),(y_upper, y_lower)). If not
given, the default is 5 std-errs in each direction.
+ prob_func : None or callable, optional
+ Function to calculate the probability from the optimized chi-square.
+ Default is None and uses built-in f_compare (F-test).
Returns
-------
- x : (nx)-array
- x-coordinates
- y : (ny)-array
- y-coordinates
- grid : (nx,ny)-array
- grid contains the calculated probabilities.
+ x : numpy.ndarray
+ X-coordinates (same shape as nx).
+ y : numpy.ndarray
+ Y-coordinates (same shape as ny).
+ grid : numpy.ndarray
+ Grid containing the calculated probabilities (with shape (nx, ny)).
Examples
--------
-
>>> mini = Minimizer(some_func, params)
>>> result = mini.leastsq()
>>> x, y, gr = conf_interval2d(mini, result, 'para1','para2')
>>> plt.contour(x,y,gr)
- Other Parameters
- ----------------
- prob_func : ``None`` or callable
- Function to calculate the probability from the optimized chi-square.
- Default (``None``) uses built-in f_compare (F test).
"""
# used to detect that .leastsq() has run!
params = result.params
@@ -376,10 +384,8 @@ def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
y = params[y_name]
if limits is None:
- (x_upper, x_lower) = (x.value + 5 * x.stderr, x.value - 5
- * x.stderr)
- (y_upper, y_lower) = (y.value + 5 * y.stderr, y.value - 5
- * y.stderr)
+ (x_upper, x_lower) = (x.value + 5 * x.stderr, x.value - 5 * x.stderr)
+ (y_upper, y_lower) = (y.value + 5 * y.stderr, y.value - 5 * y.stderr)
elif len(limits) == 2:
(x_upper, x_lower) = limits[0]
(y_upper, y_lower) = limits[1]
@@ -392,6 +398,7 @@ def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
y.vary = False
def calc_prob(vals, restore=False):
+ """Calculate the probability."""
if restore:
restore_vals(org, result.params)
x.value = vals[0]
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index 573cb33..4709568 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -1,52 +1,65 @@
-#!/usr/bin/env python
-"""
-basic model line shapes and distribution functions
-"""
+"""Basic model line shapes and distribution functions."""
from __future__ import division
-from numpy import (pi, log, exp, sqrt, arctan, cos, where)
-from numpy.testing import assert_allclose
+from numpy import arctan, cos, exp, log, pi, sqrt, where
+from numpy.testing import assert_allclose
+from scipy.special import erf, erfc, gammaln, wofz
from scipy.special import gamma as gamfcn
-from scipy.special import gammaln, erf, erfc, wofz
log2 = log(2)
s2pi = sqrt(2*pi)
-spi = sqrt(pi)
-s2 = sqrt(2.0)
+spi = sqrt(pi)
+s2 = sqrt(2.0)
functions = ('gaussian', 'lorentzian', 'voigt', 'pvoigt', 'moffat', 'pearson7',
- 'breit_wigner', 'damped_oscillator', 'logistic', 'lognormal',
+ 'breit_wigner', 'damped_oscillator', 'dho', 'logistic', 'lognormal',
'students_t', 'expgaussian', 'donaich', 'skewed_gaussian',
'skewed_voigt', 'step', 'rectangle', 'erf', 'erfc', 'wofz',
'gamma', 'gammaln', 'exponential', 'powerlaw', 'linear',
'parabolic')
+
def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
- """1 dimensional gaussian:
- gaussian(x, amplitude, center, sigma)
+ """Return a 1-dimensional Gaussian function.
+
+ gaussian(x, amplitude, center, sigma) =
+ (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 / (2*sigma**2))
+
"""
- return (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 /(2*sigma**2))
+ return (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 / (2*sigma**2))
+
def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0):
- """1 dimensional lorentzian
- lorentzian(x, amplitude, center, sigma)
+ """Return a 1-dimensional Lorentzian function.
+
+ lorentzian(x, amplitude, center, sigma) =
+ (amplitude/(1 + ((1.0*x-center)/sigma)**2)) / (pi*sigma)
+
"""
- return (amplitude/(1 + ((1.0*x-center)/sigma)**2) ) / (pi*sigma)
+ return (amplitude/(1 + ((1.0*x-center)/sigma)**2)) / (pi*sigma)
+
def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None):
- """1 dimensional voigt function.
+ """Return a 1-dimensional Voigt function.
+
+ voigt(x, amplitude, center, sigma, gamma) =
+ amplitude*wofz(z).real / (sigma*s2pi)
+
see http://en.wikipedia.org/wiki/Voigt_profile
+
"""
if gamma is None:
gamma = sigma
- z = (x-center + 1j*gamma)/ (sigma*s2)
+ z = (x-center + 1j*gamma) / (sigma*s2)
return amplitude*wofz(z).real / (sigma*s2pi)
+
def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
- """1 dimensional pseudo-voigt:
- pvoigt(x, amplitude, center, sigma, fraction)
- = amplitude*(1-fraction)*gaussion(x, center, sigma_g) +
- amplitude*fraction*lorentzian(x, center, sigma)
+ """Return a 1-dimensional pseudo-Voigt function.
+
+ pvoigt(x, amplitude, center, sigma, fraction) =
+    amplitude*(1-fraction)*gaussian(x, center, sigma_g) +
+ amplitude*fraction*lorentzian(x, center, sigma)
where sigma_g (the sigma for the Gaussian component) is
@@ -54,101 +67,153 @@ def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
so that the Gaussian and Lorentzian components have the
same FWHM of 2*sigma.
+
"""
sigma_g = sigma / sqrt(2*log2)
return ((1-fraction)*gaussian(x, amplitude, center, sigma_g) +
- fraction*lorentzian(x, amplitude, center, sigma))
+ fraction*lorentzian(x, amplitude, center, sigma))
+
def moffat(x, amplitude=1, center=0., sigma=1, beta=1.):
- """ 1 dimensional moffat function:
+ """Return a 1-dimensional Moffat function.
+
+ moffat(x, amplitude, center, sigma, beta) =
+ amplitude / (((x - center)/sigma)**2 + 1)**beta
- moffat(amplitude, center, sigma, beta) = amplitude / (((x - center)/sigma)**2 + 1)**beta
"""
return amplitude / (((x - center)/sigma)**2 + 1)**beta
+
def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
- """pearson7 lineshape, using the wikipedia definition:
+ """Return a Pearson7 lineshape.
+ Using the wikipedia definition:
pearson7(x, center, sigma, expon) =
- amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5))
+ amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5))
where arg = (x-center)/sigma
and beta() is the beta function.
+
"""
arg = (x-center)/sigma
scale = amplitude * gamfcn(expon)/(gamfcn(0.5)*gamfcn(expon-0.5))
- return scale*(1+arg**2)**(-expon)/sigma
+ return scale*(1+arg**2)**(-expon)/sigma
+
def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0):
- """Breit-Wigner-Fano lineshape:
- = amplitude*(q*sigma/2 + x - center)**2 / ( (sigma/2)**2 + (x - center)**2 )
+ """Return a Breit-Wigner-Fano lineshape.
+
+ breit_wigner(x, amplitude, center, sigma, q) =
+ amplitude*(q*sigma/2 + x - center)**2 /
+ ( (sigma/2)**2 + (x - center)**2 )
+
"""
gam = sigma/2.0
- return amplitude*(q*gam + x - center)**2 / (gam*gam + (x-center)**2)
+ return amplitude*(q*gam + x - center)**2 / (gam*gam + (x-center)**2)
+
def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1):
- """amplitude for a damped harmonic oscillator
- amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
+ """Return the amplitude for a damped harmonic oscillator.
+
+ damped_oscillator(x, amplitude, center, sigma) =
+ amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
+
"""
center = max(1.e-9, abs(center))
- return (amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
+ return amplitude/sqrt((1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2)
+
+
+def dho(x, amplitude=1., center=0., sigma=1., gamma=1.0):
+ """Return a Damped Harmonic Oscillator.
+
+ Similar to version from PAN
+ dho(x, amplitude, center, sigma, gamma) =
+ (amplitude*sigma*(bose/pi)* (lm - lp)
+
+ where
+ bose(x, gamma) = 1.0/ (1.0 - exp(-x/gamma))
+ lm(x, center, sigma) = 1.0 / ((x-center)**2 + sigma**2)
+ lp(x, center, sigma) = 1.0 / ((x+center)**2 + sigma**2)
+
+ """
+ bose = 1.0/(1.0 - exp(-x/gamma))
+ lm = 1.0/((x-center)**2 + sigma**2)
+ lp = 1.0/((x+center)**2 + sigma**2)
+ return amplitude*sigma*pi*bose*(lm - lp)
+
def logistic(x, amplitude=1., center=0., sigma=1.):
- """Logistic lineshape (yet another sigmoidal curve)
+ """Return a Logistic lineshape (yet another sigmoidal curve).
+
+ logistic(x, amplitude, center, sigma) =
= amplitude*(1. - 1. / (1 + exp((x-center)/sigma)))
+
"""
return amplitude*(1. - 1./(1. + exp((x-center)/sigma)))
+
def lognormal(x, amplitude=1.0, center=0., sigma=1):
- """log-normal function
- lognormal(x, center, sigma)
+ """Return a log-normal function.
+
+ lognormal(x, amplitude, center, sigma)
= (amplitude/x) * exp(-(ln(x) - center)/ (2* sigma**2))
+
"""
- x[where(x<=1.e-19)] = 1.e-19
- return (amplitude/(x*sigma*s2pi)) * exp(-(log(x)-center)**2/ (2* sigma**2))
+ x[where(x <= 1.e-19)] = 1.e-19
+ return (amplitude/(x*sigma*s2pi)) * exp(-(log(x)-center)**2 / (2*sigma**2))
+
def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
- """Student's t distribution:
+ """Return Student's t distribution.
+
+ students_t(x, amplitude, center, sigma) =
+
gamma((sigma+1)/2) (1 + (x-center)**2/sigma)^(-(sigma+1)/2)
- = -------------------------
+ -------------------------
sqrt(sigma*pi)gamma(sigma/2)
"""
- s1 = (sigma+1)/2.0
+ s1 = (sigma+1)/2.0
denom = (sqrt(sigma*pi)*gamfcn(sigma/2))
return amplitude*(1 + (x-center)**2/sigma)**(-s1) * gamfcn(s1) / denom
def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
- """exponentially modified Gaussian
+    """Return an exponentially modified Gaussian.
- = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
- erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]
+    expgaussian(x, amplitude, center, sigma, gamma)
+ = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
+ erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]
http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
+
"""
gss = gamma*sigma*sigma
- arg1 = gamma*(center +gss/2.0 - x)
+ arg1 = gamma*(center + gss/2.0 - x)
arg2 = (center + gss - x)/(s2*sigma)
return amplitude*(gamma/2) * exp(arg1) * erfc(arg2)
+
def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
- """Doniach Sunjic asymmetric lineshape, used for photo-emission
+ """Return a Doniach Sunjic asymmetric lineshape, used for photo-emission.
- = amplitude* cos(pi*gamma/2 + (1-gamma) arctan((x-center)/sigma) /
- (sigma**2 + (x-center)**2)**[(1-gamma)/2]
+ donaich(x, amplitude, center, sigma, gamma) =
+ amplitude* cos(pi*gamma/2 + (1-gamma) arctan((x-center)/sigma) /
+ (sigma**2 + (x-center)**2)**[(1-gamma)/2]
see http://www.casaxps.com/help_manual/line_shapes.htm
+
"""
arg = (x-center)/sigma
gm1 = (1.0 - gamma)
scale = amplitude/(sigma**gm1)
return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
+
def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
- """Gaussian, skewed with error function, equal to
+ """Return a Gaussian lineshape, skewed with error function.
- gaussian(x, center, sigma)*(1+erf(beta*(x-center)))
+ Equal to: gaussian(x, center, sigma)*(1+erf(beta*(x-center)))
with beta = gamma/(sigma*sqrt(2))
@@ -156,12 +221,15 @@ def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
gamma > 0: tail to high value of centroid
see http://en.wikipedia.org/wiki/Skew_normal_distribution
+
"""
asym = 1 + erf(gamma*(x-center)/(s2*sigma))
return asym * gaussian(x, amplitude, center, sigma)
+
def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
- """Skewed Voigt lineshape, skewed with error function
+ """Return a Voigt lineshape, skewed with error function.
+
useful for ad-hoc Compton scatter profile
with beta = skew/(sigma*sqrt(2))
@@ -171,13 +239,16 @@ def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
skew > 0: tail to high value of centroid
see http://en.wikipedia.org/wiki/Skew_normal_distribution
+
"""
beta = skew/(s2*sigma)
asym = 1 + erf(beta*(x-center))
return asym * voigt(x, amplitude, center, sigma, gamma=gamma)
+
def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
- """step function:
+ """Return a step function.
+
starts at 0.0, ends at amplitude, with half-max at center, and
rising with form:
'linear' (default) = amplitude * min(1, max(0, arg))
@@ -186,8 +257,9 @@ def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
'logistic' = amplitude * [1 - 1/(1 + exp(arg))]
where arg = (x - center)/sigma
+
"""
- if abs(sigma) < 1.e-13:
+ if abs(sigma) < 1.e-13:
sigma = 1.e-13
out = (x - center)/sigma
@@ -202,9 +274,12 @@ def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
out[where(out > 1)] = 1.0
return amplitude*out
+
def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
center2=1.0, sigma2=1.0, form='linear'):
- """rectangle function: step up, step down (see step function)
+ """Return a rectangle function: step up, step down.
+
+ (see step function)
starts at 0.0, rises to amplitude (at center1 with width sigma1)
then drops to 0.0 (at center2 with width sigma2) with form:
'linear' (default) = ramp_up + ramp_down
@@ -214,10 +289,11 @@ def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
where arg1 = (x - center1)/sigma1
and arg2 = -(x - center2)/sigma2
+
"""
- if abs(sigma1) < 1.e-13:
+ if abs(sigma1) < 1.e-13:
sigma1 = 1.e-13
- if abs(sigma2) < 1.e-13:
+ if abs(sigma2) < 1.e-13:
sigma2 = 1.e-13
arg1 = (x - center1)/sigma1
@@ -229,58 +305,90 @@ def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
elif form in ('atan', 'arctan'):
out = (arctan(arg1) + arctan(arg2))/pi
else:
- arg1[where(arg1 < 0)] = 0.0
- arg1[where(arg1 > 1)] = 1.0
- arg2[where(arg2 > 0)] = 0.0
+ arg1[where(arg1 < 0)] = 0.0
+ arg1[where(arg1 > 1)] = 1.0
+ arg2[where(arg2 > 0)] = 0.0
arg2[where(arg2 < -1)] = -1.0
out = arg1 + arg2
return amplitude*out
+
def _erf(x):
- """error function. = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])"""
+ """Return the error function.
+
+ erf = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])
+
+ """
return erf(x)
+
def _erfc(x):
- """complented error function. = 1 - erf(x)"""
+ """Return the complementary error function.
+
+ erfc = 1 - erf(x)
+
+ """
return erfc(x)
+
def _wofz(x):
- """fadeeva function for complex argument. = exp(-x**2)*erfc(-i*x)"""
+    """Return the Faddeeva function for complex argument.
+
+ wofz = exp(-x**2)*erfc(-i*x)
+
+ """
return wofz(x)
+
def _gamma(x):
- """gamma function"""
+ """Return the gamma function."""
return gamfcn(x)
+
def _gammaln(x):
- """log of absolute value of gamma function"""
+ """Return the log of absolute value of gamma function."""
return gammaln(x)
def exponential(x, amplitude=1, decay=1):
- "x -> amplitude * exp(-x/decay)"
+ """Return an exponential function.
+
+ x -> amplitude * exp(-x/decay)
+
+ """
return amplitude * exp(-x/decay)
def powerlaw(x, amplitude=1, exponent=1.0):
- "x -> amplitude * x**exponent"
+ """Return the powerlaw function.
+
+ x -> amplitude * x**exponent
+
+ """
return amplitude * x**exponent
def linear(x, slope=1.0, intercept=0.0):
- "x -> slope * x + intercept"
+ """Return a linear function.
+
+ x -> slope * x + intercept
+
+ """
return slope * x + intercept
def parabolic(x, a=0.0, b=0.0, c=0.0):
- "x -> a * x**2 + b * x + c"
+ """Return a parabolic function.
+
+ x -> a * x**2 + b * x + c
+
+ """
return a * x**2 + b * x + c
def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
err_msg='', verbose=True):
- """returns whether all parameter values in actual are close to
- those in desired"""
+ """Check whether all actual and desired parameter values are close."""
for param_name, value in desired.items():
assert_allclose(actual[param_name], value, rtol,
atol, err_msg, verbose)
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index 46a582f..74df6e1 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -1,43 +1,47 @@
-"""
-Simple minimizer is a wrapper around scipy.leastsq, allowing a
-user to build a fitting model as a function of general purpose
-Fit Parameters that can be fixed or floated, bounded, and written
-as a simple expression of other Fit Parameters.
+"""Simple minimizer is a wrapper around scipy.leastsq, allowing a user to build
+a fitting model as a function of general purpose Fit Parameters that can be
+fixed or varied, bounded, and written as a simple expression of other Fit
+Parameters.
-The user sets up a model in terms of instance of Parameters, writes a
+The user sets up a model in terms of instance of Parameters and writes a
function-to-be-minimized (residual function) in terms of these Parameters.
+Original copyright:
Copyright (c) 2011 Matthew Newville, The University of Chicago
- <newville@cars.uchicago.edu>
+
+See LICENSE for more complete authorship information and license.
+
"""
+from collections import namedtuple
from copy import deepcopy
-import numpy as np
-from numpy import (dot, eye, ndarray, ones_like,
- sqrt, take, transpose, triu)
-from numpy.dual import inv
-from numpy.linalg import LinAlgError
import multiprocessing
import numbers
+import warnings
-##
-## scipy version notes:
-## currently scipy 0.14 is required.
-## feature scipy version added
-## minimize 0.11
-## OptimizeResult 0.13
-## diff_evolution 0.15
-## least_squares 0.17
-##
-
+import numpy as np
+from numpy import dot, eye, ndarray, ones_like, sqrt, take, transpose, triu
+from numpy.dual import inv
+from numpy.linalg import LinAlgError
+from scipy.optimize import brute as scipy_brute
from scipy.optimize import leastsq as scipy_leastsq
from scipy.optimize import minimize as scipy_minimize
+from scipy.optimize import differential_evolution
+from scipy.stats import cauchy as cauchy_dist
+from scipy.stats import norm as norm_dist
+import six
-# differential_evolution is only present in scipy >= 0.15
-try:
- from scipy.optimize import differential_evolution as scipy_diffev
-except ImportError:
- from ._differentialevolution import differential_evolution as scipy_diffev
+# use locally modified version of uncertainties package
+from . import uncertainties
+from .parameter import Parameter, Parameters
+
+# scipy version notes:
+# currently scipy 0.15 is required.
+# feature scipy version added
+# minimize 0.11
+# OptimizeResult 0.13
+# diff_evolution 0.15
+# least_squares 0.17
# check for scipy.opitimize.least_squares
HAS_LEAST_SQUARES = False
@@ -63,41 +67,41 @@ try:
except ImportError:
pass
-from .parameter import Parameter, Parameters
+# define the namedtuple here so pickle will work with the MinimizerResult
+Candidate = namedtuple('Candidate', ['params', 'score'])
-# use locally modified version of uncertainties package
-from . import uncertainties
+def asteval_with_uncertainties(*vals, **kwargs):
+ """Calculate object value, given values for variables.
+ This is used by the uncertainties package to calculate the
+ uncertainty in an object even with a complicated expression.
-def asteval_with_uncertainties(*vals, **kwargs):
- """
- given values for variables, calculate object value.
- This is used by the uncertainties package to calculate
- the uncertainty in an object even with a complicated
- expression.
"""
_obj = kwargs.get('_obj', None)
_pars = kwargs.get('_pars', None)
_names = kwargs.get('_names', None)
_asteval = _pars._asteval
if (_obj is None or _pars is None or _names is None or
- _asteval is None or _obj._expr_ast is None):
+ _asteval is None or _obj._expr_ast is None):
return 0
for val, name in zip(vals, _names):
_asteval.symtable[name] = val
return _asteval.eval(_obj._expr_ast)
+
wrap_ueval = uncertainties.wrap(asteval_with_uncertainties)
def eval_stderr(obj, uvars, _names, _pars):
- """evaluate uncertainty and set .stderr for a parameter `obj`
- given the uncertain values `uvars` (a list of uncertainties.ufloats),
- a list of parameter names that matches uvars, and a dict of param
- objects, keyed by name.
+ """Evaluate uncertainty and set .stderr for a parameter `obj`.
+
+ Given the uncertain values `uvars` (a list of uncertainties.ufloats), a
+ list of parameter names that matches uvars, and a dict of param objects,
+ keyed by name.
This uses the uncertainties package wrapped function to evaluate the
uncertainty for an arbitrary expression (in obj._expr_ast) of parameters.
+
"""
if not isinstance(obj, Parameter) or getattr(obj, '_expr_ast', None) is None:
return
@@ -109,7 +113,8 @@ def eval_stderr(obj, uvars, _names, _pars):
class MinimizerException(Exception):
- """General Purpose Exception"""
+ """General Purpose Exception."""
+
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
@@ -118,21 +123,6 @@ class MinimizerException(Exception):
return "\n%s" % self.msg
-def _differential_evolution(func, x0, **kwds):
- """
- A wrapper for differential_evolution that can be used with scipy.minimize
- """
- kwargs = dict(args=(), strategy='best1bin', maxiter=None, popsize=15,
- tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
- callback=None, disp=False, polish=True,
- init='latinhypercube')
-
- for k, v in kwds.items():
- if k in kwargs:
- kwargs[k] = v
-
- return scipy_diffev(func, kwds['bounds'], **kwargs)
-
SCALAR_METHODS = {'nelder': 'Nelder-Mead',
'powell': 'Powell',
'cg': 'CG',
@@ -148,89 +138,194 @@ SCALAR_METHODS = {'nelder': 'Nelder-Mead',
'differential_evolution': 'differential_evolution'}
-class MinimizerResult(object):
+def reduce_chisquare(r):
+ """Reduce residual array to scalar (chi-square).
+
+ Calculate the chi-square value from the residual array `r`: (r*r).sum()
+
+ Parameters
+ ----------
+ r : numpy.ndarray
+ Residual array.
+
+ Returns
+ -------
+ float
+ Chi-square calculated from the residual array
+
"""
- A class that holds the results of a minimization.
+ return (r*r).sum()
+
+
+def reduce_negentropy(r):
+ """Reduce residual array to scalar (negentropy).
+
+ Reduce residual array `r` to scalar using negative entropy and the normal
+ (Gaussian) probability distribution of `r` as pdf:
- This is a plain container (with no methods of its own) that
- simply holds the results of the minimization. Fit results
- include data such as status and error messages, fit statistics,
- and the updated (i.e. best-fit) parameters themselves :attr:`params`.
+ (norm.pdf(r)*norm.logpdf(r)).sum()
+
+ since pdf(r) = exp(-r*r/2)/sqrt(2*pi), this is
+ ((r*r/2 - log(sqrt(2*pi))) * exp(-r*r/2)).sum()
+
+ Parameters
+ ----------
+ r : numpy.ndarray
+ Residual array.
- The list of (possible) `MinimizerResult` attributes follows.
+ Returns
+ -------
+ float
+ Negative entropy value calculated from the residual array
+
+ """
+ return (norm_dist.pdf(r)*norm_dist.logpdf(r)).sum()
+
+
+def reduce_cauchylogpdf(r):
+ """Reduce residual array to scalar (cauchylogpdf).
+
+ Reduce residual array `r` to scalar using negative log-likelihood and a
+ Cauchy (Lorentzian) distribution of `r`:
+
+ -scipy.stats.cauchy.logpdf(r)
+
+ (where the Cauchy pdf = 1/(pi*(1+r*r))). This gives greater
+ suppression of outliers compared to normal sum-of-squares.
+
+ Parameters
+ ----------
+ r : numpy.ndarray
+ Residual array.
+
+ Returns
+ -------
+ float
+ Negative entropy value calculated from the residual array
+
+ """
+ return -cauchy_dist.logpdf(r).sum()
+
+
+class MinimizerResult(object):
+ r"""
+ The results of a minimization.
+
+ Minimization results include data such as status and error messages,
+ fit statistics, and the updated (i.e., best-fit) parameters themselves
+ in the :attr:`params` attribute.
+
+ The list of (possible) `MinimizerResult` attributes is given below:
Attributes
----------
- params : :class:`lmfit.parameters.Parameters`
+ params : :class:`~lmfit.parameter.Parameters`
The best-fit parameters resulting from the fit.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
var_names : list
Ordered list of variable parameter names used in optimization, and
- useful for understanding the the values in :attr:`init_vals` and
+ useful for understanding the values in :attr:`init_vals` and
:attr:`covar`.
covar : numpy.ndarray
- covariance matrix from minimization (`leastsq` only), with
- rows/columns using :attr:`var_names`.
+ Covariance matrix from minimization (`leastsq` only), with
+ rows and columns corresponding to :attr:`var_names`.
init_vals : list
List of initial values for variable parameters using :attr:`var_names`.
init_values : dict
Dictionary of initial values for variable parameters.
nfev : int
- Number of function evaluations
+ Number of function evaluations.
success : bool
- Boolean (``True``/``False``) for whether fit succeeded.
+ True if the fit succeeded, otherwise False.
errorbars : bool
- Boolean (``True``/``False``) for whether uncertainties were estimated.
- message : string
+ True if uncertainties were estimated, otherwise False.
+ message : str
Message about fit success.
ier : int
Integer error value from :scipydoc:`optimize.leastsq` (`leastsq` only).
- lmdif_message : string
+ lmdif_message : str
Message from :scipydoc:`optimize.leastsq` (`leastsq` only).
nvarys : int
- Number of variables in fit :math:`N_{\\rm varys}`.
+ Number of variables in fit: :math:`N_{\rm varys}`.
ndata : int
- Number of data points: :math:`N`
+ Number of data points: :math:`N`.
nfree : int
- Degrees of freedom in fit: :math:`N - N_{\\rm varys}`
+ Degrees of freedom in fit: :math:`N - N_{\rm varys}`.
residual : numpy.ndarray
- Residual array :math:`{\\rm Resid_i}`. Return value of the objective
- function.
+ Residual array :math:`{\rm Resid_i}`. Return value of the objective
+ function when using the best-fit values of the parameters.
chisqr : float
- Chi-square: :math:`\chi^2 = \sum_i^N [{\\rm Resid}_i]^2`
+ Chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`.
redchi : float
Reduced chi-square:
- :math:`\chi^2_{\\nu}= {\chi^2} / {(N - N_{\\rm varys})}`
+ :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}`.
aic : float
- Akaike Information Criterion statistic.
+ Akaike Information Criterion statistic:
+ :math:`N \ln(\chi^2/N) + 2 N_{\rm varys}`.
bic : float
- Bayesian Information Criterion statistic.
+ Bayesian Information Criterion statistic:
+ :math:`N \ln(\chi^2/N) + \ln(N) N_{\rm varys}`.
+ flatchain : pandas.DataFrame
+ A flatchain view of the sampling chain from the `emcee` method.
+
+ Methods
+ -------
+ show_candidates
+ Pretty_print() representation of candidates from the `brute` method.
"""
+
def __init__(self, **kws):
for key, val in kws.items():
setattr(self, key, val)
@property
def flatchain(self):
- """
- A flatchain view of the sampling chain from the `emcee` method.
- """
+ """A flatchain view of the sampling chain from the `emcee` method."""
if hasattr(self, 'chain'):
if HAS_PANDAS:
- return pd.DataFrame(self.chain.reshape((-1, self.nvarys)),
- columns=self.var_names)
+ if len(self.chain.shape) == 4:
+ return pd.DataFrame(self.chain[0, ...].reshape((-1, self.nvarys)),
+ columns=self.var_names)
+ elif len(self.chain.shape) == 3:
+ return pd.DataFrame(self.chain.reshape((-1, self.nvarys)),
+ columns=self.var_names)
else:
raise NotImplementedError('Please install Pandas to see the '
'flattened chain')
else:
return None
+ def show_candidates(self, candidate_nmb='all'):
+ """A pretty_print() representation of the candidates.
+
+ Showing candidates (default is 'all') or the specified candidate-#
+ from the `brute` method.
+
+ Parameters
+ ----------
+ candidate_nmb : int or 'all'
+ The candidate-number to show using the :meth:`pretty_print` method.
+
+ """
+ if hasattr(self, 'candidates'):
+ try:
+ candidate = self.candidates[candidate_nmb]
+ print("\nCandidate #{}, chisqr = "
+ "{:.3f}".format(candidate_nmb, candidate.score))
+ candidate.params.pretty_print()
+ except:
+ for i, candidate in enumerate(self.candidates):
+ print("\nCandidate #{}, chisqr = "
+ "{:.3f}".format(i, candidate.score))
+ candidate.params.pretty_print()
+
class Minimizer(object):
- """A general minimizer for curve fitting"""
+ """A general minimizer for curve fitting and optimization."""
+
_err_nonparam = ("params must be a minimizer.Parameters() instance or list "
"of Parameters()")
_err_maxfev = ("Too many function calls (max set to %i)! Use:"
@@ -239,61 +334,81 @@ class Minimizer(object):
def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
iter_cb=None, scale_covar=True, nan_policy='raise',
- **kws):
+ reduce_fcn=None, **kws):
"""
- Initialization of the Minimzer class
-
Parameters
----------
userfcn : callable
- objective function that returns the residual (difference between
- model and data) to be minimized in a least squares sense. The
- function must have the signature:
- `userfcn(params, *fcn_args, **fcn_kws)`
- params : :class:`lmfit.parameter.Parameters` object.
- contains the Parameters for the model.
+ Objective function that returns the residual (difference between
+ model and data) to be minimized in a least-squares sense. This
+ function must have the signature::
+
+ userfcn(params, *fcn_args, **fcn_kws)
+
+ params : :class:`~lmfit.parameter.Parameters`
+ Contains the Parameters for the model.
fcn_args : tuple, optional
- positional arguments to pass to `userfcn`.
+ Positional arguments to pass to `userfcn`.
fcn_kws : dict, optional
- keyword arguments to pass to `userfcn`.
+ Keyword arguments to pass to `userfcn`.
iter_cb : callable, optional
Function to be called at each fit iteration. This function should
- have the signature:
- `iter_cb(params, iter, resid, *fcn_args, **fcn_kws)`,
- where where `params` will have the current parameter values, `iter`
+ have the signature::
+
+ iter_cb(params, iter, resid, *fcn_args, **fcn_kws)
+
+ where `params` will have the current parameter values, `iter`
the iteration, `resid` the current residual array, and `*fcn_args`
- and `**fcn_kws` as passed to the objective function.
+ and `**fcn_kws` are passed to the objective function.
scale_covar : bool, optional
- Whether to automatically scale the covariance matrix (leastsq
- only).
+ Whether to automatically scale the covariance matrix (`leastsq` only).
nan_policy : str, optional
- Specifies action if `userfcn` (or a Jacobian) returns nan
+ Specifies action if `userfcn` (or a Jacobian) returns NaN
values. One of:
- 'raise' - a `ValueError` is raised
- 'propagate' - the values returned from `userfcn` are un-altered
- 'omit' - the non-finite values are filtered.
+ - 'raise' : a `ValueError` is raised
+ - 'propagate' : the values returned from `userfcn` are un-altered
+ - 'omit' : non-finite values are filtered
+
+ reduce_fcn : str or callable, optional
+ Function to convert a residual array to a scalar value for the scalar
+ minimizers. Optional values are (where `r` is the residual array):
+
+ - None : sum of squares of residual [default]
+
+ = (r*r).sum()
+
+ - 'negentropy' : neg entropy, using normal distribution
- kws : dict, optional
+ = rho*log(rho).sum()`, where rho = exp(-r*r/2)/(sqrt(2*pi))
+
+ - 'neglogcauchy': neg log likelihood, using Cauchy distribution
+
+ = -log(1/(pi*(1+r*r))).sum()
+
+ - callable : must take one argument (`r`) and return a float.
+
+ **kws : dict, optional
Options to pass to the minimizer being used.
Notes
-----
- The objective function should return the value to be minimized. For the
- Levenberg-Marquardt algorithm from :meth:`leastsq` or
- :meth:`least_squares`, this returned value must
- be an array, with a length greater than or equal to the number of
- fitting variables in the model. For the other methods, the return value
- can either be a scalar or an array. If an array is returned, the sum of
- squares of the array will be sent to the underlying fitting method,
- effectively doing a least-squares optimization of the return values. If
- the objective function returns non-finite values then a `ValueError`
+ The objective function should return the value to be minimized. For
+ the Levenberg-Marquardt algorithm from :meth:`leastsq` or
+ :meth:`least_squares`, this returned value must be an array, with a
+ length greater than or equal to the number of fitting variables in
+ the model. For the other methods, the return value can either be a
+ scalar or an array. If an array is returned, the sum of squares of
+ the array will be sent to the underlying fitting method, effectively
+ doing a least-squares optimization of the return values. If the
+ objective function returns non-finite values then a `ValueError`
will be raised because the underlying solvers cannot deal with them.
- A common use for the `fcn_args` and `fcn_kwds` would be to pass in
+ A common use for the `fcn_args` and `fcn_kws` would be to pass in
other data needed to calculate the residual, including such things
as the data array, dependent variable, uncertainties in the data,
and other data structures for the model calculation.
+
"""
self.userfcn = userfcn
self.userargs = fcn_args
@@ -319,44 +434,47 @@ class Minimizer(object):
self.redchi = None
self.covar = None
self.residual = None
-
+ self.reduce_fcn = reduce_fcn
self.params = params
self.jacfcn = None
self.nan_policy = nan_policy
@property
def values(self):
- """dict : Parameter values in a simple dictionary.
- """
+ """Return Parameter values in a simple dictionary."""
return {name: p.value for name, p in self.result.params.items()}
def __residual(self, fvars, apply_bounds_transformation=True):
- """
- Residual function used for least-squares fit.
- With the new, candidate values of fvars (the fitting variables), this
- evaluates all parameters, including setting bounds and evaluating
- constraints, and then passes those to the user-supplied function to
- calculate the residual.
+ """Residual function used for least-squares fit.
+
+ With the new, candidate values of `fvars` (the fitting variables),
+ this evaluates all parameters, including setting bounds and
+ evaluating constraints, and then passes those to the user-supplied
+ function to calculate the residual.
Parameters
- ----------------
- fvars : np.ndarray
+ ----------
+ fvars : numpy.ndarray
Array of new parameter values suggested by the minimizer.
apply_bounds_transformation : bool, optional
- If true, apply lmfits parameter transformation to constrain
- parameters. This is needed for solvers without inbuilt support for
- bounds.
+ Whether to apply lmfits parameter transformation to constrain
+ parameters (default is True). This is needed for solvers without
+ inbuilt support for bounds.
Returns
- -----------
- residuals : np.ndarray
- The evaluated function values for given fvars.
+ -------
+ residual : numpy.ndarray
+ The evaluated function values for given `fvars`.
+
"""
# set parameter values
if self._abort:
return None
params = self.result.params
+ if fvars.shape == ():
+ fvars = fvars.reshape((1,))
+
if apply_bounds_transformation:
for name, val in zip(self.result.var_names, fvars):
params[name].value = params[name].from_internal(val)
@@ -379,12 +497,12 @@ class Minimizer(object):
return np.asarray(out).ravel()
def __jacobian(self, fvars):
- """
- analytical jacobian to be used with the Levenberg-Marquardt
+        """Return analytical jacobian to be used with Levenberg-Marquardt.
modified 02-01-2012 by Glenn Jones, Aberystwyth University
- modified 06-29-2015 M Newville to apply gradient scaling
- for bounded variables (thanks to JJ Helmus, N Mayorov)
+ modified 06-29-2015 M Newville to apply gradient scaling for
+ bounded variables (thanks to JJ Helmus, N Mayorov)
+
"""
pars = self.result.params
grad_scale = ones_like(fvars)
@@ -408,8 +526,32 @@ class Minimizer(object):
return jac
def penalty(self, fvars):
+ """Penalty function for scalar minimizers.
+
+ Parameters
+ ----------
+ fvars : numpy.ndarray
+ Array of values for the variable parameters.
+
+ Returns
+ -------
+ r : float
+ The evaluated user-supplied objective function.
+
+ If the objective function is an array of size greater than 1,
+ use the scalar returned by `self.reduce_fcn`. This defaults
+ to sum-of-squares, but can be replaced by other options.
+
"""
- Penalty function for scalar minimizers.
+ r = self.__residual(fvars)
+ if isinstance(r, ndarray) and r.size > 1:
+ r = self.reduce_fcn(r)
+ if isinstance(r, ndarray) and r.size > 1:
+ r = r.sum()
+ return r
+
+ def penalty_brute(self, fvars):
+ """Penalty function for brute force method.
Parameters
----------
@@ -419,17 +561,46 @@ class Minimizer(object):
Returns
-------
r : float
- The user evaluated user-supplied objective function. If the
- objective function is an array, return the array sum-of-squares
+ The evaluated user-supplied objective function.
+
+ If the objective function is an array of size greater than 1,
+ use the scalar returned by `self.reduce_fcn`. This defaults
+ to sum-of-squares, but can be replaced by other options.
+
"""
- r = self.__residual(fvars)
- if isinstance(r, ndarray):
+ r = self.__residual(fvars, apply_bounds_transformation=False)
+ if isinstance(r, ndarray) and r.size > 1:
r = (r*r).sum()
return r
def prepare_fit(self, params=None):
- """
- Prepares parameters for fitting, return array of initial values.
+ """Prepare parameters for fitting.
+
+ Prepares and initializes model and Parameters for subsequent
+ fitting. This routine prepares the conversion of :class:`Parameters`
+ into fit variables, organizes parameter bounds, and parses, "compiles"
+ and checks constraint expressions. The method also creates and returns
+ a new instance of a :class:`MinimizerResult` object that contains the
+ copy of the Parameters that will actually be varied in the fit.
+
+ Parameters
+ ----------
+ params : :class:`~lmfit.parameter.Parameters`, optional
+ Contains the Parameters for the model; if None, then the
+ Parameters used to initialize the Minimizer object are used.
+
+ Returns
+ -------
+ :class:`MinimizerResult`
+
+ Notes
+ -----
+ This method is called directly by the fitting methods, and it is
+ generally not necessary to call this function explicitly.
+
+ .. versionchanged:: 0.9.0
+ Return value changed to :class:`MinimizerResult`.
+
"""
# determine which parameters are actually variables
# and which are defined expressions.
@@ -473,65 +644,92 @@ class Minimizer(object):
result.nvarys = len(result.var_names)
result.init_values = {n: v for n, v in zip(result.var_names,
result.init_vals)}
+
+ # set up reduce function for scalar minimizers
+ # 1. user supplied callable
+ # 2. string starting with 'neglogc' or 'negent'
+ # 3. sum of squares
+ if not callable(self.reduce_fcn):
+ if isinstance(self.reduce_fcn, six.string_types):
+ if self.reduce_fcn.lower().startswith('neglogc'):
+ self.reduce_fcn = reduce_cauchylogpdf
+ elif self.reduce_fcn.lower().startswith('negent'):
+ self.reduce_fcn = reduce_negentropy
+ if self.reduce_fcn is None:
+ self.reduce_fcn = reduce_chisquare
return result
def unprepare_fit(self):
- """
- Clean fit state, so that subsequent fits will need to call prepare_fit.
+ """Clean fit state, so that subsequent fits need to call prepare_fit().
+
+ Removes AST compilations of constraint expressions.
- Removes AST compilations of constraint expressions.
"""
pass
def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
- """
- Scalar minimization using `scipy.optimize.minimize`.
+ """Scalar minimization using :scipydoc:`optimize.minimize`.
+
+ Perform fit with any of the scalar minimization algorithms supported by
+ :scipydoc:`optimize.minimize`. Default argument values are:
+ +-------------------------+-----------------+-----------------------------------------------------+
+ | :meth:`scalar_minimize` | Default Value | Description |
+ | arg | | |
+ +=========================+=================+=====================================================+
+ | method | ``Nelder-Mead`` | fitting method |
+ +-------------------------+-----------------+-----------------------------------------------------+
+ | tol | 1.e-7 | fitting and parameter tolerance |
+ +-------------------------+-----------------+-----------------------------------------------------+
+ | hess | None | Hessian of objective function |
+ +-------------------------+-----------------+-----------------------------------------------------+
Parameters
----------
method : str, optional
- Name of the fitting method to use.
- One of:
- 'Nelder-Mead' (default)
- 'L-BFGS-B'
- 'Powell'
- 'CG'
- 'Newton-CG'
- 'COBYLA'
- 'TNC'
- 'trust-ncg'
- 'dogleg'
- 'SLSQP'
- 'differential_evolution'
-
- params : Parameters, optional
- Parameters to use as starting points.
- kws : dict, optional
- Minimizer options pass to `scipy.optimize.minimize`.
-
- If the objective function returns a numpy array instead
- of the expected scalar, the sum of squares of the array
- will be used.
-
- Note that bounds and constraints can be set on Parameters
- for any of these methods, so are not supported separately
- for those designed to use bounds. However, if you use the
- differential_evolution option you must specify finite
- (min, max) for each Parameter.
+ Name of the fitting method to use. One of:
+
+ - 'Nelder-Mead' (default)
+ - 'L-BFGS-B'
+ - 'Powell'
+ - 'CG'
+ - 'Newton-CG'
+ - 'COBYLA'
+ - 'TNC'
+ - 'trust-ncg'
+ - 'dogleg'
+ - 'SLSQP'
+ - 'differential_evolution'
+
+ params : :class:`~lmfit.parameter.Parameters`, optional
+ Parameters to use as starting point.
+ **kws : dict, optional
+ Minimizer options pass to :scipydoc:`optimize.minimize`.
Returns
-------
:class:`MinimizerResult`
- Object containing the optimized parameter
- and several goodness-of-fit statistics.
+ Object containing the optimized parameter and several
+ goodness-of-fit statistics.
.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`
- """
+ Return value changed to :class:`MinimizerResult`.
+ Notes
+ -----
+ If the objective function returns a NumPy array instead
+ of the expected scalar, the sum of squares of the array
+ will be used.
+
+ Note that bounds and constraints can be set on Parameters
+ for any of these methods, so are not supported separately
+ for those designed to use bounds. However, if you use the
+ differential_evolution method you must specify finite
+ (min, max) for each varying Parameter.
+
+ """
result = self.prepare_fit(params=params)
result.method = method
vars = result.init_vals
@@ -558,21 +756,22 @@ class Minimizer(object):
fmin_kws.pop('jac')
if method == 'differential_evolution':
- fmin_kws['method'] = _differential_evolution
- bounds = np.asarray([(par.min, par.max)
- for par in params.values()])
- varying = np.asarray([par.vary for par in params.values()])
-
- if not np.all(np.isfinite(bounds[varying])):
- raise ValueError('With differential evolution finite bounds '
- 'are required for each varying parameter')
- bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
- fmin_kws['bounds'] = bounds
-
- # in scipy 0.14 this can be called directly from scipy_minimize
- # When minimum scipy is 0.14 the following line and the else
- # can be removed.
- ret = _differential_evolution(self.penalty, vars, **fmin_kws)
+ for par in params.values():
+ if (par.vary and
+ not (np.isfinite(par.min) and np.isfinite(par.max))):
+ raise ValueError('differential_evolution requires finite '
+ 'bounds for all varying parameters')
+
+ _bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
+ kwargs = dict(args=(), strategy='best1bin', maxiter=None,
+ popsize=15, tol=0.01, mutation=(0.5, 1),
+ recombination=0.7, seed=None, callback=None,
+ disp=False, polish=True, init='latinhypercube')
+
+ for k, v in fmin_kws.items():
+ if k in kwargs:
+ kwargs[k] = v
+ ret = differential_evolution(self.penalty, _bounds, **kwargs)
else:
ret = scipy_minimize(self.penalty, vars, **fmin_kws)
@@ -595,7 +794,7 @@ class Minimizer(object):
result.chisqr = (result.chisqr**2).sum()
result.ndata = len(result.residual)
result.nfree = result.ndata - result.nvarys
- result.redchi = result.chisqr / result.nfree
+ result.redchi = result.chisqr / max(1, result.nfree)
# this is -2*loglikelihood
_neg2_log_likel = result.ndata * np.log(result.chisqr / result.ndata)
result.aic = _neg2_log_likel + 2 * result.nvarys
@@ -606,8 +805,8 @@ class Minimizer(object):
def emcee(self, params=None, steps=1000, nwalkers=100, burn=0, thin=1,
ntemps=1, pos=None, reuse_sampler=False, workers=1,
float_behavior='posterior', is_weighted=True, seed=None):
- """
- Bayesian sampling of the posterior distribution using the `emcee`.
+ r"""
+ Bayesian sampling of the posterior distribution using `emcee`.
Bayesian sampling of the posterior distribution for the parameters
using the `emcee` Markov Chain Monte Carlo package. The method assumes
@@ -616,9 +815,9 @@ class Minimizer(object):
Parameters
----------
- params : lmfit.Parameters, optional
+ params : :class:`~lmfit.parameter.Parameters`, optional
Parameters to use as starting point. If this is not specified
- then the Parameters used to initialise the Minimizer object are
+ then the Parameters used to initialize the Minimizer object are
used.
steps : int, optional
How many samples you would like to draw from the posterior
@@ -636,7 +835,7 @@ class Minimizer(object):
Only accept 1 in every `thin` samples.
ntemps : int, optional
If `ntemps > 1` perform a Parallel Tempering.
- pos : np.ndarray, optional
+ pos : numpy.ndarray, optional
Specify the initial positions for the sampler. If `ntemps == 1`
then `pos.shape` should be `(nwalkers, nvarys)`. Otherwise,
`(ntemps, nwalkers, nvarys)`. You can also initialise using a
@@ -648,7 +847,7 @@ class Minimizer(object):
If you have already run `emcee` on a given `Minimizer` object then
it possesses an internal ``sampler`` attribute. You can continue to
draw from the same sampler (retaining the chain history) if you set
- this option to `True`. Otherwise a new sampler is created. The
+ this option to True. Otherwise a new sampler is created. The
`nwalkers`, `ntemps`, `pos`, and `params` keywords are ignored with
this option.
**Important**: the Parameters used to create the sampler must not
@@ -672,9 +871,9 @@ class Minimizer(object):
Specifies meaning of the objective function output if it returns a
float. One of:
- 'posterior' - objective function returns a log-posterior
- probability
- 'chi2' - objective function returns :math:`\chi^2`.
+ - 'posterior' - objective function returns a log-posterior
+ probability
+ - 'chi2' - objective function returns :math:`\chi^2`
See Notes for further details.
is_weighted : bool, optional
@@ -691,20 +890,23 @@ class Minimizer(object):
**Important** this parameter only has any effect if your objective
function returns an array. If your objective function returns a
float, then this parameter is ignored. See Notes for more details.
- seed : int or `np.random.RandomState`, optional
- If `seed` is an int, a new `np.random.RandomState` instance is used,
- seeded with `seed`.
- If `seed` is already a `np.random.RandomState` instance, then that
- `np.random.RandomState` instance is used.
+ seed : int or `numpy.random.RandomState`, optional
+ If `seed` is an int, a new `numpy.random.RandomState` instance is
+ used, seeded with `seed`.
+ If `seed` is already a `numpy.random.RandomState` instance, then
+ that `numpy.random.RandomState` instance is used.
Specify `seed` for repeatable minimizations.
Returns
-------
:class:`MinimizerResult`
MinimizerResult object containing updated params, statistics,
- etc. The `MinimizerResult` also contains the ``chain``,
- ``flatchain`` and ``lnprob`` attributes. The ``chain``
- and ``flatchain`` attributes contain the samples and have the shape
+ etc. The updated params represent the median (50th percentile) of
+ all the samples, whilst the parameter uncertainties are half of the
+ difference between the 15.87 and 84.13 percentiles.
+ The `MinimizerResult` also contains the ``chain``, ``flatchain``
+ and ``lnprob`` attributes. The ``chain`` and ``flatchain``
+ attributes contain the samples and have the shape
`(nwalkers, (steps - burn) // thin, nvarys)` or
`(ntemps, nwalkers, (steps - burn) // thin, nvarys)`,
depending on whether Parallel tempering was used or not.
@@ -716,6 +918,7 @@ class Minimizer(object):
log probability for each sample in ``chain``. The sample with the
highest probability corresponds to the maximum likelihood estimate.
+
Notes
-----
This method samples the posterior distribution of the parameters using
@@ -724,25 +927,27 @@ class Minimizer(object):
`D`, :math:`\ln p(F_{true} | D)`. This 'posterior probability' is
calculated as:
- ..math::
+ .. math::
- \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
+ \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
where :math:`\ln p(D | F_{true})` is the 'log-likelihood' and
:math:`\ln p(F_{true})` is the 'log-prior'. The default log-prior
encodes prior information already known about the model. This method
- assumes that the log-prior probability is `-np.inf` (impossible) if the
- one of the parameters is outside its limits. The log-prior probability
+ assumes that the log-prior probability is `-numpy.inf` (impossible) if
+ one of the parameters is outside its limits. The log-prior probability
term is zero if all the parameters are inside their bounds (known as a
uniform prior). The log-likelihood function is given by [1]_:
- ..math::
+ .. math::
- \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{\left(g_n(F_{true}) - D_n \right)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
+ \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{(g_n(F_{true}) - D_n)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
The first summand in the square brackets represents the residual for a
- given datapoint (:math:`g` being the generative model) . This term
- represents :math:`\chi^2` when summed over all datapoints.
+ given datapoint (:math:`g` being the generative model, :math:`D_n` the
+ data and :math:`s_n` the standard deviation, or measurement
+ uncertainty, of the datapoint). This term represents :math:`\chi^2`
+ when summed over all data points.
Ideally the objective function used to create `lmfit.Minimizer` should
return the log-posterior probability, :math:`\ln p(F_{true} | D)`.
However, since the in-built log-prior term is zero, the objective
@@ -760,22 +965,24 @@ class Minimizer(object):
a vector of (possibly weighted) residuals. Therefore, if your objective
function returns a vector, `res`, then the vector is assumed to contain
the residuals. If `is_weighted is True` then your residuals are assumed
- to be correctly weighted by the standard deviation of the data points
- (`res = (data - model) / sigma`) and the log-likelihood (and
- log-posterior probability) is calculated as: `-0.5 * np.sum(res **2)`.
+ to be correctly weighted by the standard deviation (measurement
+ uncertainty) of the data points (`res = (data - model) / sigma`) and
+ the log-likelihood (and log-posterior probability) is calculated as:
+ `-0.5 * numpy.sum(res**2)`.
This ignores the second summand in the square brackets. Consequently,
in order to calculate a fully correct log-posterior probability value
your objective function should return a single value. If
`is_weighted is False` then the data uncertainty, `s_n`, will be
- treated as a nuisance parameter and will be marginalised out. This is
+ treated as a nuisance parameter and will be marginalized out. This is
achieved by employing a strictly positive uncertainty
- (homoscedasticity) for each data point, :math:`s_n = exp(__lnsigma)`.
+ (homoscedasticity) for each data point, :math:`s_n = \exp(\_\_lnsigma)`.
`__lnsigma` will be present in `MinimizerResult.params`, as well as
`Minimizer.chain`, `nvarys` will also be increased by one.
References
----------
.. [1] http://dan.iel.fm/emcee/current/user/line/
+
"""
if not HAS_EMCEE:
raise NotImplementedError('You must have emcee to use'
@@ -842,7 +1049,7 @@ class Minimizer(object):
# set up multiprocessing options for the samplers
auto_pool = None
sampler_kwargs = {}
- if type(workers) is int and workers > 1:
+ if isinstance(workers, int) and workers > 1:
auto_pool = multiprocessing.Pool(workers)
sampler_kwargs['pool'] = auto_pool
elif hasattr(workers, 'map'):
@@ -900,7 +1107,7 @@ class Minimizer(object):
if p0.shape == tpos.shape:
pass
# trying to initialise with a previous chain
- elif (tpos.shape[0::2] == (nwalkers, self.nvarys)):
+ elif tpos.shape[0::2] == (nwalkers, self.nvarys):
tpos = tpos[:, -1, :]
# initialising with a PTsampler chain.
elif ntemps > 1 and tpos.ndim == 4:
@@ -923,9 +1130,13 @@ class Minimizer(object):
# discard the burn samples and thin
chain = self.sampler.chain[..., burn::thin, :]
- lnprobability = self.sampler.lnprobability[:, burn::thin]
+ lnprobability = self.sampler.lnprobability[..., burn::thin]
- flatchain = chain.reshape((-1, self.nvarys))
+ # take the zero'th PTsampler temperature for the parameter estimators
+ if ntemps > 1:
+ flatchain = chain[0, ...].reshape((-1, self.nvarys))
+ else:
+ flatchain = chain.reshape((-1, self.nvarys))
quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
@@ -956,57 +1167,62 @@ class Minimizer(object):
return result
def least_squares(self, params=None, **kws):
- """
- Use the least_squares (new in scipy 0.17) function to perform a fit.
+ """Use the `least_squares` function (new in SciPy 0.17) to perform a fit.
+
+ It assumes that the input Parameters have been initialized, and
+ a function to minimize has been properly set up.
+ When possible, this calculates the estimated uncertainties and
+ variable correlations from the covariance matrix.
- This method assumes that Parameters have been stored, and a function to
- minimize has been properly set up.
- This method wraps scipy.optimize.least_squares, which has inbuilt
- support for bounds and robust loss functions.
+ This method wraps :scipydoc:`optimize.least_squares`, which
+ has inbuilt support for bounds and robust loss functions.
Parameters
----------
- params : Parameters, optional
- Parameters to use as starting points.
- kws : dict, optional
- Minimizer options to pass to scipy.optimize.least_squares.
+ params : :class:`~lmfit.parameter.Parameters`, optional
+ Parameters to use as starting point.
+ **kws : dict, optional
+ Minimizer options to pass to :scipydoc:`optimize.least_squares`.
Returns
-------
:class:`MinimizerResult`
- Object containing the optimized parameter
- and several goodness-of-fit statistics.
+ Object containing the optimized parameter and several
+ goodness-of-fit statistics.
.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`
+ Return value changed to :class:`MinimizerResult`.
+
"""
if not HAS_LEAST_SQUARES:
- raise NotImplementedError("Scipy with a version higher than 0.17 "
+ raise NotImplementedError("SciPy with a version higher than 0.17 "
"is needed for this method.")
result = self.prepare_fit(params)
result.method = 'least_squares'
replace_none = lambda x, sign: sign*np.inf if x is None else x
- upper_bounds = [replace_none(i.max, 1) for i in self.params.values()]
- lower_bounds = [replace_none(i.min, -1) for i in self.params.values()]
- start_vals = [i.value for i in self.params.values()]
- ret = least_squares(self.__residual,
- start_vals,
+ start_vals, lower_bounds, upper_bounds = [], [], []
+ for vname in result.var_names:
+ par = self.params[vname]
+ start_vals.append(par.value)
+ lower_bounds.append(replace_none(par.min, -1))
+ upper_bounds.append(replace_none(par.max, 1))
+
+ ret = least_squares(self.__residual, start_vals,
bounds=(lower_bounds, upper_bounds),
kwargs=dict(apply_bounds_transformation=False),
- **kws
- )
+ **kws)
for attr in ret:
setattr(result, attr, ret[attr])
result.x = np.atleast_1d(result.x)
result.chisqr = result.residual = self.__residual(result.x, False)
- result.nvarys = len(start_vals)
+ result.nvarys = len(result.var_names)
result.ndata = 1
result.nfree = 1
if isinstance(result.residual, ndarray):
@@ -1021,24 +1237,36 @@ class Minimizer(object):
return result
def leastsq(self, params=None, **kws):
- """
- Use Levenberg-Marquardt minimization to perform a fit.
- This assumes that Parameters have been stored, and a function to
- minimize has been properly set up.
-
- This wraps scipy.optimize.leastsq.
+ """Use Levenberg-Marquardt minimization to perform a fit.
+ It assumes that the input Parameters have been initialized, and
+ a function to minimize has been properly set up.
When possible, this calculates the estimated uncertainties and
variable correlations from the covariance matrix.
- Writes outputs to many internal attributes.
+ This method calls :scipydoc:`optimize.leastsq`.
+ By default, numerical derivatives are used, and the following
+ arguments are set:
+
+ +------------------+----------------+------------------------------------------------------------+
+ | :meth:`leastsq` | Default Value | Description |
+ | arg | | |
+ +==================+================+============================================================+
+ | xtol | 1.e-7 | Relative error in the approximate solution |
+ +------------------+----------------+------------------------------------------------------------+
+ | ftol | 1.e-7 | Relative error in the desired sum of squares |
+ +------------------+----------------+------------------------------------------------------------+
+ | maxfev | 2000*(nvar+1) | Maximum number of function calls (nvar= # of variables) |
+ +------------------+----------------+------------------------------------------------------------+
+ | Dfun | None | Function to call for Jacobian calculation |
+ +------------------+----------------+------------------------------------------------------------+
Parameters
----------
- params : Parameters, optional
- Parameters to use as starting points.
- kws : dict, optional
- Minimizer options to pass to scipy.optimize.leastsq.
+ params : :class:`~lmfit.parameter.Parameters`, optional
+ Parameters to use as starting point.
+ **kws : dict, optional
+ Minimizer options to pass to :scipydoc:`optimize.leastsq`.
Returns
-------
@@ -1046,8 +1274,10 @@ class Minimizer(object):
Object containing the optimized parameter
and several goodness-of-fit statistics.
+
.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`
+ Return value changed to :class:`MinimizerResult`.
+
"""
result = self.prepare_fit(params=params)
result.method = 'leastsq'
@@ -1077,13 +1307,18 @@ class Minimizer(object):
result.residual = resid = infodict['fvec']
result.ier = ier
result.lmdif_message = errmsg
- result.message = 'Fit succeeded.'
result.success = ier in [1, 2, 3, 4]
if result.aborted:
result.message = 'Fit aborted by user callback.'
result.success = False
+ elif ier in {1, 2, 3}:
+ result.message = 'Fit succeeded.'
elif ier == 0:
- result.message = 'Invalid Input Parameters.'
+ result.message = ('Invalid Input Parameters. I.e. more variables '
+ 'than data points given, tolerance < 0.0, or '
+ 'no data provided.')
+ elif ier == 4:
+ result.message = 'One or more variables did not affect the fit.'
elif ier == 5:
result.message = self._err_maxfev % lskws['maxfev']
else:
@@ -1130,6 +1365,8 @@ class Minimizer(object):
except (LinAlgError, ValueError):
result.covar = None
+ result.fjac = infodict['fjac']
+
has_expr = False
for par in params.values():
par.stderr, par.correl = 0, None
@@ -1150,8 +1387,9 @@ class Minimizer(object):
result.errorbars = result.errorbars and (par.stderr > 0.0)
for jvar, varn2 in enumerate(result.var_names):
if jvar != ivar:
- par.correl[varn2] = (result.covar[ivar, jvar] /
- (par.stderr * sqrt(result.covar[jvar, jvar])))
+ par.correl[varn2] = (
+ result.covar[ivar, jvar] /
+ (par.stderr * sqrt(result.covar[jvar, jvar])))
except:
result.errorbars = False
@@ -1173,25 +1411,185 @@ class Minimizer(object):
params[nam].value = v.nominal_value
if not result.errorbars:
- result.message = '%s. Could not estimate error-bars' % result.message
+ result.message = '%s Could not estimate error-bars.' % result.message
np.seterr(**orig_warn_settings)
return result
- def minimize(self, method='leastsq', params=None, **kws):
+ def brute(self, params=None, Ns=20, keep=50):
+ """Use the `brute` method to find the global minimum of a function.
+
+ The following parameters are passed to :scipydoc:`optimize.brute`
+ and cannot be changed:
+
+ +-------------------+-------+----------------------------------------+
+ | :meth:`brute` arg | Value | Description |
+ +===================+=======+========================================+
+ | full_output | 1 | Return the evaluation grid and |
+ | | | the objective function's values on it. |
+ +-------------------+-------+----------------------------------------+
+ | finish | None | No "polishing" function is to be used |
+ | | | after the grid search. |
+ +-------------------+-------+----------------------------------------+
+ | disp | False | Do not print convergence messages |
+ | | | (when finish is not None). |
+ +-------------------+-------+----------------------------------------+
+
+ It assumes that the input Parameters have been initialized, and a
+ function to minimize has been properly set up.
+
+ Parameters
+ ----------
+ params : :class:`~lmfit.parameter.Parameters` object, optional
+ Contains the Parameters for the model. If None, then the
+ Parameters used to initialize the Minimizer object are used.
+ Ns : int, optional
+ Number of grid points along the axes, if not otherwise specified
+ (see Notes).
+ keep : int, optional
+ Number of best candidates from the brute force method that are
+ stored in the :attr:`candidates` attribute. If 'all', then all grid
+ points from :scipydoc:`optimize.brute` are stored as candidates.
+
+ Returns
+ -------
+ :class:`MinimizerResult`
+ Object containing the parameters from the brute force method.
+ The return values (`x0`, `fval`, `grid`, `Jout`) from
+ :scipydoc:`optimize.brute` are stored as `brute_<parname>` attributes.
+ The `MinimizerResult` also contains the `candidates` attribute and
+ `show_candidates()` method. The `candidates` attribute contains the
+ parameters and chisqr from the brute force method as a namedtuple,
+ ('Candidate', ['params', 'score']), sorted on the (lowest) chisqr
+ value. To access the values for a particular candidate one can use
+ `result.candidate[#].params` or `result.candidate[#].score`, where
+ a lower # represents a better candidate. The `show_candidates(#)`
+ uses the :meth:`pretty_print` method to show a specific candidate-#
+ or all candidates when no number is specified.
+
+
+ .. versionadded:: 0.9.6
+
+
+ Notes
+ -----
+ The :meth:`brute` method evaluates the function at each point of a
+ multidimensional grid of points. The grid points are generated from the
+ parameter ranges using `Ns` and (optional) `brute_step`.
+ The implementation in :scipydoc:`optimize.brute` requires finite bounds
+ and the `range` is specified as a two-tuple `(min, max)` or slice-object
+ `(min, max, brute_step)`. A slice-object is used directly, whereas a
+ two-tuple is converted to a slice object that interpolates `Ns` points
+ from `min` to `max`, inclusive.
+
+ In addition, the :meth:`brute` method in lmfit, handles three other
+ scenarios given below with their respective slice-object:
+
+ - lower bound (:attr:`min`) and :attr:`brute_step` are specified:
+ range = (`min`, `min` + `Ns` * `brute_step`, `brute_step`).
+ - upper bound (:attr:`max`) and :attr:`brute_step` are specified:
+ range = (`max` - `Ns` * `brute_step`, `max`, `brute_step`).
+ - numerical value (:attr:`value`) and :attr:`brute_step` are specified:
+ range = (`value` - (`Ns`//2) * `brute_step`, `value` +
+ (`Ns`//2) * `brute_step`, `brute_step`).
+
"""
- Perform the minimization.
+ result = self.prepare_fit(params=params)
+ result.method = 'brute'
+
+ brute_kws = dict(full_output=1, finish=None, disp=False)
+
+ varying = np.asarray([par.vary for par in self.params.values()])
+ replace_none = lambda x, sign: sign*np.inf if x is None else x
+ lower_bounds = np.asarray([replace_none(i.min, -1) for i in
+ self.params.values()])[varying]
+ upper_bounds = np.asarray([replace_none(i.max, 1) for i in
+ self.params.values()])[varying]
+ value = np.asarray([i.value for i in self.params.values()])[varying]
+ stepsize = np.asarray([i.brute_step for i in self.params.values()])[varying]
+
+ ranges = []
+ for i, step in enumerate(stepsize):
+ if np.all(np.isfinite([lower_bounds[i], upper_bounds[i]])):
+ # lower AND upper bounds are specified (brute_step optional)
+ par_range = ((lower_bounds[i], upper_bounds[i], step)
+ if step else (lower_bounds[i], upper_bounds[i]))
+ elif np.isfinite(lower_bounds[i]) and step:
+ # lower bound AND brute_step are specified
+ par_range = (lower_bounds[i], lower_bounds[i] + Ns*step, step)
+ elif np.isfinite(upper_bounds[i]) and step:
+ # upper bound AND brute_step are specified
+ par_range = (upper_bounds[i] - Ns*step, upper_bounds[i], step)
+ elif np.isfinite(value[i]) and step:
+ # no bounds, but an initial value is specified
+ par_range = (value[i] - (Ns//2)*step, value[i] + (Ns//2)*step,
+ step)
+ else:
+ raise ValueError('Not enough information provided for the brute '
+ 'force method. Please specify bounds or at '
+ 'least an initial value and brute_step for '
+ 'parameter "{}".'.format(result.var_names[i]))
+ ranges.append(par_range)
+
+ ret = scipy_brute(self.penalty_brute, tuple(ranges), Ns=Ns, **brute_kws)
+
+ result.brute_x0 = ret[0]
+ result.brute_fval = ret[1]
+ result.brute_grid = ret[2]
+ result.brute_Jout = ret[3]
+
+ # sort the results of brute and populate .candidates attribute
+ grid_score = ret[3].ravel() # chisqr
+ grid_points = [par.ravel() for par in ret[2]]
+
+ if len(result.var_names) == 1:
+ grid_result = np.array([res for res in zip(zip(grid_points), grid_score)],
+ dtype=[('par', 'O'), ('score', 'float64')])
+ else:
+ grid_result = np.array([res for res in zip(zip(*grid_points), grid_score)],
+ dtype=[('par', 'O'), ('score', 'float64')])
+ grid_result_sorted = grid_result[grid_result.argsort(order='score')]
+
+ result.candidates = []
+
+ if keep == 'all':
+ keep_candidates = len(grid_result_sorted)
+ else:
+ keep_candidates = min(len(grid_result_sorted), keep)
+
+ for data in grid_result_sorted[:keep_candidates]:
+ pars = deepcopy(self.params)
+ for i, par in enumerate(result.var_names):
+ pars[par].value = data[0][i]
+ result.candidates.append(Candidate(params=pars, score=data[1]))
+
+ result.params = result.candidates[0].params
+ result.chisqr = ret[1]
+ result.nvarys = len(result.var_names)
+ result.residual = self.__residual(result.brute_x0, apply_bounds_transformation=False)
+ result.ndata = len(result.residual)
+ result.nfree = result.ndata - result.nvarys
+ result.redchi = result.chisqr / result.nfree
+ # this is -2*loglikelihood
+ _neg2_log_likel = result.ndata * np.log(result.chisqr / result.ndata)
+ result.aic = _neg2_log_likel + 2 * result.nvarys
+ result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys
+
+ return result
+
+ def minimize(self, method='leastsq', params=None, **kws):
+ """Perform the minimization.
Parameters
----------
method : str, optional
Name of the fitting method to use. Valid values are:
- - `'leastsq'`: Levenberg-Marquardt (default).
- Uses `scipy.optimize.leastsq`.
- - `'least_squares'`: Levenberg-Marquardt.
- Uses `scipy.optimize.least_squares`.
- - 'nelder': Nelder-Mead
+ - `'leastsq'`: Levenberg-Marquardt (default)
+ - `'least_squares'`: Least-Squares minimization, using Trust Region Reflective method by default
+ - `'differential_evolution'`: differential evolution
+ - `'brute'`: brute force method
+ - '`nelder`': Nelder-Mead
- `'lbfgsb'`: L-BFGS-B
- `'powell'`: Powell
- `'cg'`: Conjugate-Gradient
@@ -1201,31 +1599,38 @@ class Minimizer(object):
- `'trust-ncg'`: Trust Newton-CGn
- `'dogleg'`: Dogleg
- `'slsqp'`: Sequential Linear Squares Programming
- - `'differential_evolution'`: differential evolution
+
+ In most cases, these methods wrap and use the method with the
+ same name from `scipy.optimize`, or use
+ `scipy.optimize.minimize` with the same `method` argument.
+ Thus '`leastsq`' will use `scipy.optimize.leastsq`, while
+ '`powell`' will use `scipy.optimize.minimize(...,
+ method='powell')`
For more details on the fitting methods please refer to the
- `scipy docs <http://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
+ `SciPy docs <http://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
- params : :class:`lmfit.parameter.Parameters` object.
+ params : :class:`~lmfit.parameter.Parameters`, optional
Parameters of the model to use as starting values.
- **kwargs
+ **kws : optional
Additional arguments are passed to the underlying minimization
method.
Returns
-------
:class:`MinimizerResult`
- Object containing the optimized parameter
- and several goodness-of-fit statistics.
+ Object containing the optimized parameter and several
+ goodness-of-fit statistics.
.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`
- """
+ Return value changed to :class:`MinimizerResult`.
+ """
function = self.leastsq
kwargs = {'params': params}
+ kwargs.update(self.kws)
kwargs.update(kws)
user_method = method.lower()
@@ -1233,23 +1638,24 @@ class Minimizer(object):
function = self.leastsq
elif user_method.startswith('least_s'):
function = self.least_squares
+ elif user_method.startswith('brute'):
+ function = self.brute
else:
function = self.scalar_minimize
for key, val in SCALAR_METHODS.items():
if (key.lower().startswith(user_method) or
- val.lower().startswith(user_method)):
+ val.lower().startswith(user_method)):
kwargs['method'] = val
return function(**kwargs)
def _lnprior(theta, bounds):
- """
- Calculates an improper uniform log-prior probability
+ """Calculate an improper uniform log-prior probability.
Parameters
----------
theta : sequence
- float parameter values (only those being varied)
+ Float parameter values (only those being varied).
bounds : np.ndarray
Lower and upper bounds of parameters that are varying.
Has shape (nvarys, 2).
@@ -1257,10 +1663,10 @@ def _lnprior(theta, bounds):
Returns
-------
lnprob : float
- Log prior probability
+ Log prior probability.
+
"""
- if (np.any(theta > bounds[:, 1])
- or np.any(theta < bounds[:, 0])):
+ if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]):
return -np.inf
else:
return 0
@@ -1269,55 +1675,54 @@ def _lnprior(theta, bounds):
def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
userkws=None, float_behavior='posterior', is_weighted=True,
nan_policy='raise'):
- """
- Calculates the log-posterior probability. See the `Minimizer.emcee` method
- for more details
+ """Calculate the log-posterior probability.
+
+ See the `Minimizer.emcee` method for more details.
Parameters
----------
theta : sequence
- float parameter values (only those being varied)
+ Float parameter values (only those being varied).
userfcn : callable
- User objective function
- params : lmfit.Parameters
- The entire set of Parameters
+ User objective function.
+ params : :class:`~lmfit.parameters.Parameters`
+ The entire set of Parameters.
var_names : list
- The names of the parameters that are varying
- bounds : np.ndarray
+ The names of the parameters that are varying.
+ bounds : numpy.ndarray
Lower and upper bounds of parameters. Has shape (nvarys, 2).
userargs : tuple, optional
- Extra positional arguments required for user objective function
+ Extra positional arguments required for user objective function.
userkws : dict, optional
- Extra keyword arguments required for user objective function
+ Extra keyword arguments required for user objective function.
float_behavior : str, optional
Specifies meaning of objective when it returns a float. One of:
'posterior' - objective function returns a log-posterior
- probability.
- 'chi2' - objective function returns a chi2 value.
+ probability
+ 'chi2' - objective function returns a chi2 value
is_weighted : bool
If `userfcn` returns a vector of residuals then `is_weighted`
specifies if the residuals have been weighted by data uncertainties.
nan_policy : str, optional
- Specifies action if `userfcn` returns nan
- values. One of:
+ Specifies action if `userfcn` returns NaN values. One of:
'raise' - a `ValueError` is raised
'propagate' - the values returned from `userfcn` are un-altered
- 'omit' - the non-finite values are filtered.
+ 'omit' - the non-finite values are filtered
Returns
-------
lnprob : float
- Log posterior probability
+ Log posterior probability.
+
"""
# the comparison has to be done on theta and bounds. DO NOT inject theta
# values into Parameters, then compare Parameters values to the bounds.
# Parameters values are clipped to stay within bounds.
- if (np.any(theta > bounds[:, 1])
- or np.any(theta < bounds[:, 0])):
+ if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]):
return -np.inf
for name, val in zip(var_names, theta):
@@ -1360,12 +1765,13 @@ def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
def _make_random_gen(seed):
- """Turn seed into a np.random.RandomState instance
+ """Turn seed into a numpy.random.RandomState instance.
+
+ If seed is None, return the RandomState singleton used by
+ numpy.random. If seed is an int, return a new RandomState instance
+ seeded with seed. If seed is already a RandomState instance, return
+ it. Otherwise raise ValueError.
- If seed is None, return the RandomState singleton used by np.random.
- If seed is an int, return a new RandomState instance seeded with seed.
- If seed is already a RandomState instance, return it.
- Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
@@ -1378,21 +1784,20 @@ def _make_random_gen(seed):
def _nan_policy(a, nan_policy='raise', handle_inf=True):
- """
- Specifies behaviour when an array contains np.nan or np.inf
+ """Specify behaviour when an array contains numpy.nan or numpy.inf.
Parameters
----------
a : array_like
- Input array to consider
+ Input array to consider.
nan_policy : str, optional
One of:
- 'raise' - raise a `ValueError` if `a` contains NaN.
+ 'raise' - raise a `ValueError` if `a` contains NaN
'propagate' - propagate NaN
'omit' - filter NaN from input array
handle_inf : bool, optional
- As well as nan consider +/- inf
+ As well as NaN consider +/- inf.
Returns
-------
@@ -1402,12 +1807,12 @@ def _nan_policy(a, nan_policy='raise', handle_inf=True):
----
This function is copied, then modified, from
scipy/stats/stats.py/_contains_nan
- """
+ """
policies = ['propagate', 'raise', 'omit']
if handle_inf:
- handler_func = lambda a: ~np.isfinite(a)
+ handler_func = lambda a: ~np.isfinite(a)
else:
handler_func = np.isnan
@@ -1442,52 +1847,57 @@ def _nan_policy(a, nan_policy='raise', handle_inf=True):
def minimize(fcn, params, method='leastsq', args=None, kws=None,
- scale_covar=True, iter_cb=None, **fit_kws):
- """
- This function performs a fit of a set of parameters by minimizing
- an objective (or "cost") function using one one of the several
- available methods. The minimize function takes a objective function
- to be minimized, a dictionary (:class:`lmfit.parameter.Parameters`)
- containing the model parameters, and several optional arguments.
+ scale_covar=True, iter_cb=None, reduce_fcn=None, **fit_kws):
+ """Perform a fit of a set of parameters by minimizing an objective (or
+ cost) function using one of the several available methods.
+
+ The minimize function takes an objective function to be minimized,
+ a dictionary (:class:`~lmfit.parameter.Parameters`) containing the model
+ parameters, and several optional arguments.
Parameters
----------
fcn : callable
- objective function to be minimized. When method is `leastsq` or
+ Objective function to be minimized. When method is `leastsq` or
`least_squares`, the objective function should return an array
of residuals (difference between model and data) to be minimized
- in a least squares sense. With the scalar methods the objective
+ in a least-squares sense. With the scalar methods the objective
function can either return the residuals array or a single scalar
value. The function must have the signature:
`fcn(params, *args, **kws)`
- params : :class:`lmfit.parameter.Parameters` object.
- contains the Parameters for the model.
+ params : :class:`~lmfit.parameter.Parameters`
+ Contains the Parameters for the model.
method : str, optional
Name of the fitting method to use. Valid values are:
- - `'leastsq'`: Levenberg-Marquardt (default).
- Uses `scipy.optimize.leastsq`.
- - `'least_squares'`: Levenberg-Marquardt.
- Uses `scipy.optimize.least_squares`.
- - 'nelder': Nelder-Mead
+ - `'leastsq'`: Levenberg-Marquardt (default)
+ - `'least_squares'`: Least-Squares minimization, using Trust Region Reflective method by default
+ - `'differential_evolution'`: differential evolution
+ - `'brute'`: brute force method
+ - `'nelder'`: Nelder-Mead
- `'lbfgsb'`: L-BFGS-B
- `'powell'`: Powell
- `'cg'`: Conjugate-Gradient
- - `'newton'`: Newton-CG
+ - `'newton'`: Newton-Conjugate-Gradient
- `'cobyla'`: Cobyla
- `'tnc'`: Truncated Newton
- - `'trust-ncg'`: Trust Newton-CGn
+ - `'trust-ncg'`: Trust Newton-Conjugate-Gradient
- `'dogleg'`: Dogleg
- `'slsqp'`: Sequential Linear Squares Programming
- - `'differential_evolution'`: differential evolution
+
+ In most cases, these methods wrap and use the method of the same
+ name from `scipy.optimize`, or use `scipy.optimize.minimize` with
+ the same `method` argument. Thus '`leastsq`' will use
+ `scipy.optimize.leastsq`, while '`powell`' will use
+ `scipy.optimize.minimize(..., method='powell')`
For more details on the fitting methods please refer to the
- `scipy docs <http://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
+ `SciPy docs <http://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
args : tuple, optional
Positional arguments to pass to `fcn`.
kws : dict, optional
- keyword arguments to pass to `fcn`.
+ Keyword arguments to pass to `fcn`.
iter_cb : callable, optional
Function to be called at each fit iteration. This function should
have the signature `iter_cb(params, iter, resid, *args, **kws)`,
@@ -1495,20 +1905,22 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
the iteration, `resid` the current residual array, and `*args`
and `**kws` as passed to the objective function.
scale_covar : bool, optional
- Whether to automatically scale the covariance matrix (leastsq
- only).
- fit_kws : dict, optional
+ Whether to automatically scale the covariance matrix (`leastsq` only).
+ reduce_fcn : str or callable, optional
+ Function to convert a residual array to a scalar value for the scalar
+ minimizers. See notes in `Minimizer`.
+ **fit_kws : dict, optional
Options to pass to the minimizer being used.
Returns
-------
:class:`MinimizerResult`
- Object containing the optimized parameter
- and several goodness-of-fit statistics.
+ Object containing the optimized parameter and several
+ goodness-of-fit statistics.
.. versionchanged:: 0.9.0
- return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
Notes
-----
@@ -1520,12 +1932,12 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
squares of the array will be sent to the underlying fitting method,
effectively doing a least-squares optimization of the return values.
- A common use for `args` and `kwds` would be to pass in other
+ A common use for `args` and `kws` would be to pass in other
data needed to calculate the residual, including such things as the
data array, dependent variable, uncertainties in the data, and other
data structures for the model calculation.
- On output, the params will be unchanged. The best-fit values, and where
+ On output, `params` will be unchanged. The best-fit values, and where
appropriate, estimated uncertainties and correlations, will all be
contained in the returned :class:`MinimizerResult`. See
:ref:`fit-results-label` for further details.
@@ -1534,10 +1946,11 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
and is equivalent to::
fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
- iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
+ iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
fitter.minimize(method=method)
"""
fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
- iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
+ iter_cb=iter_cb, scale_covar=scale_covar,
+ reduce_fcn=reduce_fcn, **fit_kws)
return fitter.minimize(method=method)
diff --git a/lmfit/model.py b/lmfit/model.py
index 5a65d7f..52114a8 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -1,22 +1,22 @@
-"""
-Concise nonlinear curve fitting.
-"""
+"""Concise nonlinear curve fitting."""
from __future__ import print_function
-import warnings
+
+from collections import OrderedDict
+from copy import deepcopy
+from functools import wraps
import inspect
import operator
-from copy import deepcopy
+import warnings
+
import numpy as np
-from . import Parameters, Parameter, Minimizer
-from .printfuncs import fit_report, ci_report
-from .confidence import conf_interval
+from scipy.special import erf
+from scipy.stats import t
-try:
- from collections import OrderedDict
-except ImportError:
- from ordereddict import OrderedDict
+from . import Minimizer, Parameter, Parameters
+from .confidence import conf_interval
+from .printfuncs import ci_report, fit_report
-# Use pandas.isnull for aligning missing data is pandas is available.
+# Use pandas.isnull for aligning missing data if pandas is available.
# otherwise use numpy.isnan
try:
from pandas import isnull, Series
@@ -24,8 +24,9 @@ except ImportError:
isnull = np.isnan
Series = type(NotImplemented)
+
def _align(var, mask, data):
- "align missing data, with pandas is available"
+ """Align missing data, if pandas is available."""
if isinstance(data, Series) and isinstance(var, Series):
return var.reindex_like(data).dropna()
elif mask is not None:
@@ -42,7 +43,10 @@ except ImportError:
def _ensureMatplotlib(function):
if _HAS_MATPLOTLIB:
- return function
+ @wraps(function)
+ def wrapper(*args, **kws):
+ return function(*args, **kws)
+ return wrapper
else:
def no_op(*args, **kwargs):
print('matplotlib module is required for plotting the results')
@@ -51,57 +55,91 @@ def _ensureMatplotlib(function):
class Model(object):
- """Create a model from a user-defined function.
-
- Parameters
- ----------
- func: function to be wrapped
- independent_vars: list of strings or None (default)
- arguments to func that are independent variables
- param_names: list of strings or None (default)
- names of arguments to func that are to be made into parameters
- missing: None, 'none', 'drop', or 'raise'
- 'none' or None: Do not check for null or missing values (default)
- 'drop': Drop null or missing observations in data.
- if pandas is installed, pandas.isnull is used, otherwise
- numpy.isnan is used.
- 'raise': Raise a (more helpful) exception when data contains null
- or missing values.
- name: None or string
- name for the model. When `None` (default) the name is the same as
- the model function (`func`).
-
- Note
- ----
- Parameter names are inferred from the function arguments,
- and a residual function is automatically constructed.
-
- Example
- -------
- >>> def decay(t, tau, N):
- ... return N*np.exp(-t/tau)
- ...
- >>> my_model = Model(decay, independent_vars=['t'])
+ """Create a model from a user-supplied model function.
+
+ The model function will normally take an independent variable
+ (generally, the first argument) and a series of arguments that are
+ meant to be parameters for the model. It will return an array of
+ data to model some data as for a curve-fitting problem.
+
"""
_forbidden_args = ('data', 'weights', 'params')
- _invalid_ivar = "Invalid independent variable name ('%s') for function %s"
- _invalid_par = "Invalid parameter name ('%s') for function %s"
+ _invalid_ivar = "Invalid independent variable name ('%s') for function %s"
+ _invalid_par = "Invalid parameter name ('%s') for function %s"
_invalid_missing = "missing must be None, 'none', 'drop', or 'raise'."
- _valid_missing = (None, 'none', 'drop', 'raise')
+ _valid_missing = (None, 'none', 'drop', 'raise')
_invalid_hint = "unknown parameter hint '%s' for param '%s'"
_hint_names = ('value', 'vary', 'min', 'max', 'expr')
def __init__(self, func, independent_vars=None, param_names=None,
missing='none', prefix='', name=None, **kws):
+ """
+ Parameters
+ ----------
+ func : callable
+ Function to be wrapped.
+ independent_vars : list of str, optional
+ Arguments to func that are independent variables (default is None).
+ param_names : list of str, optional
+ Names of arguments to func that are to be made into parameters
+ (default is None).
+ missing : str, optional
+ How to handle NaN and missing values in data. One of:
+
+ - 'none' or None : Do not check for null or missing values (default).
+
+ - 'drop' : Drop null or missing observations in data. If pandas is
+ installed, `pandas.isnull` is used, otherwise `numpy.isnan` is used.
+ - 'raise' : Raise a (more helpful) exception when data contains
+ null or missing values.
+
+ prefix : str, optional
+ Prefix used for the model.
+ name : str, optional
+ Name for the model. When None (default) the name is the same as
+ the model function (`func`).
+ **kws : dict, optional
+ Additional keyword arguments to pass to model function.
+
+ Notes
+ -----
+ 1. Parameter names are inferred from the function arguments,
+ and a residual function is automatically constructed.
+
+ 2. The model function must return an array that will be the same
+ size as the data being modeled.
+
+ Examples
+ --------
+ The model function will normally take an independent variable (generally,
+ the first argument) and a series of arguments that are meant to be
+ parameters for the model. Thus, a simple peak using a Gaussian
+ defined as:
+
+ >>> import numpy as np
+ >>> def gaussian(x, amp, cen, wid):
+ ... return amp * np.exp(-(x-cen)**2 / wid)
+
+ can be turned into a Model with:
+
+ >>> gmodel = Model(gaussian)
+
+ this will automatically discover the names of the independent variables
+ and parameters:
+
+ >>> print(gmodel.param_names, gmodel.independent_vars)
+ ['amp', 'cen', 'wid'], ['x']
+
+ """
self.func = func
self._prefix = prefix
self._param_root_names = param_names # will not include prefixes
self.independent_vars = independent_vars
self._func_allargs = []
self._func_haskeywords = False
- if not missing in self._valid_missing:
+ if missing not in self._valid_missing:
raise ValueError(self._invalid_missing)
self.missing = missing
self.opts = kws
@@ -129,6 +167,7 @@ class Model(object):
@property
def name(self):
+ """Return Model name."""
return self._reprstring(long=False)
@name.setter
@@ -137,36 +176,51 @@ class Model(object):
@property
def prefix(self):
+ """Return Model prefix."""
return self._prefix
@property
def param_names(self):
+ """Return the parameters of the Model."""
return self._param_names
def __repr__(self):
+ """ Return representation of Model."""
return "<lmfit.Model: %s>" % (self.name)
def copy(self, **kwargs):
- """DOES NOT WORK"""
+ """DOES NOT WORK."""
raise NotImplementedError("Model.copy does not work. Make a new Model")
def _parse_params(self):
- "build params from function arguments"
+ """Build parameters from function arguments."""
if self.func is None:
return
- argspec = inspect.getargspec(self.func)
- pos_args = argspec.args[:]
- keywords = argspec.keywords
- kw_args = {}
- if argspec.defaults is not None:
- for val in reversed(argspec.defaults):
- kw_args[pos_args.pop()] = val
-
- self._func_haskeywords = keywords is not None
+ if hasattr(self.func, 'argnames') and hasattr(self.func, 'kwargs'):
+ pos_args = self.func.argnames[:]
+ kw_args = {}
+ for name, defval in self.func.kwargs:
+ kw_args[name] = defval
+ keywords_ = list(kw_args.keys())
+ else:
+ try: # PY3
+ argspec = inspect.getfullargspec(self.func)
+ keywords_ = argspec.varkw
+ except AttributeError: # PY2
+ argspec = inspect.getargspec(self.func)
+ keywords_ = argspec.keywords
+
+ pos_args = argspec.args
+ kw_args = {}
+ if argspec.defaults is not None:
+ for val in reversed(argspec.defaults):
+ kw_args[pos_args.pop()] = val
+
+ self._func_haskeywords = keywords_ is not None
self._func_allargs = pos_args + list(kw_args.keys())
allargs = self._func_allargs
- if len(allargs) == 0 and keywords is not None:
+ if len(allargs) == 0 and keywords_ is not None:
return
# default independent_var = 1st argument
@@ -181,7 +235,7 @@ class Model(object):
self._param_root_names = pos_args[:]
for key, val in kw_args.items():
if (not isinstance(val, bool) and
- isinstance(val, (float, int))):
+ isinstance(val, (float, int))):
self._param_root_names.append(key)
self.def_vals[key] = val
elif val is None:
@@ -193,7 +247,7 @@ class Model(object):
new_opts = {}
for opt, val in self.opts.items():
if (opt in self._param_root_names or opt in might_be_param and
- isinstance(val, Parameter)):
+ isinstance(val, Parameter)):
self.set_param_hint(opt, value=val.value,
min=val.min, max=val.max, expr=val.expr)
elif opt in self._func_allargs:
@@ -214,20 +268,47 @@ class Model(object):
raise ValueError(self._invalid_ivar % (arg, fname))
for arg in names:
if (self._strip_prefix(arg) not in allargs or
- arg in self._forbidden_args):
+ arg in self._forbidden_args):
raise ValueError(self._invalid_par % (arg, fname))
# the following has been changed from OrderedSet for the time being.
self._param_names = names[:]
def set_param_hint(self, name, **kwargs):
- """set hints for parameter, including optional bounds
- and constraints (value, vary, min, max, expr)
- these will be used by make_params() when building
- default parameters
-
- example:
- model = GaussianModel()
- model.set_param_hint('amplitude', min=-100.0, max=0.)
+ """Set *hints* to use when creating parameters with `make_params()` for
+ the named parameter.
+
+ This is especially convenient for setting initial values. The `name`
+ can include the models `prefix` or not. The hint given can also
+ include optional bounds and constraints ``(value, vary, min, max, expr)``,
+ which will be used by make_params() when building default parameters.
+
+ Parameters
+ ----------
+ name : string
+ Parameter name.
+
+ **kwargs : optional
+ Arbitrary keyword arguments, needs to be a Parameter attribute.
+ Can be any of the following:
+
+ - value : float, optional
+ Numerical Parameter value.
+ - vary : bool, optional
+ Whether the Parameter is varied during a fit (default is True).
+ - min : float, optional
+ Lower bound for value (default is `-numpy.inf`, no lower bound).
+ - max : float, optional
+ Upper bound for value (default is `numpy.inf`, no upper bound).
+ - expr : str, optional
+ Mathematical expression used to constrain the value during the fit.
+
+
+ Example
+ --------
+
+ >>> model = GaussianModel()
+ >>> model.set_param_hint('sigma', min=0)
+
"""
npref = len(self._prefix)
if npref > 0 and name.startswith(self._prefix):
@@ -243,10 +324,13 @@ class Model(object):
warnings.warn(self._invalid_hint % (key, name))
def print_param_hints(self, colwidth=8):
- """Prints a nicely aligned text-table of parameters hints.
+ """Print a nicely aligned text-table of parameter hints.
+
+ Parameters
+ ----------
+ colwidth : int, optional
+ Width of each column, except for first and last columns.
- The argument `colwidth` is the width of each column,
- except for first and last columns.
"""
name_len = max(len(s) for s in self.param_hints)
print('{:{name_len}} {:>{n}} {:>{n}} {:>{n}} {:>{n}} {:{n}}'
@@ -261,8 +345,28 @@ class Model(object):
print(line.format(name_len=name_len, n=colwidth, **pvalues))
def make_params(self, verbose=False, **kwargs):
- """create and return a Parameters object for a Model.
- This applies any default values
+ """Create a Parameters object for a Model.
+
+ Parameters
+ ----------
+ verbose : bool, optional
+ Whether to print out messages (default is False).
+ **kwargs : optional
+ Parameter names and initial values.
+
+
+ Returns
+ ---------
+ params : Parameters
+
+ Notes
+ -----
+ 1. The parameters may or may not have decent initial values for each
+ parameter.
+
+ 2. This applies any default values or parameter hints that may have
+ been set.
+
"""
params = Parameters()
@@ -296,7 +400,7 @@ class Model(object):
par.value = kwargs[name]
params.add(par)
if verbose:
- print( ' - Adding parameter "%s"' % name)
+ print(' - Adding parameter "%s"' % name)
# next build parameters defined in param_hints
# note that composites may define their own additional
@@ -309,11 +413,13 @@ class Model(object):
par = Parameter(name=name)
params.add(par)
if verbose:
- print( ' - Adding parameter for hint "%s"' % name)
+ print(' - Adding parameter for hint "%s"' % name)
par._delay_asteval = True
for item in self._hint_names:
- if item in hint:
+ if item in hint:
setattr(par, item, hint[item])
+ if basename in kwargs:
+ par.value = kwargs[basename]
# Add the new parameter to self._param_names
if name not in self._param_names:
self._param_names.append(name)
@@ -322,27 +428,56 @@ class Model(object):
p._delay_asteval = False
return params
- def guess(self, data=None, **kws):
- """stub for guess starting values --
- should be implemented for each model subclass to
- run self.make_params(), update starting values
- and return a Parameters object"""
+ def guess(self, data, **kws):
+ """Guess starting values for the parameters of a model.
+
+ This is not implemented for all models, but is available for many of
+ the built-in models.
+
+ Parameters
+ ----------
+ data : array_like
+ Array of data to use to guess parameter values.
+ **kws : optional
+ Additional keyword arguments, passed to model function.
+
+ Returns
+ -------
+ params : Parameters
+
+ Notes
+ -----
+ Should be implemented for each model subclass to run
+ self.make_params(), update starting values and return a
+ Parameters object.
+
+ Raises
+ ------
+ NotImplementedError
+
+ """
cname = self.__class__.__name__
msg = 'guess() not implemented for %s' % cname
raise NotImplementedError(msg)
def _residual(self, params, data, weights, **kwargs):
- """default residual: (data-model)*weights
+ """Return the residual.
+
+ Default residual: (data-model)*weights.
- If the model returns complex values, the residual is computed by treating the real and imaginary
- parts separately. In this case, if the weights provided are real, they are assumed to apply equally to the
- real and imaginary parts. If the weights are complex, the real part of the weights are applied to the real
- part of the residual and the imaginary part is treated correspondingly.
+ If the model returns complex values, the residual is computed by
+ treating the real and imaginary parts separately. In this case,
+ if the weights provided are real, they are assumed to apply
+ equally to the real and imaginary parts. If the weights are
+ complex, the real part of the weights are applied to the real
+ part of the residual and the imaginary part is treated
+ correspondingly.
- Since the underlying scipy.optimize routines expect np.float arrays, the only complex type supported is
- np.complex.
+ Since the underlying scipy.optimize routines expect numpy.float
+ arrays, the only complex type supported is np.complex.
The "ravels" throughout are necessary to support pandas.Series.
+
"""
diff = self.eval(params, **kwargs) - data
@@ -361,7 +496,7 @@ class Model(object):
return np.asarray(diff).ravel() # for compatibility with pandas.Series
def _handle_missing(self, data):
- "handle missing data"
+ """Handle missing data."""
if self.missing == 'raise':
if np.any(isnull(data)):
raise ValueError("Data contains a null value.")
@@ -379,9 +514,11 @@ class Model(object):
return name
def make_funcargs(self, params=None, kwargs=None, strip=True):
- """convert parameter values and keywords to function arguments"""
- if params is None: params = {}
- if kwargs is None: kwargs = {}
+ """Convert parameter values and keywords to function arguments."""
+ if params is None:
+ params = {}
+ if kwargs is None:
+ kwargs = {}
out = {}
out.update(self.opts)
for name, par in params.items():
@@ -401,25 +538,62 @@ class Model(object):
return out
def _make_all_args(self, params=None, **kwargs):
- """generate **all** function args for all functions"""
+ """Generate **all** function args for all functions."""
args = {}
for key, val in self.make_funcargs(params, kwargs).items():
args["%s%s" % (self._prefix, key)] = val
return args
def eval(self, params=None, **kwargs):
- """evaluate the model with the supplied parameters"""
+ """Evaluate the model with supplied parameters and keyword arguments.
+
+ Parameters
+ -----------
+ params : Parameters, optional
+ Parameters to use in Model.
+ **kwargs : optional
+ Additional keyword arguments to pass to model function.
+
+ Returns
+ -------
+ numpy.ndarray
+ Value of model given the parameters and other arguments.
+
+ Notes
+ -----
+ 1. if `params` is None, the values for all parameters are
+ expected to be provided as keyword arguments. If `params` is
+ given, and a keyword argument for a parameter value is also given,
+ the keyword argument will be used.
+
+ 2. all non-parameter arguments for the model function, **including
+ all the independent variables** will need to be passed in using
+ keyword arguments.
+
+ """
return self.func(**self.make_funcargs(params, kwargs))
@property
def components(self):
- """return components for composite model"""
+ """Return components for composite model."""
return [self]
def eval_components(self, params=None, **kwargs):
- """
- evaluate the model with the supplied parameters and returns a ordered
- dict containting name, result pairs.
+ """Evaluate the model with the supplied parameters.
+
+ Parameters
+ -----------
+ params : Parameters, optional
+ Parameters to use in Model.
+ **kwargs : optional
+ Additional keyword arguments to pass to model function.
+
+ Returns
+ -------
+ OrderedDict
+ Keys are prefixes for component model, values are value of each
+ component.
+
"""
key = self._prefix
if len(key) < 1:
@@ -427,47 +601,68 @@ class Model(object):
return {key: self.eval(params=params, **kwargs)}
def fit(self, data, params=None, weights=None, method='leastsq',
- iter_cb=None, scale_covar=True, verbose=False, fit_kws=None, **kwargs):
- """Fit the model to the data.
+ iter_cb=None, scale_covar=True, verbose=False, fit_kws=None,
+ **kwargs):
+ """Fit the model to the data using the supplied Parameters.
Parameters
----------
- data: array-like
- params: Parameters object
- weights: array-like of same size as data
- used for weighted fit
- method: fitting method to use (default = 'leastsq')
- iter_cb: None or callable callback function to call at each iteration.
- scale_covar: bool (default True) whether to auto-scale covariance matrix
- verbose: bool (default True) print a message when a new parameter is
- added because of a hint.
- fit_kws: dict
- default fitting options, such as xtol and maxfev, for scipy optimizer
- keyword arguments: optional, named like the arguments of the
- model function, will override params. See examples below.
+ data : array_like
+ Array of data to be fit.
+ params : Parameters, optional
+ Parameters to use in fit (default is None).
+ weights : array_like of same size as `data`, optional
+ Weights to use for the calculation of the fit residual (default
+ is None).
+ method : str, optional
+ Name of fitting method to use (default is `'leastsq'`).
+ iter_cb : callable, optional
+ Callback function to call at each iteration (default is None).
+ scale_covar : bool, optional
+ Whether to automatically scale the covariance matrix when
+ calculating uncertainties (default is True, `leastsq` method only).
+        verbose : bool, optional
+            Whether to print a message when a new parameter is added because
+            of a hint (default is False).
+        fit_kws : dict, optional
+            Options to pass to the minimizer being used.
+        **kwargs : optional
+            Arguments to pass to the model function, possibly overriding
+            params.
Returns
-------
- lmfit.ModelResult
+ ModelResult
Examples
--------
- # Take t to be the independent variable and data to be the
- # curve we will fit.
+ Take `t` to be the independent variable and data to be the curve we
+ will fit. Use keyword arguments to set initial guesses:
- # Using keyword arguments to set initial guesses
>>> result = my_model.fit(data, tau=5, N=3, t=t)
- # Or, for more control, pass a Parameters object.
+ Or, for more control, pass a Parameters object.
+
>>> result = my_model.fit(data, params, t=t)
- # Keyword arguments override Parameters.
+ Keyword arguments override Parameters.
+
>>> result = my_model.fit(data, params, tau=5, t=t)
- Note
- ----
- All parameters, however passed, are copied on input, so the original
- Parameter objects are unchanged.
+ Notes
+ -----
+ 1. if `params` is None, the values for all parameters are
+ expected to be provided as keyword arguments. If `params` is
+ given, and a keyword argument for a parameter value is also given,
+ the keyword argument will be used.
+
+ 2. all non-parameter arguments for the model function, **including
+ all the independent variables** will need to be passed in using
+ keyword arguments.
+
+ 3. Parameters (however passed in), are copied on input, so the
+ original Parameter objects are unchanged, and the updated values
+ are in the returned `ModelResult`.
"""
if params is None:
@@ -487,7 +682,7 @@ class Model(object):
del kwargs[name]
# All remaining kwargs should correspond to independent variables.
- for name in kwargs.keys():
+ for name in kwargs:
if name not in self.independent_vars:
warnings.warn("The keyword argument %s does not" % name +
"match any arguments of the model function." +
@@ -503,7 +698,7 @@ class Model(object):
'Parameters or keyword arguments to fit.\n')
missing = [p for p in self.param_names if p not in params.keys()]
blank = [name for name, p in params.items()
- if (p.value is None and p.expr is None)]
+ if p.value is None and p.expr is None]
msg += 'Missing parameters: %s\n' % str(missing)
msg += 'Non initialized parameters: %s' % str(blank)
raise ValueError(msg)
@@ -543,54 +738,64 @@ class Model(object):
return output
def __add__(self, other):
+ """+"""
return CompositeModel(self, other, operator.add)
def __sub__(self, other):
+ """-"""
return CompositeModel(self, other, operator.sub)
def __mul__(self, other):
+ """*"""
return CompositeModel(self, other, operator.mul)
def __div__(self, other):
+ """/"""
return CompositeModel(self, other, operator.truediv)
def __truediv__(self, other):
+ """/"""
return CompositeModel(self, other, operator.truediv)
class CompositeModel(Model):
- """Create a composite model -- a binary operator of two Models
-
- Parameters
- ----------
- left_model: left-hand side model-- must be a Model()
- right_model: right-hand side model -- must be a Model()
- oper: callable binary operator (typically, operator.add, operator.mul, etc)
-
- independent_vars: list of strings or None (default)
- arguments to func that are independent variables
- param_names: list of strings or None (default)
- names of arguments to func that are to be made into parameters
- missing: None, 'none', 'drop', or 'raise'
- 'none' or None: Do not check for null or missing values (default)
- 'drop': Drop null or missing observations in data.
- if pandas is installed, pandas.isnull is used, otherwise
- numpy.isnan is used.
- 'raise': Raise a (more helpful) exception when data contains null
- or missing values.
- name: None or string
- name for the model. When `None` (default) the name is the same as
- the model function (`func`).
+ """Combine two models (`left` and `right`) with a binary operator (`op`)
+ into a CompositeModel.
+
+ Normally, one does not have to explicitly create a `CompositeModel`,
+ but can use normal Python operators `+`, '-', `*`, and `/` to combine
+ components as in::
+
+ >>> mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
"""
+
_names_collide = ("\nTwo models have parameters named '{clash}'. "
"Use distinct names.")
- _bad_arg = "CompositeModel: argument {arg} is not a Model"
- _bad_op = "CompositeModel: operator {op} is not callable"
+ _bad_arg = "CompositeModel: argument {arg} is not a Model"
+ _bad_op = "CompositeModel: operator {op} is not callable"
_known_ops = {operator.add: '+', operator.sub: '-',
operator.mul: '*', operator.truediv: '/'}
def __init__(self, left, right, op, **kws):
+ """
+ Parameters
+ ----------
+ left : Model
+ Left-hand model.
+ right : Model
+ Right-hand model.
+ op : callable binary operator
+ Operator to combine `left` and `right` models.
+ **kws : optional
+ Additional keywords are passed to `Model` when creating this
+ new model.
+
+ Notes
+ -----
+ 1. The two models must use the same independent variable.
+
+ """
if not isinstance(left, Model):
raise ValueError(self._bad_arg.format(arg=left))
if not isinstance(right, Model):
@@ -598,9 +803,9 @@ class CompositeModel(Model):
if not callable(op):
raise ValueError(self._bad_op.format(op=op))
- self.left = left
+ self.left = left
self.right = right
- self.op = op
+ self.op = op
name_collisions = set(left.param_names) & set(right.param_names)
if len(name_collisions) > 0:
@@ -615,7 +820,8 @@ class CompositeModel(Model):
if 'missing' not in kws:
kws['missing'] = self.left.missing
- def _tmp(self, *args, **kws): pass
+ def _tmp(self, *args, **kws):
+ pass
Model.__init__(self, _tmp, **kws)
for side in (left, right):
@@ -639,86 +845,69 @@ class CompositeModel(Model):
self.right._reprstring(long=long))
def eval(self, params=None, **kwargs):
+ """TODO: docstring in public method."""
return self.op(self.left.eval(params=params, **kwargs),
self.right.eval(params=params, **kwargs))
def eval_components(self, **kwargs):
- """return ordered dict of name, results for each component"""
+ """Return OrderedDict of name, results for each component."""
out = OrderedDict(self.left.eval_components(**kwargs))
out.update(self.right.eval_components(**kwargs))
return out
@property
def param_names(self):
- return self.left.param_names + self.right.param_names
+ """Return parameter names for composite model."""
+ return self.left.param_names + self.right.param_names
@property
def components(self):
- """return components for composite model"""
+ """Return components for composite model."""
return self.left.components + self.right.components
def _make_all_args(self, params=None, **kwargs):
- """generate **all** function args for all functions"""
+ """Generate **all** function arguments for all functions."""
out = self.right._make_all_args(params=params, **kwargs)
out.update(self.left._make_all_args(params=params, **kwargs))
return out
class ModelResult(Minimizer):
- """Result from Model fit
-
- Attributes
- -----------
- model instance of Model -- the model function
- params instance of Parameters -- the fit parameters
- data array of data values to compare to model
- weights array of weights used in fitting
- init_params copy of params, before being updated by fit()
- init_values array of parameter values, before being updated by fit()
- init_fit model evaluated with init_params.
- best_fit model evaluated with params after being updated by fit()
-
- Methods:
- --------
- fit(data=None, params=None, weights=None, method=None, **kwargs)
- fit (or re-fit) model with params to data (with weights)
- using supplied method. The keyword arguments are sent to
- as keyword arguments to the model function.
-
- all inputs are optional, defaulting to the value used in
- the previous fit. This allows easily changing data or
- parameter settings, or both.
-
- eval(params=None, **kwargs)
- evaluate the current model, with parameters (defaults to the current
- parameter values), with values in kwargs sent to the model function.
-
- eval_components(params=Nones, **kwargs)
- evaluate the current model, with parameters (defaults to the current
- parameter values), with values in kwargs sent to the model function
- and returns an ordered dict with the model names as the key and the
- component results as the values.
-
- fit_report(modelpars=None, show_correl=True, min_correl=0.1)
- return a fit report.
-
- plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', xlabel = None, ylabel=None,
- numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
- ax_kws=None)
- Plot the fit results using matplotlib.
-
- plot_residuals(self, ax=None, datafmt='o', data_kws=None, fit_kws=None,
- ax_kws=None)
- Plot the fit residuals using matplotlib.
-
- plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None, ylabel=None, numpoints=None,
- data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None,
- ax_fit_kws=None, fig_kws=None)
- Plot the fit results and residuals using matplotlib.
+ """Result from the Model fit.
+
+ This has many attributes and methods for viewing and working with
+ the results of a fit using Model. It inherits from Minimizer, so
+ that it can be used to modify and re-run the fit for the Model.
+
"""
+
def __init__(self, model, params, data=None, weights=None,
method='leastsq', fcn_args=None, fcn_kws=None,
iter_cb=None, scale_covar=True, **fit_kws):
+ """
+ Parameters
+ ----------
+ model : Model
+ Model to use.
+ params : Parameters
+ Parameters with initial values for model.
+ data : array_like, optional
+ Data to be modeled.
+ weights : array_like, optional
+ Weights to multiply (data-model) for fit residual.
+ method : str, optional
+ Name of minimization method to use (default is `'leastsq'`).
+ fcn_args : sequence, optional
+ Positional arguments to send to model function.
+        fcn_kws : dict, optional
+ Keyword arguments to send to model function.
+ iter_cb : callable, optional
+ Function to call on each iteration of fit.
+ scale_covar : bool, optional
+ Whether to scale covariance matrix for uncertainty evaluation.
+ **fit_kws : optional
+ Keyword arguments to send to minimization routine.
+ """
self.model = model
self.data = data
self.weights = weights
@@ -730,7 +919,22 @@ class ModelResult(Minimizer):
scale_covar=scale_covar, **fit_kws)
def fit(self, data=None, params=None, weights=None, method=None, **kwargs):
- """perform fit for a Model, given data and params"""
+ """Re-perform fit for a Model, given data and params.
+
+ Parameters
+ ----------
+ data : array_like, optional
+ Data to be modeled.
+ params : Parameters, optional
+ Parameters with initial values for model.
+ weights : array_like, optional
+ Weights to multiply (data-model) for fit residual.
+ method : str, optional
+ Name of minimization method to use (default is `'leastsq'`).
+ **kwargs : optional
+ Keyword arguments to send to minimization routine.
+
+ """
if data is not None:
self.data = data
if params is not None:
@@ -742,12 +946,12 @@ class ModelResult(Minimizer):
self.ci_out = None
self.userargs = (self.data, self.weights)
self.userkws.update(kwargs)
- self.init_fit = self.model.eval(params=self.params, **self.userkws)
+ self.init_fit = self.model.eval(params=self.params, **self.userkws)
_ret = self.minimize(method=self.method)
for attr in dir(_ret):
- if not attr.startswith('_') :
+ if not attr.startswith('_'):
try:
setattr(self, attr, getattr(_ret, attr))
except AttributeError:
@@ -755,115 +959,265 @@ class ModelResult(Minimizer):
self.init_values = self.model._make_all_args(self.init_params)
self.best_values = self.model._make_all_args(_ret.params)
- self.best_fit = self.model.eval(params=_ret.params, **self.userkws)
+ self.best_fit = self.model.eval(params=_ret.params, **self.userkws)
def eval(self, params=None, **kwargs):
- """
- evaluate model function
- Arguments:
- params (Parameters): parameters, defaults to ModelResult .params
- kwargs (variable): values of options, independent variables, etc
+ """Evaluate model function.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters to use.
+ **kwargs : optional
+ Options to send to Model.eval()
+
+ Returns
+ -------
+ out : numpy.ndarray
+ Array for evaluated model.
- Returns:
- ndarray or float for evaluated model
"""
self.userkws.update(kwargs)
if params is None:
- params = self.params
+ params = self.params
return self.model.eval(params=params, **self.userkws)
def eval_components(self, params=None, **kwargs):
- """
- evaluate each component of a composite model function
- Arguments:
- params (Parameters): parameters, defaults to ModelResult .params
- kwargs (variable): values of options, independent variables, etc
-
- Returns:
- ordered dictionary with keys of prefixes, and values of values for
- each component of the model.
+ """Evaluate each component of a composite model function.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters, defaults to ModelResult.params
+ **kwargs : optional
+            Keyword arguments to pass to model function.
+
+ Returns
+ -------
+ OrderedDict
+ Keys are prefixes of component models, and values are
+ the estimated model value for each component of the model.
+
"""
self.userkws.update(kwargs)
if params is None:
params = self.params
return self.model.eval_components(params=params, **self.userkws)
+ def eval_uncertainty(self, params=None, sigma=1, **kwargs):
+ """Evaluate the uncertainty of the *model function* from the
+ uncertainties for the best-fit parameters. This can be used to give
+ confidence bands for the model.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters, defaults to ModelResult.params.
+ sigma : float, optional
+ Confidence level, i.e. how many sigma (default is 1).
+ **kwargs : optional
+ Values of options, independent variables, etcetera.
+
+ Returns
+ -------
+ numpy.ndarray
+ Uncertainty at each value of the model.
+
+ Example
+ -------
+
+ >>> out = model.fit(data, params, x=x)
+        >>> dely = out.eval_uncertainty(x=x)
+ >>> plt.plot(x, data)
+ >>> plt.plot(x, out.best_fit)
+ >>> plt.fill_between(x, out.best_fit-dely,
+ ... out.best_fit+dely, color='#888888')
+
+ Notes
+ -----
+ 1. This is based on the excellent and clear example from
+ https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#confidence-and-prediction-intervals,
+ which references the original work of:
+           J. Wolberg, Data Analysis Using the Method of Least Squares, 2006, Springer
+ 2. The value of sigma is number of `sigma` values, and is converted to a
+           probability. Values of 1, 2, or 3 give probabilities of 0.6827, 0.9545,
+ and 0.9973, respectively. If the sigma value is < 1, it is interpreted
+ as the probability itself. That is, `sigma=1` and `sigma=0.6827` will
+ give the same results, within precision errors.
+
+ """
+ self.userkws.update(kwargs)
+ if params is None:
+ params = self.params
+
+ nvarys = self.nvarys
+ ndata = self.ndata
+ covar = self.covar / self.redchi
+ fjac = np.zeros(ndata*nvarys).reshape((nvarys, ndata))
+ df2 = np.zeros(ndata)
+
+ # find derivative by hand!
+ for i in range(nvarys):
+ pname = self.var_names[i]
+ pars = self.params
+ val0 = pars[pname].value
+ dval = pars[pname].stderr/3.0
+
+ pars[pname].value = val0 + dval
+ res1 = self.model.eval(pars, **self.userkws)
+
+ pars[pname].value = val0 - dval
+ res2 = self.model.eval(pars, **self.userkws)
+
+ pars[pname].value = val0
+ fjac[i] = (res1 - res2) / (2*dval)
+
+ for i in range(nvarys):
+ for j in range(nvarys):
+ df2 += fjac[i]*fjac[j]*covar[i, j]
+
+ if sigma < 1.0:
+ prob = sigma
+ else:
+ prob = erf(sigma/np.sqrt(2))
+ return np.sqrt(df2*self.redchi) * t.ppf((prob+1)/2.0, ndata-nvarys)
+
def conf_interval(self, **kwargs):
- """return explicitly calculated confidence intervals"""
+ """Calculate the confidence intervals for the variable parameters.
+
+ Confidence intervals are calculated using the
+ :func:`confidence.conf_interval()` function and keyword
+ arguments (`**kwargs`) are passed to that function. The result
+ is stored in the :attr:`ci_out` attribute so that it can be
+ accessed without recalculating them.
+
+ """
if self.ci_out is None:
self.ci_out = conf_interval(self, self, **kwargs)
return self.ci_out
def ci_report(self, with_offset=True, ndigits=5, **kwargs):
- """return nicely formatted report about confidence intervals"""
+ """Return a nicely formatted text report of the confidence intervals.
+
+ Parameters
+ ----------
+ with_offset : bool, optional
+ Whether to subtract best value from all other values (default is True).
+ ndigits : int, optional
+ Number of significant digits to show (default is 5).
+ **kwargs: optional
+ Keyword arguments that are passed to the `conf_interval` function.
+
+ Returns
+ -------
+ str
+ Text of formatted report on confidence intervals.
+
+ """
return ci_report(self.conf_interval(**kwargs),
with_offset=with_offset, ndigits=ndigits)
- def fit_report(self, **kwargs):
- "return fit report"
- return '[[Model]]\n %s\n%s\n' % (self.model._reprstring(long=True),
- fit_report(self, **kwargs))
+ def fit_report(self, modelpars=None, show_correl=True,
+ min_correl=0.1, sort_pars=False):
+ """Return a printable fit report.
+
+ The report contains fit statistics and best-fit values with
+ uncertainties and correlations.
+
+
+ Parameters
+ ----------
+ modelpars : Parameters, optional
+ Known Model Parameters.
+ show_correl : bool, optional
+ Whether to show list of sorted correlations (default is True).
+ min_correl : float, optional
+ Smallest correlation in absolute value to show (default is 0.1).
+ sort_pars : callable, optional
+ Whether to show parameter names sorted in alphanumerical order
+ (default is False). If False, then the parameters will be listed in
+ the order as they were added to the Parameters dictionary. If callable,
+ then this (one argument) function is used to extract a comparison key
+ from each list element.
+
+ Returns
+ -------
+ text : str
+ Multi-line text of fit report.
+
+ See Also
+ --------
+ :func:`fit_report()`
+
+ """
+ report = fit_report(self, modelpars=modelpars,
+ show_correl=show_correl,
+ min_correl=min_correl, sort_pars=sort_pars)
+ modname = self.model._reprstring(long=True)
+ return '[[Model]]\n %s\n%s\n' % (modname, report)
@_ensureMatplotlib
- def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', xlabel=None, ylabel=None, yerr=None,
- numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
- ax_kws=None):
- """Plot the fit results using matplotlib.
+ def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+ xlabel=None, ylabel=None, yerr=None, numpoints=None,
+ data_kws=None, fit_kws=None, init_kws=None, ax_kws=None):
+ """Plot the fit results using matplotlib, if available.
+
+ The plot will include the data points, the initial fit curve, and
+ the best-fit curve. If the fit model included weights or if `yerr`
+ is specified, errorbars will also be plotted.
- The method will plot results of the fit using matplotlib, including:
- the data points, the initial fit curve and the fitted curve. If the fit
- model included weights, errorbars will also be plotted.
Parameters
----------
ax : matplotlib.axes.Axes, optional
The axes to plot on. The default in None, which means use the
current pyplot axis or create one if there is none.
- datafmt : string, optional
- matplotlib format string for data points
- fitfmt : string, optional
- matplotlib format string for fitted curve
- initfmt : string, optional
- matplotlib format string for initial conditions for the fit
- xlabel : string, optional
- matplotlib format string for labeling the x-axis
- ylabel : string, optional
- matplotlib format string for labeling the y-axis
- yerr : ndarray, optional
- array of uncertainties for data array
+ datafmt : str, optional
+ Matplotlib format string for data points.
+ fitfmt : str, optional
+ Matplotlib format string for fitted curve.
+ initfmt : str, optional
+ Matplotlib format string for initial conditions for the fit.
+ xlabel : str, optional
+ Matplotlib format string for labeling the x-axis.
+ ylabel : str, optional
+ Matplotlib format string for labeling the y-axis.
+ yerr : numpy.ndarray, optional
+ Array of uncertainties for data array.
numpoints : int, optional
If provided, the final and initial fit curves are evaluated not
only at data points, but refined to contain `numpoints` points in
total.
- data_kws : dictionary, optional
- keyword arguments passed on to the plot function for data points
- fit_kws : dictionary, optional
- keyword arguments passed on to the plot function for fitted curve
- init_kws : dictionary, optional
- keyword arguments passed on to the plot function for the initial
- conditions of the fit
- ax_kws : dictionary, optional
- keyword arguments for a new axis, if there is one being created
+ data_kws : dict, optional
+ Keyword arguments passed on to the plot function for data points.
+ fit_kws : dict, optional
+ Keyword arguments passed on to the plot function for fitted curve.
+ init_kws : dict, optional
+ Keyword arguments passed on to the plot function for the initial
+ conditions of the fit.
+ ax_kws : dict, optional
+ Keyword arguments for a new axis, if there is one being created.
Returns
-------
matplotlib.axes.Axes
Notes
- ----
+ -----
For details about plot format strings and keyword arguments see
documentation of matplotlib.axes.Axes.plot.
- If yerr is specified or if the fit model included weights, then
- matplotlib.axes.Axes.errorbar is used to plot the data. If yerr is
- not specified and the fit includes weights, yerr set to 1/self.weights
+ If `yerr` is specified or if the fit model included weights, then
+ matplotlib.axes.Axes.errorbar is used to plot the data. If `yerr` is
+ not specified and the fit includes weights, `yerr` set to 1/self.weights
- If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
+ If `ax` is None then `matplotlib.pyplot.gca(**ax_kws)` is called.
See Also
--------
ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
ModelResult.plot : Plot the fit results and residuals using matplotlib.
+
"""
if data_kws is None:
data_kws = {}
@@ -892,12 +1246,15 @@ class ModelResult(Minimizer):
else:
x_array_dense = x_array
- ax.plot(x_array_dense, self.model.eval(self.init_params,
- **{independent_var: x_array_dense}), initfmt,
- label='init', **init_kws)
- ax.plot(x_array_dense, self.model.eval(self.params,
- **{independent_var: x_array_dense}), fitfmt,
- label='best-fit', **fit_kws)
+ ax.plot(
+ x_array_dense,
+ self.model.eval(self.init_params,
+ **{independent_var: x_array_dense}),
+ initfmt, label='init', **init_kws)
+ ax.plot(
+ x_array_dense,
+ self.model.eval(self.params, **{independent_var: x_array_dense}),
+ fitfmt, label='best-fit', **fit_kws)
if yerr is None and self.weights is not None:
yerr = 1.0/self.weights
@@ -910,65 +1267,64 @@ class ModelResult(Minimizer):
ax.set_title(self.model.name)
if xlabel is None:
ax.set_xlabel(independent_var)
- else: ax.set_xlabel(xlabel)
+ else:
+ ax.set_xlabel(xlabel)
if ylabel is None:
ax.set_ylabel('y')
- else: ax.set_ylabel(ylabel)
- ax.legend()
-
+ else:
+ ax.set_ylabel(ylabel)
+ ax.legend(loc='best')
return ax
@_ensureMatplotlib
def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,
fit_kws=None, ax_kws=None):
- """Plot the fit residuals using matplotlib.
+ """Plot the fit residuals using matplotlib, if available.
- The method will plot residuals of the fit using matplotlib, including:
- the data points and the fitted curve (as horizontal line). If the fit
- model included weights, errorbars will also be plotted.
+ If `yerr` is supplied or if the model included weights, errorbars
+ will also be plotted.
Parameters
----------
ax : matplotlib.axes.Axes, optional
The axes to plot on. The default in None, which means use the
current pyplot axis or create one if there is none.
- datafmt : string, optional
- matplotlib format string for data points
- yerr : ndarray, optional
- array of uncertainties for data array
- data_kws : dictionary, optional
- keyword arguments passed on to the plot function for data points
- fit_kws : dictionary, optional
- keyword arguments passed on to the plot function for fitted curve
- ax_kws : dictionary, optional
- keyword arguments for a new axis, if there is one being created
+ datafmt : str, optional
+ Matplotlib format string for data points.
+ yerr : numpy.ndarray, optional
+ Array of uncertainties for data array.
+ data_kws : dict, optional
+ Keyword arguments passed on to the plot function for data points.
+ fit_kws : dict, optional
+ Keyword arguments passed on to the plot function for fitted curve.
+ ax_kws : dict, optional
+ Keyword arguments for a new axis, if there is one being created.
Returns
-------
matplotlib.axes.Axes
Notes
- ----
+ -----
For details about plot format strings and keyword arguments see
documentation of matplotlib.axes.Axes.plot.
- If yerr is specified or if the fit model included weights, then
- matplotlib.axes.Axes.errorbar is used to plot the data. If yerr is
- not specified and the fit includes weights, yerr set to 1/self.weights
+ If `yerr` is specified or if the fit model included weights, then
+ matplotlib.axes.Axes.errorbar is used to plot the data. If `yerr` is
+ not specified and the fit includes weights, `yerr` set to 1/self.weights
- If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
+ If `ax` is None then `matplotlib.pyplot.gca(**ax_kws)` is called.
See Also
--------
ModelResult.plot_fit : Plot the fit results using matplotlib.
ModelResult.plot : Plot the fit results and residuals using matplotlib.
+
"""
if data_kws is None:
data_kws = {}
if fit_kws is None:
fit_kws = {}
- if fit_kws is None:
- fit_kws = {}
if ax_kws is None:
ax_kws = {}
@@ -997,16 +1353,15 @@ class ModelResult(Minimizer):
ax.set_title(self.model.name)
ax.set_ylabel('residuals')
- ax.legend()
-
+ ax.legend(loc='best')
return ax
@_ensureMatplotlib
- def plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None, ylabel=None, yerr=None,
- numpoints=None, fig=None, data_kws=None, fit_kws=None,
- init_kws=None, ax_res_kws=None, ax_fit_kws=None,
+ def plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None,
+ ylabel=None, yerr=None, numpoints=None, fig=None, data_kws=None,
+ fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None,
fig_kws=None):
- """Plot the fit results and residuals using matplotlib.
+ """Plot the fit results and residuals using matplotlib, if available.
The method will produce a matplotlib figure with both results of the
fit and the residuals plotted. If the fit model included weights,
@@ -1014,57 +1369,59 @@ class ModelResult(Minimizer):
Parameters
----------
- datafmt : string, optional
- matplotlib format string for data points
- fitfmt : string, optional
- matplotlib format string for fitted curve
- initfmt : string, optional
- matplotlib format string for initial conditions for the fit
- xlabel : string, optional
- matplotlib format string for labeling the x-axis
- ylabel : string, optional
- matplotlib format string for labeling the y-axis
- yerr : ndarray, optional
- array of uncertainties for data array
+ datafmt : str, optional
+ Matplotlib format string for data points.
+ fitfmt : str, optional
+ Matplotlib format string for fitted curve.
+ initfmt : str, optional
+ Matplotlib format string for initial conditions for the fit.
+ xlabel : str, optional
+ Matplotlib format string for labeling the x-axis.
+ ylabel : str, optional
+ Matplotlib format string for labeling the y-axis.
+ yerr : numpy.ndarray, optional
+ Array of uncertainties for data array.
numpoints : int, optional
If provided, the final and initial fit curves are evaluated not
only at data points, but refined to contain `numpoints` points in
total.
fig : matplotlib.figure.Figure, optional
- The figure to plot on. The default in None, which means use the
+ The figure to plot on. The default is None, which means use the
current pyplot figure or create one if there is none.
- data_kws : dictionary, optional
- keyword arguments passed on to the plot function for data points
- fit_kws : dictionary, optional
- keyword arguments passed on to the plot function for fitted curve
- init_kws : dictionary, optional
- keyword arguments passed on to the plot function for the initial
- conditions of the fit
- ax_res_kws : dictionary, optional
- keyword arguments for the axes for the residuals plot
- ax_fit_kws : dictionary, optional
- keyword arguments for the axes for the fit plot
- fig_kws : dictionary, optional
- keyword arguments for a new figure, if there is one being created
+ data_kws : dict, optional
+ Keyword arguments passed on to the plot function for data points.
+ fit_kws : dict, optional
+ Keyword arguments passed on to the plot function for fitted curve.
+ init_kws : dict, optional
+ Keyword arguments passed on to the plot function for the initial
+ conditions of the fit.
+ ax_res_kws : dict, optional
+ Keyword arguments for the axes for the residuals plot.
+ ax_fit_kws : dict, optional
+ Keyword arguments for the axes for the fit plot.
+ fig_kws : dict, optional
+ Keyword arguments for a new figure, if there is one being created.
Returns
-------
- matplotlib.figure.Figure
+ A tuple with matplotlib's Figure and GridSpec objects.
Notes
- ----
+ -----
The method combines ModelResult.plot_fit and ModelResult.plot_residuals.
- If yerr is specified or if the fit model included weights, then
- matplotlib.axes.Axes.errorbar is used to plot the data. If yerr is
- not specified and the fit includes weights, yerr set to 1/self.weights
+ If `yerr` is specified or if the fit model included weights, then
+ matplotlib.axes.Axes.errorbar is used to plot the data. If `yerr` is
+ not specified and the fit includes weights, `yerr` set to 1/self.weights
- If `fig` is None then matplotlib.pyplot.figure(**fig_kws) is called.
+ If `fig` is None then `matplotlib.pyplot.figure(**fig_kws)` is called,
+ otherwise `fig_kws` is ignored.
See Also
--------
ModelResult.plot_fit : Plot the fit results using matplotlib.
ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
+
"""
if data_kws is None:
data_kws = {}
@@ -1076,8 +1433,11 @@ class ModelResult(Minimizer):
ax_res_kws = {}
if ax_fit_kws is None:
ax_fit_kws = {}
- if fig_kws is None:
- fig_kws = {}
+ # make a square figure with side equal to the default figure's x-size
+ figxsize = plt.rcParams['figure.figsize'][0]
+ fig_kws_ = dict(figsize=(figxsize, figxsize))
+ if fig_kws is not None:
+ fig_kws_.update(fig_kws)
if len(self.model.independent_vars) != 1:
print('Fit can only be plotted if the model function has one '
@@ -1085,17 +1445,19 @@ class ModelResult(Minimizer):
return False
if not isinstance(fig, plt.Figure):
- fig = plt.figure(**fig_kws)
+ fig = plt.figure(**fig_kws_)
gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 4])
ax_res = fig.add_subplot(gs[0], **ax_res_kws)
ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,
- initfmt=initfmt, xlabel=xlabel, ylabel=ylabel, numpoints=numpoints, data_kws=data_kws,
+ initfmt=initfmt, xlabel=xlabel, ylabel=ylabel,
+ numpoints=numpoints, data_kws=data_kws,
fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)
self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
data_kws=data_kws, fit_kws=fit_kws,
ax_kws=ax_res_kws)
-
- return fig
+ plt.setp(ax_res.get_xticklabels(), visible=False)
+ ax_fit.set_title('')
+ return fig, gs
diff --git a/lmfit/models.py b/lmfit/models.py
index d508fda..8bf0510 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -1,58 +1,62 @@
+"""TODO: module docstring."""
import numpy as np
-from .model import Model
-
-from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, moffat, pearson7,
- step, rectangle, breit_wigner, logistic,
- students_t, lognormal, damped_oscillator,
- expgaussian, skewed_gaussian, donaich,
- skewed_voigt, exponential, powerlaw, linear,
- parabolic)
from . import lineshapes
-
from .asteval import Interpreter
from .astutils import get_ast_names
+from .lineshapes import (breit_wigner, damped_oscillator, dho, donaich,
+ expgaussian, exponential, gaussian, linear, logistic,
+ lognormal, lorentzian, moffat, parabolic, pearson7,
+ powerlaw, pvoigt, rectangle, skewed_gaussian,
+ skewed_voigt, step, students_t, voigt)
+from .model import Model
+
class DimensionalError(Exception):
+ """TODO: class docstring."""
pass
+
def _validate_1d(independent_vars):
if len(independent_vars) != 1:
raise DimensionalError(
"This model requires exactly one independent variable.")
+
def index_of(arr, val):
- """return index of array nearest to a value
- """
+ """Return index of array nearest to a value."""
if val < min(arr):
return 0
return np.abs(arr-val).argmin()
+
def fwhm_expr(model):
- "return constraint expression for fwhm"
+ """Return constraint expression for fwhm."""
fmt = "{factor:.7f}*{prefix:s}sigma"
return fmt.format(factor=model.fwhm_factor, prefix=model.prefix)
+
def height_expr(model):
- "return constraint expression for maximum peak height"
+ """Return constraint expression for maximum peak height."""
fmt = "{factor:.7f}*{prefix:s}amplitude/max(1.e-15, {prefix:s}sigma)"
return fmt.format(factor=model.height_factor, prefix=model.prefix)
+
def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
- "estimate amp, cen, sigma for a peak, create params"
+ """Estimate amp, cen, sigma for a peak, create params."""
if x is None:
return 1.0, 0.0, 1.0
maxy, miny = max(y), min(y)
maxx, minx = max(x), min(x)
imaxy = index_of(y, maxy)
cen = x[imaxy]
- amp = (maxy - miny)*2.0
+ amp = (maxy - miny)*3.0
sig = (maxx-minx)/6.0
halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
if negative:
imaxy = index_of(y, miny)
- amp = -(maxy - miny)*2.0
+ amp = -(maxy - miny)*3.0
halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
if len(halfmax_vals) > 2:
sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
@@ -64,49 +68,103 @@ def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
pars['%ssigma' % model.prefix].set(min=0.0)
return pars
+
def update_param_vals(pars, prefix, **kwargs):
- """convenience function to update parameter values
- with keyword arguments"""
+ """Update parameter values with keyword arguments."""
for key, val in kwargs.items():
pname = "%s%s" % (prefix, key)
if pname in pars:
pars[pname].value = val
return pars
-COMMON_DOC = """
-
-Parameters
-----------
-independent_vars: list of strings to be set as variable names
-missing: None, 'drop', or 'raise'
- None: Do not check for null or missing values.
- 'drop': Drop null or missing observations in data.
- Use pandas.isnull if pandas is available; otherwise,
- silently fall back to numpy.isnan.
- 'raise': Raise a (more helpful) exception when data contains null
- or missing values.
-prefix: string to prepend to paramter names, needed to add two Models that
- have parameter names in common. None by default.
-"""
+
+COMMON_INIT_DOC = """
+ Parameters
+ ----------
+ independent_vars: ['x']
+ Arguments to func that are independent variables.
+ prefix: string, optional
+ String to prepend to parameter names, needed to add two Models that
+ have parameter names in common.
+ missing: str or None, optional
+ How to handle NaN and missing values in data. One of:
+
+ - 'none' or None: Do not check for null or missing values (default).
+
+ - 'drop': Drop null or missing observations in data. if pandas is
+ installed, `pandas.isnull` is used, otherwise `numpy.isnan` is used.
+
+ - 'raise': Raise a (more helpful) exception when data contains null
+ or missing values.
+ **kwargs : optional
+ Keyword arguments to pass to :class:`Model`.
+
+ """
+
+COMMON_GUESS_DOC = """Guess starting values for the parameters of a model.
+
+ Parameters
+ ----------
+ data : array_like
+ Array of data to use to guess parameter values.
+ **kws : optional
+ Additional keyword arguments, passed to model function.
+
+ Returns
+ -------
+ params : Parameters
+
+ """
+
+COMMON_DOC = COMMON_INIT_DOC
+
class ConstantModel(Model):
- __doc__ = "x -> c" + COMMON_DOC
- def __init__(self, *args, **kwargs):
- def constant(x, c):
+ """Constant model, with a single Parameter: ``c``.
+
+ Note that this is 'constant' in the sense of having no dependence on
+ the independent variable ``x``, not in the sense of being non-
+ varying. To be clear, ``c`` will be a Parameter that will be varied
+ in the fit (by default, of course).
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+
+ def constant(x, c=0.0):
return c
- super(ConstantModel, self).__init__(constant, *args, **kwargs)
+ super(ConstantModel, self).__init__(constant, **kwargs)
def guess(self, data, **kwargs):
pars = self.make_params()
+
pars['%sc' % self.prefix].set(value=data.mean())
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class ComplexConstantModel(Model):
- __doc__ = "x -> re+1j*im" + COMMON_DOC
- def __init__(self, *args, **kwargs):
- def constant(x, re, im):
+ """Complex constant model, with two Parameters: ``re``, and ``im``.
+
+ Note that ``re`` and ``im`` are 'constant' in the sense of having no
+ dependence on the independent variable ``x``, not in the sense of
+ being non-varying. To be clear, ``re`` and ``im`` will be Parameters
+ that will be varied in the fit (by default, of course).
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+
+ def constant(x, re=0., im=0.):
return re + 1j*im
- super(ComplexConstantModel, self).__init__(constant, *args, **kwargs)
+ super(ComplexConstantModel, self).__init__(constant, **kwargs)
def guess(self, data, **kwargs):
pars = self.make_params()
@@ -114,10 +172,28 @@ class ComplexConstantModel(Model):
pars['%sim' % self.prefix].set(value=data.imag.mean())
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
class LinearModel(Model):
- __doc__ = linear.__doc__ + COMMON_DOC if linear.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(LinearModel, self).__init__(linear, *args, **kwargs)
+ """Linear model, with two Parameters ``intercept`` and ``slope``.
+
+ Defined as:
+
+ .. math::
+
+ f(x; m, b) = m x + b
+
+ with ``slope`` for :math:`m` and ``intercept`` for :math:`b`.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(LinearModel, self).__init__(linear, **kwargs)
def guess(self, data, x=None, **kwargs):
sval, oval = 0., 0.
@@ -126,11 +202,26 @@ class LinearModel(Model):
pars = self.make_params(intercept=oval, slope=sval)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class QuadraticModel(Model):
- __doc__ = parabolic.__doc__ + COMMON_DOC if parabolic.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(QuadraticModel, self).__init__(parabolic, *args, **kwargs)
+ """A quadratic model, with three Parameters ``a``, ``b``, and ``c``.
+
+ Defined as:
+
+ .. math::
+
+ f(x; a, b, c) = a x^2 + b x + c
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(QuadraticModel, self).__init__(parabolic, **kwargs)
def guess(self, data, x=None, **kwargs):
a, b, c = 0., 0., 0.
@@ -139,14 +230,34 @@ class QuadraticModel(Model):
pars = self.make_params(a=a, b=b, c=c)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
ParabolicModel = QuadraticModel
+
class PolynomialModel(Model):
- __doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
- MAX_DEGREE=7
+ r"""A polynomial model with up to 7 Parameters, specified by ``degree``.
+
+ .. math::
+
+ f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0, 7} c_i x^i
+
+ with parameters ``c0``, ``c1``, ..., ``c7``. The supplied ``degree``
+ will specify how many of these are actual variable parameters. This
+ uses :numpydoc:`polyval` for its calculation of the polynomial.
+
+ """
+
+ MAX_DEGREE = 7
DEGREE_ERR = "degree must be an integer less than %d."
- def __init__(self, degree, *args, **kwargs):
- if not isinstance(degree, int) or degree > self.MAX_DEGREE:
+
+ def __init__(self, degree, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ if not isinstance(degree, int) or degree > self.MAX_DEGREE:
raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
self.poly_degree = degree
@@ -156,23 +267,46 @@ class PolynomialModel(Model):
def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
- super(PolynomialModel, self).__init__(polynomial, *args, **kwargs)
+ super(PolynomialModel, self).__init__(polynomial, **kwargs)
def guess(self, data, x=None, **kwargs):
pars = self.make_params()
if x is not None:
out = np.polyfit(x, data, self.poly_degree)
for i, coef in enumerate(out[::-1]):
- pars['%sc%i'% (self.prefix, i)].set(value=coef)
+ pars['%sc%i' % (self.prefix, i)].set(value=coef)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class GaussianModel(Model):
- __doc__ = gaussian.__doc__ + COMMON_DOC if gaussian.__doc__ else ""
+ r"""A model based on a Gaussian or normal distribution lineshape.
+ (see http://en.wikipedia.org/wiki/Normal_distribution), with three Parameters:
+ ``amplitude``, ``center``, and ``sigma``.
+ In addition, parameters ``fwhm`` and ``height`` are included as constraints
+ to report full width at half maximum and maximum peak height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}
+
+ where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
+ :math:`\mu`, and ``sigma`` to :math:`\sigma`. The full width at
+ half maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
+ :math:`2.3548\sigma`.
+
+ """
+
fwhm_factor = 2.354820
height_factor = 1./np.sqrt(2*np.pi)
- def __init__(self, *args, **kwargs):
- super(GaussianModel, self).__init__(gaussian, *args, **kwargs)
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(GaussianModel, self).__init__(gaussian, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
self.set_param_hint('height', expr=height_expr(self))
@@ -181,13 +315,35 @@ class GaussianModel(Model):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class LorentzianModel(Model):
- __doc__ = lorentzian.__doc__ + COMMON_DOC if lorentzian.__doc__ else ""
+ r"""A model based on a Lorentzian or Cauchy-Lorentz distribution function
+ (see http://en.wikipedia.org/wiki/Cauchy_distribution), with three Parameters:
+ ``amplitude``, ``center``, and ``sigma``.
+ In addition, parameters ``fwhm`` and ``height`` are included as constraints
+ to report full width at half maximum and maximum peak height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+ where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
+ :math:`\mu`, and ``sigma`` to :math:`\sigma`. The full width at
+ half maximum is :math:`2\sigma`.
+
+ """
+
fwhm_factor = 2.0
height_factor = 1./np.pi
- def __init__(self, *args, **kwargs):
- super(LorentzianModel, self).__init__(lorentzian, *args, **kwargs)
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(LorentzianModel, self).__init__(lorentzian, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
self.set_param_hint('height', expr=height_expr(self))
@@ -196,16 +352,54 @@ class LorentzianModel(Model):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class VoigtModel(Model):
- __doc__ = voigt.__doc__ + COMMON_DOC if voigt.__doc__ else ""
+ r"""A model based on a Voigt distribution function (see
+ http://en.wikipedia.org/wiki/Voigt_profile), with four Parameters:
+ ``amplitude``, ``center``, ``sigma``, and ``gamma``. By default,
+ ``gamma`` is constrained to have value equal to ``sigma``, though it
+ can be varied independently. In addition, parameters ``fwhm`` and
+ ``height`` are included as constraints to report full width at half
+ maximum and maximum peak height, respectively. The definition for the
+ Voigt function used here is
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A \textrm{Re}[w(z)]}{\sigma\sqrt{2 \pi}}
+
+ where
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ z &=& \frac{x-\mu +i\gamma}{\sigma\sqrt{2}} \\
+ w(z) &=& e^{-z^2}{\operatorname{erfc}}(-iz)
+ \end{eqnarray*}
+
+ and :func:`erfc` is the complementary error function. As above,
+ ``amplitude`` corresponds to :math:`A`, ``center`` to
+ :math:`\mu`, and ``sigma`` to :math:`\sigma`. The parameter ``gamma``
+ corresponds to :math:`\gamma`.
+ If ``gamma`` is kept at the default value (constrained to ``sigma``),
+ the full width at half maximum is approximately :math:`3.6013\sigma`.
+
+ """
+
fwhm_factor = 3.60131
height_factor = 1./np.sqrt(2*np.pi)
- def __init__(self, *args, **kwargs):
- super(VoigtModel, self).__init__(voigt, *args, **kwargs)
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(VoigtModel, self).__init__(voigt, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
- self.set_param_hint('fwhm', expr=fwhm_expr(self))
+ self.set_param_hint('fwhm', expr=fwhm_expr(self))
self.set_param_hint('height', expr=height_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
@@ -213,26 +407,74 @@ class VoigtModel(Model):
ampscale=1.5, sigscale=0.65)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class PseudoVoigtModel(Model):
- __doc__ = pvoigt.__doc__ + COMMON_DOC if pvoigt.__doc__ else ""
+ r"""A model based on a pseudo-Voigt distribution function
+ (see http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation),
+ which is a weighted sum of a Gaussian and Lorentzian distribution functions
+ that share values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
+ and full width at half maximum (and so have constrained values of
+ ``sigma`` (:math:`\sigma`). A parameter ``fraction`` (:math:`\alpha`)
+ controls the relative weight of the Gaussian and Lorentzian components,
+ giving the full definition of
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma_g\sqrt{2\pi}}
+ e^{[{-{(x-\mu)^2}/{{2\sigma_g}^2}}]}
+ + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+ where :math:`\sigma_g = {\sigma}/{\sqrt{2\ln{2}}}` so that the full width
+ at half maximum of each component and of the sum is :math:`2\sigma`. The
+ :meth:`guess` function always sets the starting value for ``fraction`` at 0.5.
+
+ """
+
fwhm_factor = 2.0
- def __init__(self, *args, **kwargs):
- super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(PseudoVoigtModel, self).__init__(pvoigt, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fraction', value=0.5)
- self.set_param_hint('fwhm', expr=fwhm_expr(self))
+ self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
pars['%sfraction' % self.prefix].set(value=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class MoffatModel(Model):
- __doc__ = moffat.__doc__ + COMMON_DOC if moffat.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(MoffatModel, self).__init__(moffat, *args, **kwargs)
+ r"""A model based on the Moffat distribution function
+ (see https://en.wikipedia.org/wiki/Moffat_distribution), with four Parameters:
+ ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), a width parameter
+ ``sigma`` (:math:`\sigma`) and an exponent ``beta`` (:math:`\beta`).
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \beta) = A \big[(\frac{x-\mu}{\sigma})^2+1\big]^{-\beta}
+
+ the full width at half maximum is :math:`2\sigma\sqrt{2^{1/\beta}-1}`.
+ The :meth:`guess` function always sets the starting value for ``beta`` to 1.
+
+ Note that for (:math:`\beta=1`) the Moffat has a Lorentzian shape.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(MoffatModel, self).__init__(moffat, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('beta')
self.set_param_hint('fwhm', expr="2*%ssigma*sqrt(2**(1.0/%sbeta)-1)" % (self.prefix, self.prefix))
@@ -241,95 +483,290 @@ class MoffatModel(Model):
pars = guess_from_peak(self, data, x, negative, ampscale=0.5, sigscale=1.)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class Pearson7Model(Model):
- __doc__ = pearson7.__doc__ + COMMON_DOC if pearson7.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(Pearson7Model, self).__init__(pearson7, *args, **kwargs)
- self.set_param_hint('expon', value=1.5)
+ r"""A model based on a Pearson VII distribution (see
+ http://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution),
+ with four parameters: ``amplitude`` (:math:`A`), ``center``
+ (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``exponent`` (:math:`m`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2} \bigr]^{-m}
+
+ where :math:`\beta` is the beta function (see :scipydoc:`special.beta` in
+ :mod:`scipy.special`). The :meth:`guess` function always
+ gives a starting value for ``exponent`` of 1.5.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(Pearson7Model, self).__init__(pearson7, **kwargs)
+ self.set_param_hint('expon', value=1.5)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sexpon' % self.prefix].set(value=1.5)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class StudentsTModel(Model):
- __doc__ = students_t.__doc__ + COMMON_DOC if students_t.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(StudentsTModel, self).__init__(students_t, *args, **kwargs)
+ r"""A model based on a Student's t distribution function (see
+ http://en.wikipedia.org/wiki/Student%27s_t-distribution), with three Parameters:
+ ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A \Gamma(\frac{\sigma+1}{2})} {\sqrt{\sigma\pi}\,\Gamma(\frac{\sigma}{2})} \Bigl[1+\frac{(x-\mu)^2}{\sigma}\Bigr]^{-\frac{\sigma+1}{2}}
+
+
+ where :math:`\Gamma(x)` is the gamma function.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(StudentsTModel, self).__init__(students_t, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class BreitWignerModel(Model):
- __doc__ = breit_wigner.__doc__ + COMMON_DOC if breit_wigner.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(BreitWignerModel, self).__init__(breit_wigner, *args, **kwargs)
+ r"""A model based on a Breit-Wigner-Fano function (see
+ http://en.wikipedia.org/wiki/Fano_resonance), with four Parameters:
+ ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
+ ``sigma`` (:math:`\sigma`), and ``q`` (:math:`q`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma, q) = \frac{A (q\sigma/2 + x - \mu)^2}{(\sigma/2)^2 + (x - \mu)^2}
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(BreitWignerModel, self).__init__(breit_wigner, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sq' % self.prefix].set(value=1.0)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class LognormalModel(Model):
- __doc__ = lognormal.__doc__ + COMMON_DOC if lognormal.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(LognormalModel, self).__init__(lognormal, *args, **kwargs)
+ r"""A model based on the Log-normal distribution function
+ (see http://en.wikipedia.org/wiki/Lognormal), with three Parameters
+ ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma``
+ (:math:`\sigma`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A e^{-(\ln(x) - \mu)/ 2\sigma^2}}{x}
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(LognormalModel, self).__init__(lognormal, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
pars['%ssigma' % self.prefix].set(min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class DampedOscillatorModel(Model):
- __doc__ = damped_oscillator.__doc__ + COMMON_DOC if damped_oscillator.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(DampedOscillatorModel, self).__init__(damped_oscillator, *args, **kwargs)
+ r"""A model based on the Damped Harmonic Oscillator Amplitude
+ (see http://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part), with
+ three Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+ ``sigma`` (:math:`\sigma`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A}{\sqrt{ [1 - (x/\mu)^2]^2 + (2\sigma x/\mu)^2}}
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(DampedOscillatorModel, self).__init__(damped_oscillator, **kwargs)
+
+ def guess(self, data, x=None, negative=False, **kwargs):
+ pars = guess_from_peak(self, data, x, negative,
+ ampscale=0.1, sigscale=0.1)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class DampedHarmonicOscillatorModel(Model):
+ r"""A model based on a variation of the Damped Harmonic Oscillator (see
+ http://en.wikipedia.org/wiki/Harmonic_oscillator), following the
+ definition given in DAVE/PAN (see https://www.ncnr.nist.gov/dave/) with
+ four Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
+ ``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A\sigma}{\pi [1 - \exp(-x/\gamma)]}
+ \Big[ \frac{1}{(x-\mu)^2 + \sigma^2} - \frac{1}{(x+\mu)^2 + \sigma^2} \Big]
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(DampedHarmonicOscillatorModel, self).__init__(dho, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
- pars =guess_from_peak(self, data, x, negative,
- ampscale=0.1, sigscale=0.1)
+ pars = guess_from_peak(self, data, x, negative,
+ ampscale=0.1, sigscale=0.1)
+ pars['%sgamma' % self.prefix].set(value=1.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
class ExponentialGaussianModel(Model):
- __doc__ = expgaussian.__doc__ + COMMON_DOC if expgaussian.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(ExponentialGaussianModel, self).__init__(expgaussian, *args, **kwargs)
+ r"""A model of an Exponentially modified Gaussian distribution
+ (see http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution) with
+ four Parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
+ ``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
+ \exp\bigl[\gamma({\mu - x + \gamma\sigma^2/2})\bigr]
+ {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
+
+
+ where :func:`erfc` is the complementary error function.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(ExponentialGaussianModel, self).__init__(expgaussian, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
class SkewedGaussianModel(Model):
- __doc__ = skewed_gaussian.__doc__ + COMMON_DOC if skewed_gaussian.__doc__ else ""
+ r"""A variation of the Exponential Gaussian, this uses a skewed normal distribution
+ (see http://en.wikipedia.org/wiki/Skew_normal_distribution), with Parameters
+ ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`),
+ and ``gamma`` (:math:`\gamma`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
+ e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
+ {\operatorname{erf}}\bigl[
+ \frac{\gamma(x-\mu)}{\sigma\sqrt{2}}
+ \bigr] \Bigr\}
+
+ where :func:`erf` is the error function.
+
+ """
+
fwhm_factor = 2.354820
- def __init__(self, *args, **kwargs):
- super(SkewedGaussianModel, self).__init__(skewed_gaussian, *args, **kwargs)
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(SkewedGaussianModel, self).__init__(skewed_gaussian, **kwargs)
self.set_param_hint('sigma', min=0)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
class DonaichModel(Model):
- __doc__ = donaich.__doc__ + COMMON_DOC if donaich.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(DonaichModel, self).__init__(donaich, *args, **kwargs)
+ r"""A model of an Doniach Sunjic asymmetric lineshape
+ (see http://www.casaxps.com/help_manual/line_shapes.htm), used in
+ photo-emission, with four Parameters ``amplitude`` (:math:`A`),
+ ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``gamma``
+ (:math:`\gamma`) in
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = A\frac{\cos\bigl[\pi\gamma/2 + (1-\gamma)
+ \arctan{(x - \mu)}/\sigma\bigr]} {\bigr[1 + (x-\mu)/\sigma\bigl]^{(1-\gamma)/2}}
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(DonaichModel, self).__init__(donaich, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class PowerLawModel(Model):
- __doc__ = powerlaw.__doc__ + COMMON_DOC if powerlaw.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(PowerLawModel, self).__init__(powerlaw, *args, **kwargs)
+ r"""A model based on a Power Law (see http://en.wikipedia.org/wiki/Power_law),
+ with two Parameters: ``amplitude`` (:math:`A`), and ``exponent`` (:math:`k`), in:
+
+ .. math::
+
+ f(x; A, k) = A x^k
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(PowerLawModel, self).__init__(powerlaw, **kwargs)
def guess(self, data, x=None, **kwargs):
try:
@@ -340,13 +777,29 @@ class PowerLawModel(Model):
pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class ExponentialModel(Model):
- __doc__ = exponential.__doc__ + COMMON_DOC if exponential.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(ExponentialModel, self).__init__(exponential, *args, **kwargs)
+ r"""A model based on an exponential decay function
+ (see http://en.wikipedia.org/wiki/Exponential_decay) with two Parameters:
+ ``amplitude`` (:math:`A`), and ``decay`` (:math:`\tau`), in:
+
+ .. math::
+
+ f(x; A, \tau) = A e^{-x/\tau}
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(ExponentialModel, self).__init__(exponential, **kwargs)
def guess(self, data, x=None, **kwargs):
+
try:
sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
except:
@@ -354,11 +807,46 @@ class ExponentialModel(Model):
pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class StepModel(Model):
- __doc__ = step.__doc__ + COMMON_DOC if step.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(StepModel, self).__init__(step, *args, **kwargs)
+ r"""A model based on a Step function, with three Parameters:
+ ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`)
+ and four choices for functional form:
+
+ - ``linear`` (the default)
+
+ - ``atan`` or ``arctan`` for an arc-tangent function
+
+ - ``erf`` for an error function
+
+ - ``logistic`` for a logistic function (see http://en.wikipedia.org/wiki/Logistic_function).
+
+ The step function starts with a value 0, and ends with a value of
+ :math:`A` rising to :math:`A/2` at :math:`\mu`, with :math:`\sigma`
+ setting the characteristic width. The forms are
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) & = A \min{[1, \max{(0, \alpha)}]} \\
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 + e^{\alpha}} ]
+ \end{eqnarray*}
+
+ where :math:`\alpha = (x - \mu)/{\sigma}`.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(StepModel, self).__init__(step, **kwargs)
def guess(self, data, x=None, **kwargs):
if x is None:
@@ -370,17 +858,59 @@ class StepModel(Model):
pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class RectangleModel(Model):
- __doc__ = rectangle.__doc__ + COMMON_DOC if rectangle.__doc__ else ""
- def __init__(self, *args, **kwargs):
- super(RectangleModel, self).__init__(rectangle, *args, **kwargs)
+ r"""A model based on a Step-up and Step-down function, with five
+ Parameters: ``amplitude`` (:math:`A`), ``center1`` (:math:`\mu_1`),
+ ``center2`` (:math:`\mu_2`), ``sigma1`` (:math:`\sigma_1`) and
+ ``sigma2`` (:math:`\sigma_2`) and four choices for functional form
+ (which is used for both the Step up and the Step down):
+
+ - ``linear`` (the default)
+
+ - ``atan`` or ``arctan`` for an arc-tangent function
+
+ - ``erf`` for an error function
+
+ - ``logistic`` for a logistic function (see http://en.wikipedia.org/wiki/Logistic_function).
+
+ The function starts with a value 0, transitions to a value of
+ :math:`A`, taking the value :math:`A/2` at :math:`\mu_1`, with :math:`\sigma_1`
+ setting the characteristic width. The function then transitions again to
+ the value :math:`A/2` at :math:`\mu_2`, with :math:`\sigma_2` setting the
+ characteristic width. The forms are
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \min{[-1, \max{(0, \alpha_2)}]} \} \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} ]
+ \end{eqnarray*}
+
+
+ where :math:`\alpha_1 = (x - \mu_1)/{\sigma_1}` and
+ :math:`\alpha_2 = -(x - \mu_2)/{\sigma_2}`.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', missing=None,
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'missing': missing,
+ 'independent_vars': independent_vars})
+ super(RectangleModel, self).__init__(rectangle, **kwargs)
self.set_param_hint('center1')
self.set_param_hint('center2')
self.set_param_hint('midpoint',
expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
self.prefix))
+
def guess(self, data, x=None, **kwargs):
if x is None:
return
@@ -393,30 +923,49 @@ class RectangleModel(Model):
pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
class ExpressionModel(Model):
- """Model from User-supplied expression
-
-Parameters
-----------
-expr: string of mathematical expression for model.
-independent_vars: list of strings to be set as variable names
-missing: None, 'drop', or 'raise'
- None: Do not check for null or missing values.
- 'drop': Drop null or missing observations in data.
- Use pandas.isnull if pandas is available; otherwise,
- silently fall back to numpy.isnan.
- 'raise': Raise a (more helpful) exception when data contains null
- or missing values.
-prefix: NOT supported for ExpressionModel
-"""
-
- idvar_missing = "No independent variable found in\n %s"
+
+ idvar_missing = "No independent variable found in\n %s"
idvar_notfound = "Cannot find independent variables '%s' in\n %s"
- no_prefix = "ExpressionModel does not support `prefix` argument"
+ no_prefix = "ExpressionModel does not support `prefix` argument"
+
def __init__(self, expr, independent_vars=None, init_script=None,
- *args, **kwargs):
+ missing=None, **kws):
+ """Model from User-supplied expression.
+
+ Parameters
+ ----------
+ expr : str
+ Mathematical expression for model.
+ independent_vars : list of strings or None, optional
+ Variable names to use as independent variables.
+ init_script : string or None, optional
+ Initial script to run in asteval interpreter.
+ missing : str or None, optional
+ How to handle NaN and missing values in data. One of:
+
+ - 'none' or None: Do not check for null or missing values (default).
+
+ - 'drop': Drop null or missing observations in data. If pandas is
+ installed, `pandas.isnull` is used, otherwise `numpy.isnan` is used.
+ - 'raise': Raise a (more helpful) exception when data contains null
+ or missing values.
+
+ **kws : optional
+ Keyword arguments to pass to :class:`Model`.
+
+ Notes
+ -----
+ 1. Each instance of ExpressionModel will create and use its own
+ version of an asteval interpreter.
+ 2. prefix is **not supported** for ExpressionModel
+
+ """
# create ast evaluator, load custom functions
self.asteval = Interpreter()
for name in lineshapes.functions:
@@ -443,7 +992,7 @@ prefix: NOT supported for ExpressionModel
for name in sym_names:
if name in independent_vars:
idvar_found[independent_vars.index(name)] = True
- elif name not in self.asteval.symtable:
+ elif name not in param_names and name not in self.asteval.symtable:
param_names.append(name)
# make sure we have all independent parameters
@@ -455,8 +1004,8 @@ prefix: NOT supported for ExpressionModel
lost = ', '.join(lost)
raise ValueError(self.idvar_notfound % (lost, self.expr))
- kwargs['independent_vars'] = independent_vars
- if 'prefix' in kwargs:
+ kws['independent_vars'] = independent_vars
+ if 'prefix' in kws:
raise Warning(self.no_prefix)
def _eval(**kwargs):
@@ -464,21 +1013,26 @@ prefix: NOT supported for ExpressionModel
self.asteval.symtable[name] = val
return self.asteval.run(self.astcode)
- super(ExpressionModel, self).__init__(_eval, *args, **kwargs)
+ kws["missing"] = missing
+
+ super(ExpressionModel, self).__init__(_eval, **kws)
# set param names here, and other things normally
# set in _parse_params(), which will be short-circuited.
self.independent_vars = independent_vars
self._func_allargs = independent_vars + param_names
- self._param_names = set(param_names)
+ self._param_names = param_names
self._func_haskeywords = True
self.def_vals = {}
def __repr__(self):
- return "<lmfit.ExpressionModel('%s')>" % (self.expr)
+ """TODO: docstring in magic method."""
+ return "<lmfit.ExpressionModel('%s')>" % (self.expr)
def _parse_params(self):
- """ExpressionModel._parse_params is over-written (as `pass`)
- to prevent normal parsing of function for parameter names
+ """Over-write ExpressionModel._parse_params with `pass`.
+
+ This prevents normal parsing of function for parameter names.
+
"""
pass
diff --git a/lmfit/ordereddict.py b/lmfit/ordereddict.py
deleted file mode 100644
index 2d1d813..0000000
--- a/lmfit/ordereddict.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright (c) 2009 Raymond Hettinger
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-
-from UserDict import DictMixin
-
-
-class OrderedDict(dict, DictMixin):
-
- def __init__(self, *args, **kwds):
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__end
- except AttributeError:
- self.clear()
- self.update(*args, **kwds)
-
- def clear(self):
- self.__end = end = []
- end += [None, end, end] # sentinel node for doubly linked list
- self.__map = {} # key --> [key, prev, next]
- dict.clear(self)
-
- def __setitem__(self, key, value):
- if key not in self:
- end = self.__end
- curr = end[1]
- curr[2] = end[1] = self.__map[key] = [key, curr, end]
- dict.__setitem__(self, key, value)
-
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- key, prev, next = self.__map.pop(key)
- prev[2] = next
- next[1] = prev
-
- def __iter__(self):
- end = self.__end
- curr = end[2]
- while curr is not end:
- yield curr[0]
- curr = curr[2]
-
- def __reversed__(self):
- end = self.__end
- curr = end[1]
- while curr is not end:
- yield curr[0]
- curr = curr[1]
-
- def popitem(self, last=True):
- if not self:
- raise KeyError('dictionary is empty')
- if last:
- key = reversed(self).next()
- else:
- key = iter(self).next()
- value = self.pop(key)
- return key, value
-
- def __reduce__(self):
- items = [[k, self[k]] for k in self]
- tmp = self.__map, self.__end
- del self.__map, self.__end
- inst_dict = vars(self).copy()
- self.__map, self.__end = tmp
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def keys(self):
- return list(self)
-
- setdefault = DictMixin.setdefault
- update = DictMixin.update
- pop = DictMixin.pop
- values = DictMixin.values
- items = DictMixin.items
- iterkeys = DictMixin.iterkeys
- itervalues = DictMixin.itervalues
- iteritems = DictMixin.iteritems
-
- def __repr__(self):
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
-
- def copy(self):
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- if isinstance(other, OrderedDict):
- if len(self) != len(other):
- return False
- for p, q in zip(self.items(), other.items()):
- if p != q:
- return False
- return True
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 9750036..db3a633 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -1,46 +1,42 @@
-"""
-Parameter class
-"""
+"""Parameter class."""
from __future__ import division
-import json
+
+from collections import OrderedDict
from copy import deepcopy
-try:
- from collections import OrderedDict
-except ImportError:
- from ordereddict import OrderedDict
+import json
+
+from numpy import arcsin, array, cos, inf, isfinite, nan, sin, sqrt
-from numpy import array, arcsin, cos, sin, sqrt, inf, nan, isfinite
from . import uncertainties
from .asteval import Interpreter
from .astutils import get_ast_names, valid_symbol_name
def check_ast_errors(expr_eval):
- """check for errors derived from asteval"""
+ """Check for errors derived from asteval."""
if len(expr_eval.error) > 0:
expr_eval.raise_exception(None)
def isclose(x, y, rtol=1e-5, atol=1e-8):
- """
- The truth whether two numbers are the same, within an absolute and
- relative tolerance.
+ """Check whether two numbers are the same within a tolerance.
- i.e. abs(`x` - `y`) <= (`atol` + `rtol` * absolute(`y`))
+ abs(`x` - `y`) <= (`atol` + `rtol` * abs(`y`))
Parameters
----------
x, y : float
- Input values
- rtol : float
- The relative tolerance parameter (see Notes).
- atol : float
- The absolute tolerance parameter (see Notes).
+ Input values.
+ rtol : float, optional
+ The relative tolerance parameter.
+ atol : float, optional
+ The absolute tolerance parameter.
Returns
-------
- y : bool
- Are `x` and `x` are equal within tolerance?
+ bool
+ True if `x` and `y` are the same within tolerance, otherwise False.
+
"""
def within_tol(x, y, atol, rtol):
return abs(x - y) <= atol + rtol * abs(y)
@@ -58,21 +54,38 @@ def isclose(x, y, rtol=1e-5, atol=1e-8):
class Parameters(OrderedDict):
- """
- A dictionary of all the Parameters required to specify a fit model.
+ """An ordered dictionary of all the Parameter objects required to
+ specify a fit model. All minimization and Model fitting routines in
+ lmfit will use exactly one Parameters object, typically given as the
+ first argument to the objective function.
+
+ All keys of a Parameters() instance must be strings and valid Python
+ symbol names, so that the name must match ``[a-z_][a-z0-9_]*`` and
+ cannot be a Python reserved word.
+
+ All values of a Parameters() instance must be Parameter objects.
- All keys must be strings, and valid Python symbol names, and all values
- must be Parameters.
+ A Parameters() instance includes an asteval interpreter used for
+ evaluation of constrained Parameters.
- Custom methods:
- ---------------
+ Parameters() support copying and pickling, and have methods to convert
+ to and from serializations using json strings.
- add()
- add_many()
- dumps() / dump()
- loads() / load()
"""
+
def __init__(self, asteval=None, *args, **kwds):
+ """
+ Arguments
+ ---------
+ asteval : :class:`asteval.Interpreter`, optional
+ Instance of the asteval Interpreter to use for constraint
+ expressions. If None, a new interpreter will be created.
+ *args : optional
+ Arguments.
+ **kwds : optional
+ Keyword arguments.
+
+ """
super(Parameters, self).__init__(self)
self._asteval = asteval
@@ -81,23 +94,21 @@ class Parameters(OrderedDict):
self.update(*args, **kwds)
def copy(self):
- """Parameters.copy() should always be a deepcopy"""
+ """Parameters.copy() should always be a deepcopy."""
return self.__deepcopy__(None)
- def __copy__(self, memo):
- """Parameters.copy() should always be a deepcopy"""
- self.__deepcopy__(memo)
+ def __copy__(self):
+ """Parameters.copy() should always be a deepcopy."""
+ return self.__deepcopy__(None)
def __deepcopy__(self, memo):
- """Parameters deepcopy needs to make sure that
- asteval is available and that all individula
- parameter objects are copied"""
+ """Parameters.deepcopy() needs to make sure that asteval is available
+ and that all individual Parameter objects are copied."""
_pars = Parameters(asteval=None)
# find the symbols that were added by users, not during construction
- sym_unique = self._asteval.user_defined_symbols()
- unique_symbols = {key: deepcopy(self._asteval.symtable[key], memo)
- for key in sym_unique}
+ unique_symbols = {key: self._asteval.symtable[key]
+ for key in self._asteval.user_defined_symbols()}
_pars._asteval.symtable.update(unique_symbols)
# we're just about to add a lot of Parameter objects to the newly
@@ -109,10 +120,12 @@ class Parameters(OrderedDict):
min=par.min,
max=par.max)
param.vary = par.vary
+ param.brute_step = par.brute_step
param.stderr = par.stderr
param.correl = par.correl
param.init_value = par.init_value
param.expr = par.expr
+ param.user_data = par.user_data
parameter_list.append(param)
_pars.add_many(*parameter_list)
@@ -120,6 +133,7 @@ class Parameters(OrderedDict):
return _pars
def __setitem__(self, key, par):
+ """TODO: add magic method docstring."""
if key not in self:
if not valid_symbol_name(key):
raise KeyError("'%s' is not a valid Parameters name" % key)
@@ -131,9 +145,7 @@ class Parameters(OrderedDict):
self._asteval.symtable[key] = par.value
def __add__(self, other):
- """
- Add Parameters objects
- """
+ """Add Parameters objects."""
if not isinstance(other, Parameters):
raise ValueError("'%s' is not a Parameters object" % other)
out = deepcopy(self)
@@ -142,9 +154,7 @@ class Parameters(OrderedDict):
return out
def __iadd__(self, other):
- """
- Add/assign Parameters objects
- """
+ """Add/assign Parameters objects."""
if not isinstance(other, Parameters):
raise ValueError("'%s' is not a Parameters object" % other)
params = other.values()
@@ -152,15 +162,11 @@ class Parameters(OrderedDict):
return self
def __array__(self):
- """
- Parameters to array
- """
+ """Convert Parameters to array."""
return array([float(k) for k in self.values()])
def __reduce__(self):
- """
- Required to pickle a Parameters instance.
- """
+ """Reduce Parameters instance such that it can be pickled."""
# make a list of all the parameters
params = [self[k] for k in self]
@@ -173,8 +179,7 @@ class Parameters(OrderedDict):
'params': params}
def __setstate__(self, state):
- """
- Unpickle a Parameters instance.
+ """Unpickle a Parameters instance.
Parameters
----------
@@ -182,6 +187,7 @@ class Parameters(OrderedDict):
state['unique_symbols'] is a dictionary containing symbols that
need to be injected into _asteval.symtable
state['params'] is a list of Parameter instances to be added
+
"""
# first update the Interpreter symbol table. This needs to be done
# first because Parameter's early in the list may depend on later
@@ -197,20 +203,19 @@ class Parameters(OrderedDict):
self.add_many(*state['params'])
def update_constraints(self):
- """
- Update all constrained parameters, checking that dependencies are
- evaluated as needed.
- """
+ """Update all constrained parameters, checking that dependencies are
+ evaluated as needed."""
requires_update = set(name for name, par in self.items()
if par._expr is not None)
updated_tracker = set(requires_update)
def _update_param(name):
- """
- Update a parameter value, including setting bounds.
- For a constrained parameter (one with an expr defined),
- this first updates (recursively) all parameters on which
- the parameter depends (using the 'deps' field).
+ """Update a parameter value, including setting bounds.
+
+ For a constrained parameter (one with an `expr` defined),
+ this first updates (recursively) all parameters on which the
+ parameter depends (using the 'deps' field).
+
"""
par = self.__getitem__(name)
if par._expr_eval is None:
@@ -225,6 +230,20 @@ class Parameters(OrderedDict):
_update_param(name)
def pretty_repr(self, oneline=False):
+ """Return a pretty representation of a Parameters class.
+
+ Parameters
+ ----------
+ oneline : bool, optional
+ If True prints a one-line parameters representation (default is
+ False).
+
+ Returns
+ -------
+ s : str
+ Parameters representation.
+
+ """
if oneline:
return super(Parameters, self).__repr__()
s = "Parameters({\n"
@@ -234,23 +253,25 @@ class Parameters(OrderedDict):
return s
def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
- columns=['value', 'min', 'max', 'stderr', 'vary', 'expr']):
- """Pretty-print parameters data.
+ columns=['value', 'min', 'max', 'stderr', 'vary', 'expr',
+ 'brute_step']):
+ """Pretty-print of parameters data.
Parameters
----------
- oneline : boolean
- If True prints a one-line parameters representation. Default False.
- colwidth : int
- column width for all except the first (i.e. name) column.
- columns : list of strings
- list of columns names to print. All values must be valid
- :class:`Parameter` attributes.
- fmt : string
- single-char numeric formatter. Valid values: 'f' floating point,
- 'g' floating point and exponential, 'e' exponential.
- precision : int
- number of digits to be printed after floating point.
+ oneline : bool, optional
+ If True prints a one-line parameters representation (default is
+ False).
+ colwidth : int, optional
+ Column width for all columns specified in :attr:`columns`.
+ precision : int, optional
+ Number of digits to be printed after floating point.
+ fmt : {'g', 'e', 'f'}, optional
+ Single-character numeric formatter. Valid values are: 'f' floating
+ point, 'g' floating point and exponential, or 'e' exponential.
+ columns : :obj:`list` of :obj:`str`, optional
+ List of :class:`Parameter` attribute names to print.
+
"""
if oneline:
print(self.pretty_repr(oneline=oneline))
@@ -262,7 +283,8 @@ class Parameters(OrderedDict):
print(title.format(*allcols, name_len=name_len, n=colwidth).title())
numstyle = '{%s:>{n}.{p}{f}}' # format for numeric columns
otherstyles = dict(name='{name:<{name_len}} ', stderr='{stderr!s:>{n}}',
- vary='{vary!s:>{n}}', expr='{expr!s:>{n}}')
+ vary='{vary!s:>{n}}', expr='{expr!s:>{n}}',
+ brute_step='{brute_step!s:>{n}}')
line = ' '.join([otherstyles.get(k, numstyle % k) for k in allcols])
for name, values in sorted(self.items()):
pvalues = {k: getattr(values, k) for k in columns}
@@ -271,53 +293,78 @@ class Parameters(OrderedDict):
if 'stderr' in columns and pvalues['stderr'] is not None:
pvalues['stderr'] = (numstyle % '').format(
pvalues['stderr'], n=colwidth, p=precision, f=fmt)
- print(line.format(name_len=name_len, n=colwidth, p=precision, f=fmt,
- **pvalues))
+ elif 'brute_step' in columns and pvalues['brute_step'] is not None:
+ pvalues['brute_step'] = (numstyle % '').format(
+ pvalues['brute_step'], n=colwidth, p=precision, f=fmt)
+ print(line.format(name_len=name_len, n=colwidth, p=precision,
+ f=fmt, **pvalues))
- def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None):
- """
- Convenience function for adding a Parameter:
+ def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None,
+ brute_step=None):
+ """Add a Parameter.
- Example
- -------
- p = Parameters()
- p.add(name, value=XX, ...)
+ Parameters
+ ----------
+ name : str
+ Name of parameter. Must match ``[a-z_][a-z0-9_]*`` and cannot be
+ a Python reserved word.
+ value : float, optional
+ Numerical Parameter value, typically the *initial value*.
+ vary : bool, optional
+ Whether the Parameter is varied during a fit (default is True).
+ min : float, optional
+ Lower bound for value (default is `-numpy.inf`, no lower bound).
+ max : float, optional
+ Upper bound for value (default is `numpy.inf`, no upper bound).
+ expr : str, optional
+ Mathematical expression used to constrain the value during the fit.
+ brute_step : float, optional
+ Step size for grid points in the `brute` method.
+
+ Examples
+ --------
+ >>> params = Parameters()
+ >>> params.add('xvar', value=0.50, min=0, max=1)
+ >>> params.add('yvar', expr='1.0 - xvar')
+
+ which is equivalent to:
+
+ >>> params = Parameters()
+ >>> params['xvar'] = Parameter(name='xvar', value=0.50, min=0, max=1)
+ >>> params['yvar'] = Parameter(name='yvar', expr='1.0 - xvar')
- is equivalent to:
- p[name] = Parameter(name=name, value=XX, ....
"""
if isinstance(name, Parameter):
self.__setitem__(name.name, name)
else:
self.__setitem__(name, Parameter(value=value, name=name, vary=vary,
- min=min, max=max, expr=expr))
+ min=min, max=max, expr=expr,
+ brute_step=brute_step))
def add_many(self, *parlist):
- """
- Convenience function for adding a list of Parameters.
+ """Add many parameters, using a sequence of tuples.
Parameters
----------
- parlist : sequence
- A sequence of tuples, or a sequence of `Parameter` instances. If it
- is a sequence of tuples, then each tuple must contain at least the
- name. The order in each tuple is the following:
+ parlist : :obj:`sequence` of :obj:`tuple` or :class:`Parameter`
+ A sequence of tuples, or a sequence of `Parameter` instances. If
+ it is a sequence of tuples, then each tuple must contain at least
+ the name. The order in each tuple must be `(name, value, vary,
+ min, max, expr, brute_step)`.
- name, value, vary, min, max, expr
+ Examples
+ --------
+ >>> params = Parameters()
+ # add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP)
+ >>> params.add_many(('amp', 10, True, None, None, None, None),
+ ... ('cen', 4, True, 0.0, None, None, None),
+ ... ('wid', 1, False, None, None, None, None),
+ ... ('frac', 0.5))
+ # add a sequence of Parameters
+ >>> f = Parameter('par_f', 100)
+ >>> g = Parameter('par_g', 2.)
+ >>> params.add_many(f, g)
- Example
- -------
- p = Parameters()
- # add a sequence of tuples
- p.add_many( (name1, val1, True, None, None, None),
- (name2, val2, True, 0.0, None, None),
- (name3, val3, False, None, None, None),
- (name4, val4))
-
- # add a sequence of Parameter
- f = Parameter('name5', val5)
- g = Parameter('name6', val6)
- p.add_many(f, g)
"""
for para in parlist:
if isinstance(para, Parameter):
@@ -327,42 +374,59 @@ class Parameters(OrderedDict):
self.__setitem__(param.name, param)
def valuesdict(self):
- """
+ """Return an ordered dictionary of parameter values.
+
Returns
-------
- An ordered dictionary of name:value pairs for each Parameter.
- This is distinct from the Parameters itself, as it has values of
- the Parameter values, not the full Parameter object.
- """
+ OrderedDict
+ An ordered dictionary of :attr:`name`::attr:`value` pairs for each
+ Parameter.
+ """
return OrderedDict(((p.name, p.value) for p in self.values()))
def dumps(self, **kws):
- """represent Parameters as a JSON string.
+ """Represent Parameters as a JSON string.
- all keyword arguments are passed to `json.dumps()`
+ Parameters
+ ----------
+ **kws : optional
+ Keyword arguments that are passed to `json.dumps()`.
Returns
-------
- json string representation of Parameters
+ str
+ JSON string representation of Parameters.
See Also
--------
dump(), loads(), load(), json.dumps()
+
"""
- out = [p.__getstate__() for p in self.values()]
- return json.dumps(out, **kws)
+ params = [p.__getstate__() for p in self.values()]
+ sym_unique = self._asteval.user_defined_symbols()
+ unique_symbols = {key: deepcopy(self._asteval.symtable[key])
+ for key in sym_unique}
+ return json.dumps({'unique_symbols': unique_symbols,
+ 'params': params}, **kws)
def loads(self, s, **kws):
- """load Parameters from a JSON string.
-
- current Parameters will be cleared before loading.
+ """Load Parameters from a JSON string.
- all keyword arguments are passed to `json.loads()`
+ Parameters
+ ----------
+ **kws : optional
+ Keyword arguments that are passed to `json.loads()`.
Returns
-------
- None. Parameters are updated as a side-effect
+ :class:`Parameters`
+ Updated Parameters from the JSON string.
+
+ Notes
+ -----
+ Current Parameters will be cleared before loading the data from the
+ JSON string.
See Also
--------
@@ -370,99 +434,125 @@ class Parameters(OrderedDict):
"""
self.clear()
- for parstate in json.loads(s, **kws):
+
+ tmp = json.loads(s, **kws)
+ state = {'unique_symbols': tmp['unique_symbols'],
+ 'params': []}
+ for parstate in tmp['params']:
_par = Parameter()
_par.__setstate__(parstate)
- self.__setitem__(parstate[0], _par)
+ state['params'].append(_par)
+ self.__setstate__(state)
+ return self
def dump(self, fp, **kws):
- """write JSON representation of Parameters to a file
- or file-like object (must have a `write()` method).
+ """Write JSON representation of Parameters to a file-like object.
- Arguments
- ---------
- fp open file-like object with `write()` method.
-
- all keyword arguments are passed to `dumps()`
+ Parameters
+ ----------
+ fp : file-like object
+ An open and ``.write()``-supporting file-like object.
+ **kws : optional
+ Keyword arguments that are passed to `dumps()`.
Returns
-------
- return value from `fp.write()`
+ None or int
+ Return value from `fp.write()`. None for Python 2.7 and the
+ number of characters written in Python 3.
See Also
--------
dump(), load(), json.dump()
+
"""
return fp.write(self.dumps(**kws))
def load(self, fp, **kws):
- """load JSON representation of Parameters from a file
- or file-like object (must have a `read()` method).
+ """Load JSON representation of Parameters from a file-like object.
- Arguments
- ---------
- fp open file-like object with `read()` method.
-
- all keyword arguments are passed to `loads()`
+ Parameters
+ ----------
+ fp : file-like object
+ An open and ``.read()``-supporting file-like object.
+ **kws : optional
+ Keyword arguments that are passed to `loads()`.
Returns
-------
- None. Parameters are updated as a side-effect
+ :class:`Parameters`
+ Updated Parameters loaded from `fp`.
See Also
--------
dump(), loads(), json.load()
+
"""
return self.loads(fp.read(), **kws)
class Parameter(object):
+ """A Parameter is an object that can be varied in a fit, or one of the
+ controlling variables in a model. It is a central component of lmfit,
+ and all minimization and modeling methods use Parameter objects.
+
+ A Parameter has a `name` attribute, and a scalar floating point
+ `value`. It also has a `vary` attribute that describes whether the
+ value should be varied during the minimization. Finite bounds can be
+ placed on the Parameter's value by setting its `min` and/or `max`
+ attributes. A Parameter can also have its value determined by a
+ mathematical expression of other Parameter values held in the `expr`
+ attribute. Additional attributes include `brute_step` used as the step
+ size in a brute-force minimization, and `user_data` reserved
+ exclusively for user's need.
+
+ After a minimization, a Parameter may also gain other attributes,
+ including `stderr` holding the estimated standard error in the
+ Parameter's value, and `correl`, a dictionary of correlation values
+ with other Parameters used in the minimization.
+
"""
- A Parameter is an object used to define a Fit Model.
- Attributes
- ----------
- name : str
- Parameter name.
- value : float
- The numerical value of the Parameter.
- vary : bool
- Whether the Parameter is fixed during a fit.
- min : float
- Lower bound for value (None or -inf means no lower bound).
- max : float
- Upper bound for value (None or inf means no upper bound).
- expr : str
- An expression specifying constraints for the parameter.
- stderr : float
- The estimated standard error for the best-fit value.
- correl : dict
- Specifies correlation with the other fitted Parameter after a fit.
- Of the form `{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}`
- """
- def __init__(self, name=None, value=None, vary=True,
- min=-inf, max=inf, expr=None):
+
+ def __init__(self, name=None, value=None, vary=True, min=-inf, max=inf,
+ expr=None, brute_step=None, user_data=None):
"""
Parameters
----------
name : str, optional
- Name of the parameter.
+ Name of the Parameter.
value : float, optional
Numerical Parameter value.
vary : bool, optional
- Whether the Parameter is fixed during a fit.
+ Whether the Parameter is varied during a fit (default is True).
min : float, optional
- Lower bound for value (None or -inf means no lower bound).
+ Lower bound for value (default is `-numpy.inf`, no lower bound).
max : float, optional
- Upper bound for value (None or inf means no upper bound).
+ Upper bound for value (default is `numpy.inf`, no upper bound).
expr : str, optional
Mathematical expression used to constrain the value during the fit.
+ brute_step : float, optional
+ Step size for grid points in the `brute` method.
+ user_data : optional
+ User-definable extra attribute used for a Parameter.
+
+ Attributes
+ ----------
+ stderr : float
+ The estimated standard error for the best-fit value.
+ correl : dict
+ A dictionary of the correlation with the other fitted Parameters
+ of the form::
+
+ `{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}`
+
"""
self.name = name
self._val = value
- self.user_value = value
+ self.user_data = user_data
self.init_value = value
self.min = min
self.max = max
+ self.brute_step = brute_step
self.vary = vary
self._expr = expr
self._expr_ast = None
@@ -474,39 +564,80 @@ class Parameter(object):
self.from_internal = lambda val: val
self._init_bounds()
- def set(self, value=None, vary=None, min=-inf, max=inf, expr=None):
- """
- Set or update Parameter attributes.
+ def set(self, value=None, vary=None, min=None, max=None, expr=None,
+ brute_step=None):
+ """Set or update Parameter attributes.
Parameters
----------
value : float, optional
Numerical Parameter value.
vary : bool, optional
- Whether the Parameter is fixed during a fit.
+ Whether the Parameter is varied during a fit.
min : float, optional
- Lower bound for value. To remove a lower bound you must use -np.inf
+ Lower bound for value. To remove a lower bound you must use
+ `-numpy.inf`.
max : float, optional
- Upper bound for value. To remove an upper bound you must use np.inf
+ Upper bound for value. To remove an upper bound you must use
+ `numpy.inf`.
expr : str, optional
Mathematical expression used to constrain the value during the fit.
To remove a constraint you must supply an empty string.
- """
+ brute_step : float, optional
+ Step size for grid points in the `brute` method. To remove the
+ step size you must use ``0``.
+
+ Notes
+ -----
+ Each argument to `set()` has a default value of `None`, which will
+ leave the current value for the attribute unchanged. Thus, to lift a
+ lower or upper bound, passing in `None` will not work. Instead,
+ you must set these to `-numpy.inf` or `numpy.inf`, as with::
+
+ par.set(min=None) # leaves lower bound unchanged
+ par.set(min=-numpy.inf) # removes lower bound
+
+ Similarly, to clear an expression, pass a blank string, (not
+ ``None``!) as with::
+
+ par.set(expr=None) # leaves expression unchanged
+ par.set(expr='') # removes expression
+
+ Explicitly setting a value or setting `vary=True` will also
+ clear the expression.
- self.__set_expression(expr)
+ Finally, to clear the brute_step size, pass ``0``, not ``None``::
+
+ par.set(brute_step=None) # leaves brute_step unchanged
+ par.set(brute_step=0) # removes brute_step
+
+ """
if value is not None:
- self._val = value
+ self.value = value
+ self.__set_expression('')
+
if vary is not None:
self.vary = vary
- if min is None:
- min = -inf
- if max is None:
- max = inf
- self.min = min
- self.max = max
+ if vary:
+ self.__set_expression('')
+
+ if min is not None:
+ self.min = min
+
+ if max is not None:
+ self.max = max
+
+ if expr is not None:
+ self.__set_expression(expr)
+
+ if brute_step is not None:
+ if brute_step == 0.0:
+ self.brute_step = None
+ else:
+ self.brute_step = brute_step
def _init_bounds(self):
- """make sure initial bounds are self-consistent"""
+ """Make sure initial bounds are self-consistent."""
# _val is None means - infinity.
if self.max is None:
self.max = inf
@@ -527,14 +658,16 @@ class Parameter(object):
self.setup_bounds()
def __getstate__(self):
- """get state for pickle"""
+ """Get state for pickle."""
return (self.name, self.value, self.vary, self.expr, self.min,
- self.max, self.stderr, self.correl, self.init_value)
+ self.max, self.brute_step, self.stderr, self.correl,
+ self.init_value, self.user_data)
def __setstate__(self, state):
- """set state for pickle"""
- (self.name, self.value, self.vary, self.expr, self.min,
- self.max, self.stderr, self.correl, self.init_value) = state
+ """Set state for pickle."""
+ (self.name, self.value, self.vary, self.expr, self.min, self.max,
+ self.brute_step, self.stderr, self.correl, self.init_value,
+ self.user_data) = state
self._expr_ast = None
self._expr_eval = None
self._expr_deps = []
@@ -542,6 +675,7 @@ class Parameter(object):
self._init_bounds()
def __repr__(self):
+ """Returns printable representation of a Parameter object."""
s = []
if self.name is not None:
s.append("'%s'" % self.name)
@@ -554,16 +688,17 @@ class Parameter(object):
s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
if self._expr is not None:
s.append("expr='%s'" % self.expr)
+ if self.brute_step is not None:
+ s.append("brute_step=%s" % (self.brute_step))
return "<Parameter %s>" % ', '.join(s)
def setup_bounds(self):
- """
- Set up Minuit-style internal/external parameter transformation
- of min/max bounds.
+ """Set up Minuit-style internal/external parameter transformation of
+ min/max bounds.
As a side-effect, this also defines the self.from_internal method
used to re-calculate self.value from the internal value, applying
- the inverse Minuit-style transformation. This method should be
+ the inverse Minuit-style transformation. This method should be
called prior to passing a Parameter to the user-defined objective
function.
@@ -571,9 +706,11 @@ class Parameter(object):
Returns
-------
- The internal value for parameter from self.value (which holds
- the external, user-expected value). This internal value should
- actually be used in a fit.
+ _val : float
+ The internal value for parameter from self.value (which holds
+ the external, user-expected value). This internal value should
+ actually be used in a fit.
+
"""
if self.min is None:
self.min = -inf
@@ -595,11 +732,19 @@ class Parameter(object):
return _val
def scale_gradient(self, val):
- """
+ """Return scaling factor for gradient.
+
+ Parameters
+ ----------
+ val: float
+ Numerical Parameter value.
+
Returns
-------
- scaling factor for gradient the according to Minuit-style
- transformation.
+ float
+ Scaling factor for gradient according to the Minuit-style
+ transformation.
+
"""
if self.min == -inf and self.max == inf:
return 1.0
@@ -611,16 +756,15 @@ class Parameter(object):
return cos(val) * (self.max - self.min) / 2.0
def _getval(self):
- """get value, with bounds applied"""
-
+ """Get value, with bounds applied."""
# Note assignment to self._val has been changed to self.value
# The self.value property setter makes sure that the
# _expr_eval.symtable is kept updated.
# If you just assign to self._val then
# _expr_eval.symtable[self.name]
# becomes stale if parameter.expr is not None.
- if (isinstance(self._val, uncertainties.Variable)
- and self._val is not nan):
+ if (isinstance(self._val, uncertainties.Variable) and
+ self._val is not nan):
try:
self.value = self._val.nominal_value
@@ -638,28 +782,27 @@ class Parameter(object):
self.value = self._expr_eval(self._expr_ast)
check_ast_errors(self._expr_eval)
- v = self._val
- if v > self.max:
- v = self.max
- if v < self.min:
- v = self.min
- self.value = self._val = v
+ if self._val is not None:
+ if self._val > self.max:
+ self._val = self.max
+ elif self._val < self.min:
+ self._val = self.min
+ if self._expr_eval is not None:
+ self._expr_eval.symtable[self.name] = self._val
return self._val
def set_expr_eval(self, evaluator):
- """set expression evaluator instance"""
+ """Set expression evaluator instance."""
self._expr_eval = evaluator
@property
def value(self):
- """The numerical value of the Parameter, with bounds applied"""
+ """Return the numerical value of the Parameter, with bounds applied."""
return self._getval()
@value.setter
def value(self, val):
- """
- Set the numerical Parameter value.
- """
+ """Set the numerical Parameter value."""
self._val = val
if not hasattr(self, '_expr_eval'):
self._expr_eval = None
@@ -668,16 +811,17 @@ class Parameter(object):
@property
def expr(self):
- """
- The mathematical expression used to constrain the value during the fit.
- """
+ """Return the mathematical expression used to constrain the value
+ during the fit."""
return self._expr
@expr.setter
def expr(self, val):
- """
- The mathematical expression used to constrain the value during the fit.
+ """Set the mathematical expression used to constrain the value during
+ the fit.
+
To remove a constraint you must supply an empty string.
+
"""
self.__set_expression(val)
@@ -750,7 +894,7 @@ class Parameter(object):
return self._getval() // other
def __divmod__(self, other):
- """divmod"""
+ """divmod."""
return divmod(self._getval(), other)
def __mod__(self, other):
@@ -824,6 +968,6 @@ class Parameter(object):
def isParameter(x):
- """Test for Parameter-ness"""
+ """Test for Parameter-ness."""
return (isinstance(x, Parameter) or
x.__class__.__name__ == 'Parameter')
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index 11ae463..9cb08a5 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -1,32 +1,19 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Apr 20 19:24:21 2012
-
-@author: Tillsten
-
-Changes:
- - 13-Feb-2013 M Newville
- complemented "report_errors" and "report_ci" with
- "error_report" and "ci_report" (respectively) which
- return the text of the report. Thus report_errors()
- is simply:
- def report_errors(params, modelpars=None, show_correl=True):
- print error_report(params, modelpars=modelpars,
- show_correl=show_correl)
- and similar for report_ci() / ci_report()
+"""Functions to display fitting results and confidence intervals."""
+from __future__ import print_function
-"""
+import re
-from __future__ import print_function
from .parameter import Parameters
-import re
+
def alphanumeric_sort(s, _nsre=re.compile('([0-9]+)')):
+ """Sort alphanumeric string."""
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
+
def getfloat_attr(obj, attr, fmt='%.3f'):
- "format an attribute of an object for printing"
+ """Format an attribute of an object for printing."""
val = getattr(obj, attr, None)
if val is None:
return 'unknown'
@@ -37,10 +24,13 @@ def getfloat_attr(obj, attr, fmt='%.3f'):
else:
return repr(val)
+
def gformat(val, length=11):
- """format a number with '%g'-like format, except that
- the return will be length ``length`` (default=12)
- and have at least length-6 significant digits
+ """Format a number with '%g'-like format.
+
+ The return will be length ``length`` (default is 11) and have at
+ least length-6 significant digits.
+
"""
length = max(length, 7)
fmt = '{0: .%ig}' % (length-6)
@@ -60,29 +50,43 @@ def gformat(val, length=11):
fmt = '{0: .%ig}' % (length-1)
out = fmt.format(val)[:length]
if len(out) < length:
- pad = '0' if '.' in out else ' '
+ pad = '0' if '.' in out else ' '
out += pad*(length-len(out))
return out
+
CORREL_HEAD = '[[Correlations]] (unreported correlations are < % .3f)'
+
def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
sort_pars=False):
- """return text of a report for fitted params best-fit values,
- uncertainties and correlations
+ """Generate a report of the fitting results.
+
+ The report contains the best-fit values for the parameters and their
+ uncertainties and correlations.
- arguments
+ Parameters
----------
- inpars Parameters from fit or Minizer object returned from a fit.
- modelpars Optional Known Model Parameters [None]
- show_correl whether to show list of sorted correlations [True]
- min_correl smallest correlation absolute value to show [0.1]
- sort_pars If True, then fit_report will show parameter names
- sorted in alphanumerical order. If False, then the
- parameters will be listed in the order they were added to
- the Parameters dictionary. If sort_pars is callable, then
- this (one argument) function is used to extract a
- comparison key from each list element.
+ inpars : Parameters
+ Input Parameters from fit or MinimizerResult returned from a fit.
+ modelpars : Parameters, optional
+ Known Model Parameters.
+ show_correl : bool, optional
+ Whether to show list of sorted correlations (default is True).
+ min_correl : float, optional
+ Smallest correlation in absolute value to show (default is 0.1).
+ sort_pars : bool or callable, optional
+ Whether to show parameter names sorted in alphanumerical order. If
+ False (default), then the parameters will be listed in the order they
+ were added to the Parameters dictionary. If callable, then this (one
+ argument) function is used to extract a comparison key from each
+ list element.
+
+ Returns
+ -------
+ string
+ Multi-line text of fit report.
+
"""
if isinstance(inpars, Parameters):
result, params = None, inpars
@@ -97,8 +101,8 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
key = alphanumeric_sort
parnames = sorted(params, key=key)
else:
- # dict.keys() returns a KeysView in py3, and they're indexed further
- # down
+ # dict.keys() returns a KeysView in py3, and they're indexed
+ # further down
parnames = list(params.keys())
buff = []
@@ -146,7 +150,6 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
add(" %s % .7g (fixed)" % (nout, par.value))
if show_correl:
- add(CORREL_HEAD % min_correl)
correls = {}
for i, name in enumerate(parnames):
par = params[name]
@@ -155,11 +158,13 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
if hasattr(par, 'correl') and par.correl is not None:
for name2 in parnames[i+1:]:
if (name != name2 and name2 in par.correl and
- abs(par.correl[name2]) > min_correl):
+ abs(par.correl[name2]) > min_correl):
correls["%s, %s" % (name, name2)] = par.correl[name2]
sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
sort_correl.reverse()
+ if len(sort_correl) > 0:
+ add(CORREL_HEAD % min_correl)
for name, val in sort_correl:
lspace = max(1, 25 - len(name))
add(' C(%s)%s = % .3f ' % (name, (' '*30)[:lspace], val))
@@ -167,45 +172,49 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
def report_errors(params, **kws):
- """print a report for fitted params: see error_report()"""
+ """Print a report for fitted params: see error_report()."""
print(fit_report(params, **kws))
def report_fit(params, **kws):
- """print a report for fitted params: see error_report()"""
+ """Print a report for fitted params: see error_report()."""
print(fit_report(params, **kws))
def ci_report(ci, with_offset=True, ndigits=5):
- """return text of a report for confidence intervals
+ """Return text of a report for confidence intervals.
Parameters
----------
- with_offset : bool (default `True`)
- whether to subtract best value from all other values.
- ndigits : int (default 5)
- number of significant digits to show
+ with_offset : bool, optional
+ Whether to subtract best value from all other values (default is True).
+ ndigits : int, optional
+ Number of significant digits to show (default is 5).
Returns
-------
- text of formatted report on confidence intervals.
+ str
+ Text of formatted report on confidence intervals.
+
"""
maxlen = max([len(i) for i in ci])
buff = []
add = buff.append
+
def convp(x):
+ """TODO: function docstring."""
if abs(x[0]) < 1.e-2:
return "_BEST_"
return "%.2f%%" % (x[0]*100)
title_shown = False
- fmt_best = fmt_diff = "{0:.%if}" % ndigits
+ fmt_best = fmt_diff = "{0:.%if}" % ndigits
if with_offset:
fmt_diff = "{0:+.%if}" % ndigits
for name, row in ci.items():
if not title_shown:
- add("".join([''.rjust(maxlen+1)]+[i.rjust(ndigits+5)
- for i in map(convp, row)]))
+ add("".join([''.rjust(maxlen+1)] + [i.rjust(ndigits+5)
+ for i in map(convp, row)]))
title_shown = True
thisrow = [" %s:" % name.ljust(maxlen)]
offset = 0.0
@@ -225,5 +234,5 @@ def ci_report(ci, with_offset=True, ndigits=5):
def report_ci(ci):
- """print a report for confidence intervals"""
+ """Print a report for confidence intervals."""
print(ci_report(ci))
diff --git a/lmfit/ui/basefitter.py b/lmfit/ui/basefitter.py
index 60a0987..0d118f9 100644
--- a/lmfit/ui/basefitter.py
+++ b/lmfit/ui/basefitter.py
@@ -1,10 +1,11 @@
import warnings
+
import numpy as np
-from ..model import Model
-from ..models import ExponentialModel # arbitrary default
from ..asteval import Interpreter
from ..astutils import NameFinder
+from ..model import Model
+from ..models import ExponentialModel # arbitrary default
from ..parameter import check_ast_errors
diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py
index 80edda5..6535a46 100644
--- a/lmfit/ui/ipy_fitter.py
+++ b/lmfit/ui/ipy_fitter.py
@@ -1,15 +1,15 @@
import warnings
+
+import IPython
+from IPython.display import clear_output, display
import numpy as np
from ..model import Model
-
-from .basefitter import MPLFitter, _COMMON_DOC, _COMMON_EXAMPLES_DOC
+from .basefitter import _COMMON_DOC, _COMMON_EXAMPLES_DOC, MPLFitter
# Note: If IPython is not available of the version is < 2,
# this module will not be imported, and a different Fitter.
-import IPython
-from IPython.display import display, clear_output
# Widgets were only experimental in IPython 2.x, but this does work there.
# Handle the change in naming from 2.x to 3.x.
IPY2 = IPython.release.version_info[0] == 2
diff --git a/lmfit/uncertainties/__init__.py b/lmfit/uncertainties/__init__.py
index 5c2ec34..bea8a49 100644
--- a/lmfit/uncertainties/__init__.py
+++ b/lmfit/uncertainties/__init__.py
@@ -233,6 +233,7 @@ import math
from math import sqrt, log # Optimization: no attribute look-up
import copy
import warnings
+import six
# Numerical version:
__version_info__ = (1, 9)
@@ -1628,7 +1629,7 @@ def ufloat(representation, tag=None):
# thus does not have any overhead.
#! Different, in Python 3:
- if isinstance(representation, basestring):
+ if isinstance(representation, six.string_types):
representation = str_to_number_with_uncert(representation)
#! The tag is forced to be a string, so that the user does not
@@ -1637,9 +1638,8 @@ def ufloat(representation, tag=None):
# from being considered as tags, here:
if tag is not None:
#! 'unicode' is removed in Python3:
- assert isinstance(tag, (str, unicode)), "The tag can only be a string."
+ assert isinstance(tag, six.string_types), "The tag can only be a string."
#! The special ** syntax is for Python 2.5 and before (Python 2.6+
# understands tag=tag):
return Variable(*representation, **{'tag': tag})
-
diff --git a/lmfit/uncertainties/umath.py b/lmfit/uncertainties/umath.py
index e0608c8..5a18715 100644
--- a/lmfit/uncertainties/umath.py
+++ b/lmfit/uncertainties/umath.py
@@ -38,13 +38,14 @@ author.'''
from __future__ import division # Many analytical derivatives depend on this
# Standard modules
+import functools
+import itertools
import math
import sys
-import itertools
-import functools
# Local modules
-from __init__ import wrap, set_doc, __author__, to_affine_scalar, AffineScalarFunc
+from __init__ import (AffineScalarFunc, __author__, set_doc, to_affine_scalar,
+ wrap)
###############################################################################
diff --git a/requirements.txt b/requirements.txt
index 99f0a77..89dc23a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
-numpy>=1.5
-scipy>=0.14
+six>1.10
+numpy>=1.9.1
+scipy>=0.15.1
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..cbd46d0
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,13 @@
+[versioneer]
+vcs = git
+style = pep440
+versionfile_source = lmfit/_version.py
+versionfile_build = lmfit/_version.py
+tag_prefix =
+parentdir_prefix = lmfit-
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/setup.py b/setup.py
index ac98539..24ff17a 100644
--- a/setup.py
+++ b/setup.py
@@ -1,14 +1,19 @@
#!/usr/bin/env python
# from distutils.core import setup
-from setuptools import setup
+from __future__ import print_function
+
+import sys
+from setuptools import setup
import versioneer
-versioneer.VCS = 'git'
-versioneer.versionfile_source = 'lmfit/_version.py'
-versioneer.versionfile_build = 'lmfit/_version.py'
-versioneer.tag_prefix = ''
-versioneer.parentdir_prefix = 'lmfit-'
+# Minimal Python version sanity check
+# taken from the Jupyter Notebook setup.py -- Modified BSD License
+v = sys.version_info
+if v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 3)):
+ error = "ERROR: lmfit requires Python version 2.7 or 3.3 or above."
+ print(error, file=sys.stderr)
+ sys.exit(1)
long_desc = """A library for least-squares minimization and data fitting in
Python. Built on top of scipy.optimize, lmfit provides a Parameter object
@@ -36,7 +41,7 @@ setup(name = 'lmfit',
author_email = 'matt.newville@gmail.com',
url = 'http://lmfit.github.io/lmfit-py/',
download_url = 'http://lmfit.github.io//lmfit-py/',
- install_requires = ['numpy', 'scipy'],
+ install_requires = ['numpy', 'scipy', 'six'],
license = 'BSD',
description = "Least-Squares Minimization with Bounds and Constraints",
long_description = long_desc,
@@ -51,4 +56,3 @@ setup(name = 'lmfit',
package_dir = {'lmfit': 'lmfit'},
packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
)
-
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
index aec9a8f..fd053c1 100644
--- a/tests/test_NIST_Strd.py
+++ b/tests/test_NIST_Strd.py
@@ -9,7 +9,7 @@ from NISTModels import Models, ReadNistData
HASPYLAB = False
for arg in sys.argv:
- if 'nose' in arg:
+ if 'nose' in arg or 'pytest' in arg:
HASPYLAB = False
if HASPYLAB:
diff --git a/tests/test_algebraic_constraint2.py b/tests/test_algebraic_constraint2.py
index ab64cef..3557eae 100644
--- a/tests/test_algebraic_constraint2.py
+++ b/tests/test_algebraic_constraint2.py
@@ -8,7 +8,7 @@ import sys
# Turn off plotting if run by nosetests.
WITHPLOT = True
for arg in sys.argv:
- if 'nose' in arg:
+ if 'nose' in arg or 'pytest' in arg:
WITHPLOT = False
if WITHPLOT:
diff --git a/tests/test_brute_method.py b/tests/test_brute_method.py
new file mode 100644
index 0000000..722ed97
--- /dev/null
+++ b/tests/test_brute_method.py
@@ -0,0 +1,235 @@
+from __future__ import print_function
+import pickle
+import numpy as np
+from numpy.testing import (assert_, decorators, assert_raises,
+ assert_almost_equal, assert_equal,
+ assert_allclose)
+from scipy import optimize
+import lmfit
+
+
+# use example problem described in the scipy documentation:
+# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html
+
+# setup for scipy-brute optimization #
+params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
+
+def f1(z, *params):
+ x, y = z
+ a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+ return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
+
+def f2(z, *params):
+ x, y = z
+ a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+ return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
+
+def f3(z, *params):
+ x, y = z
+ a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+ return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
+
+def f(z, *params):
+ return f1(z, *params) + f2(z, *params) + f3(z, *params)
+# setup for scipy-brute optimization #
+
+# setup for lmfit-brute optimization #
+params_lmfit = lmfit.Parameters()
+params_lmfit.add_many(
+ ('a', 2, False, None, None, None),
+ ('b', 3, False, None, None, None),
+ ('c', 7, False, None, None, None),
+ ('d', 8, False, None, None, None),
+ ('e', 9, False, None, None, None),
+ ('f', 10, False, None, None, None),
+ ('g', 44, False, None, None, None),
+ ('h', -1, False, None, None, None),
+ ('i', 2, False, None, None, None),
+ ('j', 26, False, None, None, None),
+ ('k', 1, False, None, None, None),
+ ('l', -2, False, None, None, None),
+ ('scale', 0.5, False, None, None, None),
+ ('x', -4.0, True, -4.0, 4.0, None, None),
+ ('y', -2.0, True, -2.0, 2.0, None, None),
+ )
+
+def f1_lmfit(p):
+ par = p.valuesdict()
+ return (par['a'] * par['x']**2 + par['b'] * par['x'] * par['y'] +
+ par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] +
+ par['f'])
+
+def f2_lmfit(p):
+ par = p.valuesdict()
+ return (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 +
+ (par['y']-par['i'])**2) / par['scale']))
+
+def f3_lmfit(p):
+ par = p.valuesdict()
+ return (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 +
+ (par['y']-par['l'])**2) / par['scale']))
+
+def f_lmfit(params_lmfit):
+ return f1_lmfit(params_lmfit) + f2_lmfit(params_lmfit) + f3_lmfit(params_lmfit)
+# setup for lmfit-brute optimization ###
+
+
+def test_brute_lmfit_vs_scipy():
+ # The tests below are to make sure that the implementation of the brute
+ # method in lmfit gives identical results to scipy.optimize.brute, when
+ # using finite bounds for all varying parameters.
+
+ # TEST 1: using bounds, with (default) Ns=20 and no stepsize specified
+ assert(not params_lmfit['x'].brute_step) # brute_step for x == None
+ assert(not params_lmfit['y'].brute_step) # brute_step for y == None
+
+ rranges = ((-4, 4), (-2, 2))
+ resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=20,
+ finish=None)
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute', Ns=20)
+
+ assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True) # grid identical
+ assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True) # function values on grid identical
+ assert_equal(resbrute[0][0], resbrute_lmfit.brute_x0[0], verbose=True) # best fit x value identical
+ assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True) # best fit x value stored correctly
+ assert_equal(resbrute[0][1], resbrute_lmfit.brute_x0[1], verbose=True) # best fit y value identical
+ assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True) # best fit y value stored correctly
+ assert_equal(resbrute[1], resbrute_lmfit.brute_fval, verbose=True) # best fit function value identical
+ assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True) # best fit function value stored correctly
+
+ # TEST 2: using bounds, setting Ns=40 and no stepsize specified
+ assert(not params_lmfit['x'].brute_step) # brute_step for x == None
+ assert(not params_lmfit['y'].brute_step) # brute_step for y == None
+
+ rranges = ((-4, 4), (-2, 2))
+ resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=40,
+ finish=None)
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute', Ns=40)
+
+ assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True) # grid identical
+ assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True) # function values on grid identical
+ assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True) # best fit x value identical
+ assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True) # best fit y value identical
+ assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True) # best fit function value identical
+
+ # TEST 3: using bounds and specifying stepsize for both parameters
+ params_lmfit['x'].set(brute_step=0.25)
+ params_lmfit['y'].set(brute_step=0.25)
+ assert_equal(params_lmfit['x'].brute_step, 0.25 ,verbose=True)
+ assert_equal(params_lmfit['y'].brute_step, 0.25 ,verbose=True)
+
+ rranges = (slice(-4, 4, 0.25), slice(-2, 2, 0.25))
+ resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=20,
+ finish=None)
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute')
+
+ assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True) # grid identical
+ assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True) # function values on grid identical
+ assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True) # best fit x value identical
+ assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True) # best fit y value identical
+ assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True) # best fit function value identical
+
+ # TEST 4: using bounds, Ns=10, and specifying stepsize for parameter 'x'
+ params_lmfit['x'].set(brute_step=0.15)
+ params_lmfit['y'].set(brute_step=0) # brute_step for y == None
+ assert_equal(params_lmfit['x'].brute_step, 0.15 ,verbose=True)
+ assert(not params_lmfit['y'].brute_step)
+
+ rranges = (slice(-4, 4, 0.15), (-2, 2))
+ resbrute = optimize.brute(f, rranges, args=params, full_output=True, Ns=10,
+ finish=None)
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute', Ns=10, keep='all')
+
+ assert_equal(resbrute[2], resbrute_lmfit.brute_grid, verbose=True) # grid identical
+ assert_equal(resbrute[3], resbrute_lmfit.brute_Jout, verbose=True) # function values on grid identical
+ assert_equal(resbrute[0][0], resbrute_lmfit.params['x'].value, verbose=True) # best fit x value identical
+ assert_equal(resbrute[0][1], resbrute_lmfit.params['y'].value, verbose=True) # best fit y value identical
+ assert_equal(resbrute[1], resbrute_lmfit.chisqr, verbose=True) # best fit function value identical
+
+
+def test_brute():
+ # The tests below are to make sure that the implementation of the brute
+ # method in lmfit works as intended.
+
+ # restore original settings for parameters 'x' and 'y'
+ params_lmfit.add_many(
+ ('x', -4.0, True, -4.0, 4.0, None, None),
+ ('y', -2.0, True, -2.0, 2.0, None, None))
+
+ # TEST 1: only upper bound and brute_step specified, using default Ns=20
+ Ns = 20
+ params_lmfit['x'].set(min=-np.inf)
+ params_lmfit['x'].set(brute_step=0.25)
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute')
+ grid_x_expected = np.linspace(params_lmfit['x'].max - Ns*params_lmfit['x'].brute_step,
+ params_lmfit['x'].max, Ns, False)
+ grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+ assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+ grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+ grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].max, Ns)
+ assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+ # TEST 2: only lower bound and brute_step specified, using Ns=15
+ Ns = 15
+ params_lmfit['y'].set(max=np.inf)
+ params_lmfit['y'].set(brute_step=0.1)
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
+ grid_x_expected = np.linspace(params_lmfit['x'].max - Ns*params_lmfit['x'].brute_step,
+ params_lmfit['x'].max, Ns, False)
+ grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+ assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+ grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+ grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].min + Ns*params_lmfit['y'].brute_step, Ns, False)
+ assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+ # TEST 3: only value and brute_step specified, using Ns=15
+ Ns = 15
+ params_lmfit['x'].set(max=np.inf)
+ params_lmfit['x'].set(min=-np.inf)
+ params_lmfit['x'].set(brute_step=0.1)
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
+ grid_x_expected = np.linspace(params_lmfit['x'].value - (Ns//2)*params_lmfit['x'].brute_step,
+ params_lmfit['x'].value + (Ns//2)*params_lmfit['x'].brute_step, Ns)
+ grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+ assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+ grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+ grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].min + Ns*params_lmfit['y'].brute_step, Ns, False)
+ assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+ # TEST 3: only value and brute_step specified, using Ns=15
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute', Ns=15)
+ grid_x_expected = np.linspace(params_lmfit['x'].value - (Ns//2)*params_lmfit['x'].brute_step,
+ params_lmfit['x'].value + (Ns//2)*params_lmfit['x'].brute_step, Ns)
+ grid_x = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][0])
+ assert_almost_equal(grid_x_expected, grid_x, verbose=True)
+ grid_y = np.unique([par.ravel() for par in resbrute_lmfit.brute_grid][1])
+ grid_y_expected = np.linspace(params_lmfit['y'].min, params_lmfit['y'].min + Ns*params_lmfit['y'].brute_step, Ns, False)
+ assert_almost_equal(grid_y_expected, grid_y, verbose=True)
+
+ # TEST 4: check for correct functioning of keep argument and candidates attribute
+ params_lmfit.add_many( # restore original settings for parameters 'x' and 'y'
+ ('x', -4.0, True, -4.0, 4.0, None, None),
+ ('y', -2.0, True, -2.0, 2.0, None, None))
+
+ fitter = lmfit.Minimizer(f_lmfit, params_lmfit)
+ resbrute_lmfit = fitter.minimize(method='brute')
+ assert(len(resbrute_lmfit.candidates) == 50) # default number of stored candidates
+
+ resbrute_lmfit = fitter.minimize(method='brute', keep=10)
+ assert(len(resbrute_lmfit.candidates) == 10)
+
+ assert(isinstance(resbrute_lmfit.candidates[0].params, lmfit.Parameters))
+
+ # TEST 5: make sure the MinimizerResult can be pickle'd
+ pkl = pickle.dumps(resbrute_lmfit)
+
+test_brute_lmfit_vs_scipy()
+test_brute()
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
index f000d95..3291d91 100644
--- a/tests/test_confidence.py
+++ b/tests/test_confidence.py
@@ -34,7 +34,7 @@ def test_confidence1():
ci = lmfit.conf_interval(minimizer, out)
assert_allclose(ci['b'][0][0], 0.997, rtol=0.01)
assert_allclose(ci['b'][0][1], -2.022, rtol=0.01)
- assert_allclose(ci['b'][2][0], 0.674, rtol=0.01)
+ assert_allclose(ci['b'][2][0], 0.683, rtol=0.01)
assert_allclose(ci['b'][2][1], -1.997, rtol=0.01)
assert_allclose(ci['b'][5][0], 0.95, rtol=0.01)
assert_allclose(ci['b'][5][1], -1.96, rtol=0.01)
@@ -71,7 +71,7 @@ def test_confidence2():
ci = lmfit.conf_interval(minimizer, out)
assert_allclose(ci['b'][0][0], 0.997, rtol=0.01)
assert_allclose(ci['b'][0][1], -2.022, rtol=0.01)
- assert_allclose(ci['b'][2][0], 0.674, rtol=0.01)
+ assert_allclose(ci['b'][2][0], 0.683, rtol=0.01)
assert_allclose(ci['b'][2][1], -1.997, rtol=0.01)
assert_allclose(ci['b'][5][0], 0.95, rtol=0.01)
assert_allclose(ci['b'][5][1], -1.96, rtol=0.01)
diff --git a/tests/test_minimizer.py b/tests/test_minimizer.py
new file mode 100644
index 0000000..c356fd1
--- /dev/null
+++ b/tests/test_minimizer.py
@@ -0,0 +1,20 @@
+from lmfit import Parameters, Minimizer
+
+
+def test_scalar_minimize_neg_value():
+ x0 = 3.14
+ fmin = -1.1
+ xtol = 0.001
+ ftol = 2.0 * xtol
+
+ def objective(pars):
+ return (pars['x'] - x0) ** 2.0 + fmin
+
+ params = Parameters()
+ params.add('x', value=2*x0)
+
+ minr = Minimizer(objective, params)
+ result = minr.scalar_minimize(method='Nelder-Mead', options={'xtol': xtol,
+ 'ftol': ftol})
+ assert abs(result.params['x'].value - x0) < xtol
+ assert abs(result.fun - fmin) < ftol
diff --git a/tests/test_model.py b/tests/test_model.py
index e176139..a2fab13 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -7,6 +7,7 @@ import numpy as np
from lmfit import Model, Parameter, models
from lmfit.lineshapes import gaussian
+from lmfit.models import PseudoVoigtModel
def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
err_msg='', verbose=True):
@@ -134,6 +135,18 @@ class CommonTests(object):
if hasattr(short_eval, '__len__'):
self.assertEqual(len(short_eval), 3)
+ def test_result_report(self):
+ pars = self.model.make_params(**self.guess())
+ result = self.model.fit(self.data, pars, x=self.x)
+ report = result.fit_report()
+ assert("[[Model]]" in report)
+ assert("[[Variables]]" in report)
+ assert("[[Fit Statistics]]" in report)
+ assert(" # function evals =" in report)
+ assert(" Akaike " in report)
+ assert(" chi-square " in report)
+
+
def test_data_alignment(self):
_skip_if_no_pandas()
from pandas import Series
@@ -391,6 +404,38 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
for mod in [model1, model2, model3, model4]:
self.assertTrue(mod in model_total3.components)
+
+ def test_eval_components(self):
+ model1 = models.GaussianModel(prefix='g1_')
+ model2 = models.GaussianModel(prefix='g2_')
+ model3 = models.ConstantModel(prefix='bkg_')
+ mod = model1 + model2 + model3
+ pars = mod.make_params()
+
+ values1 = dict(amplitude=7.10, center=1.1, sigma=2.40)
+ values2 = dict(amplitude=12.2, center=2.5, sigma=0.5)
+ data = (1.01 + gaussian(x=self.x, **values1) +
+ gaussian(x=self.x, **values2) + 0.05*self.noise)
+
+ pars['g1_sigma'].set(2)
+ pars['g1_center'].set(1, max=1.5)
+ pars['g1_amplitude'].set(3)
+ pars['g2_sigma'].set(1)
+ pars['g2_center'].set(2.6, min=2.0)
+ pars['g2_amplitude'].set(1)
+ pars['bkg_c'].set(1.88)
+
+ result = mod.fit(data, params=pars, x=self.x)
+
+ self.assertTrue(abs(result.params['g1_amplitude'].value - 7.1) < 1.5)
+ self.assertTrue(abs(result.params['g2_amplitude'].value - 12.2) < 1.5)
+ self.assertTrue(abs(result.params['g1_center'].value - 1.1) < 0.2)
+ self.assertTrue(abs(result.params['g2_center'].value - 2.5) < 0.2)
+ self.assertTrue(abs(result.params['bkg_c'].value - 1.0) < 0.25)
+
+ comps = mod.eval_components(x=self.x)
+ assert('bkg_' in comps)
+
def test_composite_has_bestvalues(self):
# test that a composite model has non-empty best_values
model1 = models.GaussianModel(prefix='g1_')
@@ -473,6 +518,11 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
self.assertEqual(models[0].param_hints['amp'],
models[1].param_hints['amp'])
+ def test_param_hint_explicit_value(self):
+ # tests Github Issue 384
+ pmod = PseudoVoigtModel()
+ params = pmod.make_params(sigma=2, fraction=0.77)
+ assert_allclose(params['fraction'].value, 0.77, rtol=0.01)
def test_composite_model_with_expr_constrains(self):
"""Smoke test for composite model fitting with expr constraints.
@@ -585,4 +635,33 @@ class TestComplexConstant(CommonTests, unittest.TestCase):
self.guess = lambda: dict(re=2,im=2)
self.model_constructor = models.ComplexConstantModel
super(TestComplexConstant, self).setUp()
+
+class TestExpression(CommonTests, unittest.TestCase):
+ def setUp(self):
+ self.true_values = lambda: dict(off_c=0.25, amp_c=1.0, x0=2.0)
+ self.guess = lambda: dict(off_c=0.20, amp_c=1.5, x0=2.5)
+ self.expression = "off_c + amp_c * exp(-x/x0)"
+ self.model_constructor = (
+ lambda *args, **kwargs: models.ExpressionModel(self.expression, *args, **kwargs))
+ super(TestExpression, self).setUp()
+
+ def test_composite_with_expression(self):
+ expression_model = models.ExpressionModel("exp(-x/x0)", name='exp')
+ amp_model = models.ConstantModel(prefix='amp_')
+ off_model = models.ConstantModel(prefix='off_', name="off")
+
+ comp_model = off_model + amp_model * expression_model
+
+ x = self.x
+ true_values = self.true_values()
+ data = comp_model.eval(x=x, **true_values) + self.noise
+ # data = 0.25 + 1 * np.exp(-x / 2.)
+
+ params = comp_model.make_params(**self.guess())
+
+ result = comp_model.fit(data, x=x, params=params)
+ assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+ data_components = comp_model.eval_components(x=x)
+ self.assertIn('exp', data_components)
#
diff --git a/tests/test_model_uncertainties.py b/tests/test_model_uncertainties.py
new file mode 100644
index 0000000..0e62bb2
--- /dev/null
+++ b/tests/test_model_uncertainties.py
@@ -0,0 +1,97 @@
+"""
+tests of ModelResult.eval_uncertainty()
+
+"""
+import numpy as np
+from numpy.testing import assert_allclose
+from lmfit.lineshapes import gaussian
+from lmfit.models import LinearModel, GaussianModel
+
+def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5):
+ # create data to be fitted
+ np.random.seed(88)
+ x = np.linspace(0, 10, 101)
+ y = intercept + x*slope
+ y = y + np.random.normal(size=len(x), scale=noise)
+
+ model = LinearModel()
+ params = model.make_params(intercept=intercept, slope=slope)
+
+ return x, y, model, params
+
+def get_gaussianmodel(amplitude=1.0, center=5.0, sigma=1.0, noise=0.1):
+ # create data to be fitted
+ np.random.seed(7392)
+ x = np.linspace(-20, 20, 201)
+ y = gaussian(x, amplitude, center=center, sigma=sigma)
+ y = y + np.random.normal(size=len(x), scale=noise)
+
+ model = GaussianModel()
+ params = model.make_params(amplitude=amplitude/5.0,
+ center=center-1.0,
+ sigma=sigma*2.0)
+ return x, y, model, params
+
+def test_linear_constant_intercept():
+ x, y, model, params = get_linearmodel(slope=4, intercept=-10)
+
+ params['intercept'].vary = False
+
+ ret = model.fit(y, params, x=x)
+
+ dely = ret.eval_uncertainty(sigma=1)
+ slope_stderr = ret.params['slope'].stderr
+
+ assert_allclose(dely.min(), 0, rtol=1.e-2)
+ assert_allclose(dely.max(), slope_stderr*x.max(), rtol=1.e-2)
+ assert_allclose(dely.mean(),slope_stderr*x.mean(), rtol=1.e-2)
+
+def test_linear_constant_slope():
+ x, y, model, params = get_linearmodel(slope=-4, intercept=2.3)
+
+ params['slope'].vary = False
+
+ ret = model.fit(y, params, x=x)
+
+ dely = ret.eval_uncertainty(sigma=1)
+
+ intercept_stderr = ret.params['intercept'].stderr
+
+ assert_allclose(dely.min(), intercept_stderr, rtol=1.e-2)
+ assert_allclose(dely.max(), intercept_stderr, rtol=1.e-2)
+
+
+def test_gauss_sigmalevel():
+ """ test that dely increases as sigma increases"""
+ x, y, model, params = get_gaussianmodel(amplitude=50.0, center=4.5,
+ sigma=0.78, noise=0.1)
+ ret = model.fit(y, params, x=x)
+
+ dely_sigma1 = ret.eval_uncertainty(sigma=1)
+ dely_sigma2 = ret.eval_uncertainty(sigma=2)
+ dely_sigma3 = ret.eval_uncertainty(sigma=3)
+
+ assert(dely_sigma3.mean() > 1.5*dely_sigma2.mean())
+ assert(dely_sigma2.mean() > 1.5*dely_sigma1.mean())
+
+def test_gauss_noiselevel():
+ """ test that dely increases as expected with changing noise level"""
+ lonoise = 0.05
+ hinoise = 10*lonoise
+ x, y, model, params = get_gaussianmodel(amplitude=20.0, center=2.1,
+ sigma=1.0, noise=lonoise)
+ ret1 = model.fit(y, params, x=x)
+ dely_lonoise = ret1.eval_uncertainty(sigma=1)
+
+ x, y, model, params = get_gaussianmodel(amplitude=20.0, center=2.1,
+ sigma=1.0, noise=hinoise)
+ ret2 = model.fit(y, params, x=x)
+ dely_hinoise = ret2.eval_uncertainty(sigma=1)
+
+ assert_allclose(dely_hinoise.mean(), 10*dely_lonoise.mean(), rtol=1.e-2)
+
+if __name__ == '__main__':
+ test_linear_constant_intercept()
+ test_linear_constant_slope()
+ test_gauss_sigmalevel()
+ test_gauss_noiselevel()
diff --git a/tests/test_nose.py b/tests/test_nose.py
index 9e4f4dd..c2c6fae 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -4,10 +4,12 @@ from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
from lmfit.minimizer import (SCALAR_METHODS, HAS_EMCEE,
MinimizerResult, _lnpost, _nan_policy)
from lmfit.lineshapes import gaussian
+from lmfit import ufloat
import numpy as np
from numpy import pi
from numpy.testing import (assert_, decorators, assert_raises,
- assert_almost_equal, assert_equal)
+ assert_almost_equal, assert_equal,
+ assert_allclose)
import unittest
import nose
from nose import SkipTest
@@ -281,6 +283,48 @@ def test_scalar_minimize_has_no_uncertainties():
assert_(out2.errorbars == False)
+def test_scalar_minimize_reduce_fcn():
+ # test that the reduce_fcn option for scalar_minimize
+ # gives different and improved results with outliers
+
+ np.random.seed(2)
+ x = np.linspace(0, 10, 101)
+
+ yo = 1.0 + 2.0*np.sin(4*x) * np.exp(-x / 5)
+ y = yo + np.random.normal(size=len(yo), scale=0.250)
+ outliers = np.random.random_integers(int(len(x)/3.0), len(x)-1,
+ int(len(x)/12))
+ y[outliers] += 5*np.random.random(len(outliers))
+
+ # define objective function: returns the array to be minimized
+ def objfunc(pars, x, data):
+ decay = pars['decay']
+ offset= pars['offset']
+ omega = pars['omega']
+ amp = pars['amp']
+ model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
+ return model - data
+
+ # create a set of Parameters
+ params = Parameters()
+ params.add('offset', 2.0)
+ params.add('omega', 3.3)
+ params.add('amp', 2.5)
+ params.add('decay', 1.0)
+
+ method='L-BFGS-B'
+ out1 = minimize(objfunc, params, args=(x, y), method=method)
+ out2 = minimize(objfunc, params, args=(x, y), method=method,
+ reduce_fcn='neglogcauchy')
+
+ #print assert all
+ assert_allclose(out1.params['omega'].value, 4.0, rtol=0.01)
+ assert_allclose(out1.params['decay'].value, 7.6, rtol=0.01)
+
+ assert_allclose(out2.params['omega'].value, 4.0, rtol=0.01)
+ assert_allclose(out2.params['decay'].value, 5.8, rtol=0.01)
+
+
def test_multidimensional_fit_GH205():
# test that you don't need to flatten the output from the objective
# function. Tests regression for GH205.
@@ -305,6 +349,25 @@ def test_multidimensional_fit_GH205():
mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
res = mini.minimize()
+def test_ufloat():
+ """
+ test of ufloat from uncertainties
+ """
+ x = ufloat((1, 0.1))
+ assert_allclose(x.nominal_value, 1.0, rtol=1.e-7)
+ assert_allclose(x.std_dev(), 0.1, rtol=1.e-7)
+
+ y = x*x
+ assert_allclose(y.nominal_value, 1.0, rtol=1.e-7)
+ assert_allclose(y.std_dev(), 0.2, rtol=1.e-7)
+
+ y = x - x
+ assert_allclose(y.nominal_value, 0.0, rtol=1.e-7)
+ assert_allclose(y.std_dev(), 0.0, rtol=1.e-7)
+
+
+
+
class CommonMinimizerTest(unittest.TestCase):
def setUp(self):
@@ -574,7 +637,38 @@ class CommonMinimizerTest(unittest.TestCase):
assert_(np.isfinite(out.params['amp'].correl['period']))
# the lnprob array should be the same as the chain size
- assert_(np.size(out.chain)//4 == np.size(out.lnprob))
+ assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))
+
+ # test chain output shapes
+ assert_(out.lnprob.shape == (10, (20-5+1)/2) )
+ assert_(out.chain.shape == (10, (20-5+1)/2, out.nvarys) )
+ assert_(out.flatchain.shape == (10*(20-5+1)/2, out.nvarys))
+
+ def test_emcee_PT_output(self):
+ # test mcmc output when using parallel tempering
+ if not HAS_EMCEE:
+ return True
+ try:
+ from pandas import DataFrame
+ except ImportError:
+ return True
+ out = self.mini.emcee(ntemps=6, nwalkers=10, steps=20, burn=5, thin=2)
+ assert_(isinstance(out, MinimizerResult))
+ assert_(isinstance(out.flatchain, DataFrame))
+
+ # check that we can access the chains via parameter name
+ assert_(out.flatchain['amp'].shape[0] == 80)
+ assert_(out.errorbars is True)
+ assert_(np.isfinite(out.params['amp'].correl['period']))
+
+ # the lnprob array should be the same as the chain size
+ assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))
+
+ # test chain output shapes
+ assert_(out.lnprob.shape == (6, 10, (20-5+1)/2) )
+ assert_(out.chain.shape == (6, 10, (20-5+1)/2, out.nvarys) )
+ # Only the 0th temperature is returned
+ assert_(out.flatchain.shape == (10*(20-5+1)/2, out.nvarys))
@decorators.slow
def test_emcee_float(self):
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index 29f3c2d..07fc871 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -3,7 +3,7 @@ from lmfit import Parameters, Parameter
from lmfit.parameter import isclose
from numpy.testing import assert_, assert_almost_equal, assert_equal
import unittest
-from copy import deepcopy
+from copy import deepcopy, copy
import numpy as np
import pickle
@@ -41,6 +41,35 @@ class TestParameters(unittest.TestCase):
assert(p2._asteval.symtable is not None)
assert((p2['y'].value > 20) and (p2['y'].value < 21))
+ def test_copy_function(self):
+ # check copy(Parameters) does not fail
+ p1 = Parameters()
+ p1.add('t', 2.0, min=0.0, max=5.0)
+ p1.add('x', 10.0)
+ p1.add('y', expr='x*t + sqrt(t)/3.0')
+
+ p2 = copy(p1)
+ assert(isinstance(p2, Parameters))
+
+ # change the 'x' value in the original
+ p1['x'].value = 4.0
+
+ assert(p2['x'].value > 9.8)
+ assert(p2['x'].value < 10.2)
+ assert(np.isinf(p2['x'].max) and p2['x'].max > 0)
+
+ assert('t' in p2)
+ assert('y' in p2)
+ assert(p2['t'].max < 6.0)
+
+ assert(np.isinf(p2['x'].min) and p2['x'].min < 0)
+ assert('sqrt(t)' in p2['y'].expr )
+ assert(p2._asteval is not None)
+ assert(p2._asteval.symtable is not None)
+ assert((p2['y'].value > 20) and (p2['y'].value < 21))
+
+ assert(p1['y'].value < 10)
+
def test_deepcopy(self):
# check that a simple copy works
@@ -138,6 +167,31 @@ class TestParameters(unittest.TestCase):
pkl = pickle.dumps(p)
q = pickle.loads(pkl)
+
+ def test_set_symtable(self):
+ # test that we use Parameter.set(value=XXX) and have
+ # that new value be used in constraint expressions
+ pars = Parameters()
+ pars.add('x', value=1.0)
+ pars.add('y', expr='x + 1')
+
+ assert_(isclose(pars['y'].value, 2.0))
+ pars['x'].set(value=3.0)
+ assert_(isclose(pars['y'].value, 4.0))
+
+ def test_dumps_loads_parameters(self):
+ # test that we can dumps() and then loads() a Parameters
+ pars = Parameters()
+ pars.add('x', value=1.0)
+ pars.add('y', value=2.0)
+ pars['x'].expr = 'y / 2.0'
+
+ dumps = pars.dumps()
+
+ newpars = Parameters().loads(dumps)
+ newpars['y'].value = 100.0
+ assert_(isclose(newpars['x'].value, 50.0))
+
def test_isclose(self):
assert_(isclose(1., 1+1e-5, atol=1e-4, rtol=0))
assert_(not isclose(1., 1+1e-5, atol=1e-6, rtol=0))
diff --git a/tests/test_params_set.py b/tests/test_params_set.py
index ebf7ff9..5adbc77 100644
--- a/tests/test_params_set.py
+++ b/tests/test_params_set.py
@@ -16,7 +16,7 @@ def test_param_set():
# test #1: gamma is constrained to equal sigma
assert(params['gamma'].expr == 'sigma')
params.update_constraints()
- sigval = params['gamma'].value
+ sigval = params['sigma'].value
assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True)
# test #2: explicitly setting a param value should work, even when
@@ -45,4 +45,143 @@ def test_param_set():
assert(params['gamma'].vary)
assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
+ # test 5: make sure issue #389 is fixed: set boundaries and make sure
+ # they are kept when changing the value
+ amplitude_vary = params['amplitude'].vary
+ amplitude_expr = params['amplitude'].expr
+ params['amplitude'].set(min=0.0, max=100.0)
+ params.update_constraints()
+ assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
+ params['amplitude'].set(value=40.0)
+ params.update_constraints()
+ assert_allclose(params['amplitude'].value, 40.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
+ assert(params['amplitude'].expr == amplitude_expr)
+ assert(params['amplitude'].vary == amplitude_vary)
+ assert(not params['amplitude'].brute_step)
+
+ # test for possible regressions of this fix (without 'expr'):
+ # the set function should only change the requested attribute(s)
+ params['amplitude'].set(value=35.0)
+ params.update_constraints()
+ assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
+ assert(params['amplitude'].vary == amplitude_vary)
+ assert(params['amplitude'].expr == amplitude_expr)
+ assert(not params['amplitude'].brute_step)
+
+ # set minimum
+ params['amplitude'].set(min=10.0)
+ params.update_constraints()
+ assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
+ assert(params['amplitude'].vary == amplitude_vary)
+ assert(params['amplitude'].expr == amplitude_expr)
+ assert(not params['amplitude'].brute_step)
+
+ # set maximum
+ params['amplitude'].set(max=110.0)
+ params.update_constraints()
+ assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
+ assert(params['amplitude'].vary == amplitude_vary)
+ assert(params['amplitude'].expr == amplitude_expr)
+ assert(not params['amplitude'].brute_step)
+
+ # set vary
+ params['amplitude'].set(vary=False)
+ params.update_constraints()
+ assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
+ assert(params['amplitude'].vary == False)
+ assert(params['amplitude'].expr == amplitude_expr)
+ assert(not params['amplitude'].brute_step)
+
+ # set brute_step
+ params['amplitude'].set(brute_step=0.1)
+ params.update_constraints()
+ assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
+ assert(params['amplitude'].vary == False)
+ assert(params['amplitude'].expr == amplitude_expr)
+ assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True)
+
+ # test for possible regressions of this fix for variables WITH 'expr':
+ height_value = params['height'].value
+ height_min = params['height'].min
+ height_max = params['height'].max
+ height_vary = params['height'].vary
+ height_expr = params['height'].expr
+ height_brute_step = params['height'].brute_step
+
+ # set vary=True should remove expression
+ params['height'].set(vary=True)
+ params.update_constraints()
+ assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
+ assert(params['height'].vary == True)
+ assert(params['height'].expr == None)
+ assert(params['height'].brute_step == height_brute_step)
+
+ # setting an expression should set vary=False
+ params['height'].set(expr=height_expr)
+ params.update_constraints()
+ assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
+ assert(params['height'].vary == False)
+ assert(params['height'].expr == height_expr)
+ assert(params['height'].brute_step == height_brute_step)
+
+ # changing min/max should not remove expression
+ params['height'].set(min=0)
+ params.update_constraints()
+ assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
+ assert(params['height'].vary == height_vary)
+ assert(params['height'].expr == height_expr)
+ assert(params['height'].brute_step == height_brute_step)
+
+ # changing brute_step should not remove expression
+ params['height'].set(brute_step=0.1)
+ params.update_constraints()
+ assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
+ assert(params['height'].vary == height_vary)
+ assert(params['height'].expr == height_expr)
+ assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True)
+
+ # changing the value should remove expression and keep vary=False
+ params['height'].set(brute_step=0)
+ params['height'].set(value=10.0)
+ params.update_constraints()
+ assert_allclose(params['height'].value, 10.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
+ assert(params['height'].vary == False)
+ assert(params['height'].expr == None)
+ assert(params['height'].brute_step == height_brute_step)
+
+ # passing expr='' should only remove the expression
+ params['height'].set(expr=height_expr) # first restore the original expr
+ params.update_constraints()
+ params['height'].set(expr='')
+ params.update_constraints()
+ assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
+ assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
+ assert(params['height'].vary == False)
+ assert(params['height'].expr == None)
+ assert(params['height'].brute_step == height_brute_step)
+
test_param_set()
diff --git a/versioneer.py b/versioneer.py
index 481180d..64fea1c 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1,7 +1,8 @@
-# Version: 0.12
+# Version: 0.18
+
+"""The Versioneer - like a rocketeer, but for versions.
-"""
The Versioneer
==============
@@ -9,9 +10,13 @@ The Versioneer
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
-* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
-
-[![Build Status](https://travis-ci.org/warner/python-versioneer.png?branch=master)](https://travis-ci.org/warner/python-versioneer)
+* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
+* [![Latest Version]
+(https://pypip.in/version/versioneer/badge.svg?style=flat)
+](https://pypi.python.org/pypi/versioneer/)
+* [![Build Status]
+(https://travis-ci.org/warner/python-versioneer.png?branch=master)
+](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
@@ -23,8 +28,8 @@ system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
-* run `versioneer-installer` in your source tree: this installs `versioneer.py`
-* follow the instructions below (also in the `versioneer.py` docstring)
+* add a `[versioneer]` section to your setup.cfg (see below)
+* run `versioneer install` in your source tree, commit the results
## Version Identifiers
@@ -53,7 +58,7 @@ unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
-for example 'git describe --tags --dirty --always' reports things like
+for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes.
@@ -67,189 +72,186 @@ The version identifier is used for multiple purposes:
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
-dynamically ask the VCS tool for version information at import time. However,
-when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
-copy is replaced by a small static file that contains just the generated
-version data.
+dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
-during the "git archive" command. As a result, generated tarballs will
+during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
+To allow `setup.py` to compute a version too, a `versioneer.py` is added to
+the top level of your source tree, next to `setup.py` and the `setup.cfg`
+that configures it. This overrides several distutils/setuptools commands to
+compute the version when invoked, and changes `setup.py build` and `setup.py
+sdist` to replace `_version.py` with a small static file that contains just
+the generated version data.
## Installation
-First, decide on values for the following configuration variables:
-
-* `VCS`: the version control system you use. Currently accepts "git".
+See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
-* `versionfile_source`:
-
- A project-relative pathname into which the generated version strings should
- be written. This is usually a `_version.py` next to your project's main
- `__init__.py` file, so it can be imported at runtime. If your project uses
- `src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
- This file should be checked in to your VCS as usual: the copy created below
- by `setup.py versioneer` will include code that parses expanded VCS
- keywords in generated tarballs. The 'build' and 'sdist' commands will
- replace it with a copy that has just the calculated version string.
+## Version-String Flavors
- This must be set even if your project does not have any modules (and will
- therefore never import `_version.py`), since "setup.py sdist" -based trees
- still need somewhere to record the pre-calculated version strings. Anywhere
- in the source tree should do. If there is a `__init__.py` next to your
- `_version.py`, the `setup.py versioneer` command (described below) will
- append some `__version__`-setting assignments, if they aren't already
- present.
+Code which uses Versioneer can learn about its version string at runtime by
+importing `_version` from your main `__init__.py` file and running the
+`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
+import the top-level `versioneer.py` and run `get_versions()`.
-* `versionfile_build`:
+Both functions return a dictionary with different flavors of version
+information:
- Like `versionfile_source`, but relative to the build directory instead of
- the source directory. These will differ when your setup.py uses
- 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
- then you will probably have `versionfile_build='myproject/_version.py'` and
- `versionfile_source='src/myproject/_version.py'`.
+* `['version']`: A condensed version string, rendered using the selected
+ style. This is the most commonly used value for the project's version
+ string. The default "pep440" style yields strings like `0.11`,
+ `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
+ below for alternative styles.
- If this is set to None, then `setup.py build` will not attempt to rewrite
- any `_version.py` in the built tree. If your project does not have any
- libraries (e.g. if it only builds a script), then you should use
- `versionfile_build = None` and override `distutils.command.build_scripts`
- to explicitly insert a copy of `versioneer.get_version()` into your
- generated script.
+* `['full-revisionid']`: detailed revision identifier. For Git, this is the
+ full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
-* `tag_prefix`:
+* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
+ commit date in ISO 8601 format. This will be None if the date is not
+ available.
- a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
- If your tags look like 'myproject-1.2.0', then you should use
- tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
- should be an empty string.
+* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
+ this is only accurate if run in a VCS checkout, otherwise it is likely to
+ be False or None
-* `parentdir_prefix`:
+* `['error']`: if the version string could not be computed, this will be set
+ to a string describing the problem, otherwise it will be None. It may be
+ useful to throw an exception in setup.py if this is set, to avoid e.g.
+ creating tarballs with a version string of "unknown".
- a string, frequently the same as tag_prefix, which appears at the start of
- all unpacked tarball filenames. If your tarball unpacks into
- 'myproject-1.2.0', this should be 'myproject-'.
+Some variants are more useful than others. Including `full-revisionid` in a
+bug report should allow developers to reconstruct the exact code being tested
+(or indicate the presence of local changes that should be shared with the
+developers). `version` is suitable for display in an "about" box or a CLI
+`--version` output: it can be easily compared against release notes and lists
+of bugs fixed in various releases.
-This tool provides one script, named `versioneer-installer`. That script does
-one thing: write a copy of `versioneer.py` into the current directory.
+The installer adds the following text to your `__init__.py` to place a basic
+version in `YOURPROJECT.__version__`:
-To versioneer-enable your project:
+ from ._version import get_versions
+ __version__ = get_versions()['version']
+ del get_versions
-* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
- source tree.
+## Styles
+
+The setup.cfg `style=` configuration controls how the VCS information is
+rendered into a version string.
-* 2: add the following lines to the top of your `setup.py`, with the
- configuration values you decided earlier:
+The default style, "pep440", produces a PEP440-compliant string, equal to the
+un-prefixed tag name for actual releases, and containing an additional "local
+version" section with more detail for in-between builds. For Git, this is
+TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
+--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
+tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
+that this commit is two revisions ("+2") beyond the "0.11" tag. For released
+software (exactly equal to a known tag), the identifier will only contain the
+stripped tag, e.g. "0.11".
- import versioneer
- versioneer.VCS = 'git'
- versioneer.versionfile_source = 'src/myproject/_version.py'
- versioneer.versionfile_build = 'myproject/_version.py'
- versioneer.tag_prefix = '' # tags are like 1.2.0
- versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
+Other styles are available. See [details.md](details.md) in the Versioneer
+source tree for descriptions.
-* 3: add the following arguments to the setup() call in your setup.py:
+## Debugging
- version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(),
+Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
+to return a version of "0+unknown". To investigate the problem, run `setup.py
+version`, which will run the version-lookup code in a verbose mode, and will
+display the full contents of `get_versions()` (including the `error` string,
+which may help identify what went wrong).
-* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
- modify your `__init__.py` (if one exists next to `_version.py`) to define
- `__version__` (by calling a function from `_version.py`). It will also
- modify your `MANIFEST.in` to include both `versioneer.py` and the generated
- `_version.py` in sdist tarballs.
+## Known Limitations
-* 5: commit these changes to your VCS. To make sure you won't forget,
- `setup.py versioneer` will mark everything it touched for addition.
+Some situations are known to cause problems for Versioneer. This details the
+most significant ones. More can be found on Github
+[issues page](https://github.com/warner/python-versioneer/issues).
-## Post-Installation Usage
+### Subprojects
-Once established, all uses of your tree from a VCS checkout should get the
-current version string. All generated tarballs should include an embedded
-version string (so users who unpack them will not need a VCS tool installed).
+Versioneer has limited support for source trees in which `setup.py` is not in
+the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
+two common reasons why `setup.py` might not be in the root:
-If you distribute your project through PyPI, then the release process should
-boil down to two steps:
+* Source trees which contain multiple subprojects, such as
+ [Buildbot](https://github.com/buildbot/buildbot), which contains both
+ "master" and "slave" subprojects, each with their own `setup.py`,
+ `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
+ distributions (and upload multiple independently-installable tarballs).
+* Source trees whose main purpose is to contain a C library, but which also
+  provide bindings to Python (and perhaps other languages) in subdirectories.
-* 1: git tag 1.0
-* 2: python setup.py register sdist upload
+Versioneer will look for `.git` in parent directories, and most operations
+should get the right version string. However `pip` and `setuptools` have bugs
+and implementation details which frequently cause `pip install .` from a
+subproject directory to fail to find a correct version string (so it usually
+defaults to `0+unknown`).
-If you distribute it through github (i.e. users use github to generate
-tarballs with `git archive`), the process is:
+`pip install --editable .` should work correctly. `setup.py install` might
+work too.
-* 1: git tag 1.0
-* 2: git push; git push --tags
+Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
+some later version.
-Currently, all version strings must be based upon a tag. Versioneer will
-report "unknown" until your tree has at least one tag in its history. This
-restriction will be fixed eventually (see issue #12).
+[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
+this issue. The discussion in
+[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
+issue from the Versioneer side in more detail.
+[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
+[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
+pip to let Versioneer work correctly.
-## Version-String Flavors
+Versioneer-0.16 and earlier only looked for a `.git` directory next to the
+`setup.cfg`, so subprojects were completely unsupported with those releases.
-Code which uses Versioneer can learn about its version string at runtime by
-importing `_version` from your main `__init__.py` file and running the
-`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
-import the top-level `versioneer.py` and run `get_versions()`.
+### Editable installs with setuptools <= 18.5
-Both functions return a dictionary with different keys for different flavors
-of the version string:
+`setup.py develop` and `pip install --editable .` allow you to install a
+project into a virtualenv once, then continue editing the source code (and
+test) without re-installing after every change.
-* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
- this uses the output of `git describe --tags --dirty --always` but strips
- the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
- is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
- that this commit is two revisions ("-2-") beyond the "0.11" tag. For
- released software (exactly equal to a known tag), the identifier will only
- contain the stripped tag, e.g. "0.11".
+"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
+convenient way to specify executable scripts that should be installed along
+with the python package.
-* `['full']`: detailed revision identifier. For Git, this is the full SHA1
- commit id, followed by "-dirty" if the tree contains uncommitted changes,
- e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
+These both work as expected when using modern setuptools. When using
+setuptools-18.5 or earlier, however, certain operations will cause
+`pkg_resources.DistributionNotFound` errors when running the entrypoint
+script, which must be resolved by re-installing the package. This happens
+when the install happens with one version, then the egg_info data is
+regenerated while a different version is checked out. Many setup.py commands
+cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
+a different virtualenv), so this can be surprising.
-Some variants are more useful than others. Including `full` in a bug report
-should allow developers to reconstruct the exact code being tested (or
-indicate the presence of local changes that should be shared with the
-developers). `version` is suitable for display in an "about" box or a CLI
-`--version` output: it can be easily compared against release notes and lists
-of bugs fixed in various releases.
+[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
+this one, but upgrading to a newer version of setuptools should probably
+resolve it.
-In the future, this will also include a
-[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
-(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
-for a hash-based revision id), but is safe to use in a `setup.py`
-"`version=`" argument. It also enables tools like *pip* to compare version
-strings and evaluate compatibility constraint declarations.
+### Unicode version strings
-The `setup.py versioneer` command adds the following text to your
-`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
+While Versioneer works (and is continually tested) with both Python 2 and
+Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
+Newer releases probably generate unicode version strings on py2. It's not
+clear that this is wrong, but it may be surprising for applications when they
+write these strings to a network connection or include them in bytes-oriented
+APIs like cryptographic checksums.
+
+[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
+this question.
- from ._version import get_versions
- __version__ = get_versions()['version']
- del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
-* re-run `versioneer-installer` in your source tree to replace your copy of
- `versioneer.py`
-* edit `setup.py`, if necessary, to include any new configuration settings
- indicated by the release notes
-* re-run `setup.py versioneer` to replace `SRC/_version.py`
+* edit `setup.cfg`, if necessary, to include any new configuration settings
+ indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
+* re-run `versioneer install` in your source tree, to replace
+ `SRC/_version.py`
* commit any changed files
-### Upgrading from 0.10 to 0.11
-
-You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
-`setup.py versioneer`. This will enable the use of additional version-control
-systems (SVN, etc) in the future.
-
-### Upgrading from 0.11 to 0.12
-
-Nothing special.
-
## Future Directions
This tool is designed to make it easily extended to other version-control
@@ -266,35 +268,130 @@ number of intermediate scripts.
## License
-To make Versioneer easier to embed, all its code is hereby released into the
-public domain. The `_version.py` that it creates is also in the public
-domain.
+To make Versioneer easier to embed, all its code is dedicated to the public
+domain. The `_version.py` that it creates is also in the public domain.
+Specifically, both are released under the Creative Commons "Public Domain
+Dedication" license (CC0-1.0), as described in
+https://creativecommons.org/publicdomain/zero/1.0/ .
"""
-import os, sys, re, subprocess, errno
-from distutils.core import Command
-from distutils.command.sdist import sdist as _sdist
-from distutils.command.build import build as _build
+from __future__ import print_function
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+import errno
+import json
+import os
+import re
+import subprocess
+import sys
+
+
+class VersioneerConfig:
+ """Container for Versioneer configuration parameters."""
+
+
+def get_root():
+ """Get the project root directory.
+
+ We require that all commands are run from the project root, i.e. the
+ directory that contains setup.py, setup.cfg, and versioneer.py .
+ """
+ root = os.path.realpath(os.path.abspath(os.getcwd()))
+ setup_py = os.path.join(root, "setup.py")
+ versioneer_py = os.path.join(root, "versioneer.py")
+ if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
+ # allow 'python path/to/setup.py COMMAND'
+ root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
+ setup_py = os.path.join(root, "setup.py")
+ versioneer_py = os.path.join(root, "versioneer.py")
+ if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
+ err = ("Versioneer was unable to run the project root directory. "
+ "Versioneer requires setup.py to be executed from "
+ "its immediate directory (like 'python setup.py COMMAND'), "
+ "or in a way that lets it use sys.argv[0] to find the root "
+ "(like 'python path/to/setup.py COMMAND').")
+ raise VersioneerBadRootError(err)
+ try:
+ # Certain runtime workflows (setup.py install/develop in a setuptools
+ # tree) execute all dependencies in a single python process, so
+ # "versioneer" may be imported multiple times, and python's shared
+ # module-import table will cache the first one. So we can't use
+ # os.path.dirname(__file__), as that will find whichever
+ # versioneer.py was first imported, even in later projects.
+ me = os.path.realpath(os.path.abspath(__file__))
+ me_dir = os.path.normcase(os.path.splitext(me)[0])
+ vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
+ if me_dir != vsr_dir:
+ print("Warning: build in %s is using versioneer.py from %s"
+ % (os.path.dirname(me), versioneer_py))
+ except NameError:
+ pass
+ return root
+
+
+def get_config_from_root(root):
+ """Read the project setup.cfg file to determine Versioneer config."""
+ # This might raise EnvironmentError (if setup.cfg is missing), or
+ # configparser.NoSectionError (if it lacks a [versioneer] section), or
+ # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
+ # the top of versioneer.py for instructions on writing your setup.cfg .
+ setup_cfg = os.path.join(root, "setup.cfg")
+ parser = configparser.SafeConfigParser()
+ with open(setup_cfg, "r") as f:
+ parser.readfp(f)
+ VCS = parser.get("versioneer", "VCS") # mandatory
+
+ def get(parser, name):
+ if parser.has_option("versioneer", name):
+ return parser.get("versioneer", name)
+ return None
+ cfg = VersioneerConfig()
+ cfg.VCS = VCS
+ cfg.style = get(parser, "style") or ""
+ cfg.versionfile_source = get(parser, "versionfile_source")
+ cfg.versionfile_build = get(parser, "versionfile_build")
+ cfg.tag_prefix = get(parser, "tag_prefix")
+ if cfg.tag_prefix in ("''", '""'):
+ cfg.tag_prefix = ""
+ cfg.parentdir_prefix = get(parser, "parentdir_prefix")
+ cfg.verbose = get(parser, "verbose")
+ return cfg
+
+
+class NotThisMethod(Exception):
+ """Exception raised if a method is not valid for the current scenario."""
-# these configuration settings will be overridden by setup.py after it
-# imports us
-versionfile_source = None
-versionfile_build = None
-tag_prefix = None
-parentdir_prefix = None
-VCS = None
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
+HANDLERS = {}
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+
+def register_vcs_handler(vcs, method): # decorator
+ """Decorator to mark a method as the handler for a particular VCS."""
+ def decorate(f):
+ """Store f in HANDLERS[vcs][method]."""
+ if vcs not in HANDLERS:
+ HANDLERS[vcs] = {}
+ HANDLERS[vcs][method] = f
+ return f
+ return decorate
+
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
+ env=None):
+ """Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
+ dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+ p = subprocess.Popen([c] + args, cwd=cwd, env=env,
+ stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
@@ -303,21 +400,23 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
if e.errno == errno.ENOENT:
continue
if verbose:
- print("unable to run %s" % args[0])
+ print("unable to run %s" % dispcmd)
print(e)
- return None
+ return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
- return None
+ return None, None
stdout = p.communicate()[0].strip()
- if sys.version >= '3':
+ if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
- print("unable to run %s (error)" % args[0])
- return None
- return stdout
+ print("unable to run %s (error)" % dispcmd)
+ print("stdout was %s" % stdout)
+ return None, p.returncode
+ return stdout, p.returncode
+
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
@@ -327,26 +426,78 @@ LONG_VERSION_PY['git'] = '''
# that just contains the computed version number.
# This file is released into the public domain. Generated by
-# versioneer-0.12 (https://github.com/warner/python-versioneer)
+# versioneer-0.18 (https://github.com/warner/python-versioneer)
+
+"""Git implementation of _version.py."""
+
+import errno
+import os
+import re
+import subprocess
+import sys
+
+
+def get_keywords():
+ """Get the keywords needed to look up the version information."""
+ # these strings will be replaced by git during git-archive.
+ # setup.py/versioneer.py will grep for the variable names, so they must
+ # each be defined on a line of their own. _version.py will just call
+ # get_keywords().
+ git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
+ git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
+ git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
+ keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
+ return keywords
+
+
+class VersioneerConfig:
+ """Container for Versioneer configuration parameters."""
+
+
+def get_config():
+ """Create, populate and return the VersioneerConfig() object."""
+ # these strings are filled in when 'setup.py versioneer' creates
+ # _version.py
+ cfg = VersioneerConfig()
+ cfg.VCS = "git"
+ cfg.style = "%(STYLE)s"
+ cfg.tag_prefix = "%(TAG_PREFIX)s"
+ cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
+ cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
+ cfg.verbose = False
+ return cfg
+
+
+class NotThisMethod(Exception):
+ """Exception raised if a method is not valid for the current scenario."""
+
+
+LONG_VERSION_PY = {}
+HANDLERS = {}
-# these strings will be replaced by git during git-archive
-git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
-git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
-# these strings are filled in when 'setup.py versioneer' creates _version.py
-tag_prefix = "%(TAG_PREFIX)s"
-parentdir_prefix = "%(PARENTDIR_PREFIX)s"
-versionfile_source = "%(VERSIONFILE_SOURCE)s"
+def register_vcs_handler(vcs, method): # decorator
+ """Decorator to mark a method as the handler for a particular VCS."""
+ def decorate(f):
+ """Store f in HANDLERS[vcs][method]."""
+ if vcs not in HANDLERS:
+ HANDLERS[vcs] = {}
+ HANDLERS[vcs][method] = f
+ return f
+ return decorate
-import os, sys, re, subprocess, errno
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
+ env=None):
+ """Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
+ dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+ p = subprocess.Popen([c] + args, cwd=cwd, env=env,
+ stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
@@ -355,42 +506,59 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
if e.errno == errno.ENOENT:
continue
if verbose:
- print("unable to run %%s" %% args[0])
+ print("unable to run %%s" %% dispcmd)
print(e)
- return None
+ return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
- return None
+ return None, None
stdout = p.communicate()[0].strip()
- if sys.version >= '3':
+ if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
- print("unable to run %%s (error)" %% args[0])
- return None
- return stdout
+ print("unable to run %%s (error)" %% dispcmd)
+ print("stdout was %%s" %% stdout)
+ return None, p.returncode
+ return stdout, p.returncode
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+ """Try to determine the version from the parent directory name.
+
+ Source tarballs conventionally unpack into a directory that includes both
+ the project name and a version string. We will also support searching up
+ two directory levels for an appropriately named parent directory
+ """
+ rootdirs = []
+
+ for i in range(3):
+ dirname = os.path.basename(root)
+ if dirname.startswith(parentdir_prefix):
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None, "date": None}
+ else:
+ rootdirs.append(root)
+ root = os.path.dirname(root) # up a level
+ if verbose:
+ print("Tried directories %%s but none started with prefix %%s" %%
+ (str(rootdirs), parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-def versions_from_parentdir(parentdir_prefix, root, verbose=False):
- # Source tarballs conventionally unpack into a directory that includes
- # both the project name and a version string.
- dirname = os.path.basename(root)
- if not dirname.startswith(parentdir_prefix):
- if verbose:
- print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
- (root, dirname, parentdir_prefix))
- return None
- return {"version": dirname[len(parentdir_prefix):], "full": ""}
+@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
+ """Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
- f = open(versionfile_abs,"r")
+ f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
@@ -400,19 +568,35 @@ def git_get_keywords(versionfile_abs):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
+ if line.strip().startswith("git_date ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
-def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
+
+@register_vcs_handler("git", "keywords")
+def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ """Get version information from git keywords."""
if not keywords:
- return {} # keyword-finding function failed to find keywords
+ raise NotThisMethod("no keywords at all, weird")
+ date = keywords.get("date")
+ if date is not None:
+ # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
+ # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
+ # -like" string, which we must then edit to make compliant), because
+ # it's been around since git-1.5.3, and it's too difficult to
+ # discover which version we're using, or to work around using an
+ # older one.
+ date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
- return {} # unexpanded, so not in an unpacked git-archive tarball
+ raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
@@ -428,7 +612,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
- print("discarding '%%s', no digits" %% ",".join(refs-tags))
+ print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
@@ -437,81 +621,336 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
- return { "version": r,
- "full": keywords["full"].strip() }
- # no suitable tags, so we use the full revision id
+ return {"version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": None,
+ "date": date}
+ # no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
- print("no suitable tags, using full revision id")
- return { "version": keywords["full"].strip(),
- "full": keywords["full"].strip() }
+ print("no suitable tags, using unknown + full revision id")
+ return {"version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": "no suitable tags", "date": None}
-def git_versions_from_vcs(tag_prefix, root, verbose=False):
- # this runs 'git' from the root of the source tree. This only gets called
- # if the git-archive 'subst' keywords were *not* expanded, and
- # _version.py hasn't already been rewritten with a short version string,
- # meaning we're inside a checked out source tree.
-
- if not os.path.exists(os.path.join(root, ".git")):
- if verbose:
- print("no .git in %%s" %% root)
- return {}
+@register_vcs_handler("git", "pieces_from_vcs")
+def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
+ """Get version from 'git describe' in the root of the source tree.
+ This only gets called if the git-archive 'subst' keywords were *not*
+ expanded, and _version.py hasn't already been rewritten with a short
+ version string, meaning we're inside a checked out source tree.
+ """
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
- stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
- cwd=root)
- if stdout is None:
- return {}
- if not stdout.startswith(tag_prefix):
+
+ out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
+ hide_stderr=True)
+ if rc != 0:
if verbose:
- print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
- return {}
- tag = stdout[len(tag_prefix):]
- stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
- if stdout is None:
- return {}
- full = stdout.strip()
- if tag.endswith("-dirty"):
- full += "-dirty"
- return {"version": tag, "full": full}
-
-
-def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
+ print("Directory %%s not under git control" %% root)
+ raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+ # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+ # if there isn't one, this yields HEX[-dirty] (no NUM)
+ describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long",
+ "--match", "%%s*" %% tag_prefix],
+ cwd=root)
+ # --long was added in git-1.5.5
+ if describe_out is None:
+ raise NotThisMethod("'git describe' failed")
+ describe_out = describe_out.strip()
+ full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ if full_out is None:
+ raise NotThisMethod("'git rev-parse' failed")
+ full_out = full_out.strip()
+
+ pieces = {}
+ pieces["long"] = full_out
+ pieces["short"] = full_out[:7] # maybe improved later
+ pieces["error"] = None
+
+ # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
+ # TAG might have hyphens.
+ git_describe = describe_out
+
+ # look for -dirty suffix
+ dirty = git_describe.endswith("-dirty")
+ pieces["dirty"] = dirty
+ if dirty:
+ git_describe = git_describe[:git_describe.rindex("-dirty")]
+
+ # now we have TAG-NUM-gHEX or HEX
+
+ if "-" in git_describe:
+ # TAG-NUM-gHEX
+ mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ if not mo:
+ # unparseable. Maybe git-describe is misbehaving?
+ pieces["error"] = ("unable to parse git-describe output: '%%s'"
+ %% describe_out)
+ return pieces
+
+ # tag
+ full_tag = mo.group(1)
+ if not full_tag.startswith(tag_prefix):
+ if verbose:
+ fmt = "tag '%%s' doesn't start with prefix '%%s'"
+ print(fmt %% (full_tag, tag_prefix))
+ pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
+ %% (full_tag, tag_prefix))
+ return pieces
+ pieces["closest-tag"] = full_tag[len(tag_prefix):]
+
+ # distance: number of commits since tag
+ pieces["distance"] = int(mo.group(2))
+
+ # commit: short hex revision ID
+ pieces["short"] = mo.group(3)
+
+ else:
+ # HEX: no tags
+ pieces["closest-tag"] = None
+ count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
+ pieces["distance"] = int(count_out) # total number of commits
+
+ # commit date: see ISO-8601 comment in git_versions_from_keywords()
+ date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
+ cwd=root)[0].strip()
+ pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
+ return pieces
+
+
+def plus_or_dot(pieces):
+ """Return a + if we don't already have one, else return a ."""
+ if "+" in pieces.get("closest-tag", ""):
+ return "."
+ return "+"
+
+
+def render_pep440(pieces):
+ """Build up version string, with post-release "local version identifier".
+
+ Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+
+ Exceptions:
+ 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += plus_or_dot(pieces)
+ rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ else:
+ # exception #1
+ rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
+ pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ return rendered
+
+
+def render_pep440_pre(pieces):
+ """TAG[.post.devDISTANCE] -- No -dirty.
+
+ Exceptions:
+ 1: no tags. 0.post.devDISTANCE
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += ".post.dev%%d" %% pieces["distance"]
+ else:
+ # exception #1
+ rendered = "0.post.dev%%d" %% pieces["distance"]
+ return rendered
+
+
+def render_pep440_post(pieces):
+ """TAG[.postDISTANCE[.dev0]+gHEX] .
+
+ The ".dev0" means dirty. Note that .dev0 sorts backwards
+ (a dirty tree will appear "older" than the corresponding clean one),
+ but you shouldn't be releasing software with -dirty anyways.
+
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += plus_or_dot(pieces)
+ rendered += "g%%s" %% pieces["short"]
+ else:
+ # exception #1
+ rendered = "0.post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += "+g%%s" %% pieces["short"]
+ return rendered
+
+
+def render_pep440_old(pieces):
+ """TAG[.postDISTANCE[.dev0]] .
+
+ The ".dev0" means dirty.
+
+    Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ else:
+ # exception #1
+ rendered = "0.post%%d" %% pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ return rendered
+
+
+def render_git_describe(pieces):
+ """TAG[-DISTANCE-gHEX][-dirty].
+
+ Like 'git describe --tags --dirty --always'.
+
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render_git_describe_long(pieces):
+ """TAG-DISTANCE-gHEX[-dirty].
+
+    Like 'git describe --tags --dirty --always --long'.
+ The distance/hash is unconditional.
+
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render(pieces, style):
+ """Render the given version pieces into the requested style."""
+ if pieces["error"]:
+ return {"version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"],
+ "date": None}
+
+ if not style or style == "default":
+ style = "pep440" # the default
+
+ if style == "pep440":
+ rendered = render_pep440(pieces)
+ elif style == "pep440-pre":
+ rendered = render_pep440_pre(pieces)
+ elif style == "pep440-post":
+ rendered = render_pep440_post(pieces)
+ elif style == "pep440-old":
+ rendered = render_pep440_old(pieces)
+ elif style == "git-describe":
+ rendered = render_git_describe(pieces)
+ elif style == "git-describe-long":
+ rendered = render_git_describe_long(pieces)
+ else:
+ raise ValueError("unknown style '%%s'" %% style)
+
+ return {"version": rendered, "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"], "error": None,
+ "date": pieces.get("date")}
+
+
+def get_versions():
+ """Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
- keywords = { "refnames": git_refnames, "full": git_full }
- ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
- if ver:
- return ver
+ cfg = get_config()
+ verbose = cfg.verbose
try:
- root = os.path.abspath(__file__)
+ return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
+ verbose)
+ except NotThisMethod:
+ pass
+
+ try:
+ root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
- for i in range(len(versionfile_source.split(os.sep))):
+ for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
- return default
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to find root of source tree",
+ "date": None}
+
+ try:
+ pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
+ return render(pieces, cfg.style)
+ except NotThisMethod:
+ pass
- return (git_versions_from_vcs(tag_prefix, root, verbose)
- or versions_from_parentdir(parentdir_prefix, root, verbose)
- or default)
+ try:
+ if cfg.parentdir_prefix:
+ return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
+ except NotThisMethod:
+ pass
+
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None,
+ "error": "unable to compute version", "date": None}
'''
+
+@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
+ """Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
- f = open(versionfile_abs,"r")
+ f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
@@ -521,19 +960,35 @@ def git_get_keywords(versionfile_abs):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
+ if line.strip().startswith("git_date ="):
+ mo = re.search(r'=\s*"(.*)"', line)
+ if mo:
+ keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
-def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
+
+@register_vcs_handler("git", "keywords")
+def git_versions_from_keywords(keywords, tag_prefix, verbose):
+ """Get version information from git keywords."""
if not keywords:
- return {} # keyword-finding function failed to find keywords
+ raise NotThisMethod("no keywords at all, weird")
+ date = keywords.get("date")
+ if date is not None:
+ # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
+ # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
+ # -like" string, which we must then edit to make compliant), because
+ # it's been around since git-1.5.3, and it's too difficult to
+ # discover which version we're using, or to work around using an
+ # older one.
+ date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
- return {} # unexpanded, so not in an unpacked git-archive tarball
+ raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
@@ -549,7 +1004,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
- print("discarding '%s', no digits" % ",".join(refs-tags))
+ print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
@@ -558,48 +1013,116 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
- return { "version": r,
- "full": keywords["full"].strip() }
- # no suitable tags, so we use the full revision id
+ return {"version": r,
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": None,
+ "date": date}
+ # no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
- print("no suitable tags, using full revision id")
- return { "version": keywords["full"].strip(),
- "full": keywords["full"].strip() }
+ print("no suitable tags, using unknown + full revision id")
+ return {"version": "0+unknown",
+ "full-revisionid": keywords["full"].strip(),
+ "dirty": False, "error": "no suitable tags", "date": None}
-def git_versions_from_vcs(tag_prefix, root, verbose=False):
- # this runs 'git' from the root of the source tree. This only gets called
- # if the git-archive 'subst' keywords were *not* expanded, and
- # _version.py hasn't already been rewritten with a short version string,
- # meaning we're inside a checked out source tree.
-
- if not os.path.exists(os.path.join(root, ".git")):
- if verbose:
- print("no .git in %s" % root)
- return {}
+@register_vcs_handler("git", "pieces_from_vcs")
+def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
+ """Get version from 'git describe' in the root of the source tree.
+ This only gets called if the git-archive 'subst' keywords were *not*
+ expanded, and _version.py hasn't already been rewritten with a short
+ version string, meaning we're inside a checked out source tree.
+ """
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
- stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
- cwd=root)
- if stdout is None:
- return {}
- if not stdout.startswith(tag_prefix):
+
+ out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
+ hide_stderr=True)
+ if rc != 0:
if verbose:
- print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
- return {}
- tag = stdout[len(tag_prefix):]
- stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
- if stdout is None:
- return {}
- full = stdout.strip()
- if tag.endswith("-dirty"):
- full += "-dirty"
- return {"version": tag, "full": full}
+ print("Directory %s not under git control" % root)
+ raise NotThisMethod("'git rev-parse --git-dir' returned error")
+
+ # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
+ # if there isn't one, this yields HEX[-dirty] (no NUM)
+ describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
+ "--always", "--long",
+ "--match", "%s*" % tag_prefix],
+ cwd=root)
+ # --long was added in git-1.5.5
+ if describe_out is None:
+ raise NotThisMethod("'git describe' failed")
+ describe_out = describe_out.strip()
+ full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+ if full_out is None:
+ raise NotThisMethod("'git rev-parse' failed")
+ full_out = full_out.strip()
+
+ pieces = {}
+ pieces["long"] = full_out
+ pieces["short"] = full_out[:7] # maybe improved later
+ pieces["error"] = None
+
+ # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
+ # TAG might have hyphens.
+ git_describe = describe_out
+
+ # look for -dirty suffix
+ dirty = git_describe.endswith("-dirty")
+ pieces["dirty"] = dirty
+ if dirty:
+ git_describe = git_describe[:git_describe.rindex("-dirty")]
+
+ # now we have TAG-NUM-gHEX or HEX
+
+ if "-" in git_describe:
+ # TAG-NUM-gHEX
+ mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
+ if not mo:
+ # unparseable. Maybe git-describe is misbehaving?
+ pieces["error"] = ("unable to parse git-describe output: '%s'"
+ % describe_out)
+ return pieces
+
+ # tag
+ full_tag = mo.group(1)
+ if not full_tag.startswith(tag_prefix):
+ if verbose:
+ fmt = "tag '%s' doesn't start with prefix '%s'"
+ print(fmt % (full_tag, tag_prefix))
+ pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
+ % (full_tag, tag_prefix))
+ return pieces
+ pieces["closest-tag"] = full_tag[len(tag_prefix):]
+
+ # distance: number of commits since tag
+ pieces["distance"] = int(mo.group(2))
+
+ # commit: short hex revision ID
+ pieces["short"] = mo.group(3)
+
+ else:
+ # HEX: no tags
+ pieces["closest-tag"] = None
+ count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
+ cwd=root)
+ pieces["distance"] = int(count_out) # total number of commits
+
+ # commit date: see ISO-8601 comment in git_versions_from_keywords()
+ date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
+ cwd=root)[0].strip()
+ pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
+
+ return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
+ """Git-specific installation logic for Versioneer.
+
+ For Git, this means creating/changing .gitattributes to mark _version.py
+ for export-subst keyword substitution.
+ """
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
@@ -631,183 +1154,538 @@ def do_vcs_install(manifest_in, versionfile_source, ipy):
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
-def versions_from_parentdir(parentdir_prefix, root, verbose=False):
- # Source tarballs conventionally unpack into a directory that includes
- # both the project name and a version string.
- dirname = os.path.basename(root)
- if not dirname.startswith(parentdir_prefix):
- if verbose:
- print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
- (root, dirname, parentdir_prefix))
- return None
- return {"version": dirname[len(parentdir_prefix):], "full": ""}
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+ """Try to determine the version from the parent directory name.
+
+ Source tarballs conventionally unpack into a directory that includes both
+ the project name and a version string. We will also support searching up
+ two directory levels for an appropriately named parent directory
+ """
+ rootdirs = []
+
+ for i in range(3):
+ dirname = os.path.basename(root)
+ if dirname.startswith(parentdir_prefix):
+ return {"version": dirname[len(parentdir_prefix):],
+ "full-revisionid": None,
+ "dirty": False, "error": None, "date": None}
+ else:
+ rootdirs.append(root)
+ root = os.path.dirname(root) # up a level
+
+ if verbose:
+ print("Tried directories %s but none started with prefix %s" %
+ (str(rootdirs), parentdir_prefix))
+ raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
+
SHORT_VERSION_PY = """
-# This file was generated by 'versioneer.py' (0.12) from
+# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
-version_version = '%(version)s'
-version_full = '%(full)s'
-def get_versions(default={}, verbose=False):
- return {'version': version_version, 'full': version_full}
+import json
+
+version_json = '''
+%s
+''' # END VERSION_JSON
+
+def get_versions():
+ return json.loads(version_json)
"""
-DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
- versions = {}
+ """Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
- for line in f.readlines():
- mo = re.match("version_version = '([^']+)'", line)
- if mo:
- versions["version"] = mo.group(1)
- mo = re.match("version_full = '([^']+)'", line)
- if mo:
- versions["full"] = mo.group(1)
+ contents = f.read()
except EnvironmentError:
- return {}
+ raise NotThisMethod("unable to read _version.py")
+ mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
+ contents, re.M | re.S)
+ if not mo:
+ mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
+ contents, re.M | re.S)
+ if not mo:
+ raise NotThisMethod("no version_json in _version.py")
+ return json.loads(mo.group(1))
- return versions
def write_to_version_file(filename, versions):
+ """Write the given version number to the given _version.py file."""
+ os.unlink(filename)
+ contents = json.dumps(versions, sort_keys=True,
+ indent=1, separators=(",", ": "))
with open(filename, "w") as f:
- f.write(SHORT_VERSION_PY % versions)
+ f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
-def get_root():
- try:
- return os.path.dirname(os.path.abspath(__file__))
- except NameError:
- return os.path.dirname(os.path.abspath(sys.argv[0]))
-
-def vcs_function(vcs, suffix):
- return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
-
-def get_versions(default=DEFAULT, verbose=False):
- # returns dict with two keys: 'version' and 'full'
- assert versionfile_source is not None, "please set versioneer.versionfile_source"
- assert tag_prefix is not None, "please set versioneer.tag_prefix"
- assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
- assert VCS is not None, "please set versioneer.VCS"
-
- # I am in versioneer.py, which must live at the top of the source tree,
- # which we use to compute the root directory. py2exe/bbfreeze/non-CPython
- # don't have __file__, in which case we fall back to sys.argv[0] (which
- # ought to be the setup.py script). We prefer __file__ since that's more
- # robust in cases where setup.py was invoked in some weird way (e.g. pip)
+def plus_or_dot(pieces):
+ """Return a + if we don't already have one, else return a ."""
+ if "+" in pieces.get("closest-tag", ""):
+ return "."
+ return "+"
+
+
+def render_pep440(pieces):
+ """Build up version string, with post-release "local version identifier".
+
+ Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
+ get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
+
+ Exceptions:
+ 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += plus_or_dot(pieces)
+ rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ else:
+ # exception #1
+ rendered = "0+untagged.%d.g%s" % (pieces["distance"],
+ pieces["short"])
+ if pieces["dirty"]:
+ rendered += ".dirty"
+ return rendered
+
+
+def render_pep440_pre(pieces):
+ """TAG[.post.devDISTANCE] -- No -dirty.
+
+ Exceptions:
+ 1: no tags. 0.post.devDISTANCE
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += ".post.dev%d" % pieces["distance"]
+ else:
+ # exception #1
+ rendered = "0.post.dev%d" % pieces["distance"]
+ return rendered
+
+
+def render_pep440_post(pieces):
+ """TAG[.postDISTANCE[.dev0]+gHEX] .
+
+ The ".dev0" means dirty. Note that .dev0 sorts backwards
+ (a dirty tree will appear "older" than the corresponding clean one),
+ but you shouldn't be releasing software with -dirty anyways.
+
+ Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += plus_or_dot(pieces)
+ rendered += "g%s" % pieces["short"]
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ rendered += "+g%s" % pieces["short"]
+ return rendered
+
+
+def render_pep440_old(pieces):
+ """TAG[.postDISTANCE[.dev0]] .
+
+ The ".dev0" means dirty.
+
+    Exceptions:
+ 1: no tags. 0.postDISTANCE[.dev0]
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"] or pieces["dirty"]:
+ rendered += ".post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ else:
+ # exception #1
+ rendered = "0.post%d" % pieces["distance"]
+ if pieces["dirty"]:
+ rendered += ".dev0"
+ return rendered
+
+
+def render_git_describe(pieces):
+ """TAG[-DISTANCE-gHEX][-dirty].
+
+ Like 'git describe --tags --dirty --always'.
+
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ if pieces["distance"]:
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render_git_describe_long(pieces):
+ """TAG-DISTANCE-gHEX[-dirty].
+
+    Like 'git describe --tags --dirty --always --long'.
+ The distance/hash is unconditional.
+
+ Exceptions:
+ 1: no tags. HEX[-dirty] (note: no 'g' prefix)
+ """
+ if pieces["closest-tag"]:
+ rendered = pieces["closest-tag"]
+ rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
+ else:
+ # exception #1
+ rendered = pieces["short"]
+ if pieces["dirty"]:
+ rendered += "-dirty"
+ return rendered
+
+
+def render(pieces, style):
+ """Render the given version pieces into the requested style."""
+ if pieces["error"]:
+ return {"version": "unknown",
+ "full-revisionid": pieces.get("long"),
+ "dirty": None,
+ "error": pieces["error"],
+ "date": None}
+
+ if not style or style == "default":
+ style = "pep440" # the default
+
+ if style == "pep440":
+ rendered = render_pep440(pieces)
+ elif style == "pep440-pre":
+ rendered = render_pep440_pre(pieces)
+ elif style == "pep440-post":
+ rendered = render_pep440_post(pieces)
+ elif style == "pep440-old":
+ rendered = render_pep440_old(pieces)
+ elif style == "git-describe":
+ rendered = render_git_describe(pieces)
+ elif style == "git-describe-long":
+ rendered = render_git_describe_long(pieces)
+ else:
+ raise ValueError("unknown style '%s'" % style)
+
+ return {"version": rendered, "full-revisionid": pieces["long"],
+ "dirty": pieces["dirty"], "error": None,
+ "date": pieces.get("date")}
+
+
+class VersioneerBadRootError(Exception):
+ """The project root directory is unknown or missing key files."""
+
+
+def get_versions(verbose=False):
+ """Get the project version from whatever source is available.
+
+ Returns dict with two keys: 'version' and 'full'.
+ """
+ if "versioneer" in sys.modules:
+ # see the discussion in cmdclass.py:get_cmdclass()
+ del sys.modules["versioneer"]
+
root = get_root()
- versionfile_abs = os.path.join(root, versionfile_source)
+ cfg = get_config_from_root(root)
+
+ assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
+ handlers = HANDLERS.get(cfg.VCS)
+ assert handlers, "unrecognized VCS '%s'" % cfg.VCS
+ verbose = verbose or cfg.verbose
+ assert cfg.versionfile_source is not None, \
+ "please set versioneer.versionfile_source"
+ assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
- # extract version from first of _version.py, VCS command (e.g. 'git
+ versionfile_abs = os.path.join(root, cfg.versionfile_source)
+
+ # extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
- get_keywords_f = vcs_function(VCS, "get_keywords")
- versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
- if get_keywords_f and versions_from_keywords_f:
- vcs_keywords = get_keywords_f(versionfile_abs)
- ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
- if ver:
- if verbose: print("got version from expanded keyword %s" % ver)
+ get_keywords_f = handlers.get("get_keywords")
+ from_keywords_f = handlers.get("keywords")
+ if get_keywords_f and from_keywords_f:
+ try:
+ keywords = get_keywords_f(versionfile_abs)
+ ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
+ if verbose:
+ print("got version from expanded keyword %s" % ver)
return ver
+ except NotThisMethod:
+ pass
- ver = versions_from_file(versionfile_abs)
- if ver:
- if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
+ try:
+ ver = versions_from_file(versionfile_abs)
+ if verbose:
+ print("got version from file %s %s" % (versionfile_abs, ver))
return ver
+ except NotThisMethod:
+ pass
- versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
- if versions_from_vcs_f:
- ver = versions_from_vcs_f(tag_prefix, root, verbose)
- if ver:
- if verbose: print("got version from VCS %s" % ver)
+ from_vcs_f = handlers.get("pieces_from_vcs")
+ if from_vcs_f:
+ try:
+ pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
+ ver = render(pieces, cfg.style)
+ if verbose:
+ print("got version from VCS %s" % ver)
return ver
+ except NotThisMethod:
+ pass
- ver = versions_from_parentdir(parentdir_prefix, root, verbose)
- if ver:
- if verbose: print("got version from parentdir %s" % ver)
- return ver
+ try:
+ if cfg.parentdir_prefix:
+ ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
+ if verbose:
+ print("got version from parentdir %s" % ver)
+ return ver
+ except NotThisMethod:
+ pass
- if verbose: print("got version from default %s" % default)
- return default
+ if verbose:
+ print("unable to compute version")
-def get_version(verbose=False):
- return get_versions(verbose=verbose)["version"]
+ return {"version": "0+unknown", "full-revisionid": None,
+ "dirty": None, "error": "unable to compute version",
+ "date": None}
+
+
+def get_version():
+ """Get the short version string for this project."""
+ return get_versions()["version"]
-class cmd_version(Command):
- description = "report generated version string"
- user_options = []
- boolean_options = []
- def initialize_options(self):
- pass
- def finalize_options(self):
- pass
- def run(self):
- ver = get_version(verbose=True)
- print("Version is currently: %s" % ver)
-
-
-class cmd_build(_build):
- def run(self):
- versions = get_versions(verbose=True)
- _build.run(self)
- # now locate _version.py in the new build/ directory and replace it
- # with an updated value
- if versionfile_build:
- target_versionfile = os.path.join(self.build_lib, versionfile_build)
- print("UPDATING %s" % target_versionfile)
- os.unlink(target_versionfile)
- with open(target_versionfile, "w") as f:
- f.write(SHORT_VERSION_PY % versions)
-if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
- from cx_Freeze.dist import build_exe as _build_exe
+def get_cmdclass():
+ """Get the custom setuptools/distutils subclasses used by Versioneer."""
+ if "versioneer" in sys.modules:
+ del sys.modules["versioneer"]
+ # this fixes the "python setup.py develop" case (also 'install' and
+ # 'easy_install .'), in which subdependencies of the main project are
+ # built (using setup.py bdist_egg) in the same python process. Assume
+ # a main project A and a dependency B, which use different versions
+ # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
+ # sys.modules by the time B's setup.py is executed, causing B to run
+ # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
+    # sandbox that restores sys.modules to its pre-build state, so the
+ # parent is protected against the child's "import versioneer". By
+ # removing ourselves from sys.modules here, before the child build
+ # happens, we protect the child from the parent's versioneer too.
+ # Also see https://github.com/warner/python-versioneer/issues/52
+
+ cmds = {}
+
+ # we add "version" to both distutils and setuptools
+ from distutils.core import Command
+
+ class cmd_version(Command):
+ description = "report generated version string"
+ user_options = []
+ boolean_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ vers = get_versions(verbose=True)
+ print("Version: %s" % vers["version"])
+ print(" full-revisionid: %s" % vers.get("full-revisionid"))
+ print(" dirty: %s" % vers.get("dirty"))
+ print(" date: %s" % vers.get("date"))
+ if vers["error"]:
+ print(" error: %s" % vers["error"])
+ cmds["version"] = cmd_version
+
+ # we override "build_py" in both distutils and setuptools
+ #
+ # most invocation pathways end up running build_py:
+ # distutils/build -> build_py
+ # distutils/install -> distutils/build ->..
+ # setuptools/bdist_wheel -> distutils/install ->..
+ # setuptools/bdist_egg -> distutils/install_lib -> build_py
+ # setuptools/install -> bdist_egg ->..
+ # setuptools/develop -> ?
+ # pip install:
+ # copies source tree to a tempdir before running egg_info/etc
+ # if .git isn't copied too, 'git describe' will fail
+ # then does setup.py bdist_wheel, or sometimes setup.py install
+ # setup.py egg_info -> ?
+
+ # we override different "build_py" commands for both environments
+ if "setuptools" in sys.modules:
+ from setuptools.command.build_py import build_py as _build_py
+ else:
+ from distutils.command.build_py import build_py as _build_py
- class cmd_build_exe(_build_exe):
+ class cmd_build_py(_build_py):
def run(self):
- versions = get_versions(verbose=True)
- target_versionfile = versionfile_source
+ root = get_root()
+ cfg = get_config_from_root(root)
+ versions = get_versions()
+ _build_py.run(self)
+ # now locate _version.py in the new build/ directory and replace
+ # it with an updated value
+ if cfg.versionfile_build:
+ target_versionfile = os.path.join(self.build_lib,
+ cfg.versionfile_build)
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile, versions)
+ cmds["build_py"] = cmd_build_py
+
+ if "cx_Freeze" in sys.modules: # cx_freeze enabled?
+ from cx_Freeze.dist import build_exe as _build_exe
+ # nczeczulin reports that py2exe won't like the pep440-style string
+ # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
+ # setup(console=[{
+ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
+ # "product_version": versioneer.get_version(),
+ # ...
+
+ class cmd_build_exe(_build_exe):
+ def run(self):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ versions = get_versions()
+ target_versionfile = cfg.versionfile_source
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile, versions)
+
+ _build_exe.run(self)
+ os.unlink(target_versionfile)
+ with open(cfg.versionfile_source, "w") as f:
+ LONG = LONG_VERSION_PY[cfg.VCS]
+ f.write(LONG %
+ {"DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ })
+ cmds["build_exe"] = cmd_build_exe
+ del cmds["build_py"]
+
+ if 'py2exe' in sys.modules: # py2exe enabled?
+ try:
+ from py2exe.distutils_buildexe import py2exe as _py2exe # py3
+ except ImportError:
+ from py2exe.build_exe import py2exe as _py2exe # py2
+
+ class cmd_py2exe(_py2exe):
+ def run(self):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ versions = get_versions()
+ target_versionfile = cfg.versionfile_source
+ print("UPDATING %s" % target_versionfile)
+ write_to_version_file(target_versionfile, versions)
+
+ _py2exe.run(self)
+ os.unlink(target_versionfile)
+ with open(cfg.versionfile_source, "w") as f:
+ LONG = LONG_VERSION_PY[cfg.VCS]
+ f.write(LONG %
+ {"DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ })
+ cmds["py2exe"] = cmd_py2exe
+
+ # we override different "sdist" commands for both environments
+ if "setuptools" in sys.modules:
+ from setuptools.command.sdist import sdist as _sdist
+ else:
+ from distutils.command.sdist import sdist as _sdist
+
+ class cmd_sdist(_sdist):
+ def run(self):
+ versions = get_versions()
+ self._versioneer_generated_versions = versions
+ # unless we update this, the command will keep using the old
+ # version
+ self.distribution.metadata.version = versions["version"]
+ return _sdist.run(self)
+
+ def make_release_tree(self, base_dir, files):
+ root = get_root()
+ cfg = get_config_from_root(root)
+ _sdist.make_release_tree(self, base_dir, files)
+ # now locate _version.py in the new base_dir directory
+ # (remembering that it may be a hardlink) and replace it with an
+ # updated value
+ target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
- os.unlink(target_versionfile)
- with open(target_versionfile, "w") as f:
- f.write(SHORT_VERSION_PY % versions)
-
- _build_exe.run(self)
- os.unlink(target_versionfile)
- with open(versionfile_source, "w") as f:
- assert VCS is not None, "please set versioneer.VCS"
- LONG = LONG_VERSION_PY[VCS]
- f.write(LONG % {"DOLLAR": "$",
- "TAG_PREFIX": tag_prefix,
- "PARENTDIR_PREFIX": parentdir_prefix,
- "VERSIONFILE_SOURCE": versionfile_source,
- })
-
-class cmd_sdist(_sdist):
- def run(self):
- versions = get_versions(verbose=True)
- self._versioneer_generated_versions = versions
- # unless we update this, the command will keep using the old version
- self.distribution.metadata.version = versions["version"]
- return _sdist.run(self)
-
- def make_release_tree(self, base_dir, files):
- _sdist.make_release_tree(self, base_dir, files)
- # now locate _version.py in the new base_dir directory (remembering
- # that it may be a hardlink) and replace it with an updated value
- target_versionfile = os.path.join(base_dir, versionfile_source)
- print("UPDATING %s" % target_versionfile)
- os.unlink(target_versionfile)
- with open(target_versionfile, "w") as f:
- f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
+ write_to_version_file(target_versionfile,
+ self._versioneer_generated_versions)
+ cmds["sdist"] = cmd_sdist
+
+ return cmds
+
+
+CONFIG_ERROR = """
+setup.cfg is missing the necessary Versioneer configuration. You need
+a section like:
+
+ [versioneer]
+ VCS = git
+ style = pep440
+ versionfile_source = src/myproject/_version.py
+ versionfile_build = myproject/_version.py
+ tag_prefix =
+ parentdir_prefix = myproject-
+
+You will also need to edit your setup.py to use the results:
+
+ import versioneer
+ setup(version=versioneer.get_version(),
+ cmdclass=versioneer.get_cmdclass(), ...)
+
+Please read the docstring in ./versioneer.py for configuration instructions,
+edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
+"""
+
+SAMPLE_CONFIG = """
+# See the docstring in versioneer.py for instructions. Note that you must
+# re-run 'versioneer.py setup' after changing this section, and commit the
+# resulting files.
+
+[versioneer]
+#VCS = git
+#style = pep440
+#versionfile_source =
+#versionfile_build =
+#tag_prefix =
+#parentdir_prefix =
+
+"""
INIT_PY_SNIPPET = """
from ._version import get_versions
@@ -815,87 +1693,130 @@ __version__ = get_versions()['version']
del get_versions
"""
-class cmd_update_files(Command):
- description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
- user_options = []
- boolean_options = []
- def initialize_options(self):
- pass
- def finalize_options(self):
- pass
- def run(self):
- print(" creating %s" % versionfile_source)
- with open(versionfile_source, "w") as f:
- assert VCS is not None, "please set versioneer.VCS"
- LONG = LONG_VERSION_PY[VCS]
- f.write(LONG % {"DOLLAR": "$",
- "TAG_PREFIX": tag_prefix,
- "PARENTDIR_PREFIX": parentdir_prefix,
- "VERSIONFILE_SOURCE": versionfile_source,
- })
-
- ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
- if os.path.exists(ipy):
- try:
- with open(ipy, "r") as f:
- old = f.read()
- except EnvironmentError:
- old = ""
- if INIT_PY_SNIPPET not in old:
- print(" appending to %s" % ipy)
- with open(ipy, "a") as f:
- f.write(INIT_PY_SNIPPET)
- else:
- print(" %s unmodified" % ipy)
- else:
- print(" %s doesn't exist, ok" % ipy)
- ipy = None
-
- # Make sure both the top-level "versioneer.py" and versionfile_source
- # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
- # they'll be copied into source distributions. Pip won't be able to
- # install the package without this.
- manifest_in = os.path.join(get_root(), "MANIFEST.in")
- simple_includes = set()
+
+def do_setup():
+ """Main VCS-independent setup function for installing Versioneer."""
+ root = get_root()
+ try:
+ cfg = get_config_from_root(root)
+ except (EnvironmentError, configparser.NoSectionError,
+ configparser.NoOptionError) as e:
+ if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
+ print("Adding sample versioneer config to setup.cfg",
+ file=sys.stderr)
+ with open(os.path.join(root, "setup.cfg"), "a") as f:
+ f.write(SAMPLE_CONFIG)
+ print(CONFIG_ERROR, file=sys.stderr)
+ return 1
+
+ print(" creating %s" % cfg.versionfile_source)
+ with open(cfg.versionfile_source, "w") as f:
+ LONG = LONG_VERSION_PY[cfg.VCS]
+ f.write(LONG % {"DOLLAR": "$",
+ "STYLE": cfg.style,
+ "TAG_PREFIX": cfg.tag_prefix,
+ "PARENTDIR_PREFIX": cfg.parentdir_prefix,
+ "VERSIONFILE_SOURCE": cfg.versionfile_source,
+ })
+
+ ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
+ "__init__.py")
+ if os.path.exists(ipy):
try:
- with open(manifest_in, "r") as f:
- for line in f:
- if line.startswith("include "):
- for include in line.split()[1:]:
- simple_includes.add(include)
+ with open(ipy, "r") as f:
+ old = f.read()
except EnvironmentError:
- pass
- # That doesn't cover everything MANIFEST.in can do
- # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
- # it might give some false negatives. Appending redundant 'include'
- # lines is safe, though.
- if "versioneer.py" not in simple_includes:
- print(" appending 'versioneer.py' to MANIFEST.in")
- with open(manifest_in, "a") as f:
- f.write("include versioneer.py\n")
- else:
- print(" 'versioneer.py' already in MANIFEST.in")
- if versionfile_source not in simple_includes:
- print(" appending versionfile_source ('%s') to MANIFEST.in" %
- versionfile_source)
- with open(manifest_in, "a") as f:
- f.write("include %s\n" % versionfile_source)
+ old = ""
+ if INIT_PY_SNIPPET not in old:
+ print(" appending to %s" % ipy)
+ with open(ipy, "a") as f:
+ f.write(INIT_PY_SNIPPET)
else:
- print(" versionfile_source already in MANIFEST.in")
+ print(" %s unmodified" % ipy)
+ else:
+ print(" %s doesn't exist, ok" % ipy)
+ ipy = None
+
+ # Make sure both the top-level "versioneer.py" and versionfile_source
+ # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
+ # they'll be copied into source distributions. Pip won't be able to
+ # install the package without this.
+ manifest_in = os.path.join(root, "MANIFEST.in")
+ simple_includes = set()
+ try:
+ with open(manifest_in, "r") as f:
+ for line in f:
+ if line.startswith("include "):
+ for include in line.split()[1:]:
+ simple_includes.add(include)
+ except EnvironmentError:
+ pass
+ # That doesn't cover everything MANIFEST.in can do
+ # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
+ # it might give some false negatives. Appending redundant 'include'
+ # lines is safe, though.
+ if "versioneer.py" not in simple_includes:
+ print(" appending 'versioneer.py' to MANIFEST.in")
+ with open(manifest_in, "a") as f:
+ f.write("include versioneer.py\n")
+ else:
+ print(" 'versioneer.py' already in MANIFEST.in")
+ if cfg.versionfile_source not in simple_includes:
+ print(" appending versionfile_source ('%s') to MANIFEST.in" %
+ cfg.versionfile_source)
+ with open(manifest_in, "a") as f:
+ f.write("include %s\n" % cfg.versionfile_source)
+ else:
+ print(" versionfile_source already in MANIFEST.in")
- # Make VCS-specific changes. For git, this means creating/changing
- # .gitattributes to mark _version.py for export-time keyword
- # substitution.
- do_vcs_install(manifest_in, versionfile_source, ipy)
+ # Make VCS-specific changes. For git, this means creating/changing
+ # .gitattributes to mark _version.py for export-subst keyword
+ # substitution.
+ do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
+ return 0
-def get_cmdclass():
- cmds = {'version': cmd_version,
- 'versioneer': cmd_update_files,
- 'build': cmd_build,
- 'sdist': cmd_sdist,
- }
- if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
- cmds['build_exe'] = cmd_build_exe
- del cmds['build']
- return cmds
+def scan_setup_py():
+ """Validate the contents of setup.py against Versioneer's expectations."""
+ found = set()
+ setters = False
+ errors = 0
+ with open("setup.py", "r") as f:
+ for line in f.readlines():
+ if "import versioneer" in line:
+ found.add("import")
+ if "versioneer.get_cmdclass()" in line:
+ found.add("cmdclass")
+ if "versioneer.get_version()" in line:
+ found.add("get_version")
+ if "versioneer.VCS" in line:
+ setters = True
+ if "versioneer.versionfile_source" in line:
+ setters = True
+ if len(found) != 3:
+ print("")
+ print("Your setup.py appears to be missing some important items")
+ print("(but I might be wrong). Please make sure it has something")
+ print("roughly like the following:")
+ print("")
+ print(" import versioneer")
+ print(" setup( version=versioneer.get_version(),")
+ print(" cmdclass=versioneer.get_cmdclass(), ...)")
+ print("")
+ errors += 1
+ if setters:
+ print("You should remove lines like 'versioneer.VCS = ' and")
+ print("'versioneer.versionfile_source = ' . This configuration")
+ print("now lives in setup.cfg, and should be removed from setup.py")
+ print("")
+ errors += 1
+ return errors
+
+
+if __name__ == "__main__":
+ cmd = sys.argv[1]
+ if cmd == "setup":
+ errors = do_setup()
+ errors += scan_setup_py()
+ if errors:
+ sys.exit(1)