summaryrefslogtreecommitdiff
path: root/doc
diff options
context:
space:
mode:
authorPicca Frédéric-Emmanuel <picca@debian.org>2014-10-07 21:02:30 +0200
committerPicca Frédéric-Emmanuel <picca@debian.org>2014-10-07 21:02:30 +0200
commit51e44e224e0825f7b1d97f81870dab2cde0ed29c (patch)
treee822846c1be494e95cce9afa7df13fa3106f2c5a /doc
parentea3665143e82ab188e13521d2ad4686c80a1c0f2 (diff)
Imported Upstream version 0.8.0
Diffstat (limited to 'doc')
-rw-r--r--doc/Makefile14
-rw-r--r--doc/_images/conf_interval1.pngbin21741 -> 21021 bytes
-rw-r--r--doc/_images/conf_interval1a.pngbin19853 -> 18892 bytes
-rw-r--r--doc/_images/conf_interval2.pngbin16793 -> 16756 bytes
-rw-r--r--doc/_images/model_fit2a.pngbin0 -> 28079 bytes
-rw-r--r--doc/_images/models_peak1.pngbin161561 -> 32134 bytes
-rw-r--r--doc/_images/models_peak2.pngbin175732 -> 35125 bytes
-rw-r--r--doc/_images/models_peak3.pngbin159332 -> 33508 bytes
-rw-r--r--doc/_images/models_peak4.pngbin164721 -> 33004 bytes
-rw-r--r--doc/_templates/indexsidebar.html10
-rw-r--r--doc/_templates/layout.html58
-rw-r--r--doc/builtin_models.rst325
-rw-r--r--doc/conf.py16
-rw-r--r--doc/confidence.rst117
-rw-r--r--doc/constraints.rst36
-rw-r--r--doc/extensions.py10
-rw-r--r--doc/fitting.rst158
-rw-r--r--doc/index.rst77
-rw-r--r--doc/installation.rst58
-rw-r--r--doc/model.rst824
-rw-r--r--doc/parameters.rst129
-rw-r--r--doc/sphinx/ext_mathjax.py10
-rw-r--r--doc/sphinx/ext_pngmath.py10
-rw-r--r--doc/sphinx/mathjax/conf.py180
-rw-r--r--doc/sphinx/pngmath/conf.py180
-rw-r--r--doc/sphinx/theme/lmfitdoc/layout.html57
26 files changed, 1235 insertions, 1034 deletions
diff --git a/doc/Makefile b/doc/Makefile
index d432b5d..1c72ec9 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -6,8 +6,6 @@ SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
-JAXMATHCONF = sphinx/mathjax/conf.py
-PNGMATHCONF = sphinx/pngmath/conf.py
INSTALLDIR = /home/newville/public_html/lmfit/
@@ -20,25 +18,19 @@ ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: all install pdf
html:
- cp conf.py SAVEconf.py
- cp $(JAXMATHCONF) conf.py
+ cp sphinx/ext_mathjax.py extensions.py
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- cp SAVEconf.py conf.py
@echo
@echo "html build finished: $(BUILDDIR)/html."
htmlzip: html
- cp conf.py SAVEconf.py
- cp $(PNGMATHCONF) conf.py
+ cp sphinx/ext_pngmath.py extensions.py
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/lmfit_doc
- cp SAVEconf.py conf.py
cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc
epub:
- cp conf.py SAVEconf.py
- cp $(PNGMATHCONF) conf.py
+ cp sphinx/ext_pngmath.py extensions.py
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- cp SAVEconf.py conf.py
cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/.
pdf: latex
diff --git a/doc/_images/conf_interval1.png b/doc/_images/conf_interval1.png
index b2c8432..1115595 100644
--- a/doc/_images/conf_interval1.png
+++ b/doc/_images/conf_interval1.png
Binary files differ
diff --git a/doc/_images/conf_interval1a.png b/doc/_images/conf_interval1a.png
index 7e411fd..a5e30a2 100644
--- a/doc/_images/conf_interval1a.png
+++ b/doc/_images/conf_interval1a.png
Binary files differ
diff --git a/doc/_images/conf_interval2.png b/doc/_images/conf_interval2.png
index 98c3a46..4278493 100644
--- a/doc/_images/conf_interval2.png
+++ b/doc/_images/conf_interval2.png
Binary files differ
diff --git a/doc/_images/model_fit2a.png b/doc/_images/model_fit2a.png
new file mode 100644
index 0000000..a6b2458
--- /dev/null
+++ b/doc/_images/model_fit2a.png
Binary files differ
diff --git a/doc/_images/models_peak1.png b/doc/_images/models_peak1.png
index 79b49cf..c74adba 100644
--- a/doc/_images/models_peak1.png
+++ b/doc/_images/models_peak1.png
Binary files differ
diff --git a/doc/_images/models_peak2.png b/doc/_images/models_peak2.png
index 9fc6000..05d5b87 100644
--- a/doc/_images/models_peak2.png
+++ b/doc/_images/models_peak2.png
Binary files differ
diff --git a/doc/_images/models_peak3.png b/doc/_images/models_peak3.png
index 6b04fcf..706514d 100644
--- a/doc/_images/models_peak3.png
+++ b/doc/_images/models_peak3.png
Binary files differ
diff --git a/doc/_images/models_peak4.png b/doc/_images/models_peak4.png
index e1a732c..ce9ceb8 100644
--- a/doc/_images/models_peak4.png
+++ b/doc/_images/models_peak4.png
Binary files differ
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index 960a7b3..d0f0ac2 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -6,10 +6,20 @@
<p>Development version: <br>
&nbsp; &nbsp; <a href="https://github.com/lmfit/lmfit-py/">github.com</a> <br>
+<h3>Support and Feedback</h3>
+
+&nbsp; <a href="https://groups.google.com/group/lmfit-py"> Mailing List</a>
+
+<br>
+&nbsp; <a href="https://github.com/lmfit/lmfit-py/issues"> Issue Tracker</a>
+
<h3>Off-line Documentation</h3>
+
[<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.pdf">PDF</a>
|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.epub">EPUB</a>
|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit_doc.zip">HTML(zip)</a>
]
+
+
<hr>
<p>
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html
deleted file mode 100644
index 92a1cb0..0000000
--- a/doc/_templates/layout.html
+++ /dev/null
@@ -1,58 +0,0 @@
-{% extends "!layout.html" %}
-
-{%- block extrahead %}
- <script type="text/x-mathjax-config">
- MathJax.Hub.Config({
- "TeX": {Macros: {AA : "{\\unicode{x212B}}"}},
- "HTML-CSS": {scale: 90}
- });</script>
-{% endblock %}
-
-
-
-{% block rootrellink %}
- <li>[<a href="{{ pathto('intro') }}">intro</a>|</li>
- <li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
- <li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
- <li><a href="{{ pathto('model') }}"> model</a>|</li>
- <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
- <li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
- <li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
- <li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
-{% endblock %}
-
-{% block relbar1 %}
-<div>
-<table border=0>
- <tr><td></td><td width=75% padding=5 align=left>
- <a href="index.html" style="color: #157"> <font size=+2>LMFIT</font></a>
- </td><td></td>
- <td width=8% align=left>
- <a href="contents.html" style="color: #882222">
- <font size+=1>Contents</font></a> </td>
- <td width=8% align=left>
- <a href="installation.html" style="color: #882222">
- <font size+=1>Download</font></a></td>
- <td width=8% align=left>
- <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
- <font size+=1>Develop</font></a></td>
- </tr>
- <tr><td></td><td width=75% padding=5 align=left>
- <a href="index.html" style="color: #157"> <font size=+1>
- Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
- </td><td></td>
- <td width=8% align=left>
- <a href="intro.html" style="color: #882222">
- <font size+=1>Introduction</font></a> </td>
- <td width=8% align=left>
- <a href="parameters.html" style="color: #882222">
- <font size+=1>Parameters</font></a> </td>
- <td width=8% align=left>
- <a href="model.html" style="color: #882222">
- <font size+=1>Models</font></a> </td>
-
- </tr>
-</table>
-</div>
-{{ super() }}
-{% endblock %}
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index 9185311..a6d310e 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -1,8 +1,11 @@
.. _builtin_models_chapter:
-=================================================
-Built-in Fitting Models in the :mod:`models`
-=================================================
+=====================================================
+Built-in Fitting Models in the :mod:`models` module
+=====================================================
+
+.. module:: models
+
Lmfit provides several builtin fitting models in the :mod:`models` module.
These pre-defined models each subclass from the :class:`Model` class of the
@@ -11,12 +14,10 @@ Gaussians, Lorentzian, and Exponentials that are used in a wide range of
scientific domains. In fact, all the models are all based on simple, plain
python functions defined in the :mod:`lineshapes` module. In addition to
wrapping a function into a :class:`Model`, these models also provide a
-:meth:`guess_starting_values` method that is intended to give a reasonable
+:meth:`guess` method that is intended to give a reasonable
set of starting values from a data array that closely approximates the
data to be fit.
-.. module:: models
-
As shown in the previous chapter, a key feature of the :class:`Model` class
is that models can easily be combined to give a composite
:class:`Model`. Thus while some of the models listed here may seem pretty
@@ -29,9 +30,6 @@ example, a Lorentzian plus a linear background might be represented as::
>>> background = LinearModel()
>>> model = peak + background
-
-
-
All the models listed below are one dimensional, with an independent
variable named ``x``. Many of these models represent a function with a
distinct peak, and so share common features. To maintain uniformity,
@@ -49,7 +47,9 @@ Peak-like models
There are many peak-like models available. These include
:class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel` and
-some less commonly used variations.
+some less commonly used variations. The :meth:`guess`
+methods for all of these make a fairly crude guess for the value of
+``amplitude``, but also set a lower bound of 0 on the value of ``sigma``.
:class:`GaussianModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -144,7 +144,7 @@ in
\big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big] + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
-The :meth:`guess_starting_values` function always gives a starting
+The :meth:`guess` function always gives a starting
value for ``fraction`` of 0.5
:class:`Pearson7Model`
@@ -154,24 +154,17 @@ value for ``fraction`` of 0.5
A model based on a `Pearson VII distribution
<http://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution>`_.
-This is another Voigt-like distribution function. It has the usual
+This is a Lorenztian-like distribution function. It has the usual
parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also ``exponent`` (:math:`p`) in
-
-.. math::
-
- f(x; A, \mu, \sigma, p) = \frac{sA}{\big\{[1 + (\frac{x-\mu}{\sigma})^2] (2^{1/p} -1) \big\}^p}
-
-where
+``sigma`` (:math:`\sigma`), and also an ``exponent`` (:math:`m`) in
.. math::
- s = \frac{\Gamma(p) \sqrt{2^{1/p} -1}}{ \sigma\sqrt{\pi}\,\Gamma(p-1/2)}
-
-where :math:`\Gamma(x)` is the gamma function.
+ f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2} \bigr]^{-m}
-The :meth:`guess_starting_values` function always gives a starting
-value for ``exponent`` of 0.5.
+where :math:`\beta` is the beta function (see :func:`scipy.special.beta` in
+:mod:`scipy.special`). The :meth:`guess` function always
+gives a starting value for ``exponent`` of 1.5.
:class:`StudentsTModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -251,11 +244,32 @@ It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
\exp\bigl[\gamma({\mu - x + \sigma^2/2})\bigr]
- {\operatorname{erfc}}\bigl[\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\bigr]
+ {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
where :func:`erfc` is the complimentary error function.
+:class:`SkewedGaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: SkewedGaussianModel()
+
+A variation of the above model, this is a `Skewed normal distribution
+<http://en.wikipedia.org/wiki/Skew_normal_distribution>`_.
+It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
+
+.. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
+ e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
+ {\operatorname{erf}}\bigl[
+ \frac{\gamma(x-\mu)}{\sigma\sqrt{2\pi}}
+ \bigr] \Bigr\}
+
+
+where :func:`erf` is the error function.
+
:class:`DonaichModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -343,7 +357,8 @@ with parameters ``a``, ``b``, and ``c``.
f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0, 7} c_i x^i
with parameters ``c0``, ``c1``, ..., ``c7``. The supplied ``degree``
-will specify how many of these are actual variable parameters.
+will specify how many of these are actual variable parameters. This uses
+:func:`numpy.polyval` for its calculation of the polynomial.
@@ -438,10 +453,10 @@ form:
-Example 1: Fit Peaked data to Gaussian or Voigt profiles
-------------------------------------------------------------------
+Example 1: Fit Peaked data to Gaussian, Lorentzian, and Voigt profiles
+------------------------------------------------------------------------
-Here, we will fit data to two similar lineshapes, in order to decide which
+Here, we will fit data to three similar lineshapes, in order to decide which
might be the better model. We will start with a Gaussian profile, as in
the previous chapter, but use the built-in :class:`GaussianModel` instead
of one we write ourselves. This is a slightly different version from the
@@ -456,31 +471,34 @@ built-in default values. So, we'll simply use::
y = data[:, 1]
mod = GaussianModel()
- mod.guess_starting_values(y, x=x)
- out = mod.fit(y, x=x)
- print(mod.fit_report(min_correl=0.25))
+ pars = mod.guess(y, x=x)
+ out = mod.fit(y, pars, x=x)
+ print(out.fit_report(min_correl=0.25))
which prints out the results::
+ [[Model]]
+ gaussian
[[Fit Statistics]]
- # function evals = 25
+ # function evals = 21
# data points = 401
# variables = 3
chi-square = 29.994
reduced chi-square = 0.075
[[Variables]]
- amplitude: 30.31352 +/- 0.1571252 (0.52%) initial = 21.54192
- center: 9.242771 +/- 0.00737481 (0.08%) initial = 9.25
- fwhm: 2.901562 +/- 0.01736635 (0.60%) == '2.354820*sigma'
- sigma: 1.23218 +/- 0.00737481 (0.60%) initial = 1.35
+ amplitude: 30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
+ center: 9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
+ fwhm: 2.90156963 +/- 0.017366 (0.60%) == '2.3548200*sigma'
+ sigma: 1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
[[Correlations]] (unreported correlations are < 0.250)
C(amplitude, sigma) = 0.577
-We see a few interesting differences from the results of the previous
-chapter. First, the parameter names are longer. Second, there is a
-``fwhm``, defined as :math:`\sim 2.355\sigma`. And third, the automated
-initial guesses are pretty good. A plot of the fit shows not such a great
-fit:
+
+[We see a few interesting differences from the results of the previous
+ chapter. First, the parameter names are longer. Second, there is a
+ ``fwhm`` parameter, defined as :math:`\sim 2.355\sigma`. And third, the
+ automated initial guesses are pretty good. A plot of the fit shows not
+ such a great fit:
.. _figA1:
@@ -500,55 +518,60 @@ Perhaps a Lorentzian would be better? To do this, we simply replace
from lmfit.models import LorentzianModel
mod = LorentzianModel()
- mod.guess_starting_values(y, x=x)
- out = mod.fit(y, x=x)
- print(mod.fit_report(min_correl=0.25))
+ pars = mod.guess(y, x=x)
+ out = mod.fit(y, pars, x=x)
+ print(out.fit_report(min_correl=0.25))
-The results, or course, are worse::
+Predictably, the first thing we try gives results that are worse::
+ [[Model]]
+ lorentzian
[[Fit Statistics]]
- # function evals = 29
+ # function evals = 25
# data points = 401
# variables = 3
chi-square = 53.754
reduced chi-square = 0.135
[[Variables]]
- amplitude: 38.97278 +/- 0.3138612 (0.81%) initial = 21.54192
- center: 9.244389 +/- 0.009276152 (0.10%) initial = 9.25
- fwhm: 2.30968 +/- 0.02631297 (1.14%) == '2.0000000*sigma'
- sigma: 1.15484 +/- 0.01315648 (1.14%) initial = 1.35
+ amplitude: 38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
+ center: 9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
+ fwhm: 2.30969034 +/- 0.026312 (1.14%) == '2.0000000*sigma'
+ sigma: 1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
[[Correlations]] (unreported correlations are < 0.250)
C(amplitude, sigma) = 0.709
-with the plot shown in the figure above.
+with the plot shown on the right in the figure above.
A Voigt model does a better job. Using :class:`VoigtModel`, this is
as simple as::
- from lmfit.models import LorentzianModel
- mod = LorentzianModel()
- mod.guess_starting_values(y, x=x)
- out = mod.fit(y, x=x)
- print(mod.fit_report(min_correl=0.25))
+ from lmfit.models import VoigtModel
+ mod = VoigtModel()
+ pars = mod.guess(y, x=x)
+ out = mod.fit(y, pars, x=x)
+ print(out.fit_report(min_correl=0.25))
which gives::
+ [[Model]]
+ voigt
[[Fit Statistics]]
- # function evals = 30
+ # function evals = 17
# data points = 401
# variables = 3
chi-square = 14.545
reduced chi-square = 0.037
[[Variables]]
- amplitude: 35.75536 +/- 0.1386167 (0.39%) initial = 21.54192
- center: 9.244111 +/- 0.005055079 (0.05%) initial = 9.25
- fwhm: 2.629512 +/- 0.01326999 (0.50%) == '3.6013100*sigma'
- gamma: 0.7301542 +/- 0.003684769 (0.50%) == 'sigma'
- sigma: 0.7301542 +/- 0.003684769 (0.50%) initial = 1.35
+ amplitude: 35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
+ center: 9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
+ fwhm: 2.62951718 +/- 0.013269 (0.50%) == '3.6013100*sigma'
+ gamma: 0.73015574 +/- 0.003684 (0.50%) == 'sigma'
+ sigma: 0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
[[Correlations]] (unreported correlations are < 0.250)
C(amplitude, sigma) = 0.651
+
with the much better value for :math:`\chi^2` and the obviously better
match to the data as seen in the figure below (left).
@@ -572,48 +595,52 @@ the ``gamma`` parameter from a constrained expression and give it a
starting value::
mod = VoigtModel()
- mod.guess_starting_values(y, x=x)
- mod.params['gamma'].expr = None
- mod.params['gamma'].value = 0.7
+ pars = mod.guess(y, x=x)
+ pars['gamma'].set(value=0.7, vary=True, expr='')
- out = mod.fit(y, x=x)
- print(mod.fit_report(min_correl=0.25))
+ out = mod.fit(y, pars, x=x)
+ print(out.fit_report(min_correl=0.25))
which gives::
+ [[Model]]
+ voigt
[[Fit Statistics]]
- # function evals = 32
+ # function evals = 21
# data points = 401
# variables = 4
chi-square = 10.930
reduced chi-square = 0.028
[[Variables]]
- amplitude: 34.19147 +/- 0.1794683 (0.52%) initial = 21.54192
- center: 9.243748 +/- 0.00441902 (0.05%) initial = 9.25
- fwhm: 3.223856 +/- 0.05097446 (1.58%) == '3.6013100*sigma'
- gamma: 0.5254013 +/- 0.01857953 (3.54%) initial = 0.7
- sigma: 0.8951898 +/- 0.01415442 (1.58%) initial = 1.35
+ amplitude: 34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
+ center: 9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
+ fwhm: 3.22385491 +/- 0.050974 (1.58%) == '3.6013100*sigma'
+ gamma: 0.52540157 +/- 0.018579 (3.54%) (init= 0.7)
+ sigma: 0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
[[Correlations]] (unreported correlations are < 0.250)
C(amplitude, gamma) = 0.821
-and the fit shown above (on the right).
+
+and the fit shown on the right above.
Comparing the two fits with the Voigt function, we see that :math:`\chi^2`
-is definitely better with a separately varying ``gamma`` parameter. In
+is definitely improved with a separately varying ``gamma`` parameter. In
addition, the two values for ``gamma`` and ``sigma`` differ significantly
-- well outside the estimated uncertainties. Even more compelling, reduced
:math:`\chi^2` is improved even though a fourth variable has been added to
-the fit, justifying it as a significant variable in the model.
+the fit. In the simplest statistical sense, this suggests that ``gamma``
+is a significant variable in the model.
This example shows how easy it can be to alter and compare fitting models
-for simple problems.
+for simple problems. The example is included in the ``doc_peakmodels.py``
+file in the examples directory.
+
Example 2: Fit data to a Composite Model with pre-defined models
------------------------------------------------------------------
-
Here, we repeat the point made at the end of the last chapter that instances
of :class:`Model` class can be added them together to make a *composite
model*. But using the large number of built-in models available, this is
@@ -623,26 +650,34 @@ constant:
.. literalinclude:: ../examples/doc_stepmodel.py
After constructing step-like data, we first create a :class:`StepModel`
-telling it to use the ``erf`` form (see details below), and a
+telling it to use the ``erf`` form (see details above), and a
:class:`ConstantModel`. We set initial values, in one case using the data
-and :meth:`guess_starting_values` method, and using the explicit
-:meth:`set_paramval` for the initial constant value. Making a composite
-model, we run :meth:`fit` and report the results, which give::
+and :meth:`guess` method for the intial step function paramaters, and
+:meth:`make_params` arguments for the linear component.
+After making a composite model, we run :meth:`fit` and report the
+results, which give::
+
+ [[Model]]
+ Composite Model:
+ step(prefix='step_',form='erf')
+ linear(prefix='line_')
[[Fit Statistics]]
- # function evals = 52
+ # function evals = 49
# data points = 201
- # variables = 4
- chi-square = 600.191
- reduced chi-square = 3.047
+ # variables = 5
+ chi-square = 633.465
+ reduced chi-square = 3.232
[[Variables]]
- amplitude: 111.1106 +/- 0.3122441 (0.28%) initial = 115.3431
- c: 11.31151 +/- 0.2631688 (2.33%) initial = 9.278188
- center: 3.122191 +/- 0.00506929 (0.16%) initial = 5
- sigma: 0.6637199 +/- 0.009799607 (1.48%) initial = 1.428571
+ line_intercept: 11.5685248 +/- 0.285611 (2.47%) (init= 10.72406)
+ line_slope: 2.03270159 +/- 0.096041 (4.72%) (init= 0)
+ step_amplitude: 112.270535 +/- 0.674790 (0.60%) (init= 136.3006)
+ step_center: 3.12343845 +/- 0.005370 (0.17%) (init= 2.5)
+ step_sigma: 0.67468813 +/- 0.011336 (1.68%) (init= 1.428571)
[[Correlations]] (unreported correlations are < 0.100)
- C(c, center) = 0.381
- C(amplitude, sigma) = 0.381
+ C(step_amplitude, step_sigma) = 0.564
+ C(line_intercept, step_center) = 0.428
+ C(step_amplitude, step_center) = 0.109
with a plot of
@@ -669,52 +704,49 @@ involving a decaying exponential and two gaussians.
where we give a separate prefix to each model (they all have an
``amplitude`` parameter). The ``prefix`` values are attached transparently
-to the models. Note that the calls to :meth:`set_paramval` used the bare
-name, without the prefix. We could have used them, but because we used
-the individual model ``gauss1`` and ``gauss2``, there was no need. Had we
-used the composite model to set the initial parameter values, we would have
-needed to, as with::
+to the models.
- ## WRONG
- mod.set_paramval('amplitude', 500, min=10)
+MN----: Note that the calls to :meth:`make_param` used the bare
+name, without the prefix. We could have used them, but because we used the
+individual model ``gauss1`` and ``gauss2``, there was no need.
- ## Raises KeyError: "'amplitude' not a parameter name"
-
- ## Correct
- mod.set_paramval('g1_amplitude', 501, min=10)
+Note also in the example here that we explicitly set bounds on many of the
+parameter values.
The fit results printed out are::
+ [[Model]]
+ Composite Model:
+ gaussian(prefix='g1_')
+ gaussian(prefix='g2_')
+ exponential(prefix='exp_')
[[Fit Statistics]]
- # function evals = 66
+ # function evals = 55
# data points = 250
# variables = 8
chi-square = 1247.528
reduced chi-square = 5.155
[[Variables]]
- exp_amplitude: 99.01833 +/- 0.5374884 (0.54%) initial = 162.2102
- exp_decay: 90.95088 +/- 1.103105 (1.21%) initial = 93.24905
- g1_amplitude: 4257.774 +/- 42.38366 (1.00%) initial = 500
- g1_center: 107.031 +/- 0.1500691 (0.14%) initial = 105
- g1_fwhm: 39.26092 +/- 0.3779083 (0.96%) == '2.354820*g1_sigma'
- g1_sigma: 16.67258 +/- 0.1604829 (0.96%) initial = 12
- g2_amplitude: 2493.417 +/- 36.16923 (1.45%) initial = 500
- g2_center: 153.2701 +/- 0.194667 (0.13%) initial = 150
- g2_fwhm: 32.51287 +/- 0.4398624 (1.35%) == '2.354820*g2_sigma'
- g2_sigma: 13.80695 +/- 0.1867924 (1.35%) initial = 12
- [[Correlations]] (unreported correlations are < 0.100)
+ exp_amplitude: 99.0183291 +/- 0.537487 (0.54%) (init= 162.2102)
+ exp_decay: 90.9508788 +/- 1.103104 (1.21%) (init= 93.24905)
+ g1_amplitude: 4257.77384 +/- 42.38354 (1.00%) (init= 2000)
+ g1_center: 107.030955 +/- 0.150068 (0.14%) (init= 105)
+ g1_fwhm: 39.2609205 +/- 0.377907 (0.96%) == '2.3548200*g1_sigma'
+ g1_sigma: 16.6725781 +/- 0.160482 (0.96%) (init= 15)
+ g2_amplitude: 2493.41747 +/- 36.16907 (1.45%) (init= 2000)
+ g2_center: 153.270103 +/- 0.194665 (0.13%) (init= 155)
+ g2_fwhm: 32.5128760 +/- 0.439860 (1.35%) == '2.3548200*g2_sigma'
+ g2_sigma: 13.8069474 +/- 0.186791 (1.35%) (init= 15)
+ [[Correlations]] (unreported correlations are < 0.500)
C(g1_amplitude, g1_sigma) = 0.824
C(g2_amplitude, g2_sigma) = 0.815
C(g1_sigma, g2_center) = 0.684
C(g1_amplitude, g2_center) = 0.648
C(g1_center, g2_center) = 0.621
C(g1_center, g1_sigma) = 0.507
- C(g1_amplitude, g1_center) = 0.418
- C(exp_amplitude, g2_amplitude) = 0.282
- C(exp_amplitude, g2_sigma) = 0.171
- C(exp_amplitude, g1_amplitude) = 0.148
- C(exp_decay, g1_center) = 0.105
+
+
We get a very good fit to this challenging problem (described at the NIST
site as of average difficulty, but the tests there are generally hard) by
@@ -732,9 +764,12 @@ on the parameter values. This fit is shown on the left:
One final point on setting initial values. From looking at the data
-itself, we can see the two Gaussian peaks are reasonably well centered. We
-can simplify the initial parameter values by using this, and by defining an
-:func:`index_of` function to limit the data range. That is, with::
+itself, we can see the two Gaussian peaks are reasonably well separated but
+do overlap. Furthermore, we can tell that the initial guess for the
+decaying exponential component was poorly estimated because we used the
+full data range. We can simplify the initial parameter values by using
+this, and by defining an :func:`index_of` function to limit the data range.
+That is, with::
def index_of(arrval, value):
"return index of array *at or below* value "
@@ -745,13 +780,19 @@ can simplify the initial parameter values by using this, and by defining an
ix2 = index_of(x, 135)
ix3 = index_of(x, 175)
- exp_mod.guess_starting_values(y[:ix1], x=x[:ix1])
- gauss1.guess_starting_values(y[ix1:ix2], x=x[ix1:ix2])
- gauss2.guess_starting_values(y[ix2:ix3], x=x[ix2:ix3])
+ exp_mod.guess(y[:ix1], x=x[:ix1])
+ gauss1.guess(y[ix1:ix2], x=x[ix1:ix2])
+ gauss2.guess(y[ix2:ix3], x=x[ix2:ix3])
we can get a better initial estimate, and the fit converges in fewer steps,
-and without any bounds on parameters::
-
+getting to identical values (to the precision printed out in the report),
+and without any bounds on parameters at all::
+
+ [[Model]]
+ Composite Model:
+ gaussian(prefix='g1_')
+ gaussian(prefix='g2_')
+ exponential(prefix='exp_')
[[Fit Statistics]]
# function evals = 46
# data points = 250
@@ -759,16 +800,16 @@ and without any bounds on parameters::
chi-square = 1247.528
reduced chi-square = 5.155
[[Variables]]
- exp_amplitude: 99.01833 +/- 0.5374875 (0.54%) initial = 94.53724
- exp_decay: 90.95089 +/- 1.103105 (1.21%) initial = 111.1985
- g1_amplitude: 4257.773 +/- 42.38338 (1.00%) initial = 2126.432
- g1_center: 107.031 +/- 0.1500679 (0.14%) initial = 106.5
- g1_fwhm: 39.26091 +/- 0.3779053 (0.96%) == '2.354820*g1_sigma'
- g1_sigma: 16.67258 +/- 0.1604816 (0.96%) initial = 14.5
- g2_amplitude: 2493.418 +/- 36.16948 (1.45%) initial = 1878.892
- g2_center: 153.2701 +/- 0.1946675 (0.13%) initial = 150
- g2_fwhm: 32.51288 +/- 0.4398666 (1.35%) == '2.354820*g2_sigma'
- g2_sigma: 13.80695 +/- 0.1867942 (1.35%) initial = 15
+ exp_amplitude: 99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
+ exp_decay: 90.9508863 +/- 1.103105 (1.21%) (init= 111.1985)
+ g1_amplitude: 4257.77321 +/- 42.38338 (1.00%) (init= 2126.432)
+ g1_center: 107.030954 +/- 0.150067 (0.14%) (init= 106.5)
+ g1_fwhm: 39.2609141 +/- 0.377905 (0.96%) == '2.3548200*g1_sigma'
+ g1_sigma: 16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
+ g2_amplitude: 2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
+ g2_center: 153.270100 +/- 0.194667 (0.13%) (init= 150)
+ g2_fwhm: 32.5128777 +/- 0.439866 (1.35%) == '2.3548200*g2_sigma'
+ g2_sigma: 13.8069481 +/- 0.186794 (1.35%) (init= 15)
[[Correlations]] (unreported correlations are < 0.500)
C(g1_amplitude, g1_sigma) = 0.824
C(g2_amplitude, g2_sigma) = 0.815
@@ -778,5 +819,9 @@ and without any bounds on parameters::
C(g1_center, g1_sigma) = 0.507
+
This example is in the file ``doc_nistgauss2.py`` in the examples folder,
-and the fit result shown on the right above.
+and the fit result shown on the right above shows an improved initial
+estimate of the data.
+
+
diff --git a/doc/conf.py b/doc/conf.py
index 451458e..63e0dfa 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -22,12 +22,7 @@ sys.path.append(os.path.abspath(os.path.join('.')))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.todo',
- 'sphinx.ext.coverage',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.intersphinx',
- 'numpydoc']
+from extensions import extensions
try:
import IPython.sphinxext.ipython_directive
@@ -38,8 +33,9 @@ except ImportError:
intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
- 'numpy': ('http://scipy.org/docs/numpy/', None),
- 'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
+ 'numpy': ('http://docs.scipy.org/doc/numpy/', None),
+ 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
+ }
intersphinx_cache_limit = 10
@@ -150,8 +146,8 @@ html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
-html_use_modindex = False
-#html_use_index = True
+html_domain_indices = False
+html_use_index = True
#html_split_index = False
# If true, links to the reST sources are added to the pages.
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 75bcae3..6c0cdab 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -1,12 +1,12 @@
Calculation of confidence intervals
====================================
-.. py:module:: confidence
+.. module:: confidence
-Since version `0.5`, lmfit is also capable of calculating the confidence
-intervals directly. For most models, it is not necessary: the estimation
-of the standard error from the estimated covariance matrix is normally quite
-good.
+The lmfit :mod:`confidence` module allows you to explicitly calculate
+confidence intervals for variable parameters. For most models, it is not
+necessary: the estimation of the standard error from the estimated
+covariance matrix is normally quite good.
But for some models, e.g. a sum of two exponentials, the approximation
begins to fail. For this case, lmfit has the function :func:`conf_interval`
@@ -17,6 +17,7 @@ are more robust.
Method used for calculating confidence intervals
-------------------------------------------------
+
The F-test is used to compare our null model, which is the best fit we have
found, with an alternate model, where one of the parameters is fixed to a
specific value. The value is changed until the difference between :math:`\chi^2_0`
@@ -32,77 +33,93 @@ N is the number of data-points, P the number of parameter of the null model.
difference of number of parameters between our null model and the alternate
model).
-A log-likelihood method will be added soon.
+Adding a log-likelihood method is under consideration.
A basic example
---------------
-First we create a toy problem::
-
+First we create an example problem::
>>> import lmfit
>>> import numpy as np
>>> x = np.linspace(0.3,10,100)
>>> y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
>>> p = lmfit.Parameters()
- >>> p.add_many(('a',0.1),('b',1))
+ >>> p.add_many(('a', 0.1), ('b', 1))
>>> def residual(p):
... a = p['a'].value
... b = p['b'].value
... return 1/(a*x)+b-y
-We have to fit it, before we can generate the confidence intervals::
+Before we can generate the confidence intervals, we have to run a fit, so
+that the automated estimate of the standard errors can be used as a
+starting point::
>>> mi = lmfit.minimize(residual, p)
- >>> mi.leastsq()
>>> lmfit.printfuncs.report_fit(mi.params)
- [[Variables]]
- a: 0.09978076 +/- 0.0002112132 (0.21%) initial = 0.09978076
- b: 1.992907 +/- 0.0132743 (0.67%) initial = 1.992907
+ [[Variables]]
+ a: 0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
+ b: 1.98476945 +/- 0.012226 (0.62%) (init= 1)
[[Correlations]] (unreported correlations are < 0.100)
C(a, b) = 0.601
-
-Now it just a simple function call to start the calculation::
+Now it is just a simple function call to calculate the confidence
+intervals::
>>> ci = lmfit.conf_interval(mi)
>>> lmfit.printfuncs.report_ci(ci)
99.70% 95.00% 67.40% 0.00% 67.40% 95.00% 99.70%
- a 0.09960 0.09981 0.10000 0.10019 0.10039 0.10058 0.10079
- b 1.97035 1.98326 1.99544 2.00008 2.01936 2.03154 2.04445
-
-
-As we can see, the estimated error is almost the same, and the
-uncertainties are well behaved: Going from 1 :math:`\sigma` (68%
-confidence) to 3 :math:`\sigma` (99.7% confidence) uncertainties is fairly
-linear. For this problem, it is not necessary to calculate confidence
-intervals, and the estimates of the uncertainties from the covariance
-matrix are sufficient.
+ a 0.09886 0.09905 0.09925 0.09944 0.09963 0.09982 0.10003
+ b 1.94751 1.96049 1.97274 1.97741 1.99680 2.00905 2.02203
+
+This shows the best-fit values for the parameters in the `0.00%` column,
+and parameter values that are at the varying confidence levels given by
+steps in :math:`\sigma`. As we can see, the estimated error is almost the
+same, and the uncertainties are well behaved: Going from 1 :math:`\sigma`
+(68% confidence) to 3 :math:`\sigma` (99.7% confidence) uncertainties is
+fairly linear. It can also be seen that the errors are fairly symmetric
+around the best fit value. For this problem, it is not necessary to
+calculate confidence intervals, and the estimates of the uncertainties from
+the covariance matrix are sufficient.
An advanced example
-------------------
Now we look at a problem where calculating the error from approximated
-covariance can lead to misleading results::
+covariance can lead to misleading results -- two decaying exponentials. In
+fact such a problem is particularly hard for the Levenberg-Marquardt
+method, so we first estimate the results using the slower but robust
+Nelder-Mead method, and *then* use Levenberg-Marquardt to estimate the
+uncertainties and correlations::
+
- >>> y = 3*np.exp(-x/2.)-5*np.exp(-x/10.)+0.2*np.random.randn(x.size)
+ >>> x = np.linspace(1, 10, 250)
+ >>> np.random.seed(0)
+ >>> y = 3.0*np.exp(-x/2) -5.0*np.exp(-(x-0.1)/10.) + 0.1*np.random.randn(len(x))
+ >>>
>>> p = lmfit.Parameters()
- >>> p.add_many(('a1', 5), ('a2', -5), ('t1', 2), ('t2', 5))
+ >>> p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3.))
+ >>>
>>> def residual(p):
- ... a1, a2, t1, t2 = [i.value for i in p.values()]
- ... return a1*np.exp(-x/t1)+a2*np.exp(-x/t2)-y
-
+ ... v = p.valuesdict()
+ ... return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1)/v['t2'])-y
+ >>>
+ >>> # first solve with Nelder-Mead
+ >>> mi = lmfit.minimize(residual, p, method='Nelder')
+ >>> # then solve with Levenberg-Marquardt
>>> mi = lmfit.minimize(residual, p)
- >>> mi.leastsq()
- >>> lmfit.printfuncs.report_fit(mi.params, show_correl=False)
+ >>>
+ >>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
[[Variables]]
- a1: 2.611013 +/- 0.3279648 (12.56%) initial = 2.611013
- a2: -4.512928 +/- 0.3991997 (8.85%) initial = -4.512928
- t1: 1.569477 +/- 0.3345078 (21.31%) initial = 1.569477
- t2: 10.96137 +/- 1.263874 (11.53%) initial = 10.96137
+ a1: 2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
+ a2: -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
+ t1: 1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
+ t2: 11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
+ [[Correlations]] (unreported correlations are < 0.500)
+ C(a2, t2) = 0.987
Again we call :func:`conf_interval`, this time with tracing and only for 1-
@@ -110,17 +127,19 @@ and 2 :math:`\sigma`::
>>> ci, trace = lmfit.conf_interval(mi, sigmas=[0.68,0.95], trace=True, verbose=False)
>>> lmfit.printfuncs.report_ci(ci)
+
95.00% 68.00% 0.00% 68.00% 95.00%
- a1 2.11679 2.33696 2.61101 3.06631 4.28694
- a2 -6.39449 -5.05982 -4.20173 -4.19528 -3.97850
- t2 8.00414 9.62688 12.17331 12.17886 13.34857
- t1 1.07009 1.28482 1.37407 1.97509 2.64341
+ a1 2.71850 2.84525 2.98622 3.14874 3.34076
+ a2 -4.63180 -4.46663 -4.35429 -4.22883 -4.14178
+ t2 10.82699 11.33865 11.78219 12.28195 12.71094
+ t1 1.08014 1.18566 1.38044 1.45566 1.62579
+
Comparing these two different estimates, we see that the estimate for `a1`
-is reasonable well approximated from the covariance matrix, but the
-estimates for `a2`, `t1`, and `t2` are very asymmetric and that going from
-1 :math:`\sigma` (68% confidence) to 2 :math:`\sigma` (95% confidence) is
-not very predictable.
+is reasonably well approximated from the covariance matrix, but the
+estimates for `a2` and especially for `t1`, and `t2` are very asymmetric
+and that going from 1 :math:`\sigma` (68% confidence) to 2 :math:`\sigma`
+(95% confidence) is not very predictable.
Now let's plot a confidence region::
@@ -147,7 +166,11 @@ which shows the figure on the left below for ``a1`` and ``t2``, and for
Neither of these plots is very much like an ellipse, which is implicitly
assumed by the approach using the covariance matrix.
-Remember the trace? It shows also shows the dependence between two parameters::
+The trace returned as the optional second argument from
+:func:`conf_interval` contains a dictionary for each variable parameter.
+The values are dictionaries with arrays of values for each variable, and an
+array of corresponding probabilities for the cumulative variables. This
+can be used to show the dependence between two parameters::
>>> x, y, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
>>> x2, y2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
diff --git a/doc/constraints.rst b/doc/constraints.rst
index ee5ca88..7d06b8d 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -6,14 +6,32 @@ Using Mathematical Constraints
.. _asteval: http://newville.github.io/asteval/
-While being able to fix variables and place upper and lower bounds on their
-values are key parts of lmfit, the ability to place mathematical
-constraints on parameters is also highly desirable. This section describes
-how to do this, and what sort of parameterizations are possible -- see
-the `asteval`_ for further documentation.
+Being able to fix variables to a constant value or place upper and lower
+bounds on their values can greatly simplify modeling real data. These
+capabilities are key to lmfit's Parameters. In addition, it is sometimes
+highly desirable to place mathematical constraints on parameter values.
+For example, one might want to require that two Gaussian peaks have the
+same width, or have amplitudes that are constrained to add to some value.
+Of course, one could rewrite the objective or model function to place such
+requirements, but this is somewhat error prone, and limits the flexibility
+so that exploring constraints becomes laborious.
+
+To simplify the setting of constraints, Parameters can be assigned a
+mathematical expression of other Parameters, builtin constants, and builtin
+mathematical functions that will be used to determine its value. The
+expressions used for constraints are evaluated using the `asteval`_ module,
+which uses Python syntax, and evaluates the constraint expressions in a safe
+and isolated namespace.
+
+This approach to mathematical constraints allows one to not have to write a
+separate model function for two Gaussians where the two ``sigma`` values are
+forced to be equal, or where amplitudes are related. Instead, one can write a
+more general two Gaussian model (perhaps using :class:`GaussianModel`) and
+impose such constraints on the Parameters for a particular fit.
+
Overview
-===========
+===============
Just as one can place bounds on a Parameter, or keep it fixed during the
fit, so too can one place mathematical constraints on parameters. The way
@@ -136,13 +154,13 @@ The `asteval`_ interpreter uses a flat namespace, implemented as a single
dictionary. That means you can preload any Python symbol into the namespace
for the constraints::
- def lorentzian(x, amp, cen, wid):
+ def mylorentzian(x, amp, cen, wid):
"lorentzian function: wid = half-width at half-max"
return (amp / (1 + ((x-cen)/wid)**2))
fitter = Minimizer()
- fitter.asteval.symtable['lorenztian'] = lorenztian
+ fitter.asteval.symtable['lorentzian'] = mylorentzian
-and this :meth:`lorenztian` function can now be used in constraint
+and this :meth:`lorentzian` function can now be used in constraint
expressions.
diff --git a/doc/extensions.py b/doc/extensions.py
new file mode 100644
index 0000000..40de659
--- /dev/null
+++ b/doc/extensions.py
@@ -0,0 +1,10 @@
+# sphinx extensions for mathjax
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.intersphinx',
+ 'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(mathjax)
diff --git a/doc/fitting.rst b/doc/fitting.rst
index eb56637..6bd8aa5 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -4,30 +4,31 @@
Performing Fits, Analyzing Outputs
=======================================
-As shown in the introduction, a simple fit can be performed with
-the :func:`minimize` function. For more sophisticated modeling,
-the :class:`Minimizer` class can be used to gain a bit more control,
-especially when using complicated constraints.
+As shown in the previous chapter, a simple fit can be performed with the
+:func:`minimize` function. For more sophisticated modeling, the
+:class:`Minimizer` class can be used to gain a bit more control, especially
+when using complicated constraints.
The :func:`minimize` function
===============================
-The minimize function takes a function to minimize, a dictionary of
-:class:`Parameter` , and several optional arguments. See
-:ref:`fit-func-label` for details on writing the function to minimize.
+The minimize function takes an objective function (the function that
+calculates the array to be minimized), a :class:`Parameters` ordered
+dictionary, and several optional arguments. See :ref:`fit-func-label` for
+details on writing the function to minimize.
.. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **leastsq_kws]]]]]])
- find values for the params so that the sum-of-squares of the returned array
- from function is minimized.
+ find values for the ``params`` so that the sum-of-squares of the array returned
+ from ``function`` is minimized.
:param function: function to return fit residual. See :ref:`fit-func-label` for details.
:type function: callable.
- :param params: a dictionary of Parameters. Keywords must be strings
+ :param params: a :class:`Parameters` dictionary. Keywords must be strings
that match ``[a-z_][a-z0-9_]*`` and is not a python
reserved word. Each value must be :class:`Parameter`.
- :type params: dict
+ :type params: dict or :class:`Parameters`.
:param args: arguments tuple to pass to the residual function as positional arguments.
:type args: tuple
:param kws: dictionary to pass to the residual function as keyword arguments.
@@ -48,15 +49,20 @@ The minimize function takes a function to minimize, a dictionary of
appropriate, estimated uncertainties and correlations. See
:ref:`fit-results-label` for further details.
+ If provided, the ``iter_cb`` function should take arguments of ``params,
+ iter, resid, *args, **kws``, where ``params`` will have the current
+ parameter values, ``iter`` the iteration, ``resid`` the current residual
+ array, and ``*args`` and ``**kws`` as passed to the objective function.
+
.. _fit-func-label:
Writing a Fitting Function
===============================
-An important component of a fit is writing a function to be minimized in
-the least-squares sense. Since this function will be called by other
+An important component of a fit is writing a function to be minimized --
+the *objective function*. Since this function will be called by other
routines, there are fairly stringent requirements for its call signature
-and return value. In principle, your function can be any python callable,
+and return value. In principle, your function can be any python callable,
but it must look like this:
.. function:: func(params, *args, **kws):
@@ -85,16 +91,17 @@ method, effectively doing a least-squares optimization of the return values.
Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable
-to unpack these to get numerical values at the top of the function. A simple example
-would look like::
+to unpack these to get numerical values at the top of the function. A
+simple way to do this is with :meth:`Parameters.valuesdict`, as with::
+
def residual(pars, x, data=None, eps=None):
# unpack parameters:
# extract .value attribute for each parameter
- amp = pars['amp'].value
- period = pars['period'].value
- shift = pars['shift'].value
- decay = pars['decay'].value
+ parvals = pars.valuesdict()
+ period = parvals['period']
+ shift = parvals['shift']
+ decay = parvals['decay']
if abs(shift) > pi/2:
shift = shift - sign(shift)*pi
@@ -102,7 +109,7 @@ would look like::
if abs(period) < 1.e-10:
period = sign(period)*1.e-10
- model = amp * sin(shift + x/period) * exp(-x*x*decay*decay)
+ model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
if data is None:
return model
@@ -110,20 +117,23 @@ would look like::
return (model - data)
return (model - data)/eps
-In this example, ``x`` is a positional (required) argument, while the ``data``
-array is actually optional (so that the function returns the model calculation
-if the data is neglected). Also note that the model calculation will divide
-``x`` by the varied value of the 'period' Parameter. It might be wise to
-make sure this parameter cannot be 0. It would be possible to use the bounds
-on the :class:`Parameter` to do this::
+In this example, ``x`` is a positional (required) argument, while the
+``data`` array is actually optional (so that the function returns the model
+calculation if the data is neglected). Also note that the model
+calculation will divide ``x`` by the value of the 'period' Parameter. It
+might be wise to ensure this parameter cannot be 0. It would be possible
+to use the bounds on the :class:`Parameter` to do this::
params['period'] = Parameter(value=2, min=1.e-10)
-but might be wiser to put this directly in the function with::
+but putting this directly in the function with::
if abs(period) < 1.e-10:
period = sign(period)*1.e-10
+is also a reasonable approach. Similarly, one could place bounds on the
+``decay`` parameter to take values only between ``-pi/2`` and ``pi/2``.
+
.. _fit-methods-label:
Choosing Different Fitting Methods
@@ -137,16 +147,10 @@ being fast, and well-behaved for most curve-fitting needs, and making it
easy to estimate uncertainties for and correlations between pairs of fit
variables, as discussed in :ref:`fit-results-label`.
-Alternative algorithms can also be used. These include `simulated annealing
-<http://en.wikipedia.org/wiki/Simulated_annealing>`_ which promises a
-better ability to avoid local minima, and `BFGS
-<http://en.wikipedia.org/wiki/Limited-memory_BFGS>`_, which is a
-modification of the quasi-Newton method.
-
-To select which of these algorithms to use, use the ``method`` keyword to the
-:func:`minimize` function or use the corresponding method name from the
-:class:`Minimizer` class as listed in the
-:ref:`Table of Supported Fitting Methods <fit-methods-table>`.
+Alternative algorithms can also be used by providing the ``method`` keyword
+to the :func:`minimize` function or use the corresponding method name from
+the :class:`Minimizer` class as listed in the :ref:`Table of Supported
+Fitting Methods <fit-methods-table>`.
.. _fit-methods-table:
@@ -162,8 +166,6 @@ To select which of these algorithms to use, use the ``method`` keyword to the
+-----------------------+--------------------+---------------------+-------------------------+
| L-BFGS-B | ``lbfgsb`` | :meth:`lbfgsb` | ``L-BFGS-B`` |
+-----------------------+--------------------+---------------------+-------------------------+
- | Simulated Annealing | ``anneal`` | :meth:`anneal` | ``Anneal`` |
- +-----------------------+--------------------+---------------------+-------------------------+
| Powell | ``powell`` | | ``Powell`` |
+-----------------------+--------------------+---------------------+-------------------------+
| Conjugate Gradient | ``cg`` | | ``CG`` |
@@ -172,16 +174,20 @@ To select which of these algorithms to use, use the ``method`` keyword to the
+-----------------------+--------------------+---------------------+-------------------------+
| COBYLA | ``cobyla`` | | ``COBYLA`` |
+-----------------------+--------------------+---------------------+-------------------------+
+ | COBYLA | ``cobyla`` | | ``COBYLA`` |
+ +-----------------------+--------------------+---------------------+-------------------------+
+ | Truncated Newton | ``tnc`` | | ``TNC`` |
+ +-----------------------+--------------------+---------------------+-------------------------+
+ | Trust Newton-CG | ``trust-ncg`` | | ``trust-ncg`` |
+ +-----------------------+--------------------+---------------------+-------------------------+
+ | Dogleg | ``dogleg`` | | ``dogleg`` |
+ +-----------------------+--------------------+---------------------+-------------------------+
| Sequential Linear | ``slsqp`` | | ``SLSQP`` |
| Squares Programming | | | |
+-----------------------+--------------------+---------------------+-------------------------+
.. note::
- Use of :meth:`scipy.optimize.minimize` requires scipy 0.11 or higher.
-
-.. note::
-
The objective function for the Levenberg-Marquardt method **must**
return an array, with more elements than variables. All other methods
can return either a scalar value or an array.
@@ -189,14 +195,11 @@ To select which of these algorithms to use, use the ``method`` keyword to the
.. warning::
- The Levenberg-Marquardt method is *by far* the most tested fit method,
- and much of this documentation assumes that this is the method used. For
- example, many of the fit statistics and estimates for uncertainties in
- parameters discussed in :ref:`fit-results-label` are done only for the
- ``leastsq`` method.
+ Much of this documentation assumes that the Levenberg-Marquardt method is
+ the method used. Many of the fit statistics and estimates for
+ uncertainties in parameters discussed in :ref:`fit-results-label` are
+ done only for this method.
-In particular, the simulated annealing method appears to not work
-correctly.... understanding this is on the ToDo list.
.. _fit-results-label:
@@ -273,6 +276,7 @@ near the maximum or minimum value makes the covariance matrix singular. In
these cases, the :attr:`errorbars` attribute of the fit result
(:class:`Minimizer` object) will be ``False``.
+.. module:: Minimizer
.. _fit-minimizer-label:
@@ -426,53 +430,21 @@ Getting and Printing Fit Reports
print text of report from :func:`fit_report`.
-An example fit with an error report::
-
- p_true = Parameters()
- p_true.add('amp', value=14.0)
- p_true.add('period', value=5.33)
- p_true.add('shift', value=0.123)
- p_true.add('decay', value=0.010)
-
- def residual(pars, x, data=None):
- amp = pars['amp'].value
- per = pars['period'].value
- shift = pars['shift'].value
- decay = pars['decay'].value
-
- if abs(shift) > pi/2:
- shift = shift - sign(shift)*pi
- model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
- if data is None:
- return model
- return (model - data)
-
- n = 2500
- xmin = 0.
- xmax = 250.0
- noise = random.normal(scale=0.7215, size=n)
- x = linspace(xmin, xmax, n)
- data = residual(p_true, x) + noise
- fit_params = Parameters()
- fit_params.add('amp', value=13.0)
- fit_params.add('period', value=2)
- fit_params.add('shift', value=0.0)
- fit_params.add('decay', value=0.02)
+An example fit with report would be
- out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+.. literalinclude:: ../examples/doc_withreport.py
- fit = residual(fit_params, x)
- report_errors(fit_params)
+which would write out::
-would generate this report::
[[Variables]]
- amp: 13.969724 +/- 0.050145 (0.36%) initial = 13.000000
- decay: 0.009990 +/- 0.000042 (0.42%) initial = 0.020000
- period: 5.331423 +/- 0.002788 (0.05%) initial = 2.000000
- shift: 0.125333 +/- 0.004938 (3.94%) initial = 0.000000
+ amp: 13.9121944 +/- 0.141202 (1.01%) (init= 13)
+ decay: 0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
+ period: 5.48507044 +/- 0.026664 (0.49%) (init= 2)
+ shift: 0.16203677 +/- 0.014056 (8.67%) (init= 0)
[[Correlations]] (unreported correlations are < 0.100)
- C(period, shift) = 0.800
- C(amp, decay) = 0.576
+ C(period, shift) = 0.797
+ C(amp, decay) = 0.582
+
diff --git a/doc/index.rst b/doc/index.rst
index 0167344..9356c84 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -5,53 +5,42 @@ Non-Linear Least-Square Minimization and Curve-Fitting for Python
.. _Levenberg-Marquardt: http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
.. _MINPACK-1: http://en.wikipedia.org/wiki/MINPACK
-.. _Nelder-Mead: http://en.wikipedia.org/wiki/Nelder-Mead_method
-
-The lmfit python package provides a simple and flexible interface to
-non-linear optimization and curve fitting problems. Lmfit extends the
-optimization capabilities of :mod:`scipy.optimize`. Initially designed to
-extend the the `Levenberg-Marquardt`_ algorithm in
-:func:`scipy.optimize.minimize.leastsq`, lmfit supports most of the
-optimization methods from :mod:`scipy.optimize`. It also provides a simple
-way to apply this extension to *curve fitting* problems.
-
-The key concept in lmfit is that instead of using plain floating pointing
-values for the variables to be optimized (as all the optimization routines
-in :mod:`scipy.optimize` use), optimizations are done using
-:class:`Parameter` objects. A :class:`Parameter` can have its value fixed
-or varied, have upper and/or lower bounds placed on its value, or have
-values that are evaluated from algebraic expressions of other Parameter
-values. This is all done outside the optimization routine, so that these
-bounds and constraints can be applied to **all** optimization routines from
-:mod:`scipy.optimize`, and with a more Pythonic interface than any of the
-routines that do provide bounds.
-
-By using :class:`Parameter` objects instead of plain variables, the
-objective function does not have to be rewritten to reflect every change of
-what is varied in the fit, or if relationships or constraints are placed on
-the Parameters. This simplifies the writing of models, and gives the user
-more flexibility in using and testing variations of that model.
-
-
-Lmfit supports several of the optimization methods from
-:mod:`scipy.optimize`. The default, and by far best tested optimization
-method used (and the origin of the name) is the `Levenberg-Marquardt`_
-algorithm of :func:`scipy.optimize.leastsq` and
-:func:`scipy.optimize.curve_fit`. Much of this document assumes this
-algorithm is used unless explicitly stated. An important point for many
-scientific analysis is that this is only method that automatically
-estimates uncertainties and correlations between fitted variables from the
-covariance matrix calculated during the fit. Because the approach derived
-from `MINPACK-1`_ using the covariance matrix to determine uncertainties is
-sometimes questioned (and sometimes rightly so), lmfit supports methods to
-do a brute force search of the confidence intervals and correlations for
-sets of parameters.
+
+
+Lmfit provides a high-level interface to non-linear optimization and
+curve fitting problems for Python. Lmfit builds on the
+`Levenberg-Marquardt`_ algorithm of :func:`scipy.optimize.leastsq`, but
+also supports most of the optimization methods from :mod:`scipy.optimize`.
+It has a number of useful enhancements, including:
+
+ * Using :class:`Parameter` objects instead of plain floats as variables.
+ A :class:`Parameter` has a value that can be varied in the fit, fixed,
+ or have upper and/or lower bounds. It can even have a value that is
+ constrained by an algebraic expression of other Parameter values.
+
+ * Ease of changing fitting algorithms. Once a fitting model is set up,
+ one can change the fitting algorithm without changing the objective
+ function.
+
+ * Improved estimation of confidence intervals. While
+ :func:`scipy.optimize.leastsq` will automatically calculate
+ uncertainties and correlations from the covariance matrix, lmfit also
+ has functions to explicitly explore parameter space to determine
+ confidence levels even for the most difficult cases.
+
+ * Improved curve-fitting with the :class:`Model` class. This
+ extends the capabilities of :func:`scipy.optimize.curve_fit`, allowing
+ you to turn a function that models your data into a python class
+ that helps you parametrize and fit data with that model.
+
+ * Many :ref:`pre-built models <builtin_models_chapter>` for common
+ lineshapes are included and ready to use.
.. _lmfit github repository: http://github.com/lmfit/lmfit-py
-The lmfit package is an open-source project, and this document are a works
-in progress. If you are interested in participating in this effort please
-use the `lmfit github repository`_.
+The lmfit package is Free software, using an MIT license. The software and
+this document are works in progress. If you are interested in
+participating in this effort please use the `lmfit github repository`_.
.. toctree::
diff --git a/doc/installation.rst b/doc/installation.rst
index 2c5ed93..9a90056 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -2,25 +2,27 @@
Downloading and Installation
====================================
+.. _lmfit github repository: http://github.com/lmfit/lmfit-py
+.. _Python Setup Tools: http://pypi.python.org/pypi/setuptools
+.. _pip: https://pip.pypa.io/
+.. _nose: http://nose.readthedocs.org/
+
Prerequisites
~~~~~~~~~~~~~~~
The lmfit package requires Python, Numpy, and Scipy. Scipy version 0.13 or
higher is recommended, but extensive testing on compatibility with various
-versions of scipy has not been done. Lmfit does work with Python 2.7, 3.2,
-and 3.3. No testing has been done with Python 3.4, but as the package is
-pure Python, relying only on scipy and numpy, no significant troubles are
-expected. Nose is required for running the test suite, and IPython and
-matplotib are recommended. If Pandas is available, it will be used in
-portions of lmfit.
+versions of scipy has not been done. Lmfit does work with Python 2.7, and
+3.2 and 3.3. No testing has been done with Python 3.4, but as the package
+is pure Python, relying only on scipy and numpy, no significant troubles
+are expected. The `nose`_ framework is required for running the test
+suite, and IPython and matplotlib are recommended. If Pandas is available,
+it will be used in portions of lmfit.
Downloads
~~~~~~~~~~~~~
-.. _lmfit github repository: http://github.com/lmfit/lmfit-py
-.. _Python Setup Tools: http://pypi.python.org/pypi/setuptools
-.. _pip: https://pip.pypa.io/
The latest stable version of lmfit is available from `PyPi <http://pypi.python.org/pypi/lmfit/>`_.
@@ -54,29 +56,43 @@ and install using::
python setup.py install
+
+Testing
+~~~~~~~~~~
+
+A battery of tests scripts that can be run with the `nose`_ testing
+framework is distributed with lmfit in the ``tests`` folder. These are
+routinely run on the development version. Running ``nosetests`` should run
+all of these tests to completion without errors or failures.
+
+Many of the examples in this documentation are distributed with lmfit in
+the ``examples`` folder, and should also run for you. Many of these require
+
+
Acknowledgements
~~~~~~~~~~~~~~~~~~
LMFIT was originally written by Matthew Newville. Substantial code and
documentation improvements, especially for improved estimates of confidence
-intervals was provided by Till Stensitzki. The implementation of parameter
-bounds as described in the MINUIT documentation is taken from Jonathan
-J. Helmus' leastsqbound code, with permission. The code for propagation of
-uncertainties is taken from Eric O. Le Bigot's uncertainties package, with
-permission. Much of the work on improved unit testing and high-level model
-functions was done by Daniel B. Allen. Many valuable suggestions for
-improvements have come from Christoph Deil. The code obviously depends on,
-and owes a very large debt to the code in scipy.optimize. Several
-discussions on the scipy mailing lists have also led to improvements in
-this code.
+intervals was provided by Till Stensitzki. Much of the work on improved
+unit testing and high-level model functions was done by Daniel B. Allen,
+with substantial input from Antonino Ingargiola. Many valuable suggestions
+for improvements have come from Christoph Deil. The implementation of
+parameter bounds as described in the MINUIT documentation is taken from
+Jonathan J. Helmus' leastsqbound code, with permission. The code for
+propagation of uncertainties is taken from Eric O. Le Bigot's uncertainties
+package, with permission. The code obviously depends on, and owes a very
+large debt to the code in scipy.optimize. Several discussions on the scipy
+mailing lists have also led to improvements in this code.
License
~~~~~~~~~~~~~
The LMFIT-py code is distribution under the following license:
- Copyright (c) 2012 Matthew Newville, The University of Chicago
- Till Stensitzki, Freie Universitat Berlin
+ Copyright (c) 2014 Matthew Newville, The University of Chicago, Till
+ Stensitzki, Freie Universitat Berlin, Daniel B. Allen, Johns Hopkins
+ University, Antonino Ingargiola, University of California, Los Angeles
Permission to use and redistribute the source code or binary forms of this
software and its documentation, with or without modification is hereby
diff --git a/doc/model.rst b/doc/model.rst
index c741fb9..7ece86d 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -4,27 +4,32 @@
Modeling Data and Curve Fitting
=================================================
-A very common application of least-squares minimization is *curve fitting*,
-where one has a parametrized model function meant to explain some
-phenomena, and wants to adjust the numerical values for the model to
-most closely match some particular data. Within the :mod:`scipy` world,
-such curve fitting problems are commonly solved with
-:func:`scipy.optimize.curve_fit`, which simply calls
-:func:`scipy.optimize.leastsq`. As lmfit is a high-level wrapper around
-:func:`scipy.optimize.leastsq`, it can be used for curve-fitting problems,
-but here we discuss an even easier way to do it that is closer in spirit to
-:func:`scipy.optimize.curve_fit`, but better.
-
-The :class:`Model` class makes it easy to turn a model function that
-calculates a model for your data into a fitting model. In an effort to
-make simple things truly simple, lmfit also provides canonical definitions
-for many known lineshapes such as Gaussian or Lorentzian peaks and
-Exponential decays that are widely used in many scientific domains. These
-are available in the :mod:`models` module that will be discussed in more
-detail in the next chapter (:ref:`builtin_models_chapter`). We mention it
-here as you may want to consult that list before writing your own model.
-For now, we focus on turning python function into high-level fitting models
-with the :class:`Model` class, and using these to fit data.
+
+A common use of least-squares minimization is *curve fitting*, where one
+has a parametrized model function meant to explain some phenomena and wants
+to adjust the numerical values for the model to most closely match some
+data. With :mod:`scipy`, such problems are commonly solved with
+:func:`scipy.optimize.curve_fit`, which is a wrapper around
+:func:`scipy.optimize.leastsq`. Since Lmfit's :func:`minimize` is also a
+high-level wrapper around :func:`scipy.optimize.leastsq` it can be used for
+curve-fitting problems, but requires more effort than using
+:func:`scipy.optimize.curve_fit`.
+
+Here we discuss lmfit's :class:`Model` class. This takes a model function
+-- a function that calculates a model for some data -- and provides methods
+to create parameters for that model and to fit data using that model
+function. This is closer in spirit to :func:`scipy.optimize.curve_fit`,
+but with the advantages of using :class:`Parameters` and lmfit.
+
+In addition to allowing you to turn any model function into a curve-fitting
+method, Lmfit also provides canonical definitions for many known lineshapes
+such as Gaussian or Lorentzian peaks and Exponential decays that are widely
+used in many scientific domains. These are available in the :mod:`models`
+module that will be discussed in more detail in the next chapter
+(:ref:`builtin_models_chapter`). We mention it here as you may want to
+consult that list before writing your own model. For now, we focus on
+turning a python function into high-level fitting models with the
+:class:`Model` class, and using these to fit data.
Example: Fit data to Gaussian profile
@@ -33,18 +38,17 @@ Example: Fit data to Gaussian profile
Let's start with a simple and common example of fitting data to a Gaussian
peak. As we will see, there is a buit-in :class:`GaussianModel` class that
provides a model function for a Gaussian profile, but here we'll build our
-own. We start with a definition the model function that we might want to
-use to fit to some data::
+own. We start with a simple definition of the model function:
>>> from numpy import sqrt, pi, exp, linspace
>>>
>>> def gaussian(x, amp, cen, wid):
- ... "1-d gaussian: gaussian(x, amp, cen, wid)"
- ... return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+ ... return amp * exp(-(x-cen)**2 /wid)
...
-To some data :math:`y(x)` represented by the arrays ``y`` and ``x`` with we
-would do something like::
+that we want to use to fit to some data :math:`y(x)` represented by the
+arrays ``y`` and ``x``. Using :func:`scipy.optimize.curve_fit` makes this
+easy to do, allowing us to do something like::
>>> from scipy.optimize import curve_fit
>>>
@@ -56,61 +60,75 @@ would do something like::
That is, we read in data from somewhere, make an initial guess of the model
-values, and run ``curve_fit`` with the model function, data arrays, and
-initial guesses. The results returned are the optimal values for the
-parameters and the covariance matrix. It's pretty simple to do, but
-misses many of the key benefits of lmfit.
+values, and run :func:`scipy.optimize.curve_fit` with the model function,
+data arrays, and initial guesses. The results returned are the optimal
+values for the parameters and the covariance matrix. It's simple and very
+useful. But it misses the benefits of lmfit.
-To solve this with lmfit we could write a residual function but such a
-residual function would be fairly simple (essentially, ``data - model``,
+To solve this with lmfit we would have to write an objective function. But
+such a function would be fairly simple (essentially, ``data - model``,
possibly with some weighting), and we would need to define and use
-appropriately named parameters. Though convenient, it also becomes
-somewhat of a burden to keep all the parameter names straight. After doing
-this a few times it appears as a recurring pattern, and we can imagine
-automating this process. That's where the :class:`Model` class comes in.
-We can pass this class the ``gaussian`` function, and it will automatically
-generate the appropriate residual function and the corresponding parameters
-from the function signature itself::
+appropriately named parameters. Though convenient, it is somewhat of a
+burden to keep the named parameters straight (on the other hand, with
+:func:`scipy.optimize.curve_fit` you are required to remember the parameter
+order). After doing this a few times it appears as a recurring pattern,
+and we can imagine automating this process. That's where the
+:class:`Model` class comes in.
+
+The :class:`Model` allows us to easily wrap a model function such as the
+``gaussian`` function.  This automatically generates the appropriate
+residual function, and determines the corresponding parameter names from
+the function signature itself::
>>> from lmfit import Model
>>> gmod = Model(gaussian)
- >>> for name, par in gmod.params.items():
- ... print(name, par)
- ...
- 'amp', <Parameter 'amp', None, bounds=[None:None]>
- 'wid', <Parameter 'wid', None, bounds=[None:None]>
- 'cen', <Parameter 'cen', None, bounds=[None:None]>
- >>> print("Independent Variables: ", gmod.independent_vars)
- 'Independent Variables: ', ['x']
-
-The Model ``gmod`` is constructed to have a ``params`` member that holds the
-:class:`Parameters` for the model, and an ``independent_vars`` that holds
-the name of the independent variables. By default, the first argument of
-the function is taken as the independent variable, and the rest of the
-parameters are used for variable Parameters. Thus, for the ``gaussian``
+ >>> gmod.param_names
+ set(['amp', 'wid', 'cen'])
+ >>> gmod.independent_vars
+ ['x']
+
+The Model ``gmod`` knows the names of the parameters and the independent
+variables. By default, the first argument of the function is taken as the
+independent variable, held in :attr:`independent_vars`, and the rest of the
+function's positional arguments (and, in certain cases, keyword arguments --
+see below) are used for Parameter names. Thus, for the ``gaussian``
function above, the parameters are named ``amp``, ``cen``, and ``wid``, and
-``x`` is the independent variable -- taken directly from the signature of
-the model function.
+``x`` is the independent variable -- all taken directly from the signature
+of the model function. As we will see below, you can specify what the
+independent variable is, and you can add or alter parameters too.
+
+On creation of the model, parameters are *not* created. The model knows
+what the parameters should be named, but not anything about the scale and
+range of your data. You will normally have to make these parameters and
+assign initial values and other attributes.  To help you do this, each
+model has a :meth:`make_params` method that will generate parameters with
+the expected names:
+
+ >>> params = gmod.make_params()
-On creation of the model, the parameters are not initialized (the values
-are all ``None``), and will need to be given initial values before the
-model can be used. This can be done in one of two ways, or a mixture of
-the two. First, the initial values for the models parameters can be set
-explicitly, as with:
+This creates the :class:`Parameters` but doesn't necessarily give them
+initial values -- again, the model has no idea what the scale should be.
+You can set initial values for parameters with keyword arguments to
+:meth:`make_params`, as with:
- >>> gmod.params['amp'].value = 10.0
-and so on. This is also useful to setting parameter bounds and so forth.
-Alternatively, one can use the :meth:`eval` method (to evaluate the model)
-or the :meth:`fit` method (to fit data to this model) with explicit keyword
-arguments for the parameter values. For example, one could use
-:meth:`eval` to calculate the predicted function::
+ >>> params = gmod.make_params(cen=5, amp=200, wid=1)
+
+or assign them (and other parameter properties) after the
+:class:`Parameters` has been created.
+
+A :class:`Model` has several methods associated with it. For example, one
+can use the :meth:`eval` method to evaluate the model or the :meth:`fit`
+method to fit data to this model with a :class:`Parameter` object. Both of
+these methods can take explicit keyword arguments for the parameter values.
+For example, one could use :meth:`eval` to calculate the predicted
+function::
>>> x = linspace(0, 10, 201)
>>> y = gmod.eval(x=x, amp=10, cen=6.2, wid=0.75)
-So far, this is a slightly long-winded way to calculate a Gaussian
+Admittedly, this is a slightly long-winded way to calculate a Gaussian
function. But now that the model is set up, we can also use its
:meth:`fit` method to fit this model to data, as with::
@@ -121,19 +139,28 @@ Putting everything together, the script to do such a fit (included in the
.. literalinclude:: ../examples/doc_model1.py
-which is pretty compact and to the point. Of course, the parameter in the
-returned ``result`` have pulled apart the covariance matrix, so that the
-results printed out are::
+which is pretty compact and to the point. The returned ``result`` will be
+a :class:`ModelFit` object. As we will see below, this has many
+components, including a :meth:`fit_report` method, which will show::
+ [[Model]]
+ gaussian
+ [[Fit Statistics]]
+ # function evals = 33
+ # data points = 101
+ # variables = 3
+ chi-square = 3.409
+ reduced chi-square = 0.035
[[Variables]]
- amp: 8.880218 +/- 0.1135949 (1.28%) initial = 5
- cen: 5.658661 +/- 0.01030495 (0.18%) initial = 5
- wid: 0.6976547 +/- 0.01030495 (1.48%) initial = 1
- [[Correlations]] (unreported correlations are < 0.250)
- C(amp, wid) = 0.577
-
+ amp: 8.88021829 +/- 0.113594 (1.28%) (init= 5)
+ cen: 5.65866102 +/- 0.010304 (0.18%) (init= 5)
+ wid: 0.69765468 +/- 0.010304 (1.48%) (init= 1)
+ [[Correlations]] (unreported correlations are < 0.100)
+ C(amp, wid) = 0.577
-and the plot generated gives:
+The result will also have :attr:`init_fit` for the fit with the initial
+parameter values and a :attr:`best_fit` for the fit with the best fit
+parameter values. These can be used to generate the following plot:
.. image:: _images/model_fit1.png
@@ -141,83 +168,60 @@ and the plot generated gives:
:width: 50%
which shows the data in blue dots, the best fit as a solid red line, and
-the initial fit in black dashed line.
+the initial fit as a dashed black line.
We emphasize here that the fit to this model function was really performed
-with 2 lines of code. These lines clearly express that we want to turn the
-``gaussian`` function into a fitting model, and then fit the :math:`y(x)`
-data to this model, starting with values of 5 for ``amp``, 5 for ``cen``
-and 1 for ``wid``::
+with 2 lines of code::
gmod = Model(gaussian)
result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
-which compares well to :func:`scipy.optimize.curve_fit`::
+These lines clearly express that we want to turn the ``gaussian`` function
+into a fitting model, and then fit the :math:`y(x)` data to this model,
+starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``, and
+compare well to :func:`scipy.optimize.curve_fit`::
best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1])
-except that all the other features of lmfit are included.
-
-Some model functions may be more complicated than the Gaussian function
-here. We'll discuss these below, but for now we've shown that at least the
-wrapping of a simple model function for curve fitting is easy.
+except that all the other features of lmfit are included such as that the
+:class:`Parameters` can have bounds and constraints and the result is a
+richer object that can be reused to explore the fit in more detail.
+.. module:: model
The :class:`Model` class
=======================================
-.. module:: model
-
The :class:`Model` class provides a general way to wrap a pre-defined
function as a fitting model.
-.. class:: Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix='' [, components=None]]]]])
+.. class:: Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix='' [, name=None[, **kws]]]]]])
Create a model based on the user-supplied function. This uses
introspection to automatically converting argument names of the
function to Parameter names.
- :param func: function to be wrapped
+ :param func: model function to be wrapped
:type func: callable
:param independent_vars: list of argument names to ``func`` that are independent variables.
:type independent_vars: ``None`` (default) or list of strings.
:param param_names: list of argument names to ``func`` that should be made into Parameters.
:type param_names: ``None`` (default) or list of strings
:param missing: how to handle missing values.
- :type missing: one of ``None`` (default), 'drop', or 'raise'
+ :type missing: one of ``None`` (default), 'none', 'drop', or 'raise'.
:param prefix: prefix to add to all parameter names to distinguish components.
:type prefix: string
- :param components: list of model components for a composite fit (usually handled internally).
- :type components: ``None`` or default.
+ :param name: name for the model. When ``None`` (default) the name is the same as the model function (``func``).
+ :type name: ``None`` or string.
+ :param kws: additional keyword arguments to pass to model function.
-Methods and Attributes of the :class:`Model` class
-----------------------------------------------------
-
-.. method:: guess_starting_values(data, **kws)
-
- by default this is left to raise a ``NotImplementedError``, but may be
- overwritten by subclasses. Generally, this method should take some
- values for ``data`` and use it to construct reasonable starting values for
- the parameters.
-
-.. method:: set_paramval(parname, value[, min=None[, max=None[, vary=True]]])
-
- set the value for a named parameter. This is convenient for setting
- initial values. The ``parname`` can include the models ``prefix`` or
- not.
-
- :param parname: parameter name.
- :type parname: string
- :param value: value for parameter
- :type value: float
- :param min: lower bound for parameter value
- :type min: ``None`` or float
- :param max: upper bound for parameter value
- :type max: ``None`` or float
- :param vary: whether to vary parameter in fit.
- :type vary: boolean
+ Of course, the model function will have to return an array that will be
+ the same size as the data being modeled. Generally this is handled by
+ also specifying one or more independent variables.
+:class:`Model` class Methods
+---------------------------------
.. method:: eval(params=None[, **kws])
@@ -225,19 +229,23 @@ Methods and Attributes of the :class:`Model` class
:param params: parameters to use for fit.
:type params: ``None`` (default) or Parameters
-
+ :param kws: additional keyword arguments to pass to model function.
:return: ndarray for model given the parameters and other arguments.
- If ``params`` is ``None``, the internal ``params`` will be used.
+ If ``params`` is ``None``, the values for all parameters are expected to
+ be provided as keyword arguments. If ``params`` is given, and a keyword
+ argument for a parameter value is also given, the keyword argument will
+ be used.
- Note that all other arguments for the model function (including all the
- independent variables!) will need to be passed in using keyword
- arguments.
+ Note that all non-parameter arguments for the model function --
+ **including all the independent variables!** -- will need to be passed
+ in using keyword arguments.
.. method:: fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]])
- perform a fit of the model to the ``data`` array.
+ perform a fit of the model to the ``data`` array with a set of
+ parameters.
:param data: array of data to be fitted.
:type data: ndarray-like
@@ -251,8 +259,10 @@ Methods and Attributes of the :class:`Model` class
:type scale_covar: bool (default ``True``)
:param iter_cb: function to be called at each fit iteration
:type iter_cb: callable or ``None``
-
- :return: fit result object.
+ :param verbose: print a message when a new parameter is created due to a *hint*
+ :type verbose: bool (default ``True``)
+ :param kws: additional keyword arguments to pass to model function.
+ :return: :class:`ModelFit` object.
If ``params`` is ``None``, the internal ``params`` will be used. If it
is supplied, these will replace the internal ones. If supplied,
@@ -263,57 +273,79 @@ Methods and Attributes of the :class:`Model` class
independent variables!) will need to be passed in using keyword
arguments.
- The result returned from :meth:`fit` will contains all of the items
- returned from :func:`minimize` (see :ref:`Table of Fit Results
- <goodfit-table>` plus those listed in the :ref:`Table of Model Fit results <modelfit-table>`
-.. method:: fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
+.. method:: guess(data, **kws)
- return result of :func:`fit_report` after completing :meth:`fit`.
+ Guess starting values for model parameters.
+ :param data: data array used to guess parameter values
+ :type func: ndarray
+ :param kws: additional options to pass to model function.
+ :return: :class:`Parameters` with guessed initial values for each parameter.
-.. _modelfit-table:
+ by default this is left to raise a ``NotImplementedError``, but may be
+ overwritten by subclasses. Generally, this method should take some
+ values for ``data`` and use it to construct reasonable starting values for
+ the parameters.
-Table of Model Fit Results: These values are included in the return value
-from :meth:`Model.fit`, in addition to the standard Goodness-of-Fit
-statistics and fit results given in :ref:`Table of Fit Results
-<goodfit-table>`.
- +----------------------------+------------------------------------------------------+
- | result attribute | Description / Formula |
- +============================+======================================================+
- | ``init_params`` | initial set of parameters |
- +----------------------------+------------------------------------------------------+
- | ``init_fit`` | initial estimate of fit to data |
- +----------------------------+------------------------------------------------------+
- | ``best_fit`` | final estimate of fit to data |
- +----------------------------+------------------------------------------------------+
+.. method:: make_params(**kws)
+ Create a set of parameters for model.
-.. attribute:: independent_vars
+ :param kws: optional keyword/value pairs to set initial values for parameters.
+ :return: :class:`Parameters`.
- list of strings for independent variables.
+ The parameters may or may not have decent initial values for each
+ parameter.
-.. attribute:: param_names
- list of strings of parameter names.
+.. method:: set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]])
-.. attribute:: params
+ set *hints* to use when creating parameters with :meth:`make_params` for
+ the named parameter. This is especially convenient for setting initial
+ values. The ``name`` can include the models ``prefix`` or not.
- :class:`Parameters` object for the model
+ :param name: parameter name.
+ :type name: string
+ :param value: value for parameter
+ :type value: float
+ :param min: lower bound for parameter value
+ :type min: ``None`` or float
+ :param max: upper bound for parameter value
+ :type max: ``None`` or float
+ :param vary: whether to vary parameter in fit.
+ :type vary: boolean
+ :param expr: mathematical expression for constraint
+ :type expr: string
-.. attribute:: prefix
+ See :ref:`model_param_hints_section`.
- prefix used for name-mangling of parameter names. The default is ''.
- If a particular :class:`Model` has arguments ``amplitude``,
- ``center``, and ``sigma``, these would become the parameter names.
- Using a prefix of ``g1_`` would convert these parameter names to
- ``g1_amplitude``, ``g1_center``, and ``g1_sigma``. This can be
- essential to avoid name collision in composite models.
+:class:`Model` class Attributes
+---------------------------------
+
+.. attribute:: components
+
+ a list of instances of :class:`Model` that make up a *composite model*.
+ See :ref:`composite_models_section`. Normally, you will not need to use
+ this, but is used by :class:`Model` itself when constructing a composite
+ model from two or more models.
+
+.. attribute:: func
+
+ The model function used to calculate the model.
+
+.. attribute:: independent_vars
+
+ list of strings for names of the independent variables.
+
+.. attribute:: is_composite
+
+ Boolean value for whether model is a composite model.
.. attribute:: missing
- what to do for missing values. The choices are
+ describes what to do for missing values. The choices are
* ``None``: Do not check for null or missing values (default)
* ``'none'``: Do not check for null or missing values.
@@ -322,11 +354,32 @@ statistics and fit results given in :ref:`Table of Fit Results
* ``'raise'``: Raise a (more helpful) exception when data contains null
or missing values.
-.. attribute:: components
+.. attribute:: name
- a list of instances of :class:`Model` that make up a composite model.
- Normally, you will not need to use this, but is used my :class:`Model`
- itself when constructing a composite model (that is adding models together).
+ name of the model, used only in the string representation of the
+ model. By default this will be taken from the model function.
+
+.. attribute:: opts
+
+ extra keyword arguments to pass to model function. Normally this will
+ be determined internally and should not be changed.
+
+.. attribute:: param_hints
+
+ Dictionary of parameter hints. See :ref:`model_param_hints_section`.
+
+.. attribute:: param_names
+
+ list of strings of parameter names.
+
+.. attribute:: prefix
+
+ prefix used for name-mangling of parameter names. The default is ''.
+ If a particular :class:`Model` has arguments ``amplitude``,
+ ``center``, and ``sigma``, these would become the parameter names.
+ Using a prefix of ``g1_`` would convert these parameter names to
+ ``g1_amplitude``, ``g1_center``, and ``g1_sigma``. This can be
+ essential to avoid name collision in composite models.
Determining parameter names and independent variables for a function
@@ -351,20 +404,11 @@ on Parameters, or fix their values.
-More Details on building models from functions
-============================================================
-
-
-Here we explore some of the variations of building a :class:`Model` from a
-user-defined function that didn't get mentioned in the example above for
-the Gaussian model.
-
-
Explicitly specifying ``independent_vars``
-------------------------------------------------
-As for the example above of the Gaussian model, creating a :class:`Model`
-from a function is fairly easy::
+As we saw for the Gaussian example above, creating a :class:`Model` from a
+function is fairly easy. Let's try another::
>>> def decay(t, tau, N):
... return N*np.exp(-t/tau)
@@ -378,12 +422,12 @@ from a function is fairly easy::
tau <Parameter 'tau', None, bounds=[None:None]>
N <Parameter 'N', None, bounds=[None:None]>
-Here, ``t`` is assumed to be the independent variable because it comes
-first, and that the other function arguments are used to create the
-remaining parameters are created from the other parameters.
+Here, ``t`` is assumed to be the independent variable because it is the
+first argument to the function. The other function arguments are used to
+create parameters for the model.
-If you wanted ``tau`` to be the independent variable in the above example,
-you would just do this::
+If you want ``tau`` to be the independent variable in the above example,
+you can say so::
>>> decay_model = Model(decay, independent_vars=['tau'])
>>> print decay_model.independent_vars
@@ -395,12 +439,27 @@ you would just do this::
N <Parameter 'N', None, bounds=[None:None]>
+You can also supply multiple values for multi-dimensional functions with
+multiple independent variables. In fact, the meaning of *independent
+variable* here is simple, and based on how it treats arguments of the
+function you are modeling:
+
+independent variable
+ a function argument that is not a parameter or otherwise part of the
+ model, and that will be required to be explicitly provided as a
+ keyword argument for each fit with :meth:`fit` or evaluation
+ with :meth:`eval`.
+
+Note that independent variables are not required to be arrays, or even
+floating point numbers.
+
+
Functions with keyword arguments
-----------------------------------------
If the model function had keyword parameters, these would be turned into
Parameters if the supplied default value was a valid number (but not
-``None``).
+``None``, ``True``, or ``False``).
>>> def decay2(t, tau, N=10, check_positive=False):
... if check_small:
@@ -421,7 +480,10 @@ into a parameter, with the default numerical value as its initial value.
By default, it is permitted to be varied in the fit -- the 10 is taken as
an initial value, not a fixed value. On the other hand, the
``check_positive`` keyword argument, was not converted to a parameter
-because it has a boolean default value.
+because it has a boolean default value. In some sense,
+``check_positive`` becomes like an independent variable to the model.
+However, because it has a default value it is not required to be given for
+each model evaluation or fit, as independent variables are.
Defining a ``prefix`` for the Parameters
--------------------------------------------
@@ -449,46 +511,292 @@ You would refer to these parameters as ``f1_amplitude`` and so forth, and
the model will know to map these to the ``amplitude`` argument of ``myfunc``.
-More on initialing model parameters
+Initializing model parameters
-----------------------------------------
-As mentioned above, the parameters created by :class:`Model` are generally
-created with invalid initial values of ``None``. These values must be
-initialized in order for the model to be evaluated or used in a fit. There
-are three ways to do this initialization that can be used in any
-combination:
+As mentioned above, the parameters created by :meth:`Model.make_params` are
+generally created with invalid initial values of ``None``. These values
+**must** be initialized in order for the model to be evaluated or used in a
+fit. There are four different ways to do this initialization that can be
+used in any combination:
1. You can supply initial values in the definition of the model function.
- 2. You can initialize the parameters after the model has been created.
- 3. You can supply initial values for the parameters to the :meth:`eval`
- or :meth:`fit` methods.
+ 2. You can initialize the parameters when creating parameters with :meth:`make_params`.
+ 3. You can give parameter hints with :meth:`set_param_hint`.
+ 4. You can supply initial values for the parameters when you use the
+ :meth:`eval` or :meth:`fit` methods.
+
+Of course these methods can be mixed, allowing you to overwrite initial
+values at any point in the process of defining and using the model.
+
+Initializing values in the function definition
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For option 1, consider doing::
+To supply initial values for parameters in the definition of the model
+function, you can simply supply a default value::
>>> def myfunc(x, a=1, b=0):
>>> ...
-instead of::
+instead of using::
>>> def myfunc(x, a, b):
>>> ...
-For option 2, you can do::
+This has the advantage of working at the function level -- all parameters
+with keywords can be treated as options. It also means that some default
+initial value will always be available for the parameter.
+
+
+Initializing values with :meth:`make_params`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When creating parameters with :meth:`make_params` you can specify initial
+values. To do this, use keyword arguments for the parameter names and
+initial values::
+
+ >>> mod = Model(myfunc)
+ >>> pars = mod.make_params(a=3, b=0.5)
+
+
+Initializing values by setting parameter hints
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After a model has been created, but prior to creating parameters with
+:meth:`make_params`, you can set parameter hints. These allows you to set
+not only a default initial value but also to set other parameter attributes
+controlling bounds, whether it is varied in the fit, or a constraint
+expression. To set a parameter hint, you can use :meth:`set_param_hint`,
+as with::
+
+ >>> mod = Model(myfunc)
+ >>> mod.set_param_hint('a', value = 1.0)
+ >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
+ >>> pars = mod.make_params()
+
+Parameter hints are discussed in more detail in section
+:ref:`model_param_hints_section`.
+
+
+Initializing values when using a model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Finally, you can explicitly supply initial values when using a model. That
+is, as with :meth:`make_params`, you can include values
+as keyword arguments to either the :meth:`eval` or :meth:`fit` methods::
+
+ >>> y1 = mod.eval(x=x, a=7.0, b=-2.0)
+
+    >>> out = mod.fit(pars, x=x, a=3.0, b=-0.0)
+
+These approaches to initialization provide many opportunities for setting
+initial values for parameters. The methods can be combined, so that you
+can set parameter hints but then change the initial value explicitly with
+:meth:`fit`.
+
+.. _model_param_hints_section:
+
+Using parameter hints
+--------------------------------
+
+
+After a model has been created, you can give it hints for how to create
+parameters with :meth:`make_params`. This allows you to set not only a
+default initial value but also to set other parameter attributes
+controlling bounds, whether it is varied in the fit, or a constraint
+expression. To set a parameter hint, you can use :meth:`set_param_hint`,
+as with::
>>> mod = Model(myfunc)
- >>> mod.params['a'].value = 1.0
- >>> mod.params['b'].value = 0.1
+ >>> mod.set_param_hint('a', value = 1.0)
+ >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
+
+Parameter hints are stored in a model's :attr:`param_hints` attribute,
+which is simply a nested dictionary::
+
+ >>> print mod.param_hints
+ {'a': {'value': 1}, 'b': {'max': 1.0, 'value': 0.3, 'min': 0}}
+
+
+You can change this dictionary directly, or with the :meth:`set_param_hint`
+method. Either way, these parameter hints are used by :meth:`make_params`
+when making parameters.
+
+An important feature of parameter hints is that you can force the creation
+of new parameters with parameter hints. This can be useful to make derived
+parameters with constraint expressions. For example to get the full-width
+at half maximum of a Gaussian model, one could use a parameter hint of::
+
+ >>> mod = Model(gaussian)
+ >>> mod.set_param_hint('fwhm', expr='2.3548*sigma')
+
+
+
+The :class:`ModelFit` class
+=======================================
+
+A :class:`ModelFit` is the object returned by :meth:`Model.fit`. It is a
+subclass of :class:`Minimizer`, and so contains many of the fit results.
+Of course, it knows the :class:`Model` and the set of :class:`Parameters`
+used in the fit, and it has methods to evaluate the model, to fit the data
+(or re-fit the data with changes to the parameters, or fit with different
+or modified data) and to print out a report for that fit.
+
+While a :class:`Model` encapsulates your model function, it is fairly
+abstract and does not contain the parameters or data used in a particular
+fit. A :class:`ModelFit` *does* contain parameters and data as well as
+methods to alter and re-do fits. Thus the :class:`Model` is the idealized
+model while the :class:`ModelFit` is the messier, more complex (but perhaps
+more useful) object that represents a fit with a set of parameters to data
+with a model.
+
+A :class:`ModelFit` has several attributes holding values for fit results,
+and several methods for working with fits.
+
+:class:`ModelFit` methods
+---------------------------------
+
+These methods are all inherited from :class:`Minimizer` or from
+:class:`Model`.
+
+.. method:: eval(**kwargs)
+
+ evaluate the model using the best-fit parameters and supplied
+ independent variables. The ``**kwargs`` arguments can be used to update
+ parameter values and/or independent variables.
+
+.. method:: fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])
+
+ fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
+ or ``method``, or changing the independent variable(s) with the
+ ``**kwargs`` argument. See :meth:`Model.fit` for argument
+ descriptions, and note that any value of ``None`` defaults to the last
+ used value.
+
+.. method:: fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
+
+ return a printable fit report for the fit with fit statistics, best-fit
+ values with uncertainties and correlations. As with :func:`fit_report`.
+
+ :param modelpars: Parameters with "Known Values" (optional, default None)
+ :param show_correl: whether to show list of sorted correlations [``True``]
+ :param min_correl: smallest correlation absolute value to show [0.1]
+
+
+
+
+:class:`ModelFit` attributes
+---------------------------------
+
+.. attribute:: best_fit
+
+ ndarray result of model function, evaluated at provided
+ independent variables and with best-fit parameters.
+
+.. attribute:: best_values
+
+ dictionary with parameter names as keys, and best-fit values as values.
+
+.. attribute:: chisqr
+
+ floating point best-fit chi-square statistic.
+
+.. attribute:: covar
+
+ ndarray (square) covariance matrix returned from fit.
+
+.. attribute:: data
+
+ ndarray of data to compare to model.
+
+.. attribute:: errorbars
+
+ boolean for whether error bars were estimated by fit.
+
+.. attribute:: ier
+
+ integer returned code from :func:`scipy.optimize.leastsq`.
+
+.. attribute:: init_fit
+
+ ndarray result of model function, evaluated at provided
+ independent variables and with initial parameters.
+
+.. attribute:: init_params
+
+ initial parameters.
+
+.. attribute:: init_values
+
+ dictionary with parameter names as keys, and initial values as values.
-An advantage of this approach is that you can set other parameter
-attributes such as bounds and constraints.
+.. attribute:: iter_cb
-For option 3, give explicit initial values for the parameters:
+ optional callable function, to be called at each fit iteration. This
  must take arguments of ``params, iter, resid, *args, **kws``, where
+ ``params`` will have the current parameter values, ``iter`` the
+ iteration, ``resid`` the current residual array, and ``*args`` and
+ ``**kws`` as passed to the objective function.
- >>> y1 = mod.eval(x=x, a=1, b=3)
+.. attribute:: jacfcn
-Again, these methods can be combined. For example, you can set parameter
-values and bounds as with option 2, but then change the initial value with
-option 3.
+ optional callable function, to be called to calculate jacobian array.
+
+.. attribute:: lmdif_message
+
+ string message returned from :func:`scipy.optimize.leastsq`.
+
+.. attribute:: message
+
+ string message returned from :func:`minimize`.
+
+.. attribute:: method
+
+ string naming fitting method for :func:`minimize`.
+
+.. attribute:: model
+
+ instance of :class:`Model` used for model.
+
+.. attribute:: ndata
+
+ integer number of data points.
+
+.. attribute:: nfev
+
+ integer number of function evaluations used for fit.
+
+.. attribute:: nfree
+
  integer number of free parameters in fit.
+
+.. attribute:: nvarys
+
+ integer number of independent, freely varying variables in fit.
+
+.. attribute:: params
+
+ Parameters used in fit. Will have best-fit values.
+
+.. attribute:: redchi
+
+ floating point reduced chi-square statistic
+
+.. attribute:: residual
+
+ ndarray for residual.
+
+.. attribute:: scale_covar
+
+ boolean flag for whether to automatically scale covariance matrix.
+
+.. attribute:: success
+
+ boolean value of whether fit succeeded.
+
+.. attribute:: weights
+
+ ndarray (or ``None``) of weighting values used in fit.
.. _composite_models_section:
@@ -496,7 +804,6 @@ option 3.
Creating composite models
=============================
-
One of the most interesting features of the :class:`Model` class is that
models can be added together to give a composite model, with parameters
from the component models all being available to influence the total sum of
@@ -537,7 +844,10 @@ This model has parameters for both component models, and can be used as:
which prints out the results::
-
+ [[Model]]
+ Composite Model:
+ gaussian
+ line
[[Fit Statistics]]
# function evals = 44
# data points = 101
@@ -545,28 +855,94 @@ which prints out the results::
chi-square = 2.579
reduced chi-square = 0.027
[[Variables]]
- amp: 8.459311 +/- 0.1241451 (1.47%) initial = 5
- cen: 5.655479 +/- 0.009176784 (0.16%) initial = 5
- intercept: -0.968602 +/- 0.03352202 (3.46%) initial = 1
- slope: 0.264844 +/- 0.005748921 (2.17%) initial = 0
- wid: 0.6754552 +/- 0.009916862 (1.47%) initial = 1
+ amp: 8.45931061 +/- 0.124145 (1.47%) (init= 5)
+ cen: 5.65547872 +/- 0.009176 (0.16%) (init= 5)
+ intercept: -0.96860201 +/- 0.033522 (3.46%) (init= 1)
+ slope: 0.26484403 +/- 0.005748 (2.17%) (init= 0)
+ wid: 0.67545523 +/- 0.009916 (1.47%) (init= 1)
[[Correlations]] (unreported correlations are < 0.100)
C(amp, wid) = 0.666
C(cen, intercept) = 0.129
-and shows the plot:
-.. image:: _images/model_fit2.png
- :target: _images/model_fit2.png
- :width: 50%
+and shows the plot on the left.
+.. _figModel2:
-which shows the data in blue dots, the best fit as a solid red line, and
-the initial fit in black dashed line.
+ .. image:: _images/model_fit2.png
+ :target: _images/model_fit2.png
+ :width: 48%
+ .. image:: _images/model_fit2a.png
+ :target: _images/model_fit2a.png
+ :width: 48%
+
+
+On the left, data is shown in blue dots, the total fit is shown in solid
+red line, and the initial fit is shown as a black dashed line. In the
+figure on the right, the data is again shown in blue dots, and the Gaussian
+component shown as a black dashed line, and the linear component shown as a
+red dashed line. These components were generated after the fit using the
+Models :meth:`eval` method::
+
+
+ comp_gauss = mod.components[0].eval(x=x)
+ comp_line = mod.components[1].eval(x=x)
+
+
+Note that we have to pass in ``x`` here, but not any of the final values
+for the parameters -- the current values for ``mod.params`` will be used,
+and these will be the best-fit values after a fit. While the model does
+store the best parameters and the estimate of the data in ``mod.best_fit``,
+it does not actually store the data it fit to or the independent variables
+-- here, ``x`` for that data. That means you can easily apply this model
+to other data sets, or evaluate the model at other values of ``x``. You
+may want to do this to give a finer or coarser spacing of data points, or to
+extrapolate the model outside the fitting range. This can be done with::
+
+ xwide = np.linspace(-5, 25, 3001)
+ predicted = mod.eval(x=xwide)
+
+
+A final note: In this example, the argument names for the model functions
+do not overlap. If they had, the ``prefix`` argument to :class:`Model`
+would have allowed us to identify which parameter went with which component
+model. As we will see in the next chapter, using composite models with the
+built-in models provides a simple way to build up complex models.
+
+Model names for composite models
+-----------------------------------------
+
+By default a `Model` object has a `name` attribute containing the name of
+the model function. This name can be overridden when building a model::
+
+ my_model = Model(gaussian, name='my_gaussian')
+
+or by assigning the `name` attribute::
+
+ my_model = Model(gaussian)
+ my_model.name = 'my_gaussian'
+
+This name is used in the object representation (for example when printing)::
+
+ <lmfit.Model: my_gaussian>
+
+A composite model will have the name `'composite_func'` by default, but as
+noted, we can overwrite it with a more meaningful string. This can be useful
+when dealing with multiple models.
+
+For example, let assume we want to fit some bi-modal data. We initially try
+two Gaussian peaks::
+
+ model = GaussianModel(prefix='p1_') + GaussianModel(prefix='p2_')
+ model.name = '2-Gaussians model'
+
+Here, instead of the standard name `'composite_func'`, we assigned a more
+meaningful name. Now, if we want to also fit with two Lorentzian peaks
+we can do similarly::
+
+ model2 = LorentzianModel(prefix='p1_') + LorentzianModel(prefix='p2_')
+ model2.name = '2-Lorentzians model'
-In this example, the argument names for the model functions do not overlap.
-If they had, the ``prefix`` argument to :class:`Model` would have allowed
-us to identify which parameter went with which component model. As we will
-see in the next chapter, using composite models with the built-in models
-provides a simple way to build up complex models.
+It is evident that assigning names will help to easily distinguish
+the different models.
diff --git a/doc/parameters.rst b/doc/parameters.rst
index 57c3296..be2f64f 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -4,10 +4,33 @@
:class:`Parameter` and :class:`Parameters`
================================================
-This chapter describes :class:`Parameter` objects, which are
-fundamental to the lmfit approach to optimization. Most real use cases
-will use the :class:`Parameters` class, which provides an (ordered)
-dictionary of :class:`Parameter` objects.
+This chapter describes :class:`Parameter` objects, which are the key concept
+of lmfit. A :class:`Parameter` is the quantity to be optimized in all
+minimization problems, replacing the plain floating point number used in
+the optimization routines from :mod:`scipy.optimize`. A :class:`Parameter`
+has a value that can be varied in the fit, fixed, have upper and/or lower
+bounds. It can even have a value that is constrained by an algebraic
+expression of other Parameter values. Since :class:`Parameters` live
+outside the core optimization routines, they can be used in **all**
+optimization routines from :mod:`scipy.optimize`. By using
+:class:`Parameter` objects instead of plain variables, the objective
+function does not have to be modified to reflect every change of what is
+varied in the fit. This simplifies the writing of models, allowing general
+models that describe the phenomenon to be written, and gives the user more
+flexibility in using and testing variations of that model.
+
+Whereas a :class:`Parameter` expands on an individual floating point
+variable, the optimization methods need an ordered group of floating point
+variables. In the :mod:`scipy.optimize` routines this is required to be a
+1-dimensional numpy ndarray. For lmfit, where each :class:`Parameter` has
+a name, this is replaced by a :class:`Parameters` class, which works as an
+ordered dictionary of :class:`Parameter` objects, with a few additional
+features and methods. That is, while the concept of a :class:`Parameter`
+is central to lmfit, one normally creates and interacts with a
+:class:`Parameters` instance that contains many :class:`Parameter`
+objects. The objective functions you write will take an instance of
+:class:`Parameters` as its first argument.
+
The :class:`Parameter` class
========================================
@@ -20,7 +43,7 @@ The :class:`Parameter` class
:type name: ``None`` or string -- will be overwritten during fit if ``None``.
:param value: the numerical value for the parameter
:param vary: whether to vary the parameter or not.
- :type vary: boolean (``True``/``False``)
+ :type vary: boolean (``True``/``False``) [default ``True``]
:param min: lower bound for value (``None`` = no lower bound).
:param max: upper bound for value (``None`` = no upper bound).
:param expr: mathematical expression to use to evaluate value during fit.
@@ -29,9 +52,10 @@ The :class:`Parameter` class
Each of these inputs is turned into an attribute of the same name.
-After a fit, a Parameter for a fitted variable (ie with vary = ``True``)
-will have the :attr:`value` attribute holding the best-fit value, and may
-(depending on the success of the fit) have obtain additional attributes.
+After a fit, a Parameter for a fitted variable (that is with vary =
+``True``) will have the :attr:`value` attribute holding the best-fit value.
+Depending on the success of the fit and fitting algorithm used, it may also
+have attributes :attr:`stderr` and :attr:`correl`.
.. attribute:: stderr
@@ -44,14 +68,61 @@ will have the :attr:`value` attribute holding the best-fit value, and may
{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
-For details of the use of the bounds :attr:`min` and :attr:`max`,
-see :ref:`bounds_chapter`.
+See :ref:`bounds_chapter` for details on the math used to implement the
+bounds with :attr:`min` and :attr:`max`.
The :attr:`expr` attribute can contain a mathematical expression that will
be used to compute the value for the Parameter at each step in the fit.
See :ref:`constraints_chapter` for more details and examples of this
feature.
+.. index:: Removing a Constraint Expression
+
+.. method:: set(value=None[, vary=None[, min=None[, max=None[, expr=None]]]])
+
+ set or update a Parameters value or other attributes.
+
+ :param name: parameter name
+ :param value: the numerical value for the parameter
+ :param vary: whether to vary the parameter or not.
+ :param min: lower bound for value
+ :param max: upper bound for value
+ :param expr: mathematical expression to use to evaluate value during fit.
+
+ Each argument of :meth:`set` has a default value of ``None``, and will
+ be set only if the provided value is not ``None``. You can use this to
+ update some Parameter attribute without affecting others, for example::
+
+ p1 = Parameter('a', value=2.0)
+ p2 = Parameter('b', value=0.0)
+ p1.set(min=0)
+ p2.set(vary=False)
+
  to set a lower bound, or to set a Parameter as having a fixed value.
+
+ Note that to use this approach to lift a lower or upper bound, doing::
+
+ p1.set(min=0)
+ .....
+ # now lift the lower bound
+ p1.set(min=None) # won't work! lower bound NOT changed
+
+ won't work -- this will not change the current lower bound. Instead
+ you'll have to use ``np.inf`` to remove a lower or upper bound::
+
+ # now lift the lower bound
+ p1.set(min=-np.inf) # will work!
+
+ Similarly, to clear an expression of a parameter, you need to pass an
+ empty string, not ``None``. You also need to give a value and
+ explicitly tell it to vary::
+
+ p3 = Parameter('c', expr='(a+b)/2')
+ p3.set(expr=None) # won't work! expression NOT changed
+
+ # remove constraint expression
+ p3.set(value=1.0, vary=True, expr='') # will work! parameter now unconstrained
+
The :class:`Parameters` class
========================================
@@ -68,11 +139,12 @@ The :class:`Parameters` class
2. values must be valid :class:`Parameter` objects.
- Two methods for provided for convenience of initializing Parameters.
+   Two methods are provided for convenient initialization of a :class:`Parameters`,
+ and one for extracting :class:`Parameter` values into a plain dictionary.
.. method:: add(name[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
- add a named parameter. This simply creates a :class:`Parameter`
+ add a named parameter. This creates a :class:`Parameter`
object associated with the key `name`, with optional arguments
passed to :class:`Parameter`::
@@ -86,9 +158,9 @@ The :class:`Parameters` class
name, value, vary, min, max, expr
- That is, this method is somewhat rigid and verbose (no default values),
- but can be useful when initially defining a parameter list so that it
- looks table-like::
+ This method is somewhat rigid and verbose (no default values), but can
+ be useful when initially defining a parameter list so that it looks
+ table-like::
p = Parameters()
# (Name, Value, Vary, Min, Max, Expr)
@@ -100,12 +172,35 @@ The :class:`Parameters` class
('wid2', None, False, None, None, '2*wid1/3'))
+.. method:: valuesdict(self)
+
+ return an ordered dictionary of name:value pairs containing the
+ :attr:`name` and :attr:`value` of a Parameter.
+
+ This is distinct from the :class:`Parameters` itself, as the dictionary
+   values are not :class:`Parameter` objects, just the :attr:`value`.
+   This can be a very convenient way to get updated values in an objective
+ function.
+
+
Simple Example
==================
-Putting it all together, a simple example of using a dictionary of
-:class:`Parameter` objects and :func:`minimize` might look like this:
+Using :class:`Parameters` and the :func:`minimize` function (discussed in the
+next chapter) might look like this:
.. literalinclude:: ../examples/doc_basic.py
+Here, the objective function explicitly unpacks each Parameter value. This
+can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
+which would make the objective function ``fcn2min`` above look like::
+
+ def fcn2min(params, x, data):
+ """ model decaying sine wave, subtract data"""
+ v = params.valuesdict()
+
+ model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+ return model - data
+
+The results are identical, and the difference is a stylistic choice.
diff --git a/doc/sphinx/ext_mathjax.py b/doc/sphinx/ext_mathjax.py
new file mode 100644
index 0000000..40de659
--- /dev/null
+++ b/doc/sphinx/ext_mathjax.py
@@ -0,0 +1,10 @@
+# sphinx extensions for mathjax
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.intersphinx',
+ 'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(mathjax)
diff --git a/doc/sphinx/ext_pngmath.py b/doc/sphinx/ext_pngmath.py
new file mode 100644
index 0000000..cf153fe
--- /dev/null
+++ b/doc/sphinx/ext_pngmath.py
@@ -0,0 +1,10 @@
+# sphinx extensions for pngmath
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.intersphinx',
+ 'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(pngmath)
diff --git a/doc/sphinx/mathjax/conf.py b/doc/sphinx/mathjax/conf.py
deleted file mode 100644
index 451458e..0000000
--- a/doc/sphinx/mathjax/conf.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# lmfit documentation build configuration file
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
-sys.path.append(os.path.abspath(os.path.join('.')))
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.todo',
- 'sphinx.ext.coverage',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.intersphinx',
- 'numpydoc']
-
-try:
- import IPython.sphinxext.ipython_directive
- extensions.extend(['IPython.sphinxext.ipython_directive',
- 'IPython.sphinxext.ipython_console_highlighting'])
-except ImportError:
- pass
-
-
-intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
- 'numpy': ('http://scipy.org/docs/numpy/', None),
- 'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
-
-intersphinx_cache_limit = 10
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'lmfit'
-copyright = u'2014, Matthew Newville, The University of Chicago, Till Stensitzki, Freie Universitat Berlin'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-try:
- import lmfit
- release = lmfit.__version__
-# The full version, including alpha/beta/rc tags.
-except ImportError:
- release = 'latest'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = False
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-html_theme_path = ['sphinx/theme']
-html_theme = 'lmfitdoc'
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-html_short_title = 'Minimization and Curve-Fitting for Python'
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
-
-html_use_modindex = False
-#html_use_index = True
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'lmfitdoc'
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'lmfit.tex',
- 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
- 'Matthew Newville, Till Stensitzki, and others', 'manual'),
-]
-
diff --git a/doc/sphinx/pngmath/conf.py b/doc/sphinx/pngmath/conf.py
deleted file mode 100644
index 2fb8e92..0000000
--- a/doc/sphinx/pngmath/conf.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# lmfit documentation build configuration file
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
-sys.path.append(os.path.abspath(os.path.join('.')))
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.todo',
- 'sphinx.ext.coverage',
- 'sphinx.ext.pngmath',
- 'sphinx.ext.intersphinx',
- 'numpydoc']
-
-try:
- import IPython.sphinxext.ipython_directive
- extensions.extend(['IPython.sphinxext.ipython_directive',
- 'IPython.sphinxext.ipython_console_highlighting'])
-except ImportError:
- pass
-
-
-intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
- 'numpy': ('http://scipy.org/docs/numpy/', None),
- 'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
-
-intersphinx_cache_limit = 10
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'lmfit'
-copyright = u'2014, Matthew Newville, The University of Chicago, Till Stensitzki, Freie Universitat Berlin'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-try:
- import lmfit
- release = lmfit.__version__
-# The full version, including alpha/beta/rc tags.
-except ImportError:
- release = 'latest'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = False
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-html_theme_path = ['sphinx/theme']
-html_theme = 'lmfitdoc'
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-html_short_title = 'Minimization and Curve-Fitting for Python'
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
-
-html_use_modindex = False
-#html_use_index = True
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'lmfitdoc'
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'lmfit.tex',
- 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
- 'Matthew Newville, Till Stensitzki, and others', 'manual'),
-]
-
diff --git a/doc/sphinx/theme/lmfitdoc/layout.html b/doc/sphinx/theme/lmfitdoc/layout.html
index 5bf78eb..0681d42 100644
--- a/doc/sphinx/theme/lmfitdoc/layout.html
+++ b/doc/sphinx/theme/lmfitdoc/layout.html
@@ -9,6 +9,63 @@
#}
{%- extends "basic/layout.html" %}
+{%- block extrahead %}
+ <script type="text/x-mathjax-config">
+ MathJax.Hub.Config({
+ "TeX": {Macros: {AA : "{\\unicode{x212B}}"}},
+ "HTML-CSS": {scale: 90}
+ });</script>
+{% endblock %}
+
+
+
+{% block rootrellink %}
+ <li>[<a href="{{ pathto('intro') }}">intro</a>|</li>
+ <li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
+ <li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
+ <li><a href="{{ pathto('model') }}"> model</a>|</li>
+ <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
+ <li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
+ <li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
+ <li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
+{% endblock %}
+
+{% block relbar1 %}
+<div>
+<table border=0>
+ <tr><td></td><td width=75% padding=5 align=left>
+ <a href="index.html" style="color: #157"> <font size=+3>LMFIT</font></a>
+ </td><td></td>
+ <td width=8% align=left>
+ <a href="contents.html" style="color: #882222">
+ <font size+=1>Contents</font></a> </td>
+ <td width=8% align=left>
+ <a href="installation.html" style="color: #882222">
+ <font size+=1>Download</font></a></td>
+ <td width=8% align=left>
+ <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
+ <font size+=1>Develop</font></a></td>
+ </tr>
+ <tr><td></td><td width=75% padding=5 align=left>
+ <a href="index.html" style="color: #157"> <font size=+2>
+ Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
+ </td><td></td>
+ <td width=8% align=left>
+ <a href="intro.html" style="color: #882222">
+ <font size+=1>Introduction</font></a> </td>
+ <td width=8% align=left>
+ <a href="parameters.html" style="color: #882222">
+ <font size+=1>Parameters</font></a> </td>
+ <td width=8% align=left>
+ <a href="model.html" style="color: #882222">
+ <font size+=1>Models</font></a> </td>
+
+ </tr>
+</table>
+</div>
+{{ super() }}
+{% endblock %}
+
{# put the sidebar before the body #}
{% block sidebar1 %}{{ sidebar() }}{% endblock %}
{% block sidebar2 %}{% endblock %}