summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--INSTALL17
-rw-r--r--PKG-INFO7
-rw-r--r--THANKS.txt6
-rw-r--r--doc/Makefile48
-rw-r--r--doc/__pycache__/extensions.cpython-37.pycbin404 -> 405 bytes
-rw-r--r--doc/builtin_models.rst33
-rw-r--r--doc/confidence.rst45
-rw-r--r--doc/contents.rst2
-rwxr-xr-xdoc/doc_examples_to_gallery.py1
-rw-r--r--doc/fitting.rst263
-rw-r--r--doc/installation.rst18
-rw-r--r--doc/whatsnew.rst50
-rw-r--r--examples/doc_fitting_emcee.py47
-rw-r--r--examples/example_Model_interface.py8
-rw-r--r--examples/example_brute.py2
-rw-r--r--examples/example_complex_resonator_model.py2
-rw-r--r--examples/example_confidence_interval.py3
-rw-r--r--examples/example_detect_outliers.py102
-rw-r--r--examples/example_emcee_Model_interface.py19
-rw-r--r--examples/lmfit_emcee_model_selection.py9
-rw-r--r--lmfit.egg-info/PKG-INFO7
-rw-r--r--lmfit.egg-info/SOURCES.txt2
-rw-r--r--lmfit.egg-info/requires.txt9
-rw-r--r--lmfit/_version.py6
-rw-r--r--lmfit/confidence.py75
-rw-r--r--lmfit/jsonutils.py19
-rw-r--r--lmfit/lineshapes.py4
-rw-r--r--lmfit/minimizer.py617
-rw-r--r--lmfit/model.py6
-rw-r--r--lmfit/models.py54
-rw-r--r--lmfit/parameter.py95
-rw-r--r--lmfit/printfuncs.py1
-rw-r--r--lmfit/ui/basefitter.py4
-rw-r--r--lmfit/ui/ipy_fitter.py13
-rw-r--r--requirements.txt9
-rw-r--r--setup.cfg6
-rw-r--r--setup.py14
-rw-r--r--tests/test_NIST_Strd.py2
-rw-r--r--tests/test_ampgo.py6
-rw-r--r--tests/test_confidence.py62
-rw-r--r--tests/test_covariance_matrix.py1
-rw-r--r--tests/test_custom_independentvar.py2
-rw-r--r--tests/test_jsonutils.py7
-rw-r--r--tests/test_least_squares.py51
-rw-r--r--tests/test_lineshapes_models.py1
-rw-r--r--tests/test_model.py29
-rw-r--r--tests/test_nose.py102
-rw-r--r--tests/test_parameter.py514
-rw-r--r--tests/test_parameters.py24
-rw-r--r--tests/test_saveload.py9
50 files changed, 1687 insertions, 746 deletions
diff --git a/INSTALL b/INSTALL
index abe873b..570b3ca 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,18 +1,17 @@
Installation instructions for LMFIT-py
========================================
-To install the lmfit python module, use::
+To install the lmfit Python module, use::
python setup.py build
python setup.py install
-For lmfit 0.9.10, the following versions are required:
- Python: 2.7, 3.4, 3.5, or 3.6
- NumPy: 1.10 or higher
- SciPy: 0.19 or higher
- six: 1.10 or higher
- asteval: 0.9.12 or higher
- uncertainties: 3.0 or higher
+For lmfit 1.0, the following versions are required:
+ Python: 3.5 or higher
+ NumPy: 1.16 or higher
+ SciPy: 1.2 or higher
+ asteval: 0.9.16 or higher
+ uncertainties: 3.0.1 or higher
Matt Newville <newville@cars.uchicago.edu>
-Last Update: 2018-May-8
+Last Update: 2019-December-4
diff --git a/PKG-INFO b/PKG-INFO
index 310f693..c37537e 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.2
Name: lmfit
-Version: 0.9.14
+Version: 1.0.0
Summary: Least-Squares Minimization with Bounds and Constraints
Home-page: https://lmfit.github.io/lmfit-py/
Author: LMFit Development Team
@@ -32,10 +32,9 @@ Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
Classifier: Topic :: Scientific/Engineering
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Requires-Python: >=3.5
diff --git a/THANKS.txt b/THANKS.txt
index 253bfb9..9b14502 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -45,14 +45,14 @@ of size of the contribution to the existing code) are from:
The propagation of parameter uncertainties to uncertainties in a Model
was adapted from the excellent description at
https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#confidence-and-prediction-intervals,
- which references the original work of: J. Wolberg,Data Analysis Using the
- Method of Least Squares, 2006, Springer
+ which references the original work of: J. Wolberg, Data Analysis Using the
+ Method of Least Squares, 2006, Springer.
Additional patches, bug fixes, and suggestions have come from Faustin
Carter, Christoph Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau,
nmearl, Gustavo Pasquevich, Clemens Prescher, LiCode, Ben Gamari, Yoav
Roam, Alexander Stark, Alexandre Beelen, Andrey Aristov, Nicholas Zobrist,
-and many others.
+Ethan Welty, Julius Zimmermann, and many others.
The lmfit code obviously depends on, and owes a very large debt to the code
in scipy.optimize. Several discussions on the SciPy-user and lmfit mailing
diff --git a/doc/Makefile b/doc/Makefile
index 34f8107..59489d7 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -2,24 +2,30 @@
#
# You can set these variables from the command line.
-SPHINXOPTS =
SPHINXBUILD = sphinx-build
-PAPER =
+SPHINX_OPTS = -W
+SPHINX_DEBUGOPTS = --keep-going -n
BUILDDIR = _build
-INSTALLDIR = /home/newville/public_html/lmfit/
-
# Internal variables.
+PAPER =
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+SPHINX_OUTPUT = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER))
+ALLSPHINXOPTS = $(SPHINX_OUTPUT) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest latexpdf htmlzip
-.PHONY: all install pdf gallery
+.PHONY: all pdf gallery debug
html: gallery
cp sphinx/ext_mathjax.py extensions.py
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/html
+ @echo
+ @echo "html build finished: $(BUILDDIR)/html."
+
+debug: gallery
+ cp sphinx/ext_mathjax.py extensions.py
+ $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_DEBUGOPTS) . $(BUILDDIR)/html
@echo
@echo "html build finished: $(BUILDDIR)/html."
@@ -30,12 +36,12 @@ examples/index.rst:
htmlzip: html
cp sphinx/ext_mathjax.py extensions.py
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/lmfit_doc
+ $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/lmfit_doc
cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc
epub: html
cp sphinx/ext_mathjax.py extensions.py
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ $(SPHINXBUILD) -b epub $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/epub
cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/.
pdf: latex
@@ -45,12 +51,6 @@ pdf: latex
all: html htmlzip epub pdf
-install: all
- cd $(BUILDDIR)/latex && pdflatex lmfit.tex
- cd $(BUILDDIR)/latex && makeindex -s python.ist lmfit.idx
- cd $(BUILDDIR)/latex && pdflatex lmfit.tex
- cp -pr $(BUILDDIR)/html/* $(INSTALLDIR)/.
-
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@@ -72,29 +72,29 @@ clean:
-rm -rf ../examples/documentation
dirhtml: gallery
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ $(SPHINXBUILD) -b dirhtml $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
pickle: gallery
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ $(SPHINXBUILD) -b pickle $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json: gallery
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ $(SPHINXBUILD) -b json $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp: gallery
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ $(SPHINXBUILD) -b htmlhelp $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
latex: gallery
cp sphinx/ext_imgmath.py extensions.py
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+ $(SPHINXBUILD) -b latex $(SPHINX_OUTPUT) $(SPHINX_OPTS) . _build/latex
@echo
@echo "Build finished; the LaTeX files are in _build/latex."
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
@@ -102,23 +102,23 @@ latex: gallery
latexpdf:
cp sphinx/ext_imgmath.py extensions.py
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+ $(SPHINXBUILD) -b latex $(SPHINX_OUTPUT) $(SPHINX_OPTS) . _build/latex
@echo "Running LaTeX files through pdflatex..."
make -C _build/latex all-pdf
@echo "pdflatex finished; the PDF files are in _build/latex."
changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ $(SPHINXBUILD) -b changes $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ $(SPHINXBUILD) -b linkcheck $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ $(SPHINXBUILD) -b doctest $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/__pycache__/extensions.cpython-37.pyc b/doc/__pycache__/extensions.cpython-37.pyc
index 2653194..773a719 100644
--- a/doc/__pycache__/extensions.cpython-37.pyc
+++ b/doc/__pycache__/extensions.cpython-37.pyc
Binary files differ
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index 52c763d..a4c7f70 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -34,13 +34,9 @@ All the models listed below are one-dimensional, with an independent
variable named ``x``. Many of these models represent a function with a
distinct peak, and so share common features. To maintain uniformity,
common parameter names are used whenever possible. Thus, most models have
-a parameter called ``amplitude`` that represents the overall height (or
-area of) a peak or function, a ``center`` parameter that represents a peak
-centroid position, and a ``sigma`` parameter that gives a characteristic
-width. Many peak shapes also have a parameter ``fwhm`` (constrained by
-``sigma``) giving the full width at half maximum and a parameter ``height``
-(constrained by ``sigma`` and ``amplitude``) to give the maximum peak
-height.
+a parameter called ``amplitude`` that represents the overall intensity (or
+area of) a peak or function and a ``sigma`` parameter that gives a
+characteristic width.
After a list of built-in models, a few examples of their use are given.
@@ -48,10 +44,25 @@ Peak-like models
-------------------
There are many peak-like models available. These include
-:class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel` and
-some less commonly used variations. The :meth:`guess`
-methods for all of these make a fairly crude guess for the value of
-``amplitude``, but also set a lower bound of 0 on the value of ``sigma``.
+:class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel`,
+:class:`PseudoVoigtModel`, and some less commonly used variations. Most of
+these models are *unit-normalized* and share the same parameter names so
+that you can easily switch between models and interpret the results. The
+``amplitude`` parameter is the multiplicative factor for the
+unit-normalized peak lineshape, and so will represent the strength of that
+peak or the area under that curve. The ``center`` parameter will be the
+centroid ``x`` value. The ``sigma`` parameter is the characteristic width
+of the peak, with many functions using :math:`(x-\mu)/\sigma` where
+:math:`\mu` is the centroid value. Most of these peak functions will have
+two additional parameters derived from and constrained by the other
+parameters. The first of these is ``fwhm`` which will hold the estimated
+"Full Width at Half Max" for the peak, which is often easier to compare
+between different models than ``sigma``. The second of these is ``height``
+which will contain the maximum value of the peak, typically the value at
+:math:`x = \mu`. Finally, each of these models has a :meth:`guess` method
+that uses data to make a fairly crude but usually sufficient guess for the
+value of ``amplitude``, ``center``, and ``sigma``, and sets a lower bound
+of 0 on the value of ``sigma``.
:class:`GaussianModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 7fad485..07da44c 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -88,15 +88,38 @@ around the best fit value. For this problem, it is not necessary to
calculate confidence intervals, and the estimates of the uncertainties from
the covariance matrix are sufficient.
-An advanced example
--------------------
+Working without standard error estimates
+----------------------------------------
+
+Sometimes the estimation of the standard errors from the covariance
+matrix fails, especially if values are near given bounds. Hence, to
+find the confidence intervals in these cases, it is necessary to set
+the errors by hand. Note that the standard error is only used to find an
+upper limit for each value, hence the exact value is not important.
+
+To set the step-size to 10% of the initial value we loop through all
+parameters and set it manually:
+
+.. jupyter-execute::
+
+ for p in result.params:
+ result.params[p].stderr = abs(result.params[p].value * 0.1)
+
+
+.. _label-confidence-advanced:
+
+An advanced example for evaluating confidence intervals
+---------------------------------------------------------
Now we look at a problem where calculating the error from approximated
-covariance can lead to misleading result -- two decaying exponentials. In
-fact such a problem is particularly hard for the Levenberg-Marquardt
-method, so we first estimate the results using the slower but robust
-Nelder-Mead method, and *then* use Levenberg-Marquardt to estimate the
-uncertainties and correlations.
+covariance can lead to misleading result -- the same double exponential
+problem shown in :ref:`label-emcee`. In fact such a problem is particularly
+hard for the Levenberg-Marquardt method, so we first estimate the results
+using the slower but robust Nelder-Mead method. We can then compare the
+uncertainties computed (if the ``numdifftools`` package is installed) with
+those estimated using Levenberg-Marquardt around the previously found
+solution. We can also compare to the results of using ``emcee``.
+
.. jupyter-execute::
:hide-code:
@@ -150,7 +173,13 @@ Plots of the confidence region are shown in the figures below for ``a1`` and
plt.show()
Neither of these plots is very much like an ellipse, which is implicitly
-assumed by the approach using the covariance matrix.
+assumed by the approach using the covariance matrix. The plots actually
+look quite a bit like those found with MCMC and shown in the "corner plot"
+in :ref:`label-emcee`. In fact, comparing the confidence interval results
+here with the results for the 1- and 2-:math:`\sigma` error estimated with
+``emcee``, we can see that the agreement is pretty good and that the
+asymmetry in the parameter distributions are reflected well in the
+asymmetry of the uncertainties.
The trace returned as the optional second argument from
:func:`conf_interval` contains a dictionary for each variable parameter.
diff --git a/doc/contents.rst b/doc/contents.rst
index ccf1fa9..b376860 100644
--- a/doc/contents.rst
+++ b/doc/contents.rst
@@ -1,3 +1,5 @@
+:orphan:
+
Contents
=================
diff --git a/doc/doc_examples_to_gallery.py b/doc/doc_examples_to_gallery.py
index caed9e5..d217a11 100755
--- a/doc/doc_examples_to_gallery.py
+++ b/doc/doc_examples_to_gallery.py
@@ -13,6 +13,7 @@ Process the examples in the documentation for inclusion in the Gallery:
"""
import os
import time
+
basedir = os.getcwd()
examples_dir = os.path.abspath(os.path.join(basedir, '..', 'examples'))
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 6f80930..1fcec29 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -2,7 +2,6 @@
.. module:: lmfit.minimizer
-
=======================================
Performing Fits and Analyzing Outputs
=======================================
@@ -151,11 +150,11 @@ uncertainties and correlations if ``calc_covar`` is True (default).
+--------------------------+------------------------------------------------------------------+
| Newton CG trust-region | ``trust-ncg`` |
+--------------------------+------------------------------------------------------------------+
- | Exact trust-region | ``trust-exact`` (SciPy >= 1.0) |
+ | Exact trust-region | ``trust-exact`` |
+--------------------------+------------------------------------------------------------------+
- | Newton GLTR trust-region | ``trust-krylov`` (SciPy >= 1.0) |
+ | Newton GLTR trust-region | ``trust-krylov`` |
+--------------------------+------------------------------------------------------------------+
- | Constrained trust-region | ``trust-constr`` (SciPy >= 1.1) |
+ | Constrained trust-region | ``trust-constr`` |
+--------------------------+------------------------------------------------------------------+
| Dogleg | ``dogleg`` |
+--------------------------+------------------------------------------------------------------+
@@ -340,12 +339,105 @@ information criterion, and/or Bayesian information criterion. Generally,
the Bayesian information criterion is considered the most conservative of
these statistics.
-.. _fit-itercb-label:
+Uncertainties in Variable Parameters, and their Correlations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned above, when a fit is complete the uncertainties for fitted
+Parameters as well as the correlations between pairs of Parameters are
+usually calculated. This happens automatically either when using the
+default :meth:`leastsq` method, the :meth:`least_squares` method, or for
+most other fitting methods if the highly-recommended ``numdifftools``
+package is available. The estimated standard error (the :math:`1\sigma`
+uncertainty) for each variable Parameter will be contained in the
+:attr:`stderr`, while the :attr:`correl` attribute for each Parameter will
+contain a dictionary of the correlation with each other variable Parameter.
+
+These estimates of the uncertainties are done by inverting the Hessian
+matrix which represents the second derivative of fit quality for each
+variable parameter. There are situations for which the uncertainties cannot
+be estimated, which generally indicates that this matrix cannot be inverted
+because the fit is not actually sensitive to one of the variables.
+This can happen if a Parameter is stuck at an upper or lower bound, if the
+variable is simply not used by the fit, or if the value for the variable is
+such that it has no real influence on the fit.
+
+In principle, the scale of the uncertainties in the Parameters is closely
+tied to the goodness-of-fit statistics chi-square and reduced chi-square
+(``chisqr`` and ``redchi``). The standard errors or :math:`1 \sigma`
+uncertainties are those that increase chi-square by 1. Since a "good fit"
+should have ``redchi`` of around 1, this requires that the data
+uncertainties (and to some extent the sampling of the N data points) is
+correct. Unfortunately, it is often not the case that one has high-quality
+estimates of the data uncertainties (getting the data is hard enough!).
+Because of this common situation, the uncertainties reported and held in
+:attr:`stderr` are not those that increase chi-square by 1, but those that
+increase chi-square by reduced chi-square. This is equivalent to rescaling
+the uncertainty in the data such that reduced chi-square would be 1. To be
+clear, this rescaling is done by default because if reduced chi-square is
+far from 1, this rescaling often makes the reported uncertainties sensible,
+and if reduced chi-square is near 1 it does little harm. If you have good
+scaling of the data uncertainty and believe the scale of the residual
+array is correct, this automatic rescaling can be turned off using
+``scale_covar=False``.
+
+Note that the simple (and fast!) approach to estimating uncertainties and
+correlations by inverting the second derivative matrix assumes that the
+components of the residual array (if, indeed, an array is used) are
+distributed around 0 with a normal (Gaussian distribution), and that a map
+of probability distributions for pairs would be elliptical -- the size of
+the ellipse gives the uncertainty itself and the eccentricity of the
+ellipse gives the correlation. This simple approach to assessing
+uncertainties ignores outliers, highly asymmetric uncertainties, or complex
+correlations between Parameters. In fact, it is not too hard to come up
+with problems where such effects are important. Our experience is that the
+automated results are usually the right scale and quite reasonable as
+initial estimates, but a more thorough exploration of the Parameter space
+using the tools described in :ref:`label-emcee` and
+:ref:`label-confidence-advanced` can give a more complete understanding of
+the distributions and relations between Parameters.
+
+
+.. _fit-reports-label:
+
+Getting and Printing Fit Reports
+===========================================
+
+.. currentmodule:: lmfit.printfuncs
+
+.. autofunction:: fit_report
+
+An example using this to write out a fit report would be:
+
+.. jupyter-execute:: ../examples/doc_fitting_withreport.py
+ :hide-output:
+
+which would give as output:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(fit_report(out))
+
+To be clear, you can get at all of these values from the fit result ``out``
+and ``out.params``. For example, a crude printout of the best fit variables
+and standard errors could be done as
+
+.. jupyter-execute::
+
+ print('-------------------------------')
+ print('Parameter Value Stderr')
+ for name, param in out.params.items():
+ print('{:7s} {:11.5f} {:11.5f}'.format(name, param.value, param.stderr))
+
+
+.. _fit-itercb-label:
Using a Iteration Callback Function
====================================
+.. currentmodule:: lmfit.minimizer
+
An iteration callback function is a function to be called at each
iteration, just after the objective function is called. The iteration
callback allows user-supplied code to be run at each iteration, and can be
@@ -374,11 +466,14 @@ exception is raised in the iteration callback. When a fit is aborted this
way, the parameters will have the values from the last iteration. The fit
statistics are not likely to be meaningful, and uncertainties will not be computed.
+
.. _fit-minimizer-label:
Using the :class:`Minimizer` class
=======================================
+.. currentmodule:: lmfit.minimizer
+
For full control of the fitting process, you will want to create a
:class:`Minimizer` object.
@@ -410,14 +505,31 @@ For more information, check the examples in ``examples/lmfit_brute_example.ipynb
.. automethod:: Minimizer.emcee
+
.. _label-emcee:
:meth:`Minimizer.emcee` - calculating the posterior probability distribution of parameters
==============================================================================================
-:meth:`Minimizer.emcee` can be used to obtain the posterior probability distribution of
-parameters, given a set of experimental data. An example problem is a double
-exponential decay. A small amount of Gaussian noise is also added in:
+:meth:`Minimizer.emcee` can be used to obtain the posterior probability
+distribution of parameters, given a set of experimental data. Note that this
+method does *not* actually perform a fit at all. Instead, it explores
+parameter space to determine the probability distributions for the parameters,
+but without an explicit goal of attempting to refine the solution. It should
+not be used for fitting, but it is a useful method to more thoroughly
+explore the parameter space around the solution after a fit has been done and
+thereby get an improved understanding of the probability distribution for the
+parameters. It may be able to refine your estimate of the most likely values
+for a set of parameters, but it will not iteratively find a good solution to
+the minimization problem. To use this method effectively, you should first
+use another minimization method and then use this method to explore the
+parameter space around those best-fit values.
+
+To illustrate this, we'll use an example problem of fitting data to a function
+of a double exponential decay, including a modest amount of Gaussian noise to
+the data. Note that this example is the same problem used in
+:ref:`label-confidence-advanced` for evaluating confidence intervals in the
+parameters, which is a similar goal to the one here.
.. jupyter-execute::
:hide-code:
@@ -456,7 +568,10 @@ Create a Parameter set for the initial guesses:
v = p.valuesdict()
return v['a1'] * np.exp(-x / v['t1']) + v['a2'] * np.exp(-(x - 0.1) / v['t2']) - y
-Solving with :func:`minimize` gives the Maximum Likelihood solution:
+Solving with :func:`minimize` gives the Maximum Likelihood solution. Note
+that we use the robust Nelder-Mead method here. The default Levenberg-Marquardt
+method seems to have difficulty with exponential decays, though it can refine
+the solution if starting near the solution:
.. jupyter-execute::
@@ -472,13 +587,19 @@ and plotting the fit using the Maximum Likelihood solution gives the graph below
plt.legend(loc='best')
plt.show()
-However, this doesn't give a probability distribution for the parameters.
+Note that the fit here (for which the ``numdifftools`` package is installed)
+does estimate and report uncertainties in the parameters and correlations for
+the parameters, and reports the correlation of parameters ``a2`` and ``t2`` to
+be very high. As we'll see, these estimates are pretty good, but when faced
+with such high correlation, it can be helpful to get the full probability
+distribution for the parameters. MCMC methods are very good for this.
+
Furthermore, we wish to deal with the data uncertainty. This is called
-marginalisation of a nuisance parameter. ``emcee`` requires a function that returns
-the log-posterior probability. The log-posterior probability is a sum of the
-log-prior probability and log-likelihood functions. The log-prior probability is
-assumed to be zero if all the parameters are within their bounds and ``-np.inf``
-if any of the parameters are outside their bounds.
+marginalisation of a nuisance parameter. ``emcee`` requires a function that
+returns the log-posterior probability. The log-posterior probability is a sum
+of the log-prior probability and log-likelihood functions. The log-prior
+probability is assumed to be zero if all the parameters are within their
+bounds and ``-np.inf`` if any of the parameters are outside their bounds.
If the objective function returns an array of unweighted residuals (i.e.,
``data-model``) as is the case here, you can use ``is_weighted=False`` as an
@@ -490,19 +611,37 @@ place boundaries on this parameter one can do:
mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))
-Now we have to set up the minimizer and do the sampling:
+Now we have to set up the minimizer and do the sampling (again, just to be
+clear, this is *not* doing a fit):
.. jupyter-execute::
+ :hide-output:
res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300, steps=1000, thin=20,
- params=mi.params, is_weighted=False)
+ params=mi.params, is_weighted=False, progress=False)
-Of note, the ``is_weighted`` argument will be ignored if your objective function
-returns a float instead of an array. See the Notes in :meth:`Minimizer.emcee` for
-more information.
+As mentioned in the Notes for :meth:`Minimizer.emcee`, the ``is_weighted``
+argument will be ignored if your objective function returns a float instead of
+an array. For the documentation we set ``progress=False``; the default is to
+print a progress bar to the Terminal if the ``tqdm`` package is installed.
-Lets have a look at those posterior distributions for the parameters. This requires
-installation of the ``corner`` package:
+The success of the method (i.e., whether or not the sampling went well) can be
+assessed by checking the integrated autocorrelation time and/or the acceptance
+fraction of the walkers. For this specific example the autocorrelation time
+could not be estimated because the "chain is too short". Instead, we plot the
+acceptance fraction per walker and its mean value suggests that the sampling
+worked as intended (as a rule of thumb the value should be between 0.2 and
+0.5).
+
+.. jupyter-execute::
+
+ plt.plot(res.acceptance_fraction)
+ plt.xlabel('walker')
+ plt.ylabel('acceptance fraction')
+ plt.show()
+
+With the results from ``emcee``, we can visualize the posterior distributions
+for the parameters using the ``corner`` package:
.. jupyter-execute::
@@ -512,19 +651,30 @@ installation of the ``corner`` package:
truths=list(res.params.valuesdict().values()))
The values reported in the :class:`MinimizerResult` are the medians of the
-probability distributions and a 1 :math:`\sigma` quantile, estimated as half the
-difference between the 15.8 and 84.2 percentiles. The median value is not
-necessarily the same as the Maximum Likelihood Estimate. We'll get that as well.
-You can see that we recovered the right uncertainty level on the data:
+probability distributions and a 1 :math:`\sigma` quantile, estimated as half
+the difference between the 15.8 and 84.2 percentiles. Printing these values:
+
.. jupyter-execute::
- print("median of posterior probability distribution")
+ print('median of posterior probability distribution')
print('--------------------------------------------')
lmfit.report_fit(res.params)
-
-Find the Maximum Likelihood Estimation (MLE):
+You can see that this recovered the right uncertainty level on the data. Note
+that these values agree pretty well with the results, uncertainties and
+correlations found by the fit and using ``numdifftools`` to estimate the
+covariance matrix. That is, even though the parameters ``a2``, ``t1``, and
+``t2`` are all highly correlated and do not display perfectly Gaussian
+probability distributions, the probability distributions found by explicitly
+sampling the parameter space are not so far from elliptical as to make the
+simple (and much faster) estimates from inverting the covariance matrix
+completely invalid.
+
+As mentioned above, the result from ``emcee`` reports the median values, which
+are not necessarily the same as the Maximum Likelihood Estimate. To obtain
+the values for the Maximum Likelihood Estimation (MLE) we find the location in
+the chain with the highest probability:
.. jupyter-execute::
@@ -534,35 +684,40 @@ Find the Maximum Likelihood Estimation (MLE):
for i, par in enumerate(p):
p[par].value = mle_soln[i]
- print("Maximum Likelihood Estimation")
- print('-----------------------------')
- for _, vals in p.items():
- print(vals)
-Finally, lets work out a 1 and 2-:math:`\sigma` error estimate for 't1':
+ print('\nMaximum Likelihood Estimation from emcee ')
+ print('-------------------------------------------------')
+ print('Parameter MLE Value Median Value Uncertainty')
+ fmt = ' {:5s} {:11.5f} {:11.5f} {:11.5f}'.format
+ for name, param in p.items():
+ print(fmt(name, param.value, res.params[name].value,
+ res.params[name].stderr))
-.. jupyter-execute::
- quantiles = np.percentile(res.flatchain['t1'], [2.28, 15.9, 50, 84.2, 97.7])
- print("1 sigma spread", 0.5 * (quantiles[3] - quantiles[1]))
- print("2 sigma spread", 0.5 * (quantiles[4] - quantiles[0]))
+Here the difference between the MLE and median values is seen to be below
+0.5%, and well within the estimated 1-:math:`\sigma` uncertainty.
-
-Getting and Printing Fit Reports
-===========================================
-
-.. currentmodule:: lmfit.printfuncs
-
-.. autofunction:: fit_report
-
-An example using this to write out a fit report would be:
-
-.. jupyter-execute:: ../examples/doc_fitting_withreport.py
- :hide-output:
-
-which would give as output:
+Finally, we can use the samples from ``emcee`` to work out the 1- and
+2-:math:`\sigma` error estimates.
.. jupyter-execute::
- :hide-code:
- print(fit_report(out))
+ print('\nError Estimates from emcee ')
+ print('------------------------------------------------------')
+ print('Parameter -2sigma -1sigma median +1sigma +2sigma ')
+
+ for name in p.keys():
+ quantiles = np.percentile(res.flatchain[name],
+                                 [2.275, 15.865, 50, 84.135, 97.725])
+ median = quantiles[2]
+ err_m2 = quantiles[0] - median
+ err_m1 = quantiles[1] - median
+ err_p1 = quantiles[3] - median
+ err_p2 = quantiles[4] - median
+ fmt = ' {:5s} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format
+ print(fmt(name, err_m2, err_m1, median, err_p1, err_p2))
+
+And we see that the initial estimates for the 1-:math:`\sigma` standard error
+using ``numdifftools`` was not too bad. We'll return to this example
+problem in :ref:`label-confidence-advanced` and use a different method to
+calculate the 1- and 2-:math:`\sigma` error bars.
diff --git a/doc/installation.rst b/doc/installation.rst
index af85690..9b09f36 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -13,7 +13,6 @@ Downloading and Installation
.. _matplotlib: https://matplotlib.org/
.. _dill: https://github.com/uqfoundation/dill
.. _asteval: https://github.com/newville/asteval
-.. _six: https://github.com/benjaminp/six
.. _uncertainties: https://github.com/lebigot/uncertainties
.. _numdifftools: https://github.com/pbrod/numdifftools
.. _contributing.md: https://github.com/lmfit/lmfit-py/blob/master/.github/CONTRIBUTING.md
@@ -27,25 +26,24 @@ Downloading and Installation
Prerequisites
~~~~~~~~~~~~~~~
-Lmfit works with `Python`_ versions 2.7 and 3.5, 3.6, or 3.7. Support for
-2.7 is expected to end in late 2019.
+Lmfit works with `Python`_ versions 3.5 and higher. Version
+0.9.15 is the final version to support Python 2.7.
Lmfit requires the following Python packages, with versions given:
- * `six`_ version 1.10 or higher.
- * `NumPy`_ version 1.10 or higher.
- * `SciPy`_ version 0.19 or higher.
- * `asteval`_ version 0.9.12 or higher.
- * `uncertainties`_ version 3.0 or higher.
+ * `NumPy`_ version 1.16 or higher.
+ * `SciPy`_ version 1.2 or higher.
+ * `asteval`_ version 0.9.16 or higher.
+ * `uncertainties`_ version 3.0.1 or higher.
All of these are readily available on PyPI, and should be installed
automatically if installing with ``pip install lmfit``.
In order to run the test suite, the `pytest`_ package is required. Some
-functionality requires the `emcee`_, `corner`_, `pandas`_, `Jupyter`_,
+functionality requires the `emcee`_ (version 3+), `corner`_, `pandas`_, `Jupyter`_,
`matplotlib`_, `dill`_, or `numdifftools`_ packages. These are not installed
automatically, but we highly recommend each of these packages.
-For building the documentation, `matplotlib`_, `emcee`_, `corner`_,
+For building the documentation, `matplotlib`_, `emcee`_ (version 3+), `corner`_,
`Sphinx`_, `jupyter_sphinx`_, and `ImageMagick`_ are required (the latter
one only when generating the PDF document).
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
index c0d9bb6..02cdc51 100644
--- a/doc/whatsnew.rst
+++ b/doc/whatsnew.rst
@@ -11,6 +11,56 @@ significant to the use and behavior of the library. This is not meant
to be a comprehensive list of changes. For such a complete record,
consult the `lmfit GitHub repository`_.
+
+.. _whatsnew_100_label:
+
+Version 1.0.0 Release Notes
+============================
+
+**Version 1.0.0 supports Python 3.5, 3.6, 3.7, and 3.8**
+
+New features:
+
+- no new features are introduced in 1.0.0.
+
+Improvements:
+
+- support for Python 2 and use of the ``six`` package are removed. (PR #612)
+
+Various:
+
+- documentation updates to clarify the use of ``emcee``. (PR #614)
+
+
+.. _whatsnew_0915_label:
+
+Version 0.9.15 Release Notes
+============================
+
+**Version 0.9.15 is the last release that supports Python 2.7**; it now also fully supports Python 3.8.
+
+New features, improvements, and bug fixes:
+
+- move application of parameter bounds to setter instead of getter (PR #587)
+- add support for non-array Jacobian types in least_squares (Issue #588, @ezwelty in PR #589)
+- add more information (i.e., acor and acceptance_fraction) about emcee fit (@j-zimmermann in PR #593)
+- "name" is now a required positional argument for Parameter class, update the magic methods (PR #595)
+- fix nvars count and bound handling in confidence interval calculations (Issue #597, PR #598)
+- support Python 3.8; requires asteval >= 0.9.16 (PR #599)
+- only support emcee version 3 (i.e., no PTSampler anymore) (PR #600)
+- fix and refactor prob_bunc in confidence interval calculations (PR #604)
+- fix adding Parameters with custom user-defined symbols (Issue #607, PR #608; thanks to @gbouvignies for the report)
+
+Various:
+
+- bump requirements to LTS version of SciPy/ NumPy and code clean-up (PR #591)
+- documentation updates (PR #596, and others)
+- improve test coverage and Travis CI updates (PR #595, and others)
+- update pre-commit hooks and configuration in setup.cfg
+
+To-be deprecated:
+- function Parameter.isParameter and conversion from uncertainties.core.Variable to value in _getval (PR #595)
+
.. _whatsnew_0914_label:
Version 0.9.14 Release Notes
diff --git a/examples/doc_fitting_emcee.py b/examples/doc_fitting_emcee.py
index 1d396d6..5862804 100644
--- a/examples/doc_fitting_emcee.py
+++ b/examples/doc_fitting_emcee.py
@@ -8,6 +8,7 @@ try:
HASPYLAB = True
except ImportError:
HASPYLAB = False
+HASPYLAB = False
try:
import corner
@@ -46,27 +47,45 @@ if HASPYLAB:
mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))
res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300,
- steps=1000, thin=20, params=mi.params, is_weighted=False)
+ steps=1000, thin=20, params=mi.params, is_weighted=False,
+ progress=False)
if HASPYLAB and HASCORNER:
emcee_corner = corner.corner(res.flatchain, labels=res.var_names,
truths=list(res.params.valuesdict().values()))
plt.show()
+if HASPYLAB:
+ plt.plot(res.acceptance_fraction)
+ plt.xlabel('walker')
+ plt.ylabel('acceptance fraction')
+ plt.show()
+
+if hasattr(res, "acor"):
+ print("Autocorrelation time for the parameters:")
+ print("----------------------------------------")
+ for i, par in enumerate(p):
+ print(par, res.acor[i])
+
print("\nmedian of posterior probability distribution")
print('--------------------------------------------')
lmfit.report_fit(res.params)
+
# find the maximum likelihood solution
highest_prob = np.argmax(res.lnprob)
hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
mle_soln = res.chain[hp_loc]
for i, par in enumerate(p):
p[par].value = mle_soln[i]
-print("\nMaximum likelihood Estimation")
-print('-----------------------------')
-for _, vals in p.items():
- print(vals)
+
+print('\nMaximum Likelihood Estimation from emcee ')
+print('-------------------------------------------------')
+print('Parameter MLE Value Median Value Uncertainty')
+fmt = ' {:5s} {:11.5f} {:11.5f} {:11.5f}'.format
+for name, param in p.items():
+ print(fmt(name, param.value, res.params[name].value,
+ res.params[name].stderr))
if HASPYLAB:
plt.figure()
@@ -76,7 +95,17 @@ if HASPYLAB:
plt.legend()
plt.show()
-quantiles = np.percentile(res.flatchain['t1'], [2.28, 15.9, 50, 84.2, 97.7])
-print("1 sigma spread", 0.5 * (quantiles[3] - quantiles[1]))
-print("2 sigma spread", 0.5 * (quantiles[4] - quantiles[0]))
-# <end of examples/doc_fitting_emcee.py>
+print('\nError Estimates from emcee ')
+print('------------------------------------------------------')
+print('Parameter -2sigma -1sigma median +1sigma +2sigma ')
+
+for name in p.keys():
+ quantiles = np.percentile(res.flatchain[name],
+                             [2.275, 15.865, 50, 84.135, 97.725])
+ median = quantiles[2]
+ err_m2 = quantiles[0] - median
+ err_m1 = quantiles[1] - median
+ err_p1 = quantiles[3] - median
+ err_p2 = quantiles[4] - median
+ fmt = ' {:5s} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format
+ print(fmt(name, err_m2, err_m1, median, err_p1, err_p2))
diff --git a/examples/example_Model_interface.py b/examples/example_Model_interface.py
index 45ede59..d2c0d0a 100644
--- a/examples/example_Model_interface.py
+++ b/examples/example_Model_interface.py
@@ -61,16 +61,16 @@ result.params.pretty_print()
# keyword arguments of ``fit`` that match the argments of ``decay``. You can
# build the ``Parameter`` objects explicity; the following is equivalent.
result = model.fit(data, t=t,
- N=Parameter(value=10),
- tau=Parameter(value=1))
+ N=Parameter('N', value=10),
+ tau=Parameter('tau', value=1))
report_fit(result.params)
###############################################################################
# By building ``Parameter`` objects explicitly, you can specify bounds
# (``min``, ``max``) and set parameters constant (``vary=False``).
result = model.fit(data, t=t,
- N=Parameter(value=7, vary=False),
- tau=Parameter(value=1, min=0))
+ N=Parameter('N', value=7, vary=False),
+ tau=Parameter('tau', value=1, min=0))
report_fit(result.params)
###############################################################################
diff --git a/examples/example_brute.py b/examples/example_brute.py
index 5d2266e..a89b71b 100644
--- a/examples/example_brute.py
+++ b/examples/example_brute.py
@@ -17,7 +17,7 @@ import copy
import matplotlib.pyplot as plt
import numpy as np
-from lmfit import fit_report, Minimizer, Parameters
+from lmfit import Minimizer, Parameters, fit_report
###############################################################################
# Let's start with the example given in the documentation of SciPy:
diff --git a/examples/example_complex_resonator_model.py b/examples/example_complex_resonator_model.py
index fa0d5bf..5eb117a 100644
--- a/examples/example_complex_resonator_model.py
+++ b/examples/example_complex_resonator_model.py
@@ -43,7 +43,7 @@ class ResonatorModel(lmfit.model.Model):
def __init__(self, *args, **kwargs):
# pass in the defining equation so the user doesn't have to later.
- super(ResonatorModel, self).__init__(linear_resonator, *args, **kwargs)
+ super().__init__(linear_resonator, *args, **kwargs)
self.set_param_hint('Q', min=0) # Enforce Q is positive
diff --git a/examples/example_confidence_interval.py b/examples/example_confidence_interval.py
index 1438a3e..5e7be2d 100644
--- a/examples/example_confidence_interval.py
+++ b/examples/example_confidence_interval.py
@@ -7,7 +7,8 @@ import matplotlib.pyplot as plt
from numpy import argsort, exp, linspace, pi, random, sign, sin, unique
from scipy.interpolate import interp1d
-from lmfit import Minimizer, Parameters, conf_interval, conf_interval2d, report_ci, report_fit
+from lmfit import (Minimizer, Parameters, conf_interval, conf_interval2d,
+ report_ci, report_fit)
###############################################################################
# Define the residual function, specify "true" parameter values, and generate
diff --git a/examples/example_detect_outliers.py b/examples/example_detect_outliers.py
new file mode 100644
index 0000000..cd997e8
--- /dev/null
+++ b/examples/example_detect_outliers.py
@@ -0,0 +1,102 @@
+"""
+Outlier detection via leave-one-out
+===================================
+
+Outliers can sometimes be identified by assessing the influence of each
+datapoint. To assess the influence of one point, we fit the dataset while omitting
+the point and compare the result with the fit of the full dataset. The code below
+shows how to do this with lmfit. Note that the presented method is very basic.
+"""
+from collections import defaultdict
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+plt.rcParams['figure.dpi'] = 130
+plt.rcParams['figure.autolayout'] = True
+###############################################################################
+# Generate test data and model. Apply the model to the data
+x = np.linspace(0.3, 10, 100)
+np.random.seed(1)
+y = 1.0 / (0.1 * x) + 2.0 + 3 * np.random.randn(x.size)
+
+params = lmfit.Parameters()
+params.add_many(('a', 0.1), ('b', 1))
+
+
+def func(x, a, b):
+ return 1.0 / (a * x) + b
+
+
+# Make 5 points outliers
+idx = np.random.randint(0, x.size, 5)
+y[idx] += 10 * np.random.randn(idx.size)
+
+# Fit the data
+model = lmfit.Model(func, independent_vars=['x'])
+fit_result = model.fit(y, x=x, a=0.1, b=2)
+
+###############################################################################
+# and gives the plot and fitting results below:
+
+fit_result.plot_fit()
+plt.plot(x[idx], y[idx], 'o', color='r', label='outliers')
+plt.show()
+print(fit_result.fit_report())
+
+###############################################################################
+# Fit the dataset while omitting one data point
+
+best_vals = defaultdict(lambda: np.zeros(x.size))
+stderrs = defaultdict(lambda: np.zeros(x.size))
+chi_sq = np.zeros_like(x)
+for i in range(x.size):
+ idx2 = np.arange(0, x.size)
+ idx2 = np.delete(idx2, i)
+ tmp_x = x[idx2]
+ tmp = model.fit(y[idx2],
+ x=tmp_x,
+ a=fit_result.params['a'],
+ b=fit_result.params['b'])
+ chi_sq[i] = tmp.chisqr
+ for p in tmp.params:
+ tpar = tmp.params[p]
+ best_vals[p][i] = tpar.value
+ stderrs[p][i] = (tpar.stderr / fit_result.params[p].stderr)
+
+###############################################################################
+# Plot the influence on the red. chisqr of each point
+
+fig, ax = plt.subplots()
+ax.plot(x, (fit_result.chisqr - chi_sq) / chi_sq)
+ax.scatter(x[idx],
+ fit_result.chisqr / chi_sq[idx] - 1,
+ color='r',
+ label='outlier')
+ax.set_ylabel(r'Relative red. $\chi^2$ change')
+ax.set_xlabel('x')
+ax.legend()
+
+###############################################################################
+# Plot the influence on the parameter value and error of each point
+
+fig, axs = plt.subplots(4, figsize=(4, 7), sharex='col')
+axs[0].plot(x, best_vals['a'])
+axs[0].scatter(x[idx], best_vals['a'][idx], color='r', label='outlier')
+axs[0].set_ylabel('best a')
+
+axs[1].plot(x, best_vals['b'])
+axs[1].scatter(x[idx], best_vals['b'][idx], color='r', label='outlier')
+axs[1].set_ylabel('best b')
+
+axs[2].plot(x, stderrs['a'])
+axs[2].scatter(x[idx], stderrs['a'][idx], color='r', label='outlier')
+axs[2].set_ylabel('err a change')
+
+axs[3].plot(x, stderrs['b'])
+axs[3].scatter(x[idx], stderrs['b'][idx], color='r', label='outlier')
+axs[3].set_ylabel('err b change')
+
+axs[3].set_xlabel('x')
diff --git a/examples/example_emcee_Model_interface.py b/examples/example_emcee_Model_interface.py
index 9acfd76..971b40b 100644
--- a/examples/example_emcee_Model_interface.py
+++ b/examples/example_emcee_Model_interface.py
@@ -43,7 +43,8 @@ result.plot()
# - set is_weighted to False to estimate the noise weights
# - set some sensible priors on the uncertainty to keep the MCMC in check
#
-emcee_kws = dict(steps=1000, burn=300, thin=20, is_weighted=False)
+emcee_kws = dict(steps=1000, burn=300, thin=20, is_weighted=False,
+ progress=False)
emcee_params = result.params.copy()
emcee_params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2.0))
@@ -59,6 +60,22 @@ result_emcee.plot_fit(ax=ax, data_kws=dict(color='gray', markersize=2))
plt.show()
###############################################################################
+# check the acceptance fraction to see whether emcee performed well
+plt.plot(result_emcee.acceptance_fraction)
+plt.xlabel('walker')
+plt.ylabel('acceptance fraction')
+plt.show()
+
+###############################################################################
+# try to compute the autocorrelation time
+if hasattr(result_emcee, "acor"):
+ print("Autocorrelation time for the parameters:")
+ print("----------------------------------------")
+    for i, p in enumerate(result_emcee.params):
+        print(p, result_emcee.acor[i])
+
+
+###############################################################################
# Plot the parameter covariances returned by emcee using corner
emcee_corner = corner.corner(result_emcee.flatchain, labels=result_emcee.var_names,
truths=list(result_emcee.params.valuesdict().values()))
diff --git a/examples/lmfit_emcee_model_selection.py b/examples/lmfit_emcee_model_selection.py
index a2989b6..0279a7f 100644
--- a/examples/lmfit_emcee_model_selection.py
+++ b/examples/lmfit_emcee_model_selection.py
@@ -2,8 +2,8 @@
Model Selection using lmfit and emcee
=====================================
-FIXME: this is an useful examples; however, it doesn't seem to run correctly
-anymore....
+FIXME: this is a useful example; however, it doesn't run correctly anymore as
+the PTSampler was removed in emcee v3...
"""
###############################################################################
@@ -139,7 +139,8 @@ for i in range(4):
# do the sampling
mini.append(lmfit.Minimizer(lnprob, res[i].params))
out = mini[i].emcee(steps=total_steps, ntemps=ntemps, workers=workers,
- reuse_sampler=False, float_behavior='posterior')
+ reuse_sampler=False, float_behavior='posterior',
+ progress=False)
# get the evidence
print(i, total_steps, mini[i].sampler.thermodynamic_integration_log_evidence())
log_evidence.append(mini[i].sampler.thermodynamic_integration_log_evidence()[0])
@@ -152,7 +153,7 @@ for j in range(6):
for i in range(4):
# do the sampling
res = mini[i].emcee(burn=burn, steps=100, thin=thin, ntemps=ntemps,
- workers=workers, reuse_sampler=True)
+ workers=workers, reuse_sampler=True, progress=False)
# get the evidence
print(i, total_steps, mini[i].sampler.thermodynamic_integration_log_evidence())
log_evidence.append(mini[i].sampler.thermodynamic_integration_log_evidence()[0])
diff --git a/lmfit.egg-info/PKG-INFO b/lmfit.egg-info/PKG-INFO
index 310f693..c37537e 100644
--- a/lmfit.egg-info/PKG-INFO
+++ b/lmfit.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.2
Name: lmfit
-Version: 0.9.14
+Version: 1.0.0
Summary: Least-Squares Minimization with Bounds and Constraints
Home-page: https://lmfit.github.io/lmfit-py/
Author: LMFit Development Team
@@ -32,10 +32,9 @@ Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
Classifier: Topic :: Scientific/Engineering
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Requires-Python: >=3.5
diff --git a/lmfit.egg-info/SOURCES.txt b/lmfit.egg-info/SOURCES.txt
index 4627aae..5932afa 100644
--- a/lmfit.egg-info/SOURCES.txt
+++ b/lmfit.egg-info/SOURCES.txt
@@ -90,6 +90,7 @@ examples/example_Model_interface.py
examples/example_brute.py
examples/example_complex_resonator_model.py
examples/example_confidence_interval.py
+examples/example_detect_outliers.py
examples/example_diffev.py
examples/example_emcee_Model_interface.py
examples/example_expression_model.py
@@ -152,6 +153,7 @@ tests/test_model.py
tests/test_model_uncertainties.py
tests/test_multidatasets.py
tests/test_nose.py
+tests/test_parameter.py
tests/test_parameters.py
tests/test_params_set.py
tests/test_printfuncs.py
diff --git a/lmfit.egg-info/requires.txt b/lmfit.egg-info/requires.txt
index 87dabf1..81149a4 100644
--- a/lmfit.egg-info/requires.txt
+++ b/lmfit.egg-info/requires.txt
@@ -1,5 +1,4 @@
-asteval>=0.9.12
-numpy>=1.10
-scipy>=0.19
-six>1.10
-uncertainties>=3.0
+asteval>=0.9.16
+numpy>=1.16
+scipy>=1.2
+uncertainties>=3.0.1
diff --git a/lmfit/_version.py b/lmfit/_version.py
index 3fb8b1f..2d157d7 100644
--- a/lmfit/_version.py
+++ b/lmfit/_version.py
@@ -8,11 +8,11 @@ import json
version_json = '''
{
- "date": "2019-08-29T12:07:38-0500",
+ "date": "2019-12-20T13:51:16-0600",
"dirty": false,
"error": null,
- "full-revisionid": "d7b408baa64821f4356cff9c32a199bc53becd93",
- "version": "0.9.14"
+ "full-revisionid": "c5f969028c8e937c02a5b009347d12e2f7843be9",
+ "version": "1.0.0"
}
''' # END VERSION_JSON
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index 61ddc78..14d26d5 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -14,24 +14,18 @@ CONF_ERR_STDERR = '%s without sensible uncertainty estimates' % CONF_ERR_GEN
CONF_ERR_NVARS = '%s with < 2 variables' % CONF_ERR_GEN
-def f_compare(ndata, nparas, new_chi, best_chi, nfix=1):
- """Return the probability calculated using the F-test.
+def f_compare(best_fit, new_fit):
+ """Return the probability calculated using the F-test.
The null model (i.e., best-fit solution) is compared to an alternate model
where one or more parameters are fixed.
Parameters
----------
- ndata : int
- Number of data points: :math:`N`.
- nparas : int
- Number of variables in the alternate model.
- new_chi : float
- Chi-square of the alternate model.
- best_chi : float
- Chi-square of the null model.
- nfix : int
- Number of fixed parameters (default is 1).
+ best_fit: MinimizerResult
+ The result from the best-fit.
+ new_fit: MinimizerResult
+ The result from fit with the fixed parameter(s).
Returns
-------
@@ -39,10 +33,9 @@ def f_compare(ndata, nparas, new_chi, best_chi, nfix=1):
Value of the calculated probality.
"""
- nparas = nparas + nfix
- nfree = ndata - nparas
- nfix = 1.0*nfix
- dchi = new_chi / best_chi - 1.0
+ nfree = best_fit.nfree
+ nfix = best_fit.nvarys - new_fit.nvarys
+ dchi = new_fit.chisqr / best_fit.chisqr - 1.0
return f.cdf(dchi * nfree / nfix, nfix, nfree)
@@ -161,7 +154,7 @@ def map_trace_to_names(trace, params):
return out
-class ConfidenceInterval(object):
+class ConfidenceInterval:
"""Class used to calculate the confidence interval."""
def __init__(self, minimizer, result, p_names=None, prob_func=None,
@@ -170,7 +163,7 @@ class ConfidenceInterval(object):
self.verbose = verbose
self.minimizer = minimizer
self.result = result
- self.params = result.params
+ self.params = result.params.copy()
self.org = copy_vals(self.params)
self.best_chi = result.chisqr
@@ -182,20 +175,18 @@ class ConfidenceInterval(object):
# check that there are at least 2 true variables!
# check that all stderrs are sensible (including not None or NaN)
- nvars = 0
+
for par in self.fit_params:
- if par.vary:
- nvars += 1
- try:
- if not (par.vary and par.stderr > 0.0):
- raise MinimizerException(CONF_ERR_STDERR)
- except TypeError:
+ if par.vary and (par.stderr is None or par.stderr is np.nan):
raise MinimizerException(CONF_ERR_STDERR)
+ nvars = len([p for p in self.params.values() if p.vary])
if nvars < 2:
raise MinimizerException(CONF_ERR_NVARS)
- if prob_func is None or not hasattr(prob_func, '__call__'):
+ if prob_func is None:
self.prob_func = f_compare
+ else:
+ self.prob_func = prob_func
if trace:
self.trace_dict = {i: [] for i in self.p_names}
@@ -255,7 +246,6 @@ class ConfidenceInterval(object):
try:
val = brentq(calc_prob, a_limit,
limit, rtol=.5e-4, args=prob)
-
except ValueError:
self.reset_vals()
try:
@@ -293,16 +283,35 @@ class ConfidenceInterval(object):
old_prob = 0
limit = start_val
i = 0
+ bound_reached = False
+ max_prob = max(self.probs)
- while old_prob < max(self.probs):
+ while old_prob < max_prob:
i = i + 1
limit += step * direction
+ if limit > para.max:
+ limit = para.max
+ bound_reached = True
+ elif limit < para.min:
+ limit = para.min
+ bound_reached = True
new_prob = self.calc_prob(para, limit)
- rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1.e-12)
+ rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1e-12)
old_prob = new_prob
+ if self.verbose:
+ msg = "P({}={}) = {}, max. prob={}"
+ print(msg.format(para.name, limit, new_prob, max_prob))
# check for convergence
+ if bound_reached:
+ if new_prob < max(self.probs):
+ errmsg = ("Bound reached with "
+ "prob({}={}) = {} < max(sigmas)"
+ ).format(para.name, limit, new_prob)
+ warn(errmsg)
+ break
+
if i > self.maxiter:
errmsg = "maxiter={} reached ".format(self.maxiter)
errmsg += ("and prob({}={}) = {} < "
@@ -331,8 +340,7 @@ class ConfidenceInterval(object):
self.params[para.name] = para
self.minimizer.prepare_fit(self.params)
out = self.minimizer.leastsq()
- prob = self.prob_func(out.ndata, out.ndata - out.nfree,
- out.chisqr, self.best_chi)
+ prob = self.prob_func(self.result, out)
if self.trace:
x = [i.value for i in out.params.values()]
@@ -391,7 +399,7 @@ def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
best_chi = result.chisqr
org = copy_vals(result.params)
- if prob_func is None or not hasattr(prob_func, '__call__'):
+ if prob_func is None:
prob_func = f_compare
x = params[x_name]
@@ -423,8 +431,7 @@ def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
result.params[y.name] = y
minimizer.prepare_fit(params=result.params)
out = minimizer.leastsq()
- prob = prob_func(out.ndata, out.ndata - out.nfree, out.chisqr,
- best_chi, nfix=2.)
+ prob = prob_func(result, out)
result.params[x.name] = save_x
result.params[y.name] = save_y
return prob
diff --git a/lmfit/jsonutils.py b/lmfit/jsonutils.py
index 6b5cdd9..edbd394 100644
--- a/lmfit/jsonutils.py
+++ b/lmfit/jsonutils.py
@@ -3,7 +3,6 @@ from base64 import b64decode, b64encode
import sys
import numpy as np
-import six
try:
import dill
@@ -19,19 +18,13 @@ except ImportError:
def bindecode(val):
- """b64decode wrapper, Python 2 and 3 version."""
- return b64decode(six.b(val))
+ """b64decode wrapper."""
+ return b64decode(val)
-if six.PY3:
- def binencode(val):
- """b64encode wrapper, Python 3 version."""
- # b64encode result is /always/ UTF-8
- return str(b64encode(val), 'utf-8')
-else:
- def binencode(val):
- """b64encode wrapper, Python 2 version."""
- return str(b64encode(val))
+def binencode(val):
+ """b64encode wrapper."""
+ return str(b64encode(val), 'utf-8')
def find_importer(obj):
@@ -80,7 +73,7 @@ def encode4js(obj):
__dtype__=obj.dtype.name, value=val)
elif isinstance(obj, (np.float, np.int)):
return float(obj)
- elif isinstance(obj, six.string_types):
+ elif isinstance(obj, str):
try:
return str(obj)
except UnicodeError:
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index bf3c31c..8aa0cb0 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -1,7 +1,7 @@
"""Basic model line shapes and distribution functions."""
-from __future__ import division
-from numpy import arctan, cos, exp, finfo, float64, isnan, log, pi, sin, sqrt, where
+from numpy import (arctan, cos, exp, finfo, float64, isnan, log, pi, sin, sqrt,
+ where)
from numpy.testing import assert_allclose
from scipy.special import erf, erfc
from scipy.special import gamma as gamfcn
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index a4034a2..27352ce 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -24,34 +24,28 @@ from numpy.dual import inv
from numpy.linalg import LinAlgError
from scipy.optimize import basinhopping as scipy_basinhopping
from scipy.optimize import brute as scipy_brute
-from scipy.optimize import differential_evolution, least_squares
+from scipy.optimize import differential_evolution
+from scipy.optimize import dual_annealing as scipy_dual_annealing
+from scipy.optimize import least_squares
from scipy.optimize import leastsq as scipy_leastsq
from scipy.optimize import minimize as scipy_minimize
+from scipy.optimize import shgo as scipy_shgo
+from scipy.sparse import issparse
+from scipy.sparse.linalg import LinearOperator
from scipy.stats import cauchy as cauchy_dist
from scipy.stats import norm as norm_dist
from scipy.version import version as scipy_version
-import six
import uncertainties
from ._ampgo import ampgo
from .parameter import Parameter, Parameters
from .printfuncs import fitreport_html_table
-# check for SHGO and dual_annealing algorithms
-try:
- from scipy.optimize import shgo as scipy_shgo
- from scipy.optimize import dual_annealing as scipy_dual_annealing
- HAS_SHGO = True
- HAS_DUAL_ANNEALING = True
-except ImportError:
- HAS_SHGO = False
- HAS_DUAL_ANNEALING = False
-
# check for EMCEE
try:
import emcee
- HAS_EMCEE = True
- EMCEE_VERSION = int(emcee.__version__[0])
+ from emcee.autocorr import AutocorrError
+ HAS_EMCEE = int(emcee.__version__[0]) >= 3
except ImportError:
HAS_EMCEE = False
@@ -71,6 +65,14 @@ try:
except ImportError:
HAS_NUMDIFFTOOLS = False
+# check for dill
+try:
+ import dill # noqa: F401
+ HAS_DILL = True
+except ImportError:
+ HAS_DILL = False
+
+
# define the namedtuple here so pickle will work with the MinimizerResult
Candidate = namedtuple('Candidate', ['params', 'score'])
@@ -146,15 +148,10 @@ SCALAR_METHODS = {'nelder': 'Nelder-Mead',
'slsqp': 'SLSQP',
'dogleg': 'dogleg',
'trust-ncg': 'trust-ncg',
- 'differential_evolution': 'differential_evolution'}
-
-# FIXME: update this when incresing the minimum scipy version
-major, minor, micro = scipy_version.split('.', 2)
-if (int(major) >= 1 and int(minor) >= 1):
- SCALAR_METHODS.update({'trust-constr': 'trust-constr'})
-if int(major) >= 1:
- SCALAR_METHODS.update({'trust-exact': 'trust-exact',
- 'trust-krylov': 'trust-krylov'})
+ 'differential_evolution': 'differential_evolution',
+ 'trust-constr': 'trust-constr',
+ 'trust-exact': 'trust-exact',
+ 'trust-krylov': 'trust-krylov'}
def reduce_chisquare(r):
@@ -226,7 +223,7 @@ def reduce_cauchylogpdf(r):
return -cauchy_dist.logpdf(r).sum()
-class MinimizerResult(object):
+class MinimizerResult:
r"""The results of a minimization.
Minimization results include data such as status and error messages,
@@ -366,7 +363,7 @@ class MinimizerResult(object):
min_correl=min_correl)
-class Minimizer(object):
+class Minimizer:
"""A general minimizer for curve fitting and optimization."""
_err_nonparam = ("params must be a minimizer.Parameters() instance or list "
@@ -681,7 +678,7 @@ class Minimizer(object):
# 2. string starting with 'neglogc' or 'negent'
# 3. sum of squares
if not callable(self.reduce_fcn):
- if isinstance(self.reduce_fcn, six.string_types):
+ if isinstance(self.reduce_fcn, str):
if self.reduce_fcn.lower().startswith('neglogc'):
self.reduce_fcn = reduce_cauchylogpdf
elif self.reduce_fcn.lower().startswith('negent'):
@@ -838,9 +835,9 @@ class Minimizer(object):
- 'BFGS'
- 'TNC'
- 'trust-ncg'
- - 'trust-exact' (SciPy >= 1.0)
- - 'trust-krylov' (SciPy >= 1.0)
- - 'trust-constr' (SciPy >= 1.1)
+ - 'trust-exact'
+ - 'trust-krylov'
+ - 'trust-constr'
- 'dogleg'
- 'SLSQP'
- 'differential_evolution'
@@ -924,12 +921,6 @@ class Minimizer(object):
if k in kwargs:
kwargs[k] = v
- # keywords 'updating' and 'workers' are introduced in SciPy v1.2
- # FIXME: remove after updating the requirement >= 1.2
- if int(major) == 0 or (int(major) == 1 and int(minor) < 2):
- kwargs.pop('updating')
- kwargs.pop('workers')
-
try:
ret = differential_evolution(self.penalty, _bounds, **kwargs)
except AbortFitException:
@@ -966,53 +957,141 @@ class Minimizer(object):
return result
+ def _lnprob(self, theta, userfcn, params, var_names, bounds, userargs=(),
+ userkws=None, float_behavior='posterior', is_weighted=True,
+ nan_policy='raise'):
+ """Calculate the log-posterior probability.
+
+ See the `Minimizer.emcee` method for more details.
+
+ Parameters
+ ----------
+ theta : sequence
+ Float parameter values (only those being varied).
+ userfcn : callable
+ User objective function.
+ params : :class:`~lmfit.parameters.Parameters`
+ The entire set of Parameters.
+ var_names : list
+ The names of the parameters that are varying.
+ bounds : numpy.ndarray
+ Lower and upper bounds of parameters. Has shape (nvarys, 2).
+ userargs : tuple, optional
+ Extra positional arguments required for user objective function.
+ userkws : dict, optional
+ Extra keyword arguments required for user objective function.
+ float_behavior : str, optional
+ Specifies meaning of objective when it returns a float. Use
+ 'posterior' if objective function returns a log-posterior
+ probability (default) or 'chi2' if it returns a chi2 value.
+ is_weighted : bool, optional
+ If `userfcn` returns a vector of residuals then `is_weighted`
+ (default is True) specifies if the residuals have been weighted by
+ data uncertainties.
+ nan_policy : str, optional
+ Specifies action if `userfcn` returns NaN values. Use 'raise'
+ (default) to raise a `ValueError`, 'propagate' to use values as-is,
+ or 'omit' to filter out the non-finite values.
+
+ Returns
+ -------
+ lnprob : float
+ Log posterior probability.
+
+ """
+ # the comparison has to be done on theta and bounds. DO NOT inject theta
+ # values into Parameters, then compare Parameters values to the bounds.
+ # Parameters values are clipped to stay within bounds.
+ if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]):
+ return -np.inf
+ for name, val in zip(var_names, theta):
+ params[name].value = val
+ userkwargs = {}
+ if userkws is not None:
+ userkwargs = userkws
+ # update the constraints
+ params.update_constraints()
+ # now calculate the log-likelihood
+ out = userfcn(params, *userargs, **userkwargs)
+ self.result.nfev += 1
+ if callable(self.iter_cb):
+ abort = self.iter_cb(params, self.result.nfev, out,
+ *userargs, **userkwargs)
+ self._abort = self._abort or abort
+ if self._abort:
+ self.result.residual = out
+ self._lastpos = theta
+ raise AbortFitException("fit aborted by user.")
+ else:
+ out = _nan_policy(np.asarray(out).ravel(),
+ nan_policy=self.nan_policy)
+ lnprob = np.asarray(out).ravel()
+ if len(lnprob) == 0:
+ lnprob = np.array([-1.e100])
+ if lnprob.size > 1:
+ # objective function returns a vector of residuals
+ if '__lnsigma' in params and not is_weighted:
+ # marginalise over a constant data uncertainty
+ __lnsigma = params['__lnsigma'].value
+ c = np.log(2 * np.pi) + 2 * __lnsigma
+ lnprob = -0.5 * np.sum((lnprob / np.exp(__lnsigma)) ** 2 + c)
+ else:
+ lnprob = -0.5 * (lnprob * lnprob).sum()
+ else:
+ # objective function returns a single value.
+ # use float_behaviour to figure out if the value is posterior or chi2
+ if float_behavior == 'posterior':
+ pass
+ elif float_behavior == 'chi2':
+ lnprob *= -0.5
+ else:
+ raise ValueError("float_behaviour must be either 'posterior' "
+ "or 'chi2' " + float_behavior)
+ return lnprob
+
def emcee(self, params=None, steps=1000, nwalkers=100, burn=0, thin=1,
ntemps=1, pos=None, reuse_sampler=False, workers=1,
- float_behavior='posterior', is_weighted=True, seed=None, progress=True):
- r"""Bayesian sampling of the posterior distribution using `emcee`.
+ float_behavior='posterior', is_weighted=True, seed=None,
+ progress=True):
+ """Bayesian sampling of the posterior distribution using the `emcee`
+ Markov Chain Monte Carlo package.
- Bayesian sampling of the posterior distribution for the parameters
- using the `emcee` Markov Chain Monte Carlo package. The method assumes
- that the prior is Uniform. You need to have `emcee` installed to use
- this method.
+ The method assumes that the prior is Uniform. You need to have `emcee`
+ version 3 installed to use this method.
Parameters
----------
params : :class:`~lmfit.parameter.Parameters`, optional
- Parameters to use as starting point. If this is not specified
- then the Parameters used to initialize the Minimizer object are
- used.
+ Parameters to use as starting point. If this is not specified then
+ the Parameters used to initialize the Minimizer object are used.
steps : int, optional
How many samples you would like to draw from the posterior
distribution for each of the walkers?
nwalkers : int, optional
Should be set so :math:`nwalkers >> nvarys`, where `nvarys` are
the number of parameters being varied during the fit.
- "Walkers are the members of the ensemble. They are almost like
+ 'Walkers are the members of the ensemble. They are almost like
separate Metropolis-Hastings chains but, of course, the proposal
distribution for a given walker depends on the positions of all
- the other walkers in the ensemble." - from the `emcee` webpage.
+ the other walkers in the ensemble.' - from the `emcee` webpage.
burn : int, optional
Discard this many samples from the start of the sampling regime.
thin : int, optional
Only accept 1 in every `thin` samples.
- ntemps : int, optional
- If `ntemps > 1` perform a Parallel Tempering.
+ ntemps : int, deprecated
+ ntemps has no effect.
pos : numpy.ndarray, optional
- Specify the initial positions for the sampler. If `ntemps == 1`
- then `pos.shape` should be `(nwalkers, nvarys)`. Otherwise,
- `(ntemps, nwalkers, nvarys)`. You can also initialise using a
- previous chain that had the same `ntemps`, `nwalkers` and
- `nvarys`. Note that `nvarys` may be one larger than you expect it
- to be if your `userfcn` returns an array and `is_weighted is
- False`.
+ Specify the initial positions for the sampler, an ndarray of shape
+ `(nwalkers, nvarys)`. You can also initialise using a previous
+ chain of the same `nwalkers` and `nvarys`. Note that `nvarys` may
+ be one larger than you expect it to be if your `userfcn` returns
+ an array and `is_weighted` is `False`.
reuse_sampler : bool, optional
- If you have already run `emcee` on a given `Minimizer` object then
- it possesses an internal ``sampler`` attribute. You can continue to
- draw from the same sampler (retaining the chain history) if you set
- this option to True. Otherwise a new sampler is created. The
- `nwalkers`, `ntemps`, `pos`, and `params` keywords are ignored with
- this option.
+ Set to `True` if you have already run `emcee` with the `Minimizer`
+ instance and want to continue to draw from its ``sampler`` (and so
+ retain the chain history). If `False`, a new sampler is created.
+ The keywords `nwalkers`, `pos`, and `params` will be ignored when
+ this is set, as they will be set by the existing sampler.
**Important**: the Parameters used to create the sampler must not
change in-between calls to `emcee`. Alteration of Parameters
would include changed ``min``, ``max``, ``vary`` and ``expr``
@@ -1029,16 +1108,11 @@ class Minimizer(object):
be used here. **Note**: because of multiprocessing overhead it may
only be worth parallelising if the objective function is expensive
to calculate, or if there are a large number of objective
- evaluations per step (`ntemps * nwalkers * nvarys`).
+ evaluations per step (`nwalkers * nvarys`).
float_behavior : str, optional
- Specifies meaning of the objective function output if it returns a
- float. One of:
-
- - 'posterior' - objective function returns a log-posterior
- probability
- - 'chi2' - objective function returns :math:`\chi^2`
-
- See Notes for further details.
+ Meaning of float (scalar) output of objective function. Use
+ 'posterior' if it returns a log-posterior probability or 'chi2'
+ if it returns :math:`\\chi^2`. See Notes for further details.
is_weighted : bool, optional
Has your objective function been weighted by measurement
uncertainties? If `is_weighted is True` then your objective
@@ -1059,109 +1133,98 @@ class Minimizer(object):
If `seed` is already a `numpy.random.RandomState` instance, then
that `numpy.random.RandomState` instance is used.
Specify `seed` for repeatable minimizations.
+ progress : bool, optional
+ Print a progress bar to the console while running.
Returns
-------
:class:`MinimizerResult`
MinimizerResult object containing updated params, statistics,
- etc. The updated params represent the median (50th percentile) of
- all the samples, whilst the parameter uncertainties are half of the
- difference between the 15.87 and 84.13 percentiles.
- The `MinimizerResult` also contains the ``chain``, ``flatchain``
- and ``lnprob`` attributes. The ``chain`` and ``flatchain``
- attributes contain the samples and have the shape
- `(nwalkers, (steps - burn) // thin, nvarys)` or
- `(ntemps, nwalkers, (steps - burn) // thin, nvarys)`,
- depending on whether Parallel tempering was used or not.
- `nvarys` is the number of parameters that are allowed to vary.
- The ``flatchain`` attribute is a `pandas.DataFrame` of the
- flattened chain, `chain.reshape(-1, nvarys)`. To access flattened
- chain values for a particular parameter use
- `result.flatchain[parname]`. The ``lnprob`` attribute contains the
- log probability for each sample in ``chain``. The sample with the
- highest probability corresponds to the maximum likelihood estimate.
-
+ etc. The updated params represent the median of the samples,
+ while the uncertainties are half the difference of the 15.87
+ and 84.13 percentiles. The `MinimizerResult` contains a few
+ additional attributes: ``chain`` contains the samples and has
+ shape `((steps - burn) // thin, nwalkers, nvarys)`.
+ ``flatchain`` is a `pandas.DataFrame` of the flattened chain,
+ that can be accessed with `result.flatchain[parname]`.
+ ``lnprob`` contains the log probability for each sample in
+ ``chain``. The sample with the highest probability corresponds
+ to the maximum likelihood estimate. ``acor`` is an array
+ containing the autocorrelation time for each parameter if the
+ autocorrelation time can be computed from the chain. Finally,
+ ``acceptance_fraction`` is an array of the fraction of steps
+ accepted for each walker.
Notes
-----
- This method samples the posterior distribution of the parameters using
- Markov Chain Monte Carlo. To do so it needs to calculate the
- log-posterior probability of the model parameters, `F`, given the data,
- `D`, :math:`\ln p(F_{true} | D)`. This 'posterior probability' is
- calculated as:
+ This method samples the posterior distribution of the parameters
+ using Markov Chain Monte Carlo. It calculates the log-posterior
+ probability of the model parameters, `F`, given the data, `D`,
+ :math:`\\ln p(F_{true} | D)`. This 'posterior probability' is given by:
.. math::
- \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
+ \\ln p(F_{true} | D) \\propto \\ln p(D | F_{true}) + \\ln p(F_{true})
- where :math:`\ln p(D | F_{true})` is the 'log-likelihood' and
- :math:`\ln p(F_{true})` is the 'log-prior'. The default log-prior
- encodes prior information already known about the model. This method
- assumes that the log-prior probability is `-numpy.inf` (impossible) if
- the one of the parameters is outside its limits. The log-prior probability
- term is zero if all the parameters are inside their bounds (known as a
- uniform prior). The log-likelihood function is given by [1]_:
+ where :math:`\\ln p(D | F_{true})` is the 'log-likelihood' and
+ :math:`\\ln p(F_{true})` is the 'log-prior'. The default log-prior
+ encodes prior information known about the model: the log-prior
+ probability is `-numpy.inf` (impossible) if any of the parameters
+ is outside its limits, and is zero if all the parameters are inside
+ their bounds (uniform prior). The log-likelihood function is [1]_:
.. math::
- \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{(g_n(F_{true}) - D_n)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
-
- The first summand in the square brackets represents the residual for a
- given datapoint (:math:`g` being the generative model, :math:`D_n` the
- data and :math:`s_n` the standard deviation, or measurement
- uncertainty, of the datapoint). This term represents :math:`\chi^2`
- when summed over all data points.
- Ideally the objective function used to create `lmfit.Minimizer` should
- return the log-posterior probability, :math:`\ln p(F_{true} | D)`.
- However, since the in-built log-prior term is zero, the objective
- function can also just return the log-likelihood, unless you wish to
- create a non-uniform prior.
-
- If a float value is returned by the objective function then this value
- is assumed by default to be the log-posterior probability, i.e.
- `float_behavior is 'posterior'`. If your objective function returns
- :math:`\chi^2`, then you should use a value of `'chi2'` for
- `float_behavior`. `emcee` will then multiply your :math:`\chi^2` value
- by -0.5 to obtain the posterior probability.
-
- However, the default behaviour of many objective functions is to return
- a vector of (possibly weighted) residuals. Therefore, if your objective
- function returns a vector, `res`, then the vector is assumed to contain
- the residuals. If `is_weighted is True` then your residuals are assumed
- to be correctly weighted by the standard deviation (measurement
- uncertainty) of the data points (`res = (data - model) / sigma`) and
- the log-likelihood (and log-posterior probability) is calculated as:
- `-0.5 * numpy.sum(res**2)`.
- This ignores the second summand in the square brackets. Consequently,
- in order to calculate a fully correct log-posterior probability value
- your objective function should return a single value. If
- `is_weighted is False` then the data uncertainty, `s_n`, will be
- treated as a nuisance parameter and will be marginalized out. This is
- achieved by employing a strictly positive uncertainty
- (homoscedasticity) for each data point, :math:`s_n = \exp(\_\_lnsigma)`.
- `__lnsigma` will be present in `MinimizerResult.params`, as well as
- `Minimizer.chain`, `nvarys` will also be increased by one.
+ \\ln p(D|F_{true}) = -\\frac{1}{2}\\sum_n \\left[\\frac{(g_n(F_{true}) - D_n)^2}{s_n^2}+\\ln (2\\pi s_n^2)\\right]
+
+ The first term represents the residual (:math:`g` being the generative
+ model, :math:`D_n` the data and :math:`s_n` the measurement
+ uncertainty). This gives :math:`\\chi^2` when summed over all data
+ points. The objective function may also return the log-posterior
+ probability, :math:`\\ln p(F_{true} | D)`. Since the default
+ log-prior term is zero, the objective function can also just return
+ the log-likelihood, unless you wish to create a non-uniform prior.
+
+ If the objective function returns a float value, this is assumed by
+ default to be the log-posterior probability (`float_behavior` default
+ is 'posterior'). If your objective function returns :math:`\\chi^2`,
+ then you should use a value of `float_behavior='chi2'`.
+
+ By default objective functions may return an ndarray of (possibly
+ weighted) residuals. In this case, use `is_weighted` to select whether
+ these are correctly weighted by measurement uncertainty. Note that
+ this ignores the second term above, so that to calculate a correct
+ log-posterior probability value your objective function should return
+ a float value. With `is_weighted=False` the data uncertainty, `s_n`,
+ will be treated as a nuisance parameter to be marginalized out. This
+ uses strictly positive uncertainty (homoscedasticity) for each data
+ point, :math:`s_n = \\exp(\\rm{\\_\\_lnsigma})`. `__lnsigma` will be
+ present in `MinimizerResult.params`, as well as `Minimizer.chain` and
+ `nvarys` will be increased by one.
References
----------
- .. [1] http://dan.iel.fm/emcee/current/user/line/
+ .. [1] https://emcee.readthedocs.io
"""
if not HAS_EMCEE:
- raise NotImplementedError('You must have emcee to use'
- ' the emcee method')
+ raise NotImplementedError('emcee version 3 is required.')
+
+ if ntemps > 1:
+ msg = ("'ntemps' has no effect anymore, since the PTSampler was "
+ "removed from emcee version 3.")
+ raise DeprecationWarning(msg)
+
tparams = params
- # if you're reusing the sampler then ntemps, nwalkers have to be
+ # if you're reusing the sampler then nwalkers has to be
# determined from the previous sampling
if reuse_sampler:
if not hasattr(self, 'sampler') or not hasattr(self, '_lastpos'):
- raise ValueError("You wanted to use an existing sampler, but"
+ raise ValueError("You wanted to use an existing sampler, but "
"it hasn't been created yet")
if len(self._lastpos.shape) == 2:
- ntemps = 1
nwalkers = self._lastpos.shape[0]
elif len(self._lastpos.shape) == 3:
- ntemps = self._lastpos.shape[0]
nwalkers = self._lastpos.shape[1]
tparams = None
@@ -1176,7 +1239,8 @@ class Minimizer(object):
if '__lnsigma' not in params:
# __lnsigma should already be in params if is_weighted was
# previously set to True.
- params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf, vary=True)
+ params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf,
+ vary=True)
# have to re-prepare the fit
result = self.prepare_fit(params)
params = result.params
@@ -1213,7 +1277,7 @@ class Minimizer(object):
# set up multiprocessing options for the samplers
auto_pool = None
sampler_kwargs = {}
- if isinstance(workers, int) and workers > 1:
+ if isinstance(workers, int) and workers > 1 and HAS_DILL:
auto_pool = multiprocessing.Pool(workers)
sampler_kwargs['pool'] = auto_pool
elif hasattr(workers, 'map'):
@@ -1228,14 +1292,8 @@ class Minimizer(object):
'userkws': self.userkws,
'nan_policy': self.nan_policy}
- if ntemps > 1:
- # the prior and likelihood function args and kwargs are the same
- sampler_kwargs['loglargs'] = lnprob_args
- sampler_kwargs['loglkwargs'] = lnprob_kwargs
- sampler_kwargs['logpargs'] = (bounds,)
- else:
- sampler_kwargs['args'] = lnprob_args
- sampler_kwargs['kwargs'] = lnprob_kwargs
+ sampler_kwargs['args'] = lnprob_args
+ sampler_kwargs['kwargs'] = lnprob_kwargs
# set up the random number generator
rng = _make_random_gen(seed)
@@ -1247,20 +1305,15 @@ class Minimizer(object):
p0 = self._lastpos
if p0.shape[-1] != self.nvarys:
- raise ValueError("You cannot reuse the sampler if the number"
+ raise ValueError("You cannot reuse the sampler if the number "
"of varying parameters has changed")
- elif ntemps > 1:
- # Parallel Tempering
- # jitter the starting position by scaled Gaussian noise
- p0 = 1 + rng.randn(ntemps, nwalkers, self.nvarys) * 1.e-4
- p0 *= var_arr
- self.sampler = emcee.PTSampler(ntemps, nwalkers, self.nvarys,
- _lnpost, _lnprior, **sampler_kwargs)
+
else:
p0 = 1 + rng.randn(nwalkers, self.nvarys) * 1.e-4
p0 *= var_arr
+ sampler_kwargs['pool'] = auto_pool
self.sampler = emcee.EnsembleSampler(nwalkers, self.nvarys,
- _lnpost, **sampler_kwargs)
+ self._lnprob, **sampler_kwargs)
# user supplies an initialisation position for the chain
# If you try to run the sampler with p0 of a wrong size then you'll get
@@ -1271,17 +1324,10 @@ class Minimizer(object):
if p0.shape == tpos.shape:
pass
# trying to initialise with a previous chain
- elif tpos.shape[0::2] == (nwalkers, self.nvarys):
- tpos = tpos[:, -1, :]
- # initialising with a PTsampler chain.
- elif ntemps > 1 and tpos.ndim == 4:
- tpos_shape = list(tpos.shape)
- tpos_shape.pop(2)
- if tpos_shape == (ntemps, nwalkers, self.nvarys):
- tpos = tpos[..., -1, :]
+ elif tpos.shape[-1] == self.nvarys:
+ tpos = tpos[-1]
else:
- raise ValueError('pos should have shape (nwalkers, nvarys)'
- 'or (ntemps, nwalkers, nvarys) if ntemps > 1')
+ raise ValueError('pos should have shape (nwalkers, nvarys)')
p0 = tpos
# if you specified a seed then you also need to seed the sampler
@@ -1289,50 +1335,55 @@ class Minimizer(object):
self.sampler.random_state = rng.get_state()
# now do a production run, sampling all the time
- if EMCEE_VERSION >= 3:
+ try:
output = self.sampler.run_mcmc(p0, steps, progress=progress)
self._lastpos = output.coords
- else:
- output = self.sampler.run_mcmc(p0, steps)
- self._lastpos = output[0]
+ except AbortFitException:
+ result.aborted = True
+ result.message = "Fit aborted by user callback. Could not estimate error-bars."
+ result.success = False
+ result.nfev = self.result.nfev
+ output = None
# discard the burn samples and thin
- chain = self.sampler.chain[..., burn::thin, :]
- lnprobability = self.sampler.lnprobability[..., burn::thin]
-
- # take the zero'th PTsampler temperature for the parameter estimators
- if ntemps > 1:
- flatchain = chain[0, ...].reshape((-1, self.nvarys))
- else:
- flatchain = chain.reshape((-1, self.nvarys))
-
- quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
+ chain = self.sampler.get_chain(thin=thin, discard=burn)[..., :, :]
+ lnprobability = self.sampler.get_log_prob(thin=thin, discard=burn)[..., :]
+ flatchain = chain.reshape((-1, self.nvarys))
+ if not result.aborted:
+ quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
- for i, var_name in enumerate(result.var_names):
- std_l, median, std_u = quantiles[:, i]
- params[var_name].value = median
- params[var_name].stderr = 0.5 * (std_u - std_l)
- params[var_name].correl = {}
+ for i, var_name in enumerate(result.var_names):
+ std_l, median, std_u = quantiles[:, i]
+ params[var_name].value = median
+ params[var_name].stderr = 0.5 * (std_u - std_l)
+ params[var_name].correl = {}
- params.update_constraints()
+ params.update_constraints()
- # work out correlation coefficients
- corrcoefs = np.corrcoef(flatchain.T)
+ # work out correlation coefficients
+ corrcoefs = np.corrcoef(flatchain.T)
- for i, var_name in enumerate(result.var_names):
- for j, var_name2 in enumerate(result.var_names):
- if i != j:
- result.params[var_name].correl[var_name2] = corrcoefs[i, j]
+ for i, var_name in enumerate(result.var_names):
+ for j, var_name2 in enumerate(result.var_names):
+ if i != j:
+ result.params[var_name].correl[var_name2] = corrcoefs[i, j]
result.chain = np.copy(chain)
result.lnprob = np.copy(lnprobability)
result.errorbars = True
result.nvarys = len(result.var_names)
- result.nfev = ntemps*nwalkers*steps
+ result.nfev = nwalkers*steps
+ try:
+ result.acor = self.sampler.get_autocorr_time()
+ except AutocorrError as e:
+ print(str(e))
+ pass
+ result.acceptance_fraction = self.sampler.acceptance_fraction
# Calculate the residual with the "best fit" parameters
out = self.userfcn(params, *self.userargs, **self.userkws)
- result.residual = _nan_policy(out, nan_policy=self.nan_policy, handle_inf=False)
+ result.residual = _nan_policy(out, nan_policy=self.nan_policy,
+ handle_inf=False)
# If uncertainty was automatically estimated, weight the residual properly
if (not is_weighted) and (result.residual.size > 1):
@@ -1352,8 +1403,8 @@ class Minimizer(object):
# assuming prior prob = 1, this is true
_neg2_log_likel = -2*result.residual
- # assumes that residual is properly weighted
- result.chisqr = np.exp(_neg2_log_likel)
+ # assumes that residual is properly weighted, avoid overflowing np.exp()
+ result.chisqr = np.exp(min(650, _neg2_log_likel))
result.redchi = result.chisqr / result.nfree
result.aic = _neg2_log_likel + 2 * result.nvarys
@@ -1427,7 +1478,21 @@ class Minimizer(object):
# calculate the cov_x and estimate uncertainties/correlations
try:
- hess = np.matmul(ret.jac.T, ret.jac)
+ if issparse(ret.jac):
+ hess = (ret.jac.T * ret.jac).toarray()
+ elif isinstance(ret.jac, LinearOperator):
+ identity = np.eye(ret.jac.shape[1], dtype=ret.jac.dtype)
+ # TODO: Remove try-except when scipy < 1.4.0 support dropped
+ try:
+ # For scipy >= 1.4.0 (with Linear Operator transpose)
+ # https://github.com/scipy/scipy/pull/9064
+ hess = (ret.jac.T * ret.jac) * identity
+ except AttributeError:
+ # For scipy < 1.4.0 (without Linear Operator transpose)
+ jac = ret.jac * identity
+ hess = np.matmul(jac.T, jac)
+ else:
+ hess = np.matmul(ret.jac.T, ret.jac)
result.covar = np.linalg.inv(hess)
self._calculate_uncertainties_correlations()
except LinAlgError:
@@ -1689,6 +1754,8 @@ class Minimizer(object):
brute_kws = dict(full_output=1, finish=None, disp=False)
# keyword 'workers' is introduced in SciPy v1.3
# FIXME: remove this check after updating the requirement >= 1.3
+ major, minor, micro = scipy_version.split('.', 2)
+
if int(major) == 1 and int(minor) >= 3:
brute_kws.update({'workers': workers})
@@ -1907,10 +1974,6 @@ class Minimizer(object):
.. versionadded:: 0.9.14
"""
- if not HAS_SHGO:
- raise NotImplementedError('You must have SciPy >= 1.2 to use the '
- 'shgo method.')
-
result = self.prepare_fit(params=params)
result.method = 'shgo'
@@ -1977,10 +2040,6 @@ class Minimizer(object):
.. versionadded:: 0.9.14
"""
- if not HAS_DUAL_ANNEALING:
- raise NotImplementedError('You must have SciPy >= 1.2 to use the'
- ' dual_annealing method')
-
result = self.prepare_fit(params=params)
result.method = 'dual_annealing'
@@ -2049,14 +2108,14 @@ class Minimizer(object):
- `'bfgs'`: BFGS
- `'tnc'`: Truncated Newton
- `'trust-ncg'`: Newton-CG trust-region
- - `'trust-exact'`: nearly exact trust-region (SciPy >= 1.0)
- - `'trust-krylov'`: Newton GLTR trust-region (SciPy >= 1.0)
- - `'trust-constr'`: trust-region for constrained optimization (SciPy >= 1.1)
+ - `'trust-exact'`: nearly exact trust-region
+ - `'trust-krylov'`: Newton GLTR trust-region
+ - `'trust-constr'`: trust-region for constrained optimization
- `'dogleg'`: Dog-leg trust-region
- `'slsqp'`: Sequential Linear Squares Programming
- `'emcee'`: Maximum likelihood via Monte-Carlo Markov Chain
- - `'shgo'`: Simplicial Homology Global Optimization (SciPy >= 1.2)
- - `'dual_annealing'`: Dual Annealing optimization (SciPy >= 1.2)
+ - `'shgo'`: Simplicial Homology Global Optimization
+ - `'dual_annealing'`: Dual Annealing optimization
In most cases, these methods wrap and use the method with the
same name from `scipy.optimize`, or use
@@ -2117,120 +2176,6 @@ class Minimizer(object):
return function(**kwargs)
-def _lnprior(theta, bounds):
- """Calculate an improper uniform log-prior probability.
-
- Parameters
- ----------
- theta : sequence
- Float parameter values (only those being varied).
- bounds : np.ndarray
- Lower and upper bounds of parameters that are varying.
- Has shape (nvarys, 2).
-
- Returns
- -------
- lnprob : float
- Log prior probability.
-
- """
- if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]):
- return -np.inf
- return 0
-
-
-def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
- userkws=None, float_behavior='posterior', is_weighted=True,
- nan_policy='raise'):
- """Calculate the log-posterior probability.
-
- See the `Minimizer.emcee` method for more details.
-
- Parameters
- ----------
- theta : sequence
- Float parameter values (only those being varied).
- userfcn : callable
- User objective function.
- params : :class:`~lmfit.parameters.Parameters`
- The entire set of Parameters.
- var_names : list
- The names of the parameters that are varying.
- bounds : numpy.ndarray
- Lower and upper bounds of parameters. Has shape (nvarys, 2).
- userargs : tuple, optional
- Extra positional arguments required for user objective function.
- userkws : dict, optional
- Extra keyword arguments required for user objective function.
- float_behavior : str, optional
- Specifies meaning of objective when it returns a float. One of:
-
- 'posterior' - objective function returnins a log-posterior
- probability
- 'chi2' - objective function returns a chi2 value
-
- is_weighted : bool
- If `userfcn` returns a vector of residuals then `is_weighted`
- specifies if the residuals have been weighted by data uncertainties.
- nan_policy : str, optional
- Specifies action if `userfcn` returns NaN values. One of:
-
- 'raise' - a `ValueError` is raised
- 'propagate' - the values returned from `userfcn` are un-altered
- 'omit' - the non-finite values are filtered
-
-
- Returns
- -------
- lnprob : float
- Log posterior probability.
-
- """
- # the comparison has to be done on theta and bounds. DO NOT inject theta
- # values into Parameters, then compare Parameters values to the bounds.
- # Parameters values are clipped to stay within bounds.
- if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]):
- return -np.inf
-
- for name, val in zip(var_names, theta):
- params[name].value = val
-
- userkwargs = {}
- if userkws is not None:
- userkwargs = userkws
-
- # update the constraints
- params.update_constraints()
-
- # now calculate the log-likelihood
- out = userfcn(params, *userargs, **userkwargs)
- out = _nan_policy(out, nan_policy=nan_policy, handle_inf=False)
-
- lnprob = np.asarray(out).ravel()
-
- if lnprob.size > 1:
- # objective function returns a vector of residuals
- if '__lnsigma' in params and not is_weighted:
- # marginalise over a constant data uncertainty
- __lnsigma = params['__lnsigma'].value
- c = np.log(2 * np.pi) + 2 * __lnsigma
- lnprob = -0.5 * np.sum((lnprob / np.exp(__lnsigma)) ** 2 + c)
- else:
- lnprob = -0.5 * (lnprob * lnprob).sum()
- else:
- # objective function returns a single value.
- # use float_behaviour to figure out if the value is posterior or chi2
- if float_behavior == 'posterior':
- pass
- elif float_behavior == 'chi2':
- lnprob *= -0.5
- else:
- raise ValueError("float_behaviour must be either 'posterior' or"
- " 'chi2' " + float_behavior)
-
- return lnprob
-
-
def _make_random_gen(seed):
"""Turn seed into a numpy.random.RandomState instance.
@@ -2353,14 +2298,14 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None, iter_cb=None,
- `'bfgs'`: BFGS
- `'tnc'`: Truncated Newton
- `'trust-ncg'`: Newton-CG trust-region
- - `'trust-exact'`: nearly exact trust-region (SciPy >= 1.0)
- - `'trust-krylov'`: Newton GLTR trust-region (SciPy >= 1.0)
- - `'trust-constr'`: trust-region for constrained optimization (SciPy >= 1.1)
+ - `'trust-exact'`: nearly exact trust-region
+ - `'trust-krylov'`: Newton GLTR trust-region
+ - `'trust-constr'`: trust-region for constrained optimization
- `'dogleg'`: Dog-leg trust-region
- `'slsqp'`: Sequential Linear Squares Programming
- `'emcee'`: Maximum likelihood via Monte-Carlo Markov Chain
- - `'shgo'`: Simplicial Homology Global Optimization (SciPy >= 1.2)
- - `'dual_annealing'`: Dual Annealing optimization (SciPy >= 1.2)
+ - `'shgo'`: Simplicial Homology Global Optimization
+ - `'dual_annealing'`: Dual Annealing optimization
In most cases, these methods wrap and use the method of the same
name from `scipy.optimize`, or use `scipy.optimize.minimize` with
diff --git a/lmfit/model.py b/lmfit/model.py
index 07a5367..64d4470 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -182,7 +182,7 @@ def propagate_err(z, dz, option):
return err
-class Model(object):
+class Model:
"""Model class."""
_forbidden_args = ('data', 'weights', 'params')
@@ -1131,7 +1131,7 @@ class CompositeModel(Model):
self.right._reprstring(long=long))
def eval(self, params=None, **kwargs):
- """TODO: docstring in public method."""
+ """Evaluate model function for composite model."""
return self.op(self.left.eval(params=params, **kwargs),
self.right.eval(params=params, **kwargs))
@@ -1685,7 +1685,7 @@ class ModelResult(Minimizer):
self.params = Parameters()
state = {'unique_symbols': modres['unique_symbols'], 'params': []}
for parstate in modres['params']:
- _par = Parameter()
+ _par = Parameter(name='')
_par.__setstate__(parstate)
state['params'].append(_par)
self.params.__setstate__(state)
diff --git a/lmfit/models.py b/lmfit/models.py
index 625daa5..49775bd 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -143,7 +143,7 @@ class ConstantModel(Model):
def constant(x, c=0.0):
return c
- super(ConstantModel, self).__init__(constant, **kwargs)
+ super().__init__(constant, **kwargs)
def guess(self, data, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -173,7 +173,7 @@ class ComplexConstantModel(Model):
def constant(x, re=0., im=0.):
return re + 1j*im
- super(ComplexConstantModel, self).__init__(constant, **kwargs)
+ super().__init__(constant, **kwargs)
def guess(self, data, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -203,7 +203,7 @@ class LinearModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(LinearModel, self).__init__(linear, **kwargs)
+ super().__init__(linear, **kwargs)
def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -232,7 +232,7 @@ class QuadraticModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(QuadraticModel, self).__init__(parabolic, **kwargs)
+ super().__init__(parabolic, **kwargs)
def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -279,7 +279,7 @@ class PolynomialModel(Model):
def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
- super(PolynomialModel, self).__init__(polynomial, **kwargs)
+ super().__init__(polynomial, **kwargs)
def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -319,7 +319,7 @@ class GaussianModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(GaussianModel, self).__init__(gaussian, **kwargs)
+ super().__init__(gaussian, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -360,7 +360,7 @@ class LorentzianModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(LorentzianModel, self).__init__(lorentzian, **kwargs)
+ super().__init__(lorentzian, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -408,7 +408,7 @@ class SplitLorentzianModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(SplitLorentzianModel, self).__init__(split_lorentzian, **kwargs)
+ super().__init__(split_lorentzian, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -466,7 +466,7 @@ class VoigtModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(VoigtModel, self).__init__(voigt, **kwargs)
+ super().__init__(voigt, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -519,7 +519,7 @@ class PseudoVoigtModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(PseudoVoigtModel, self).__init__(pvoigt, **kwargs)
+ super().__init__(pvoigt, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -565,7 +565,7 @@ class MoffatModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(MoffatModel, self).__init__(moffat, **kwargs)
+ super().__init__(moffat, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -607,7 +607,7 @@ class Pearson7Model(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(Pearson7Model, self).__init__(pearson7, **kwargs)
+ super().__init__(pearson7, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -648,7 +648,7 @@ class StudentsTModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(StudentsTModel, self).__init__(students_t, **kwargs)
+ super().__init__(students_t, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -687,7 +687,7 @@ class BreitWignerModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(BreitWignerModel, self).__init__(breit_wigner, **kwargs)
+ super().__init__(breit_wigner, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -726,7 +726,7 @@ class LognormalModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(LognormalModel, self).__init__(lognormal, **kwargs)
+ super().__init__(lognormal, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -772,7 +772,7 @@ class DampedOscillatorModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(DampedOscillatorModel, self).__init__(damped_oscillator, **kwargs)
+ super().__init__(damped_oscillator, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -821,7 +821,7 @@ class DampedHarmonicOscillatorModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(DampedHarmonicOscillatorModel, self).__init__(dho, **kwargs)
+ super().__init__(dho, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -870,7 +870,7 @@ class ExponentialGaussianModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(ExponentialGaussianModel, self).__init__(expgaussian, **kwargs)
+ super().__init__(expgaussian, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -918,7 +918,7 @@ class SkewedGaussianModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(SkewedGaussianModel, self).__init__(skewed_gaussian, **kwargs)
+ super().__init__(skewed_gaussian, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -965,7 +965,7 @@ class SkewedVoigtModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(SkewedVoigtModel, self).__init__(skewed_voigt, **kwargs)
+ super().__init__(skewed_voigt, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -1003,7 +1003,7 @@ class DonaichModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(DonaichModel, self).__init__(donaich, **kwargs)
+ super().__init__(donaich, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -1034,7 +1034,7 @@ class PowerLawModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(PowerLawModel, self).__init__(powerlaw, **kwargs)
+ super().__init__(powerlaw, **kwargs)
def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -1065,7 +1065,7 @@ class ExponentialModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super(ExponentialModel, self).__init__(exponential, **kwargs)
+ super().__init__(exponential, **kwargs)
def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -1113,7 +1113,7 @@ class StepModel(Model):
form='linear', **kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'form': form, 'independent_vars': independent_vars})
- super(StepModel, self).__init__(step, **kwargs)
+ super().__init__(step, **kwargs)
def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -1170,7 +1170,7 @@ class RectangleModel(Model):
form='linear', **kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'form': form, 'independent_vars': independent_vars})
- super(RectangleModel, self).__init__(rectangle, **kwargs)
+ super().__init__(rectangle, **kwargs)
self._set_paramhints_prefix()
@@ -1287,7 +1287,7 @@ class ExpressionModel(Model):
kws["nan_policy"] = nan_policy
- super(ExpressionModel, self).__init__(_eval, **kws)
+ super().__init__(_eval, **kws)
# set param names here, and other things normally
# set in _parse_params(), which will be short-circuited.
@@ -1298,7 +1298,7 @@ class ExpressionModel(Model):
self.def_vals = {}
def __repr__(self):
- """TODO: docstring in magic method."""
+ """Return printable representation of ExpressionModel."""
return "<lmfit.ExpressionModel('%s')>" % (self.expr)
def _parse_params(self):
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 70c1a3b..1e64665 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -1,10 +1,10 @@
"""Parameter class."""
-from __future__ import division
from collections import OrderedDict
from copy import deepcopy
-import json
import importlib
+import json
+import warnings
from asteval import Interpreter, get_ast_names, valid_symbol_name
from numpy import arcsin, array, cos, inf, isclose, nan, sin, sqrt
@@ -61,7 +61,7 @@ class Parameters(OrderedDict):
Keyword arguments.
"""
- super(Parameters, self).__init__(self)
+ super().__init__(self)
self._asteval = asteval
if self._asteval is None:
@@ -74,12 +74,19 @@ class Parameters(OrderedDict):
for key, val in _syms.items():
self._asteval.symtable[key] = val
- self.update(*args, **kwds)
-
def copy(self):
"""Parameters.copy() should always be a deepcopy."""
return self.__deepcopy__(None)
+ def update(self, other):
+ """Update values and symbols with another Parameters object."""
+ if not isinstance(other, Parameters):
+ raise ValueError("'%s' is not a Parameters object" % other)
+ self.add_many(*other.values())
+ for sym in other._asteval.user_defined_symbols():
+ self._asteval.symtable[sym] = other._asteval.symtable[sym]
+ return self
+
def __copy__(self):
"""Parameters.copy() should always be a deepcopy."""
return self.__deepcopy__(None)
@@ -120,7 +127,7 @@ class Parameters(OrderedDict):
return _pars
def __setitem__(self, key, par):
- """TODO: add magic method docstring."""
+ """Set items of Parameters object."""
if key not in self:
if not valid_symbol_name(key):
raise KeyError("'%s' is not a valid Parameters name" % key)
@@ -136,16 +143,15 @@ class Parameters(OrderedDict):
if not isinstance(other, Parameters):
raise ValueError("'%s' is not a Parameters object" % other)
out = deepcopy(self)
- params = other.values()
- out.add_many(*params)
+ out.add_many(*other.values())
+ for sym in other._asteval.user_defined_symbols():
+ if sym not in out._asteval.symtable:
+ out._asteval.symtable[sym] = other._asteval.symtable[sym]
return out
def __iadd__(self, other):
"""Add/assign Parameters objects."""
- if not isinstance(other, Parameters):
- raise ValueError("'%s' is not a Parameters object" % other)
- params = other.values()
- self.add_many(*params)
+ self.update(other)
return self
def __array__(self):
@@ -262,7 +268,7 @@ class Parameters(OrderedDict):
"""
if oneline:
- return super(Parameters, self).__repr__()
+ return super().__repr__()
s = "Parameters({\n"
for key in self.keys():
s += " '%s': %s, \n" % (key, self[key])
@@ -464,7 +470,7 @@ class Parameters(OrderedDict):
state = {'unique_symbols': tmp['unique_symbols'],
'params': []}
for parstate in tmp['params']:
- _par = Parameter()
+ _par = Parameter(name='')
_par.__setstate__(parstate)
state['params'].append(_par)
self.__setstate__(state)
@@ -516,7 +522,7 @@ class Parameters(OrderedDict):
return self.loads(fp.read(), **kws)
-class Parameter(object):
+class Parameter:
"""A Parameter is an object that can be varied in a fit, or one of the
controlling variables in a model. It is a central component of lmfit,
and all minimization and modeling methods use Parameter objects.
@@ -538,12 +544,12 @@ class Parameter(object):
"""
- def __init__(self, name=None, value=None, vary=True, min=-inf, max=inf,
+ def __init__(self, name, value=None, vary=True, min=-inf, max=inf,
expr=None, brute_step=None, user_data=None):
"""
Parameters
----------
- name : str, optional
+ name : str
Name of the Parameter.
value : float, optional
Numerical Parameter value.
@@ -572,7 +578,6 @@ class Parameter(object):
"""
self.name = name
- self._val = value
self.user_data = user_data
self.init_value = value
self.min = min
@@ -587,6 +592,7 @@ class Parameter(object):
self.stderr = None
self.correl = None
self.from_internal = lambda val: val
+ self._val = value
self._init_bounds()
def set(self, value=None, vary=None, min=None, max=None, expr=None,
@@ -688,32 +694,31 @@ class Parameter(object):
def __setstate__(self, state):
"""Set state for pickle."""
- (self.name, self.value, self.vary, self.expr, self.min, self.max,
+ (self.name, _value, self.vary, self.expr, self.min, self.max,
self.brute_step, self.stderr, self.correl, self.init_value,
self.user_data) = state
self._expr_ast = None
self._expr_eval = None
self._expr_deps = []
self._delay_asteval = False
+ self.value = _value
self._init_bounds()
def __repr__(self):
"""Return printable representation of a Parameter object."""
s = []
- if self.name is not None:
- s.append("'%s'" % self.name)
- sval = repr(self._getval())
+ sval = "value=%s" % repr(self._getval())
if not self.vary and self._expr is None:
- sval = "value=%s (fixed)" % sval
+ sval += " (fixed)"
elif self.stderr is not None:
- sval = "value=%s +/- %.3g" % (sval, self.stderr)
+ sval += " +/- %.3g" % self.stderr
s.append(sval)
s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
if self._expr is not None:
s.append("expr='%s'" % self.expr)
if self.brute_step is not None:
s.append("brute_step=%s" % (self.brute_step))
- return "<Parameter %s>" % ', '.join(s)
+ return "<Parameter '%s', %s>" % (self.name, ', '.join(s))
def setup_bounds(self):
"""Set up Minuit-style internal/external parameter transformation of
@@ -781,36 +786,27 @@ class Parameter(object):
"""Get value, with bounds applied."""
# Note assignment to self._val has been changed to self.value
# The self.value property setter makes sure that the
- # _expr_eval.symtable is kept updated.
- # If you just assign to self._val then
- # _expr_eval.symtable[self.name]
+ # _expr_eval.symtable is kept up-to-date.
+ # If you just assign to self._val then _expr_eval.symtable[self.name]
# becomes stale if parameter.expr is not None.
if (isinstance(self._val, uncertainties.core.Variable) and
self._val is not nan):
-
+ msg = ("Please make sure that the Parameter value is a number, "
+ "not an instance of 'uncertainties.core.Variable'. This "
+ "automatic conversion will be removed in the next release.")
+ warnings.warn(FutureWarning(msg))
try:
self.value = self._val.nominal_value
except AttributeError:
pass
- if not self.vary and self._expr is None:
- return self._val
if self._expr is not None:
if self._expr_ast is None:
self.__set_expression(self._expr)
-
if self._expr_eval is not None:
if not self._delay_asteval:
self.value = self._expr_eval(self._expr_ast)
check_ast_errors(self._expr_eval)
-
- if self._val is not None:
- if self._val > self.max:
- self._val = self.max
- elif self._val < self.min:
- self._val = self.min
- if self._expr_eval is not None:
- self._expr_eval.symtable[self.name] = self._val
return self._val
def set_expr_eval(self, evaluator):
@@ -826,10 +822,15 @@ class Parameter(object):
def value(self, val):
"""Set the numerical Parameter value."""
self._val = val
+ if self._val is not None:
+ if self._val > self.max:
+ self._val = self.max
+ elif self._val < self.min:
+ self._val = self.min
if not hasattr(self, '_expr_eval'):
self._expr_eval = None
if self._expr_eval is not None:
- self._expr_eval.symtable[self.name] = val
+ self._expr_eval.symtable[self.name] = self._val
@property
def expr(self):
@@ -884,8 +885,8 @@ class Parameter(object):
"""positive"""
return +self._getval()
- def __nonzero__(self):
- """not zero"""
+ def __bool__(self):
+ """bool"""
return self._getval() != 0
def __int__(self):
@@ -908,10 +909,9 @@ class Parameter(object):
"""-"""
return self._getval() - other
- def __div__(self, other):
+ def __truediv__(self, other):
"""/"""
return self._getval() / other
- __truediv__ = __div__
def __floordiv__(self, other):
"""//"""
@@ -961,10 +961,9 @@ class Parameter(object):
"""+ (right)"""
return other + self._getval()
- def __rdiv__(self, other):
+ def __rtruediv__(self, other):
"""/ (right)"""
return other / self._getval()
- __rtruediv__ = __rdiv__
def __rdivmod__(self, other):
"""divmod (right)"""
@@ -993,5 +992,7 @@ class Parameter(object):
def isParameter(x):
"""Test for Parameter-ness."""
+ msg = 'The isParameter function will be removed in the next release.'
+ warnings.warn(FutureWarning(msg))
return (isinstance(x, Parameter) or
x.__class__.__name__ == 'Parameter')
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index e1a7794..21c66ed 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -2,6 +2,7 @@
from math import log10
import re
import warnings
+
import numpy as np
try:
diff --git a/lmfit/ui/basefitter.py b/lmfit/ui/basefitter.py
index b4feccb..9b573af 100644
--- a/lmfit/ui/basefitter.py
+++ b/lmfit/ui/basefitter.py
@@ -46,7 +46,7 @@ _COMMON_EXAMPLES_DOC = """
"""
-class BaseFitter(object):
+class BaseFitter:
__doc__ = _COMMON_DOC + """
Parameters
@@ -231,7 +231,7 @@ class MPLFitter(BaseFitter):
self.data_style = data_style
self.init_style = init_style
self.best_style = best_style
- super(MPLFitter, self).__init__(data, model, **kwargs)
+ super().__init__(data, model, **kwargs)
def plot(self, axes_style={}, data_style={}, init_style={}, best_style={},
ax=None):
diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py
index ff469a1..46b5129 100644
--- a/lmfit/ui/ipy_fitter.py
+++ b/lmfit/ui/ipy_fitter.py
@@ -40,7 +40,7 @@ else:
from ipywidgets import Checkbox
-class ParameterWidgetGroup(object):
+class ParameterWidgetGroup:
"""Construct several widgets that together represent a Parameter.
This will only be used if IPython is available."""
@@ -235,9 +235,8 @@ class NotebookFitter(MPLFitter):
# Parameter widgets are not built here. They are (re-)built when
# the model is (re-)set.
- super(NotebookFitter, self).__init__(data, model, axes_style,
- data_style, init_style,
- best_style, **kwargs)
+ super().__init__(data, model, axes_style, data_style, init_style,
+ best_style, **kwargs)
def _repr_html_(self):
display(self.models_menu)
@@ -249,7 +248,7 @@ class NotebookFitter(MPLFitter):
self.plot()
def guess(self):
- guessing_successful = super(NotebookFitter, self).guess()
+ guessing_successful = super().guess()
self.guess_button.disabled = not guessing_successful
def _finalize_model(self, value):
@@ -275,8 +274,8 @@ class NotebookFitter(MPLFitter):
def plot(self):
clear_output(wait=True)
- super(NotebookFitter, self).plot()
+ super().plot()
def fit(self):
- super(NotebookFitter, self).fit()
+ super().fit()
self.plot()
diff --git a/requirements.txt b/requirements.txt
index 87dabf1..81149a4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,4 @@
-asteval>=0.9.12
-numpy>=1.10
-scipy>=0.19
-six>1.10
-uncertainties>=3.0
+asteval>=0.9.16
+numpy>=1.16
+scipy>=1.2
+uncertainties>=3.0.1
diff --git a/setup.cfg b/setup.cfg
index 2866f8a..185b942 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,7 +7,7 @@ tag_prefix =
parentdir_prefix = lmfit-
[isort]
-skip = versioneer.py,lmfit/_version.py,lmfit/__init__.py
+skip = versioneer.py,lmfit/_version.py,lmfit/__init__.py,doc/conf.py
known_third_party = asteval,dill,emcee,IPython,matplotlib,numdifftools,numpy,NISTModels,pandas,pytest,scipy,six,uncertainties
known_first_party = lmfit
force_sort_within_sections = True
@@ -18,6 +18,10 @@ ignore_substitutions = release
ignore_roles = scipydoc,numpydoc
ignore_directives = autoclass,autodoc,autofunction,automethod,jupyter-execute
+[flake8]
+ignore = E121,E123,E126,E226,W503,W504,E501,E731
+exclude = doc/conf.py, versioneer.py, lmfit/__init__.py, lmfit/ui/__init__.py
+
[egg_info]
tag_build =
tag_date = 0
diff --git a/setup.py b/setup.py
index f1ddec9..d7dbede 100644
--- a/setup.py
+++ b/setup.py
@@ -29,12 +29,11 @@ setup(name='lmfit',
author_email='matt.newville@gmail.com',
url='https://lmfit.github.io/lmfit-py/',
download_url='https://lmfit.github.io//lmfit-py/',
- install_requires=['asteval>=0.9.12',
- 'numpy>=1.10',
- 'scipy>=0.19',
- 'six>1.10',
- 'uncertainties>=3.0'],
- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
+ install_requires=['asteval>=0.9.16',
+ 'numpy>=1.16',
+ 'scipy>=1.2',
+ 'uncertainties>=3.0.1'],
+ python_requires='>=3.5',
license='BSD-3',
description="Least-Squares Minimization with Bounds and Constraints",
long_description=long_desc,
@@ -43,11 +42,10 @@ setup(name='lmfit',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
],
keywords='curve-fitting, least-squares minimization',
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
index 00ad869..0753319 100644
--- a/tests/test_NIST_Strd.py
+++ b/tests/test_NIST_Strd.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
import math
from optparse import OptionParser
diff --git a/tests/test_ampgo.py b/tests/test_ampgo.py
index 3ca9691..83cf0dd 100644
--- a/tests/test_ampgo.py
+++ b/tests/test_ampgo.py
@@ -58,7 +58,11 @@ def test_ampgo_maxfunevals(minimizer_Alpine02):
def test_ampgo_local_solver(minimizer_Alpine02):
"""Test AMPGO algorithm with local solver."""
kws = {'local': 'Nelder-Mead'}
- out = minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+ msg = r'Method Nelder-Mead cannot handle constraints nor bounds'
+ with pytest.warns(RuntimeWarning, match=msg):
+ out = minimizer_Alpine02.minimize(method='ampgo', **kws)
+
out_x = np.array([out.params['x0'].value, out.params['x1'].value])
assert 'ampgo' and 'Nelder-Mead' in out.method
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
index f090e24..804cc02 100644
--- a/tests/test_confidence.py
+++ b/tests/test_confidence.py
@@ -2,6 +2,7 @@
import numpy as np
from numpy.testing import assert_allclose
import pytest
+from scipy.stats import f
import lmfit
from lmfit_testutils import assert_paramval
@@ -12,7 +13,7 @@ def data():
"""Generate synthetic data."""
x = np.linspace(0.3, 10, 100)
np.random.seed(0)
- y = 1.0/(0.1*x) + 2.0 + 0.1*np.random.randn(x.size)
+ y = 1.0 / (0.1 * x) + 2.0 + 0.1 * np.random.randn(x.size)
return (x, y)
@@ -26,7 +27,7 @@ def pars():
def residual(params, x, data):
"""Define objective function for the minimization."""
- return data - 1.0/(params['a']*x) + params['b']
+ return data - 1.0 / (params['a'] * x) + params['b']
@pytest.mark.parametrize("verbose", [False, True])
@@ -54,6 +55,40 @@ def test_confidence_leastsq(data, pars, verbose, capsys):
assert 'Calculating CI for' in captured.out
+def test_confidence_pnames(data, pars):
+ """Test if pnames works as expected."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
+ out = minimizer.leastsq()
+
+ assert_paramval(out.params['a'], 0.1, tol=0.1)
+ assert_paramval(out.params['b'], -2.0, tol=0.1)
+
+ ci = lmfit.conf_interval(minimizer, out, p_names=['a'])
+ assert 'a' in ci
+ assert 'b' not in ci
+
+
+def test_confidence_bounds_reached(data, pars):
+ """Check if conf_interval handles bounds correctly"""
+
+ # Should work
+ pars['a'].max = 0.2
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
+ out = minimizer.leastsq()
+ out.params['a'].stderr = 1
+ lmfit.conf_interval(minimizer, out, verbose=True)
+
+ # Should warn
+ pars['b'].max = 2.03
+ pars['b'].min = 1.97
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
+ out = minimizer.leastsq()
+ out.params['b'].stderr = 0.005
+ out.params['a'].stderr = 0.01
+ with pytest.warns(UserWarning, match="Bound reached"):
+ lmfit.conf_interval(minimizer, out, verbose=True)
+
+
def test_confidence_sigma_vs_prob(data, pars):
"""Calculate confidence by specifying sigma or probability."""
minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
@@ -61,8 +96,9 @@ def test_confidence_sigma_vs_prob(data, pars):
ci_sigmas = lmfit.conf_interval(minimizer, out, sigmas=[1, 2, 3])
ci_1sigma = lmfit.conf_interval(minimizer, out, sigmas=[1])
- ci_probs = lmfit.conf_interval(minimizer, out, sigmas=[0.68269, 0.9545,
- 0.9973])
+ ci_probs = lmfit.conf_interval(minimizer,
+ out,
+ sigmas=[0.68269, 0.9545, 0.9973])
assert_allclose(ci_sigmas['a'][0][1], ci_probs['a'][0][1], rtol=0.01)
assert_allclose(ci_sigmas['b'][2][1], ci_probs['b'][2][1], rtol=0.01)
@@ -72,7 +108,9 @@ def test_confidence_sigma_vs_prob(data, pars):
def test_confidence_exceptions(data, pars):
"""Make sure the proper exceptions are raised when needed."""
- minimizer = lmfit.Minimizer(residual, pars, calc_covar=False,
+ minimizer = lmfit.Minimizer(residual,
+ pars,
+ calc_covar=False,
fcn_args=data)
out = minimizer.minimize(method='nelder')
out_lsq = minimizer.minimize(params=out.params, method='leastsq')
@@ -144,3 +182,17 @@ def test_confidence_2d_limits(data, pars):
assert_allclose(max(cx.ravel()), 0.02)
assert_allclose(min(cy.ravel()), -4.0)
assert_allclose(max(cy.ravel()), 1.0e-6)
+
+
+def test_confidence_prob_func(data, pars):
+ """Test conf_interval with alternate prob_func."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.minimize(method='leastsq')
+
+ def my_f_compare(best_fit, new_fit):
+ nfree = best_fit.nfree
+ nfix = best_fit.nfree - new_fit.nfree
+ dchi = new_fit.chisqr / best_fit.chisqr - 1.0
+ return f.cdf(dchi * nfree / nfix, nfix, nfree)
+
+ lmfit.conf_interval(minimizer, out, sigmas=[1], prob_func=my_f_compare)
diff --git a/tests/test_covariance_matrix.py b/tests/test_covariance_matrix.py
index c9d581a..f5bc900 100644
--- a/tests/test_covariance_matrix.py
+++ b/tests/test_covariance_matrix.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
import os
import numpy as np
diff --git a/tests/test_custom_independentvar.py b/tests/test_custom_independentvar.py
index 0a22b6d..ae39963 100644
--- a/tests/test_custom_independentvar.py
+++ b/tests/test_custom_independentvar.py
@@ -4,7 +4,7 @@ from lmfit.lineshapes import gaussian
from lmfit.models import Model
-class Stepper(object):
+class Stepper:
def __init__(self, start, stop, npts):
self.start = start
self.stop = stop
diff --git a/tests/test_jsonutils.py b/tests/test_jsonutils.py
index 38b63e4..19d0225 100644
--- a/tests/test_jsonutils.py
+++ b/tests/test_jsonutils.py
@@ -1,12 +1,9 @@
"""Tests for the JSON utilities."""
-# coding: utf-8
-# TODO: remove the line above when dropping Python 2.7
from types import BuiltinFunctionType, FunctionType
import numpy as np
import pytest
from scipy.optimize import basinhopping
-import six
import lmfit
from lmfit.jsonutils import decode4js, encode4js, find_importer, import_from
@@ -21,12 +18,12 @@ def test_import_from(obj):
(BuiltinFunctionType, FunctionType))
-objects = [('test_string', six.string_types),
+objects = [('test_string', (str,)),
(np.array([7.0]), np.ndarray),
(np.array([1.0+2.0j]), np.ndarray),
(123.456, np.float),
(10, np.float),
- (u'café', six.string_types),
+ ('café', (str,)),
(10.0-5.0j, np.complex),
(['a', 'b', 'c'], list),
(('a', 'b', 'c'), tuple),
diff --git a/tests/test_least_squares.py b/tests/test_least_squares.py
index 778c204..b9aa64e 100644
--- a/tests/test_least_squares.py
+++ b/tests/test_least_squares.py
@@ -2,6 +2,8 @@
import numpy as np
from numpy.testing import assert_allclose
import pytest
+from scipy.sparse import bsr_matrix
+from scipy.sparse.linalg import aslinearoperator
import lmfit
from lmfit.models import VoigtModel
@@ -116,3 +118,52 @@ def test_least_squares_solver_options(peakdata, capsys):
assert 'Iteration' in captured.out
assert 'final cost' in captured.out
+
+
+def test_least_squares_jacobian_types():
+ """Test support for Jacobian of all types supported by least_squares."""
+ # Build function
+ # f(x, y) = (x - a)^2 + (y - b)^2
+ np.random.seed(42)
+ a = np.random.normal(0, 1, 50)
+ np.random.seed(43)
+ b = np.random.normal(0, 1, 50)
+
+ def f(params):
+ return (params['x'] - a)**2 + (params['y'] - b)**2
+
+ # Build analytic Jacobian functions with the different possible return types
+ # numpy.ndarray, scipy.sparse.spmatrix, scipy.sparse.linalg.LinearOperator
+ # J = [ 2x - 2a , 2y - 2b ]
+ def jac_array(params, *args, **kwargs):
+ return np.column_stack((2 * params[0] - 2 * a, 2 * params[1] - 2 * b))
+
+ def jac_sparse(params, *args, **kwargs):
+ return bsr_matrix(jac_array(params, *args, **kwargs))
+
+ def jac_operator(params, *args, **kwargs):
+ return aslinearoperator(jac_array(params, *args, **kwargs))
+ # Build parameters
+ params = lmfit.Parameters()
+ params.add('x', value=0)
+ params.add('y', value=0)
+ # Solve model for numerical Jacobian and each analytic Jacobian function
+ result = lmfit.minimize(f, params, method='least_squares')
+ result_array = lmfit.minimize(
+ f, params, method='least_squares',
+ jac=jac_array)
+ result_sparse = lmfit.minimize(
+ f, params, method='least_squares',
+ jac=jac_sparse)
+ result_operator = lmfit.minimize(
+ f, params, method='least_squares',
+ jac=jac_operator)
+ # Check that all have uncertainties
+ assert result.errorbars
+ assert result_array.errorbars
+ assert result_sparse.errorbars
+ assert result_operator.errorbars
+ # Check that all have ~equal covariance matrix
+ assert_allclose(result.covar, result_array.covar)
+ assert_allclose(result.covar, result_sparse.covar)
+ assert_allclose(result.covar, result_operator.covar)
diff --git a/tests/test_lineshapes_models.py b/tests/test_lineshapes_models.py
index 4302bcd..9ad9ef1 100644
--- a/tests/test_lineshapes_models.py
+++ b/tests/test_lineshapes_models.py
@@ -5,7 +5,6 @@ import sys
import numpy as np
from numpy.testing import assert_allclose
-
import pytest
from scipy.optimize import fsolve
diff --git a/tests/test_model.py b/tests/test_model.py
index 4518e58..ba9e8dc 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -42,7 +42,7 @@ def linear_func(x, a, b):
return a*x+b
-class CommonTests(object):
+class CommonTests:
# to be subclassed for testing predefined models
def setUp(self):
@@ -234,6 +234,9 @@ class CommonTests(object):
class TestUserDefiniedModel(CommonTests, unittest.TestCase):
# mainly aimed at checking that the API does what it says it does
# and raises the right exceptions or warnings when things are not right
+ import six
+ if six.PY2:
+ from six import assertRaisesRegex
def setUp(self):
self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
@@ -241,7 +244,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
# return a fresh copy
self.model_constructor = (
lambda *args, **kwargs: Model(gaussian, *args, **kwargs))
- super(TestUserDefiniedModel, self).setUp()
+ super().setUp()
@property
def x(self):
@@ -641,7 +644,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
result = lambda: mod.fit(y, params, x=x, nan_policy='raise')
msg = ('NaN values detected in your input data or the output of your '
'objective/model function - fitting algorithms cannot handle this!')
- self.assertRaisesRegexp(ValueError, msg, result)
+ self.assertRaisesRegex(ValueError, msg, result)
# with propagate, should get no error, but bad results
result = mod.fit(y, params, x=x, nan_policy='propagate')
@@ -684,7 +687,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
nan_policy='raise')
msg = 'The model function generated NaN values and the fit aborted!'
- self.assertRaisesRegexp(ValueError, msg, result)
+ self.assertRaisesRegex(ValueError, msg, result)
@pytest.mark.skipif(sys.version_info.major == 2,
reason="cannot use wrapped functions with Python 2")
@@ -715,7 +718,7 @@ class TestLinear(CommonTests, unittest.TestCase):
self.true_values = lambda: dict(slope=5, intercept=2)
self.guess = lambda: dict(slope=10, intercept=6)
self.model_constructor = models.LinearModel
- super(TestLinear, self).setUp()
+ super().setUp()
class TestParabolic(CommonTests, unittest.TestCase):
@@ -724,7 +727,7 @@ class TestParabolic(CommonTests, unittest.TestCase):
self.true_values = lambda: dict(a=5, b=2, c=8)
self.guess = lambda: dict(a=1, b=6, c=3)
self.model_constructor = models.ParabolicModel
- super(TestParabolic, self).setUp()
+ super().setUp()
class TestPolynomialOrder2(CommonTests, unittest.TestCase):
@@ -734,7 +737,7 @@ class TestPolynomialOrder2(CommonTests, unittest.TestCase):
self.guess = lambda: dict(c1=1, c2=6, c0=3)
self.model_constructor = models.PolynomialModel
self.args = (2,)
- super(TestPolynomialOrder2, self).setUp()
+ super().setUp()
class TestPolynomialOrder3(CommonTests, unittest.TestCase):
@@ -744,7 +747,7 @@ class TestPolynomialOrder3(CommonTests, unittest.TestCase):
self.guess = lambda: dict(c3=1, c1=1, c2=6, c0=3)
self.model_constructor = models.PolynomialModel
self.args = (3,)
- super(TestPolynomialOrder3, self).setUp()
+ super().setUp()
class TestConstant(CommonTests, unittest.TestCase):
@@ -752,7 +755,7 @@ class TestConstant(CommonTests, unittest.TestCase):
self.true_values = lambda: dict(c=5)
self.guess = lambda: dict(c=2)
self.model_constructor = models.ConstantModel
- super(TestConstant, self).setUp()
+ super().setUp()
def check_skip_independent_vars(self):
raise pytest.skip("ConstantModel has not independent_vars.")
@@ -763,7 +766,7 @@ class TestPowerlaw(CommonTests, unittest.TestCase):
self.true_values = lambda: dict(amplitude=5, exponent=3)
self.guess = lambda: dict(amplitude=2, exponent=8)
self.model_constructor = models.PowerLawModel
- super(TestPowerlaw, self).setUp()
+ super().setUp()
class TestExponential(CommonTests, unittest.TestCase):
@@ -771,7 +774,7 @@ class TestExponential(CommonTests, unittest.TestCase):
self.true_values = lambda: dict(amplitude=5, decay=3)
self.guess = lambda: dict(amplitude=2, decay=8)
self.model_constructor = models.ExponentialModel
- super(TestExponential, self).setUp()
+ super().setUp()
class TestComplexConstant(CommonTests, unittest.TestCase):
@@ -779,7 +782,7 @@ class TestComplexConstant(CommonTests, unittest.TestCase):
self.true_values = lambda: dict(re=5, im=5)
self.guess = lambda: dict(re=2, im=2)
self.model_constructor = models.ComplexConstantModel
- super(TestComplexConstant, self).setUp()
+ super().setUp()
class TestExpression(CommonTests, unittest.TestCase):
@@ -789,7 +792,7 @@ class TestExpression(CommonTests, unittest.TestCase):
self.expression = "off_c + amp_c * exp(-x/x0)"
self.model_constructor = (
lambda *args, **kwargs: models.ExpressionModel(self.expression, *args, **kwargs))
- super(TestExpression, self).setUp()
+ super().setUp()
def test_composite_with_expression(self):
expression_model = models.ExpressionModel("exp(-x/x0)", name='exp')
diff --git a/tests/test_nose.py b/tests/test_nose.py
index 3ec4a22..d3b482a 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -5,13 +5,12 @@ from numpy import pi
from numpy.testing import (assert_, assert_allclose, assert_almost_equal,
assert_equal, dec)
import pytest
-from scipy.version import version as scipy_version
from uncertainties import ufloat
from lmfit import Minimizer, Parameters, minimize
from lmfit.lineshapes import gaussian
from lmfit.minimizer import (HAS_EMCEE, SCALAR_METHODS, MinimizerResult,
- _lnpost, _nan_policy)
+ _nan_policy)
def check(para, real_val, sig=3):
@@ -341,7 +340,6 @@ class CommonMinimizerTest(unittest.TestCase):
fit_params.add('shift', value=.10, min=0.0, max=0.2)
fit_params.add('decay', value=6.e-3, min=0, max=0.1)
self.fit_params = fit_params
-
self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])
def residual(self, pars, x, data=None):
@@ -395,10 +393,8 @@ class CommonMinimizerTest(unittest.TestCase):
# the data returned by userfcn
self.data[0] = np.nan
- major, minor, _micro = scipy_version.split('.', 2)
for method in SCALAR_METHODS:
- if (method == 'differential_evolution' and int(major) > 0 and
- int(minor) >= 2):
+ if method == 'differential_evolution':
pytest.raises(RuntimeError, self.mini.scalar_minimize,
SCALAR_METHODS[method])
else:
@@ -443,40 +439,29 @@ class CommonMinimizerTest(unittest.TestCase):
# test with emcee as method keyword argument
if not HAS_EMCEE:
return True
-
np.random.seed(123456)
- out = self.mini.minimize(method='emcee', nwalkers=100, steps=200,
+ out = self.mini.minimize(method='emcee',
+ nwalkers=50, steps=200,
burn=50, thin=10)
assert out.method == 'emcee'
- assert out.nfev == 100*200
+ assert out.nfev == 50*200
check_paras(out.params, self.p_true, sig=3)
- out_unweighted = self.mini.minimize(method='emcee', is_weighted=False)
+ out_unweighted = self.mini.minimize(method='emcee',
+ nwalkers=50, steps=200,
+ burn=50, thin=10,
+ is_weighted=False)
assert out_unweighted.method == 'emcee'
@dec.slow
- def test_emcee_PT(self):
- # test emcee with parallel tempering
- if not HAS_EMCEE:
- return True
-
- np.random.seed(123456)
- self.mini.userfcn = residual_for_multiprocessing
- out = self.mini.emcee(ntemps=4, nwalkers=50, steps=200,
- burn=100, thin=10, workers=2)
-
- check_paras(out.params, self.p_true, sig=3)
-
- @dec.slow
def test_emcee_multiprocessing(self):
# test multiprocessing runs
+ raise pytest.skip("Pytest fails with multiprocessing")
+ pytest.importorskip("dill")
if not HAS_EMCEE:
return True
-
- np.random.seed(123456)
- self.mini.userfcn = residual_for_multiprocessing
- self.mini.emcee(steps=10, workers=4)
+ self.mini.emcee(steps=50, workers=4, nwalkers=20)
def test_emcee_bounds_length(self):
# the log-probability functions check if the parameters are
@@ -513,22 +498,21 @@ class CommonMinimizerTest(unittest.TestCase):
out = self.mini.emcee(nwalkers=100, steps=5)
# can initialise with a chain
self.mini.emcee(nwalkers=100, steps=1, pos=out.chain)
-
# can initialise with a correct subset of a chain
- self.mini.emcee(nwalkers=100, steps=1, pos=out.chain[..., -1, :])
+ self.mini.emcee(nwalkers=100, steps=1, pos=out.chain[-1, ...])
# but you can't initialise if the shape is wrong.
pytest.raises(ValueError,
self.mini.emcee,
nwalkers=100,
steps=1,
- pos=out.chain[..., -1, :-1])
+ pos=out.chain[-1, :-1, ...])
def test_emcee_reuse_sampler(self):
if not HAS_EMCEE:
return True
- self.mini.emcee(nwalkers=100, steps=5)
+ self.mini.emcee(nwalkers=20, steps=25)
# if you've run the sampler the Minimizer object should have a _lastpos
# attribute
@@ -536,7 +520,7 @@ class CommonMinimizerTest(unittest.TestCase):
# now try and re-use sampler
out2 = self.mini.emcee(steps=10, reuse_sampler=True)
- assert_(out2.chain.shape[1] == 15)
+ assert_(out2.chain.shape == (35, 20, 4))
# you shouldn't be able to reuse the sampler if nvarys has changed.
self.mini.params['amp'].vary = False
@@ -563,12 +547,10 @@ class CommonMinimizerTest(unittest.TestCase):
# calculate the log-likelihood value
bounds = np.array([(par.min, par.max)
for par in result.params.values()])
- val2 = _lnpost(fvars,
- self.residual,
- result.params,
- result.var_names,
- bounds,
- userargs=(self.x, self.data))
+
+ val2 = self.mini._lnprob(fvars, self.residual, result.params,
+ result.var_names, bounds,
+ userargs=(self.x, self.data))
assert_almost_equal(-0.5 * val, val2)
@@ -585,32 +567,8 @@ class CommonMinimizerTest(unittest.TestCase):
assert_(isinstance(out.flatchain, DataFrame))
# check that we can access the chains via parameter name
- assert_(out.flatchain['amp'].shape[0] == 80)
- assert out.errorbars
- assert_(np.isfinite(out.params['amp'].correl['period']))
-
- # the lnprob array should be the same as the chain size
- assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))
-
- # test chain output shapes
- assert_(out.lnprob.shape == (10, (20-5+1)/2))
- assert_(out.chain.shape == (10, (20-5+1)/2, out.nvarys))
- assert_(out.flatchain.shape == (10*(20-5+1)/2, out.nvarys))
-
- def test_emcee_PT_output(self):
- # test mcmc output when using parallel tempering
- if not HAS_EMCEE:
- return True
- try:
- from pandas import DataFrame
- except ImportError:
- return True
- out = self.mini.emcee(ntemps=6, nwalkers=10, steps=20, burn=5, thin=2)
- assert_(isinstance(out, MinimizerResult))
- assert_(isinstance(out.flatchain, DataFrame))
-
- # check that we can access the chains via parameter name
- assert_(out.flatchain['amp'].shape[0] == 80)
+ # print( out.flatchain['amp'].shape[0], 200)
+ assert_(out.flatchain['amp'].shape[0] == 70)
assert out.errorbars
assert_(np.isfinite(out.params['amp'].correl['period']))
@@ -618,10 +576,10 @@ class CommonMinimizerTest(unittest.TestCase):
assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))
# test chain output shapes
- assert_(out.lnprob.shape == (6, 10, (20-5+1)/2))
- assert_(out.chain.shape == (6, 10, (20-5+1)/2, out.nvarys))
- # Only the 0th temperature is returned
- assert_(out.flatchain.shape == (10*(20-5+1)/2, out.nvarys))
+ print(out.lnprob.shape, out.chain.shape, out.flatchain.shape)
+ assert_(out.lnprob.shape == (7, 10))
+ assert_(out.chain.shape == (7, 10, 4))
+ assert_(out.flatchain.shape == (70, 4))
@dec.slow
def test_emcee_float(self):
@@ -662,6 +620,14 @@ class CommonMinimizerTest(unittest.TestCase):
assert_almost_equal(out.chain, out2.chain)
+ def test_emcee_ntemps(self):
+ # check for DeprecationWarning when using ntemps > 1
+ if not HAS_EMCEE:
+ return True
+
+ with pytest.raises(DeprecationWarning):
+ _ = self.mini.emcee(params=self.fit_params, ntemps=5)
+
def residual_for_multiprocessing(pars, x, data=None):
# a residual function defined in the top level is needed for
diff --git a/tests/test_parameter.py b/tests/test_parameter.py
new file mode 100644
index 0000000..5c2e5c5
--- /dev/null
+++ b/tests/test_parameter.py
@@ -0,0 +1,514 @@
+"""Tests for the Parameter class."""
+
+from math import trunc
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+import uncertainties as un
+
+import lmfit
+
+
+@pytest.fixture
+def parameter():
+ """Initialize parameter for tests."""
+ param = lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0,
+ max=100.0, expr=None, brute_step=5.0, user_data=1)
+ expected_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(param, expected_attribute_values)
+ return param, expected_attribute_values
+
+
+def assert_parameter_attributes(par, expected):
+ """Assert that parameter attributes have the expected values."""
+ par_attr_values = (par.name, par._val, par.vary, par.min, par.max,
+ par._expr, par.brute_step, par.user_data)
+ assert par_attr_values == expected
+
+
+in_out = [(lmfit.Parameter(name='a'), # set name
+ ('a', -np.inf, True, -np.inf, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', value=10.0), # set value
+ ('a', 10.0, True, -np.inf, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', vary=False), # fix parameter, set vary to False
+ ('a', -np.inf, False, -np.inf, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', min=-10.0), # set lower bound, value reset to min
+ ('a', -10.0, True, -10.0, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', value=-5.0, min=-10.0), # set lower bound
+ ('a', -5.0, True, -10.0, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', max=10.0), # set upper bound
+ ('a', -np.inf, True, -np.inf, 10.0, None, None, None)),
+ (lmfit.Parameter(name='a', value=25.0, max=10.0), # set upper bound, value reset
+ ('a', 10.0, True, -np.inf, 10.0, None, None, None)),
+ (lmfit.Parameter(name='a', expr="2.0*10.0"), # set expression, vary becomes False
+ ('a', -np.inf, True, -np.inf, np.inf, '2.0*10.0', None, None)),
+ (lmfit.Parameter(name='a', brute_step=0.1), # set brute_step
+ ('a', -np.inf, True, -np.inf, np.inf, None, 0.1, None)),
+ (lmfit.Parameter(name='a', user_data={'b': {}}), # set user_data
+ ('a', -np.inf, True, -np.inf, np.inf, None, None, {'b': {}}))]
+
+
+@pytest.mark.parametrize('par, attr_values', in_out)
+def test_initialize_Parameter(par, attr_values):
+ """Test the initialization of the Parameter class."""
+ assert_parameter_attributes(par, attr_values)
+
+ # check for other default attributes
+ for attribute in ['_expr', '_expr_ast', '_expr_eval', '_expr_deps',
+ '_delay_asteval', 'stderr', 'correl', 'from_internal',
+ '_val']:
+ assert hasattr(par, attribute)
+
+
+def test_Parameter_no_name():
+ """Test for Parameter name, now required positional argument."""
+ msg = r"missing 1 required positional argument: 'name'"
+ with pytest.raises(TypeError, match=msg):
+ lmfit.Parameter()
+
+
+def test_init_bounds():
+ """Tests to make sure that initial bounds are consistent.
+
+ Only for specific cases not tested above with the initializations of the
+ Parameter class.
+
+ """
+ # test 1: min > max; should swap min and max
+ par = lmfit.Parameter(name='a', value=0.0, min=10.0, max=-10.0)
+ assert par.min == -10.0
+ assert par.max == 10.0
+
+ # test 2: min == max; should raise a ValueError
+ msg = r"Parameter 'a' has min == max"
+ with pytest.raises(ValueError, match=msg):
+ par = lmfit.Parameter(name='a', value=0.0, min=10.0, max=10.0)
+
+ # FIXME: ideally this should be impossible to happen ever....
+ # perhaps we should add a setter method for MIN and MAX as well?
+ # test 3: max or min is equal to None
+ par.min = None
+ par._init_bounds()
+ assert par.min == -np.inf
+
+ par.max = None
+ par._init_bounds()
+ assert par.max == np.inf
+
+
+def test_parameter_set_value(parameter):
+ """Test the Parameter.set() function with value."""
+ par, initial_attribute_values = parameter
+
+ par.set(value=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(value=5.0)
+ changed_attribute_values = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_vary(parameter):
+ """Test the Parameter.set() function with vary."""
+ par, initial_attribute_values = parameter
+
+ par.set(vary=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(vary=False)
+ changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_min(parameter):
+ """Test the Parameter.set() function with min."""
+ par, initial_attribute_values = parameter
+
+ par.set(min=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(min=-50.0)
+ changed_attribute_values = ('a', 10.0, True, -50.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_max(parameter):
+ """Test the Parameter.set() function with max."""
+ par, initial_attribute_values = parameter
+
+ par.set(max=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(max=50.0)
+ changed_attribute_values = ('a', 10.0, True, -100.0, 50.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_expr(parameter):
+ """Test the Parameter.set() function with expr.
+
+ Of note, this only tests for setting/removal of the expression; nothing
+ else gets evaluated here.... More specific tests will be present in the
+ Parameters class.
+
+ """
+ par, _ = parameter
+
+ par.set(expr='2.0*50.0') # setting an expression, vary --> False
+ changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, '2.0*50.0',
+ 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+ par.set(expr=None) # nothing should change
+ assert_parameter_attributes(par, changed_attribute_values)
+
+ par.set(expr='') # should remove the expression
+ changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_brute_step(parameter):
+ """Test the Parameter.set() function with brute_step."""
+ par, initial_attribute_values = parameter
+
+ par.set(brute_step=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(brute_step=0.0) # brute_step set to None
+ changed_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, None, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+ par.set(brute_step=1.0)
+ changed_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, 1.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_getstate(parameter):
+ """Test for the __getstate__ method."""
+ par, _ = parameter
+ assert par.__getstate__() == ('a', 10.0, True, None, -100.0, 100.0, 5.0,
+ None, None, 10, 1)
+
+
+def test_setstate(parameter):
+ """Test for the __setstate__ method."""
+ par, initial_attribute_values = parameter
+ state = par.__getstate__()
+
+ par_new = lmfit.Parameter('new')
+ attributes_new = ('new', -np.inf, True, -np.inf, np.inf, None, None, None)
+ assert_parameter_attributes(par_new, attributes_new)
+
+ par_new.__setstate__(state)
+ assert_parameter_attributes(par_new, initial_attribute_values)
+
+
+def test_repr():
+ """Tests for the __repr__ method."""
+ par = lmfit.Parameter(name='test', value=10.0, min=0.0, max=20.0)
+ assert par.__repr__() == "<Parameter 'test', value=10.0, bounds=[0.0:20.0]>"
+
+ par = lmfit.Parameter(name='test', value=10.0, vary=False)
+ assert par.__repr__() == "<Parameter 'test', value=10.0 (fixed), bounds=[-inf:inf]>"
+
+ par.set(vary=True)
+ par.stderr = 0.1
+ assert par.__repr__() == "<Parameter 'test', value=10.0 +/- 0.1, bounds=[-inf:inf]>"
+
+ par = lmfit.Parameter(name='test', expr='10.0*2.5')
+ assert par.__repr__() == "<Parameter 'test', value=-inf, bounds=[-inf:inf], expr='10.0*2.5'>"
+
+ par = lmfit.Parameter(name='test', brute_step=0.1)
+ assert par.__repr__() == "<Parameter 'test', value=-inf, bounds=[-inf:inf], brute_step=0.1>"
+
+
+def test_setup_bounds_and_scale_gradient_methods():
+ """Tests for the setup_bounds and scale_gradient methods.
+
+ Make use of the MINUIT-style transformation to obtain the Parameter
+ values and scaling factor for the gradient.
+ See: https://lmfit.github.io/lmfit-py/bounds.html
+
+ """
+ # situation 1: no bounds
+ par_no_bounds = lmfit.Parameter('no_bounds', value=10.0)
+ assert_allclose(par_no_bounds.setup_bounds(), 10.0)
+ assert_allclose(par_no_bounds.scale_gradient(par_no_bounds.value), 1.0)
+
+ # situation 2: no bounds, min/max set to None after creating the parameter
+ # TODO: ideally this should never happen; perhaps use a setter here
+ par_no_bounds = lmfit.Parameter('no_bounds', value=10.0)
+ par_no_bounds.min = None
+ par_no_bounds.max = None
+ assert_allclose(par_no_bounds.setup_bounds(), 10.0)
+ assert_allclose(par_no_bounds.scale_gradient(par_no_bounds.value), 1.0)
+
+ # situation 3: upper bound
+ par_upper_bound = lmfit.Parameter('upper_bound', value=10.0, max=25.0)
+ assert_allclose(par_upper_bound.setup_bounds(), 15.968719422671311)
+ assert_allclose(par_upper_bound.scale_gradient(par_upper_bound.value),
+ -0.99503719, rtol=1.e-6)
+
+ # situation 4: lower bound
+ par_lower_bound = lmfit.Parameter('upper_bound', value=10.0, min=-25.0)
+ assert_allclose(par_lower_bound.setup_bounds(), 35.98610843)
+ assert_allclose(par_lower_bound.scale_gradient(par_lower_bound.value),
+ 0.995037, rtol=1.e-6)
+
+ # situation 5: both lower and upper bounds
+ par_both_bounds = lmfit.Parameter('both_bounds', value=10.0, min=-25.0,
+ max=25.0)
+ assert_allclose(par_both_bounds.setup_bounds(), 0.4115168460674879)
+ assert_allclose(par_both_bounds.scale_gradient(par_both_bounds.value),
+ -20.976788, rtol=1.e-6)
+
+
+def test__getval(parameter):
+ """Test _getval function."""
+ par, _ = parameter
+
+ # test uncertainties.core.Variable in _getval [deprecated]
+ par.set(value=un.ufloat(5.0, 0.2))
+ with pytest.warns(FutureWarning, match='removed in the next release'):
+ val = par.value
+ assert_allclose(val, 5.0)
+
+
+def test_value_setter(parameter):
+ """Tests for the value setter."""
+ par, initial_attribute_values = parameter
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(value=200.0) # above maximum
+ assert_allclose(par.value, 100.0)
+
+ par.set(value=-200.0) # below minimum
+ assert_allclose(par.value, -100.0)
+
+
+# TODO: add tests for setter/getter methods for VALUE, EXPR
+
+
+# Tests for magic methods of the Parameter class
+def test__array__(parameter):
+ """Test the __array__ magic method."""
+ par, _ = parameter
+ assert np.array(par) == np.array(10.0)
+
+
+def test__str__(parameter):
+ """Test the __str__ magic method."""
+ par, _ = parameter
+ assert str(par) == "<Parameter 'a', value=10.0, bounds=[-100.0:100.0], brute_step=5.0>"
+
+
+def test__abs__(parameter):
+ """Test the __abs__ magic method."""
+ par, _ = parameter
+ assert_allclose(abs(par), 10.0)
+ par.set(value=-10.0)
+ assert_allclose(abs(par), 10.0)
+
+
+def test__neg__(parameter):
+ """Test the __neg__ magic method."""
+ par, _ = parameter
+ assert_allclose(-par, -10.0)
+ par.set(value=-10.0)
+ assert_allclose(-par, 10.0)
+
+
+def test__pos__(parameter):
+ """Test the __pos__ magic method."""
+ par, _ = parameter
+ assert_allclose(+par, 10.0)
+ par.set(value=-10.0)
+ assert_allclose(+par, -10.0)
+
+
+def test__bool__(parameter):
+ """Test the __bool__ magic method."""
+ par, _ = parameter
+ assert bool(par)
+
+
+def test__int__(parameter):
+ """Test the __int__ magic method."""
+ par, _ = parameter
+ assert isinstance(int(par), int)
+ assert_allclose(int(par), 10)
+
+
+def test__float__(parameter):
+ """Test the __float__ magic method."""
+ par, _ = parameter
+ par.set(value=5)
+ assert isinstance(float(par), float)
+ assert_allclose(float(par), 5.0)
+
+
+def test__trunc__(parameter):
+ """Test the __trunc__ magic method."""
+ par, _ = parameter
+ par.set(value=10.5)
+ assert isinstance(trunc(par), int)
+ assert_allclose(trunc(par), 10)
+
+
+def test__add__(parameter):
+ """Test the __add__ magic method."""
+ par, _ = parameter
+ assert_allclose(par + 5.25, 15.25)
+
+
+def test__sub__(parameter):
+ """Test the __sub__ magic method."""
+ par, _ = parameter
+ assert_allclose(par - 5.25, 4.75)
+
+
+def test__truediv__(parameter):
+ """Test the __truediv__ magic method."""
+ par, _ = parameter
+ assert_allclose(par / 1.25, 8.0)
+
+
+def test__floordiv__(parameter):
+ """Test the __floordiv__ magic method."""
+ par, _ = parameter
+ par.set(value=5)
+ assert_allclose(par // 2, 2)
+
+
+def test__divmod__(parameter):
+ """Test the __divmod__ magic method."""
+ par, _ = parameter
+ assert_allclose(divmod(par, 3), (3, 1))
+
+
+def test__mod__(parameter):
+ """Test the __mod__ magic method."""
+ par, _ = parameter
+ assert_allclose(par % 2, 0)
+ assert_allclose(par % 3, 1)
+
+
+def test__mul__(parameter):
+ """Test the __mul__ magic method."""
+ par, _ = parameter
+ assert_allclose(par * 2.5, 25.0)
+ assert_allclose(par * -0.1, -1.0)
+
+
+def test__pow__(parameter):
+ """Test the __pow__ magic method."""
+ par, _ = parameter
+ assert_allclose(par ** 0.5, 3.16227766)
+ assert_allclose(par ** 4, 1e4)
+
+
+def test__gt__(parameter):
+ """Test the __gt__ magic method."""
+ par, _ = parameter
+ assert 11 > par
+ assert not 10 > par
+
+
+def test__ge__(parameter):
+ """Test the __ge__ magic method."""
+ par, _ = parameter
+ assert 11 >= par
+ assert 10 >= par
+ assert not 9 >= par
+
+
+def test__le__(parameter):
+ """Test the __le__ magic method."""
+ par, _ = parameter
+ assert 9 <= par
+ assert 10 <= par
+ assert not 11 <= par
+
+
+def test__lt__(parameter):
+ """Test the __lt__ magic method."""
+ par, _ = parameter
+ assert 9 < par
+ assert not 10 < par
+
+
+def test__eq__(parameter):
+ """Test the __eq__ magic method."""
+ par, _ = parameter
+ assert 10 == par
+ assert not 9 == par
+
+
+def test__ne__(parameter):
+ """Test the __ne__ magic method."""
+ par, _ = parameter
+ assert 9 != par
+ assert not 10 != par
+
+
+def test__radd__(parameter):
+ """Test the __radd__ magic method."""
+ par, _ = parameter
+ assert_allclose(5.25 + par, 15.25)
+
+
+def test__rtruediv__(parameter):
+ """Test the __rtruediv__ magic method."""
+ par, _ = parameter
+ assert_allclose(1.25 / par, 0.125)
+
+
+def test__rdivmod__(parameter):
+ """Test the __rdivmod__ magic method."""
+ par, _ = parameter
+ assert_allclose(divmod(3, par), (0, 3))
+
+
+def test__rfloordiv__(parameter):
+ """Test the __rfloordiv__ magic method."""
+ par, _ = parameter
+ assert_allclose(2 // par, 0)
+ assert_allclose(20 // par, 2)
+
+
+def test__rmod__(parameter):
+ """Test the __rmod__ magic method."""
+ par, _ = parameter
+ assert_allclose(2 % par, 2)
+ assert_allclose(25 % par, 5)
+
+
+def test__rmul__(parameter):
+ """Test the __rmul__ magic method."""
+ par, _ = parameter
+ assert_allclose(2.5 * par, 25.0)
+ assert_allclose(-0.1 * par, -1.0)
+
+
+def test__rpow__(parameter):
+ """Test the __rpow__ magic method."""
+ par, _ = parameter
+ assert_allclose(0.5 ** par, 0.0009765625)
+ assert_allclose(4 ** par, 1048576)
+
+
+def test__rsub__(parameter):
+ """Test the __rsub__ magic method."""
+ par, _ = parameter
+ assert_allclose(5.25 - par, -4.75)
+
+
+def test_isParameter(parameter):
+ """Test function to check whether something is a Parameter [deprecated]."""
+ # TODO: this function isn't used anywhere in the codebase; useful at all?
+ par, _ = parameter
+ assert lmfit.parameter.isParameter(par)
+ assert not lmfit.parameter.isParameter('test')
+ with pytest.warns(FutureWarning, match='removed in the next release'):
+ lmfit.parameter.isParameter(par)
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index a5fcc0e..67f6c41 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
from copy import copy, deepcopy
import pickle
import unittest
@@ -8,7 +6,6 @@ import numpy as np
from numpy.testing import assert_, assert_almost_equal, assert_equal
from lmfit import Model, Parameter, Parameters
-
from lmfit.printfuncs import params_html_table
@@ -299,3 +296,24 @@ class TestParameters(unittest.TestCase):
assert len(repr_full) > 150
assert len(repr_one) > 150
assert len(out) > 150
+
+ def test_add_with_symtable(self):
+ pars1 = Parameters()
+ pars1.add("a", value=1.0, vary=True)
+
+ def half(x):
+ return 0.5*x
+
+ pars2 = Parameters(usersyms={"half": half})
+ pars2.add("b", value=3.0)
+ pars2.add("c", expr="half(b)")
+
+ params = pars1 + pars2
+ assert_almost_equal(params['c'].value, 1.5)
+
+ params = pars2 + pars1
+ assert_almost_equal(params['c'].value, 1.5)
+
+ params = deepcopy(pars1)
+ params.update(pars2)
+ assert_almost_equal(params['c'].value, 1.5)
diff --git a/tests/test_saveload.py b/tests/test_saveload.py
index 0ec66cf..cb4e026 100644
--- a/tests/test_saveload.py
+++ b/tests/test_saveload.py
@@ -6,13 +6,12 @@ import numpy as np
from numpy.testing import assert_allclose
import pytest
-import lmfit.jsonutils
from lmfit import Parameters
-from lmfit.model import (load_model, load_modelresult, save_model,
- save_modelresult, Model, ModelResult)
-from lmfit.models import ExponentialModel, GaussianModel, VoigtModel
+import lmfit.jsonutils
from lmfit.lineshapes import gaussian, lorentzian
-
+from lmfit.model import (Model, ModelResult, load_model, load_modelresult,
+ save_model, save_modelresult)
+from lmfit.models import ExponentialModel, GaussianModel, VoigtModel
from lmfit_testutils import assert_between, assert_param_between
y, x = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',