summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPicca Frédéric-Emmanuel <picca@synchrotron-soleil.fr>2020-08-03 11:17:34 +0200
committerPicca Frédéric-Emmanuel <picca@synchrotron-soleil.fr>2020-08-03 11:17:34 +0200
commit1f6ed0ffb65f5672bb173ea96c1944301770d282 (patch)
tree29510176a62fef3e9c4b529cd7c5174b12817301
parent12d246f2165e9ee66f54f615ab4c65f0d4494352 (diff)
New upstream version 1.0.1
-rw-r--r--INSTALL4
-rw-r--r--LICENSE2
-rw-r--r--PKG-INFO4
-rw-r--r--THANKS.txt2
-rw-r--r--doc/__pycache__/extensions.cpython-37.pycbin405 -> 405 bytes
-rw-r--r--doc/builtin_models.rst11
-rw-r--r--doc/conf.py1
-rw-r--r--doc/constraints.rst2
-rwxr-xr-xdoc/doc_examples_to_gallery.py8
-rw-r--r--doc/faq.rst6
-rw-r--r--doc/fitting.rst2
-rw-r--r--doc/installation.rst10
-rw-r--r--doc/model.rst8
-rw-r--r--doc/whatsnew.rst39
-rw-r--r--examples/doc_model_with_iter_callback.py4
-rw-r--r--examples/example_Model_interface.py10
-rw-r--r--examples/example_brute.py21
-rw-r--r--examples/example_complex_resonator_model.py2
-rw-r--r--lmfit.egg-info/PKG-INFO4
-rw-r--r--lmfit.egg-info/SOURCES.txt10
-rw-r--r--lmfit/__init__.py2
-rw-r--r--lmfit/_version.py6
-rw-r--r--lmfit/confidence.py2
-rw-r--r--lmfit/jsonutils.py16
-rw-r--r--lmfit/lineshapes.py142
-rw-r--r--lmfit/minimizer.py234
-rw-r--r--lmfit/model.py53
-rw-r--r--lmfit/models.py160
-rw-r--r--lmfit/parameter.py67
-rw-r--r--lmfit/ui/__init__.py49
-rw-r--r--lmfit/ui/basefitter.py322
-rw-r--r--lmfit/ui/ipy_fitter.py281
-rw-r--r--setup.cfg4
-rw-r--r--setup.py4
-rw-r--r--tests/NISTModels.py2
-rw-r--r--tests/lmfit_testutils.py19
-rw-r--r--tests/test_1variable.py8
-rw-r--r--tests/test_ampgo.py23
-rw-r--r--tests/test_basicfit.py8
-rw-r--r--tests/test_bounded_jacobian.py4
-rw-r--r--tests/test_bounds.py8
-rw-r--r--tests/test_builtin_models.py (renamed from tests/test_lineshapes_models.py)69
-rw-r--r--tests/test_confidence.py86
-rw-r--r--tests/test_copy_params.py36
-rw-r--r--tests/test_covariance_matrix.py2
-rw-r--r--tests/test_custom_independentvar.py20
-rw-r--r--tests/test_default_kws.py10
-rw-r--r--tests/test_itercb.py5
-rw-r--r--tests/test_jsonutils.py12
-rw-r--r--tests/test_lineshapes.py124
-rw-r--r--tests/test_manypeaks_speed.py8
-rw-r--r--tests/test_max_nfev.py105
-rw-r--r--tests/test_model.py12
-rw-r--r--tests/test_model_uncertainties.py4
-rw-r--r--tests/test_multidatasets.py22
-rw-r--r--tests/test_nose.py63
-rw-r--r--tests/test_pandas.py24
-rw-r--r--tests/test_parameter.py147
-rw-r--r--tests/test_parameters.py889
-rw-r--r--tests/test_params_set.py186
-rw-r--r--tests/test_printfuncs.py2
-rw-r--r--tests/test_saveload.py16
-rw-r--r--tests/test_stepmodel.py36
63 files changed, 1678 insertions, 1764 deletions
diff --git a/INSTALL b/INSTALL
index 570b3ca..8a11daa 100644
--- a/INSTALL
+++ b/INSTALL
@@ -6,7 +6,7 @@ To install the lmfit Python module, use::
python setup.py build
python setup.py install
-For lmfit 1.0, the following versions are required:
+For lmfit 1.0.1, the following versions are required:
Python: 3.5 or higher
NumPy: 1.16 or higher
SciPy: 1.2 or higher
@@ -14,4 +14,4 @@ For lmfit 1.0, the following versions are required:
uncertainties: 3.0.1 or higher
Matt Newville <newville@cars.uchicago.edu>
-Last Update: 2019-December-4
+Last Update: 2020-April-29
diff --git a/LICENSE b/LICENSE
index 5691bee..c44053d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
BSD-3
-Copyright 2019 Matthew Newville, The University of Chicago
+Copyright 2020 Matthew Newville, The University of Chicago
Renee Otten, Brandeis University
Till Stensitzki, Freie Universitat Berlin
A. R. J. Nelson, Australian Nuclear Science and Technology Organisation
diff --git a/PKG-INFO b/PKG-INFO
index c37537e..3b9a3b1 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.2
Name: lmfit
-Version: 1.0.0
+Version: 1.0.1
Summary: Least-Squares Minimization with Bounds and Constraints
Home-page: https://lmfit.github.io/lmfit-py/
Author: LMFit Development Team
@@ -17,7 +17,7 @@ Description: A library for least-squares minimization and data fitting in
algorithm, and provides estimated standard errors and correlations between
varied Parameters. Other minimization methods, including Nelder-Mead's
downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
- others are also supported. Bounds and contraints can be placed on
+ others are also supported. Bounds and constraints can be placed on
Parameters for all of these methods.
In addition, methods for explicitly calculating confidence intervals are
diff --git a/THANKS.txt b/THANKS.txt
index 9b14502..6722a30 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -52,7 +52,7 @@ Additional patches, bug fixes, and suggestions have come from Faustin
Carter, Christoph Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau,
nmearl, Gustavo Pasquevich, Clemens Prescher, LiCode, Ben Gamari, Yoav
Roam, Alexander Stark, Alexandre Beelen, Andrey Aristov, Nicholas Zobrist,
-Ethan Welty, Julius Zimmermann, and many others.
+Ethan Welty, Julius Zimmermann, Mark Dean, Arun Persaud, and many others.
The lmfit code obviously depends on, and owes a very large debt to the code
in scipy.optimize. Several discussions on the SciPy-user and lmfit mailing
diff --git a/doc/__pycache__/extensions.cpython-37.pyc b/doc/__pycache__/extensions.cpython-37.pyc
index 773a719..174a916 100644
--- a/doc/__pycache__/extensions.cpython-37.pyc
+++ b/doc/__pycache__/extensions.cpython-37.pyc
Binary files differ
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index a4c7f70..4ef79a2 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -139,10 +139,15 @@ of 0 on the value of ``sigma``.
.. autoclass:: SkewedVoigtModel
-:class:`DonaichModel`
+:class:`ThermalDistributionModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ThermalDistributionModel
+
+:class:`DoniachModel`
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. autoclass:: DonaichModel
+.. autoclass:: DoniachModel
Linear and Polynomial Models
@@ -470,7 +475,7 @@ plus a constant:
After constructing step-like data, we first create a :class:`StepModel`
telling it to use the ``erf`` form (see details above), and a
:class:`ConstantModel`. We set initial values, in one case using the data
-and :meth:`guess` method for the initial step function paramaters, and
+and :meth:`guess` method for the initial step function parameters, and
:meth:`make_params` arguments for the linear component.
After making a composite model, we run :meth:`fit` and report the
results, which gives:
diff --git a/doc/conf.py b/doc/conf.py
index 424dd86..7cc1574 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -166,5 +166,4 @@ sphinx_gallery_conf = {
'gallery_dirs': 'examples',
'filename_pattern': '/documentation|/example_',
'ignore_pattern': '/doc_',
- 'expected_failing_examples': ['../examples/documentation/model_loadmodel.py']
}
diff --git a/doc/constraints.rst b/doc/constraints.rst
index c8af1e0..1678aa2 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -159,7 +159,7 @@ for the constraints::
return (amp / (1 + ((x-cen) / wid)**2))
fitter = Minimizer()
- fitter.asteval.symtable['lorentzian'] = mylorentzian
+ fitter._asteval.symtable['lorentzian'] = mylorentzian
and this :meth:`lorentzian` function can now be used in constraint
expressions.
diff --git a/doc/doc_examples_to_gallery.py b/doc/doc_examples_to_gallery.py
index d217a11..f15f3b6 100755
--- a/doc/doc_examples_to_gallery.py
+++ b/doc/doc_examples_to_gallery.py
@@ -32,15 +32,12 @@ with open(os.path.join(examples_documentation_dir, 'README.txt'), 'w') as out:
for fn in files:
inp_path = os.path.join(examples_dir, fn)
- with open(inp_path, 'r') as inp:
+ with open(inp_path) as inp:
script_text = inp.read()
gallery_file = os.path.join(examples_documentation_dir, fn[4:])
with open(gallery_file, 'w') as out:
- msg = ""
- if 'model_loadmodel.py' in fn:
- msg = ('.. note:: This example *does* actually work, but running from within '
- ' sphinx-gallery fails to find symbols saved in the save file.')
+ msg = "" # add optional message f
out.write('"""\n{}\n{}\n\n{}\n"""\n'.format(fn, "="*len(fn), msg))
out.write('##\nimport warnings\nwarnings.filterwarnings("ignore")\n##\n')
out.write(script_text)
@@ -54,7 +51,6 @@ time.sleep(1.0)
os.system('cp {}/*.dat {}'.format(examples_dir, examples_documentation_dir))
os.system('cp {}/*.csv {}'.format(examples_dir, examples_documentation_dir))
os.system('cp {}/*.sav {}'.format(examples_dir, examples_documentation_dir))
-#
os.chdir(examples_documentation_dir)
diff --git a/doc/faq.rst b/doc/faq.rst
index 0a213dc..bdfc60e 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -111,7 +111,7 @@ I get errors from NaN in my fit. What can I do?
The solvers used by lmfit use NaN (see
https://en.wikipedia.org/wiki/NaN) values as signals that the calculation
cannot continue. If any value in the residual array (typically
-`(data-model)*weight`) is NaN, then calculations of chi-square or
+``(data-model)*weight``) is NaN, then calculations of chi-square or
comparisons with other residual arrays to try find a better fit will also
give NaN and fail. There is no sensible way for lmfit or any of the
optimization routines to know how to handle such NaN values. They
@@ -122,7 +122,7 @@ function (if using ``Model``) generates a NaN, the fit will stop
immediately. If your objective or model function generates a NaN, you
really must handle that.
-`nan_policy`
+``nan_policy``
~~~~~~~~~~~~~~~~~~
If you are using :class:`lmfit.Model` and the NaN values come from your
@@ -141,7 +141,7 @@ calculated model, that should be the case.
Common sources of NaN
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If you are seeing erros due to NaN values, you will need to figure out
+If you are seeing errors due to NaN values, you will need to figure out
where they are coming from and eliminate them. It is sometimes difficult
to tell what causes NaN values. Keep in mind that all values should be
assumed to be either scalar values or numpy arrays of double precision real
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 1fcec29..64edb05 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -268,6 +268,8 @@ Goodness-of-Fit Statistics
+----------------------+----------------------------------------------------------------------------+
| init_vals | list of initial values for variable parameters |
+----------------------+----------------------------------------------------------------------------+
+| call_kws | dict of keyword arguments sent to underlying solver |
++----------------------+----------------------------------------------------------------------------+
Note that the calculation of chi-square and reduced chi-square assume
that the returned residual function is scaled properly to the
diff --git a/doc/installation.rst b/doc/installation.rst
index 9b09f36..8d19e9c 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -20,6 +20,7 @@ Downloading and Installation
.. _sphinx: https://www.sphinx-doc.org
.. _jupyter_sphinx: https://jupyter-sphinx.readthedocs.io
.. _ImageMagick: https://www.imagemagick.org/
+.. _Pillow: https://python-pillow.org/
.. _release_notes: https://lmfit.github.io/lmfit-py/whatsnew.html
@@ -43,11 +44,12 @@ functionality requires the `emcee`_ (version 3+), `corner`_, `pandas`_, `Jupyter
`matplotlib`_, `dill`_, or `numdifftools`_ packages. These are not installed
automatically, but we highly recommend each of these packages.
-For building the documentation, `matplotlib`_, `emcee`_ (version 3+), `corner`_,
-`Sphinx`_, `jupyter_sphinx`_, and `ImageMagick`_ are required (the latter
-one only when generating the PDF document).
+For building the documentation and generating the examples gallery,
+`matplotlib`_, `emcee`_ (version 3+), `corner`_, `Sphinx`_,
+`jupyter_sphinx`_, `Pillow`_ and `ImageMagick`_ are required (the
+latter one only when generating the PDF document).
-Please refer to ``requirements-dev.txt`` for a list of all dependencies that
+Please refer to ``requirements-dev.txt`` for a list of all dependencies that
are needed if you want to participate in the development of lmfit.
Downloads
diff --git a/doc/model.rst b/doc/model.rst
index ee12f4b..3f454b3 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -117,7 +117,7 @@ the expected names::
params = gmodel.make_params()
This creates the :class:`~lmfit.parameter.Parameters` but does not
-automaticaly give them initial values since it has no idea what the scale
+automatically give them initial values since it has no idea what the scale
should be. You can set initial values for parameters with keyword
arguments to :meth:`make_params`::
@@ -726,6 +726,10 @@ comparing different models, including ``chisqr``, ``redchi``, ``aic``, and ``bic
String naming fitting method for :func:`~lmfit.minimizer.minimize`.
+.. attribute:: call_kws
+
+ Dict of keyword arguments actually send to underlying solver with :func:`~lmfit.minimizer.minimize`.
+
.. attribute:: model
Instance of :class:`Model` used for model.
@@ -939,7 +943,7 @@ The components were generated after the fit using the
:meth:`ModelResult.eval_components` method of the ``result``, which returns
a dictionary of the components, using keys of the model name
(or ``prefix`` if that is set). This will use the parameter values in
-`result.params` and the independent variables (``x``) used during the
+``result.params`` and the independent variables (``x``) used during the
fit. Note that while the :class:`ModelResult` held in ``result`` does store the
best parameters and the best estimate of the model in ``result.best_fit``,
the original model and parameters in ``pars`` are left unaltered.
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
index 02cdc51..ff9a21b 100644
--- a/doc/whatsnew.rst
+++ b/doc/whatsnew.rst
@@ -12,6 +12,41 @@ to be a comprehensive list of changes. For such a complete record,
consult the `lmfit GitHub repository`_.
+.. _whatsnew_101_label:
+
+Version 1.0.1 Release Notes
+============================
+
+**Version 1.0.1 is the last release that supports Python 3.5**. All newer version will
+require 3.6+ so that we can use formatting-strings and rely on dictionaries being ordered.
+
+New features:
+
+- added thermal distribution model and lineshape (PR #620; @mpmdean)
+- introduced a new argument ``max_nfev`` to uniformly specify the maximum number of function evalutions (PR #610)
+ **Please note: all other arguments (e.g., ``maxfev``, ``maxiter``, ...) will no longer be passed to the underlying
+ solver. A warning will be emitted stating that one should use ``max_nfev``.**
+- the attribute ``call_kws`` was added to the ``MinimizerResult`` class and contains the keyword arguments that are
+ supplied to the solver in SciPy.
+
+Bug fixes:
+
+- fixes to the ``load`` and ``__setstate__`` methods of the Parameter class
+- fixed failure of ModelResult.dump() due to missing attributes (Issue #611, PR #623; @mpmdean)
+- ``guess_from_peak`` function now also works correctly with decreasing x-values or when using
+ pandas (PRs #627 and #629; @mpmdean)
+- the ``Parameter.set()`` method now correctly first updates the boundaries and then the value (Issue #636, PR #637; @arunpersaud)
+
+Various:
+
+- fixed typo for the use of expressions in the documentation (Issue #610; @jkrogager)
+- removal of PY2-compatibility and unused code and improved test coverage (PRs #619, #631, and #633)
+- removed deprecated ``isParameter`` function and automatic conversion of an ``uncertainties`` object (PR #626)
+- inaccurate FWHM calculations were removed from built-in models, others labeled as estimates (Issue #616 and PR #630)
+- corrected spelling mistake for the Doniach lineshape and model (Issue #634; @rayosborn)
+- removed unsupported/untested code for IPython notebooks in lmfit/ui/*
+
+
.. _whatsnew_100_label:
Version 1.0.0 Release Notes
@@ -37,7 +72,7 @@ Various:
Version 0.9.15 Release Notes
============================
-**Version 0.9.15 is the last release that supports Python 2.7**; it now also fully suports Python 3.8.
+**Version 0.9.15 is the last release that supports Python 2.7**; it now also fully supports Python 3.8.
New features, improvements, and bug fixes:
@@ -250,7 +285,7 @@ so that the Parameter value does not need ``sigma = params['sigma'].value``.
The older, explicit usage still works, but the docs, samples, and tests
have been updated to use the simpler usage.
-Support for Python 2.6 and SciPy 0.13 is now explicitly deprecated and wil
+Support for Python 2.6 and SciPy 0.13 is now explicitly deprecated and will
be dropped in version 0.9.5.
.. _whatsnew_093_label:
diff --git a/examples/doc_model_with_iter_callback.py b/examples/doc_model_with_iter_callback.py
index 91f2351..77d56c3 100644
--- a/examples/doc_model_with_iter_callback.py
+++ b/examples/doc_model_with_iter_callback.py
@@ -6,8 +6,8 @@ from lmfit.lineshapes import gaussian
from lmfit.models import GaussianModel, LinearModel
-def per_iteration(pars, iter, resid, *args, **kws):
- print(" ITER ", iter, ["%.5f" % p for p in pars.values()])
+def per_iteration(pars, iteration, resid, *args, **kws):
+ print(" ITER ", iteration, ["%.5f" % p for p in pars.values()])
x = linspace(0., 20, 401)
diff --git a/examples/example_Model_interface.py b/examples/example_Model_interface.py
index d2c0d0a..de05f0c 100644
--- a/examples/example_Model_interface.py
+++ b/examples/example_Model_interface.py
@@ -25,7 +25,7 @@ def decay(t, N, tau):
# The parameters are in no particular order. We'll need some example data. I
# will use ``N=7`` and ``tau=3``, and add a little noise.
t = np.linspace(0, 5, num=1000)
-data = decay(t, 7, 3) + np.random.randn(*t.shape)
+data = decay(t, 7, 3) + np.random.randn(t.size)
###############################################################################
# **Simplest Usage**
@@ -58,8 +58,8 @@ result.params.pretty_print()
# **Specifying Bounds and Holding Parameters Constant**
#
# Above, the ``Model`` class implicitly builds ``Parameter`` objects from
-# keyword arguments of ``fit`` that match the argments of ``decay``. You can
-# build the ``Parameter`` objects explicity; the following is equivalent.
+# keyword arguments of ``fit`` that match the arguments of ``decay``. You can
+# build the ``Parameter`` objects explicitly; the following is equivalent.
result = model.fit(data, t=t,
N=Parameter('N', value=10),
tau=Parameter('tau', value=1))
@@ -76,7 +76,7 @@ report_fit(result.params)
###############################################################################
# **Defining Parameters in Advance**
#
-# Passing parameters to ``fit`` can become unwieldly. As an alternative, you
+# Passing parameters to ``fit`` can become unwieldy. As an alternative, you
# can extract the parameters from ``model`` like so, set them individually,
# and pass them to ``fit``.
params = model.make_params()
@@ -127,7 +127,7 @@ report_fit(result.params)
###############################################################################
# *Handling Missing Data*
#
-# By default, attemping to fit data that includes a ``NaN``, which
+# By default, attempting to fit data that includes a ``NaN``, which
# conventionally indicates a "missing" observation, raises a lengthy exception.
# You can choose to ``omit`` (i.e., skip over) missing values instead.
data_with_holes = data.copy()
diff --git a/examples/example_brute.py b/examples/example_brute.py
index a89b71b..9650cba 100644
--- a/examples/example_brute.py
+++ b/examples/example_brute.py
@@ -15,6 +15,7 @@ Global minimization using the ``brute`` method (a.k.a. grid search)
import copy
import matplotlib.pyplot as plt
+from matplotlib.colors import LogNorm
import numpy as np
from lmfit import Minimizer, Parameters, fit_report
@@ -56,19 +57,20 @@ params.add_many(
def f1(p):
par = p.valuesdict()
return (par['a'] * par['x']**2 + par['b'] * par['x'] * par['y'] +
- par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] + par['f'])
+ par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] +
+ par['f'])
def f2(p):
par = p.valuesdict()
return (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 +
- (par['y']-par['i'])**2) / par['scale']))
+ (par['y']-par['i'])**2) / par['scale']))
def f3(p):
par = p.valuesdict()
return (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 +
- (par['y']-par['l'])**2) / par['scale']))
+ (par['y']-par['l'])**2) / par['scale']))
def f(params):
@@ -118,13 +120,13 @@ print(result.brute_grid)
print(result.brute_Jout)
###############################################################################
-# **Reassuringly, the obtained results are indentical to using the method in
+# **Reassuringly, the obtained results are identical to using the method in
# SciPy directly!**
###############################################################################
# Example 2: fit of a decaying sine wave
#
-# In this example, will explain some of the options ot the algorithm.
+# In this example, we will explain some of the options of the algorithm.
#
# We start off by generating some synthetic data with noise for a decaying
# sine wave, define an objective function and create a Parameter set.
@@ -185,7 +187,8 @@ par_name = 'shift'
indx_shift = result_brute.var_names.index(par_name)
grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name,
- len(grid_shift), grid_shift))
+ len(grid_shift),
+ grid_shift))
###############################################################################
# If finite bounds are not set for a certain parameter then the user **must**
@@ -311,10 +314,8 @@ def plot_results_brute(result, best_vals=True, varlabels=None,
output : str, optional
Name of the output PDF file (default is 'None')
"""
- from matplotlib.colors import LogNorm
-
npars = len(result.var_names)
- fig, axes = plt.subplots(npars, npars)
+ _fig, axes = plt.subplots(npars, npars)
if not varlabels:
varlabels = result.var_names
@@ -366,7 +367,7 @@ def plot_results_brute(result, best_vals=True, varlabels=None,
# contour plots for all combinations of two parameters
elif j > i:
ax = axes[j, i+1]
- red_axis = tuple([a for a in range(npars) if a != i and a != j])
+ red_axis = tuple([a for a in range(npars) if a not in (i, j)])
X, Y = np.meshgrid(np.unique(result.brute_grid[i]),
np.unique(result.brute_grid[j]))
lvls1 = np.linspace(result.brute_Jout.min(),
diff --git a/examples/example_complex_resonator_model.py b/examples/example_complex_resonator_model.py
index 5eb117a..1663d7c 100644
--- a/examples/example_complex_resonator_model.py
+++ b/examples/example_complex_resonator_model.py
@@ -32,7 +32,7 @@ import lmfit
def linear_resonator(f, f_0, Q, Q_e_real, Q_e_imag):
Q_e = Q_e_real + 1j*Q_e_imag
- return (1 - (Q * Q_e**-1 / (1 + 2j * Q * (f - f_0) / f_0)))
+ return 1 - (Q * Q_e**-1 / (1 + 2j * Q * (f - f_0) / f_0))
###############################################################################
diff --git a/lmfit.egg-info/PKG-INFO b/lmfit.egg-info/PKG-INFO
index c37537e..3b9a3b1 100644
--- a/lmfit.egg-info/PKG-INFO
+++ b/lmfit.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.2
Name: lmfit
-Version: 1.0.0
+Version: 1.0.1
Summary: Least-Squares Minimization with Bounds and Constraints
Home-page: https://lmfit.github.io/lmfit-py/
Author: LMFit Development Team
@@ -17,7 +17,7 @@ Description: A library for least-squares minimization and data fitting in
algorithm, and provides estimated standard errors and correlations between
varied Parameters. Other minimization methods, including Nelder-Mead's
downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
- others are also supported. Bounds and contraints can be placed on
+ others are also supported. Bounds and constraints can be placed on
Parameters for all of these methods.
In addition, methods for explicitly calculating confidence intervals are
diff --git a/lmfit.egg-info/SOURCES.txt b/lmfit.egg-info/SOURCES.txt
index 5932afa..06cbeab 100644
--- a/lmfit.egg-info/SOURCES.txt
+++ b/lmfit.egg-info/SOURCES.txt
@@ -122,9 +122,6 @@ lmfit.egg-info/SOURCES.txt
lmfit.egg-info/dependency_links.txt
lmfit.egg-info/requires.txt
lmfit.egg-info/top_level.txt
-lmfit/ui/__init__.py
-lmfit/ui/basefitter.py
-lmfit/ui/ipy_fitter.py
tests/NISTModels.py
tests/conftest.py
tests/lmfit_testutils.py
@@ -137,8 +134,8 @@ tests/test_basinhopping.py
tests/test_bounded_jacobian.py
tests/test_bounds.py
tests/test_brute.py
+tests/test_builtin_models.py
tests/test_confidence.py
-tests/test_copy_params.py
tests/test_covariance_matrix.py
tests/test_custom_independentvar.py
tests/test_default_kws.py
@@ -146,16 +143,17 @@ tests/test_dual_annealing.py
tests/test_itercb.py
tests/test_jsonutils.py
tests/test_least_squares.py
-tests/test_lineshapes_models.py
+tests/test_lineshapes.py
tests/test_manypeaks_speed.py
+tests/test_max_nfev.py
tests/test_minimizer.py
tests/test_model.py
tests/test_model_uncertainties.py
tests/test_multidatasets.py
tests/test_nose.py
+tests/test_pandas.py
tests/test_parameter.py
tests/test_parameters.py
-tests/test_params_set.py
tests/test_printfuncs.py
tests/test_saveload.py
tests/test_shgo.py
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index bb0d2d1..35969f6 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -31,7 +31,7 @@ enhancements, including:
* Many built-in models for common lineshapes are included and ready
to use.
-Copyright (c) 2019 Lmfit Developers ; BSD-3 license ; see LICENSE
+Copyright (c) 2020 Lmfit Developers ; BSD-3 license ; see LICENSE
"""
from asteval import Interpreter
diff --git a/lmfit/_version.py b/lmfit/_version.py
index 2d157d7..1684285 100644
--- a/lmfit/_version.py
+++ b/lmfit/_version.py
@@ -8,11 +8,11 @@ import json
version_json = '''
{
- "date": "2019-12-20T13:51:16-0600",
+ "date": "2020-05-01T23:45:06-0400",
"dirty": false,
"error": null,
- "full-revisionid": "c5f969028c8e937c02a5b009347d12e2f7843be9",
- "version": "1.0.0"
+ "full-revisionid": "60ed7602e4c747e96034e130960dc3ffa9c0ed79",
+ "version": "1.0.1"
}
''' # END VERSION_JSON
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index 14d26d5..f457ad0 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -82,7 +82,7 @@ def conf_interval(minimizer, result, p_names=None, sigmas=[1, 2, 3],
maxiter : int, optional
Maximum of iteration to find an upper limit (default is 200).
verbose: bool, optional
- Print extra debuging information (default is False).
+ Print extra debugging information (default is False).
prob_func : None or callable, optional
Function to calculate the probability from the optimized chi-square.
Default is None and uses the built-in f_compare (i.e., F-test).
diff --git a/lmfit/jsonutils.py b/lmfit/jsonutils.py
index edbd394..c7977f8 100644
--- a/lmfit/jsonutils.py
+++ b/lmfit/jsonutils.py
@@ -17,16 +17,6 @@ except ImportError:
read_json = None
-def bindecode(val):
- """b64decode wrapper."""
- return b64decode(val)
-
-
-def binencode(val):
- """b64encode wrapper."""
- return str(b64encode(val), 'utf-8')
-
-
def find_importer(obj):
"""Find importer of an object."""
oname = obj.__name__
@@ -66,7 +56,7 @@ def encode4js(obj):
if 'complex' in obj.dtype.name:
val = [(obj.real).tolist(), (obj.imag).tolist()]
elif obj.dtype.name == 'object':
- val = [encode4js(item) for item in obj['value']]
+ val = [encode4js(item) for item in obj]
else:
val = obj.flatten().tolist()
return dict(__class__='NDArray', __shape__=obj.shape,
@@ -96,7 +86,7 @@ def encode4js(obj):
pyvers = "%d.%d" % (sys.version_info.major,
sys.version_info.minor)
if HAS_DILL:
- val = binencode(dill.dumps(obj))
+ val = str(b64encode(dill.dumps(obj)), 'utf-8')
else:
val = None
importer = find_importer(obj)
@@ -142,7 +132,7 @@ def decode4js(obj):
pyvers = "%d.%d" % (sys.version_info.major,
sys.version_info.minor)
if pyvers == obj['pyversion'] and HAS_DILL:
- out = dill.loads(bindecode(obj['value']))
+ out = dill.loads(b64decode(obj['value']))
elif obj['importer'] is not None:
out = import_from(obj['importer'], val)
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index 8aa0cb0..de02faa 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -1,11 +1,10 @@
"""Basic model line shapes and distribution functions."""
-from numpy import (arctan, cos, exp, finfo, float64, isnan, log, pi, sin, sqrt,
- where)
-from numpy.testing import assert_allclose
+from numpy import (arctan, cos, exp, finfo, float64, isnan, log, pi, real, sin,
+ sqrt, where)
from scipy.special import erf, erfc
from scipy.special import gamma as gamfcn
-from scipy.special import gammaln, wofz
+from scipy.special import wofz
log2 = log(2)
s2pi = sqrt(2*pi)
@@ -15,10 +14,10 @@ tiny = finfo(float64).eps
functions = ('gaussian', 'lorentzian', 'voigt', 'pvoigt', 'moffat', 'pearson7',
'breit_wigner', 'damped_oscillator', 'dho', 'logistic', 'lognormal',
- 'students_t', 'expgaussian', 'donaich', 'skewed_gaussian',
- 'skewed_voigt', 'step', 'rectangle', 'erf', 'erfc', 'wofz',
- 'gamma', 'gammaln', 'exponential', 'powerlaw', 'linear',
- 'parabolic', 'sine', 'expsine', 'split_lorentzian')
+ 'students_t', 'expgaussian', 'doniach', 'donaich', 'skewed_gaussian',
+ 'skewed_voigt', 'thermal_distribution', 'step', 'rectangle',
+ 'exponential', 'powerlaw', 'linear', 'parabolic', 'sine',
+ 'expsine', 'split_lorentzian')
def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
@@ -55,6 +54,7 @@ def split_lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0, sigma_r=1.0):
[2*amplitude / (pi* (sigma + sigma_r)] *
{ sigma**2 * (x<center) / [sigma**2 + (x - center)**2]
+ sigma_r**2 * (x>=center) / [sigma_r**2+ (x - center)**2] }
+
"""
s = max(tiny, sigma)
r = max(tiny, sigma_r)
@@ -121,6 +121,7 @@ def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
and beta() is the beta function.
"""
+ expon = max(tiny, expon)
arg = (x-center)/max(tiny, sigma)
scale = amplitude * gamfcn(expon)/(gamfcn(0.5)*gamfcn(expon-0.5))
return scale*(1+arg**2)**(-expon)/max(tiny, sigma)
@@ -180,7 +181,7 @@ def logistic(x, amplitude=1., center=0., sigma=1.):
= amplitude*(1. - 1. / (1 + exp((x-center)/sigma)))
"""
- return amplitude*(1. - 1./(1. + exp((x-center)/sigma)))
+ return amplitude*(1. - 1./(1. + exp((x-center)/max(tiny, sigma))))
def lognormal(x, amplitude=1.0, center=0., sigma=1):
@@ -194,8 +195,8 @@ def lognormal(x, amplitude=1.0, center=0., sigma=1):
x = max(tiny, x)
else:
x[where(x <= tiny)] = tiny
- return ((amplitude/(x*max(tiny, sigma*s2pi))) * exp(-(log(x)-center)**2
- / max(tiny, (2*sigma**2))))
+ return ((amplitude/(x*max(tiny, sigma*s2pi))) *
+ exp(-(log(x)-center)**2 / max(tiny, (2*sigma**2))))
def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
@@ -203,8 +204,8 @@ def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
students_t(x, amplitude, center, sigma) =
- gamma((sigma+1)/2) (1 + (x-center)**2/sigma)^(-(sigma+1)/2)
- -------------------------
+ gamma((sigma+1)/2)
+ ---------------------------- * (1 + (x-center)**2/sigma)^(-(sigma+1)/2)
sqrt(sigma*pi)gamma(sigma/2)
"""
@@ -214,10 +215,11 @@ def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
- """Return a exponentially modified Gaussian.
+ """Return an exponentially modified Gaussian.
- expgaussian(x, amplitude, center, sigma, gamma=)
- = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
+ expgaussian(x, amplitude, center, sigma, gamma)
+ = amplitude * (gamma/2) *
+ exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]
https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
@@ -229,10 +231,10 @@ def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
return amplitude*(gamma/2) * exp(arg1) * erfc(arg2)
-def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
+def doniach(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
"""Return a Doniach Sunjic asymmetric lineshape, used for photo-emission.
- donaich(x, amplitude, center, sigma, gamma) =
+ doniach(x, amplitude, center, sigma, gamma) =
amplitude / sigma^(1-gamma) *
cos(pi*gamma/2 + (1-gamma) arctan((x-center)/sigma) /
(sigma**2 + (x-center)**2)**[(1-gamma)/2]
@@ -246,6 +248,9 @@ def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
+donaich = doniach # for back-compat
+
+
def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
"""Return a Gaussian lineshape, skewed with error function.
@@ -302,6 +307,43 @@ def expsine(x, amplitude=1.0, frequency=1.0, shift=0.0, decay=0.0):
return amplitude*sin(x*frequency + shift) * exp(-x*decay)
+def thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0, form='bose'):
+ """Return a thermal distribution function.
+
+ form = 'bose' (default) is the Bose-Einstein distribution
+ thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0):
+ = 1/(amplitude*exp((x - center)/kt) - 1)
+ form = 'maxwell' is the Maxwell-Boltzmann distribution
+ thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0):
+ = 1/(amplitude*exp((x - center)/kt))
+ form = 'fermi' is the Fermi-Dirac distribution
+ thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0):
+ = 1/(amplitude*exp((x - center)/kt) + 1)
+
+ Notes:
+ - ``kt`` should be defined in the same units as ``x``. (The Boltzmann
+ constant is kB = 8.617e-5 eV/K).
+ - set ``kt<0`` to implement the energy loss convention common in scattering
+ research.
+
+ see http://hyperphysics.phy-astr.gsu.edu/hbase/quantum/disfcn.html
+
+ """
+ form = form.lower()
+ if form.startswith('bose'):
+ offset = -1
+ elif form.startswith('maxwell'):
+ offset = 0
+ elif form.startswith('fermi'):
+ offset = 1
+ else:
+ msg = "Invalid value ('%s') for argument 'form'; should be one of %s."\
+ % (form, "'maxwell', 'fermi', or 'bose'")
+ raise ValueError(msg)
+
+ return real(1/(amplitude*exp((x - center)/kt) + offset + tiny*1j))
+
+
def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
"""Return a step function.
@@ -319,13 +361,18 @@ def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
if form == 'erf':
out = 0.5*(1 + erf(out))
- elif form.startswith('logi'):
+ elif form == 'logistic':
out = (1. - 1./(1. + exp(out)))
elif form in ('atan', 'arctan'):
out = 0.5 + arctan(out)/pi
- else:
+ elif form == 'linear':
out[where(out < 0)] = 0.0
out[where(out > 1)] = 1.0
+ else:
+ msg = "Invalid value ('%s') for argument 'form'; should be one of %s."\
+ % (form, "'erf', 'logistic', 'atan', 'arctan', or 'linear'")
+ raise ValueError(msg)
+
return amplitude*out
@@ -350,54 +397,22 @@ def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
if form == 'erf':
out = 0.5*(erf(arg1) + erf(arg2))
- elif form.startswith('logi'):
+ elif form == 'logistic':
out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
elif form in ('atan', 'arctan'):
out = (arctan(arg1) + arctan(arg2))/pi
- else:
+ elif form == 'linear':
arg1[where(arg1 < 0)] = 0.0
arg1[where(arg1 > 1)] = 1.0
arg2[where(arg2 > 0)] = 0.0
arg2[where(arg2 < -1)] = -1.0
out = arg1 + arg2
- return amplitude*out
-
-
-def _erf(x):
- """Return the error function.
-
- erf = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])
-
- """
- return erf(x)
-
-
-def _erfc(x):
- """Return the complementary error function.
-
- erfc = 1 - erf(x)
-
- """
- return erfc(x)
-
-
-def _wofz(x):
- """Return the fadeeva function for complex argument.
-
- wofz = exp(-x**2)*erfc(-i*x)
-
- """
- return wofz(x)
-
-
-def _gamma(x):
- """Return the gamma function."""
- return gamfcn(x)
-
+ else:
+ msg = "Invalid value ('%s') for argument 'form'; should be one of %s."\
+ % (form, "'erf', 'logistic', 'atan', 'arctan', or 'linear'")
+ raise ValueError(msg)
-def _gammaln(x):
- """Return the log of absolute value of gamma function."""
- return gammaln(x)
+ return amplitude*out
def exponential(x, amplitude=1, decay=1):
@@ -406,6 +421,7 @@ def exponential(x, amplitude=1, decay=1):
x -> amplitude * exp(-x/decay)
"""
+ decay = max(tiny, decay)
return amplitude * exp(-x/decay)
@@ -434,11 +450,3 @@ def parabolic(x, a=0.0, b=0.0, c=0.0):
"""
return a * x**2 + b * x + c
-
-
-def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
- err_msg='', verbose=True):
- """Check whether all actual and desired parameter values are close."""
- for param_name, value in desired.items():
- assert_allclose(actual[param_name], value, rtol,
- atol, err_msg, verbose)
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index 27352ce..7c50f97 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -14,6 +14,7 @@ See LICENSE for more complete authorship information and license.
"""
from collections import namedtuple
from copy import deepcopy
+import inspect
import multiprocessing
import numbers
import warnings
@@ -76,6 +77,16 @@ except ImportError:
# define the namedtuple here so pickle will work with the MinimizerResult
Candidate = namedtuple('Candidate', ['params', 'score'])
+MAXEVAL_Warning = "ignoring `%s` argument to `%s()`. Use `max_nfev` instead."
+
+
+def thisfuncname():
+ """Return the name of calling function."""
+ try:
+ return inspect.stack()[1].function
+ except AttributeError:
+ return inspect.stack()[1][3]
+
def asteval_with_uncertainties(*vals, **kwargs):
"""Calculate object value, given values for variables.
@@ -133,8 +144,6 @@ class MinimizerException(Exception):
class AbortFitException(MinimizerException):
"""Raised when a fit is aborted by the user."""
- pass
-
SCALAR_METHODS = {'nelder': 'Nelder-Mead',
'powell': 'Powell',
@@ -258,6 +267,8 @@ class MinimizerResult:
True if uncertainties were estimated, otherwise False.
message : str
Message about fit success.
+ call_kws : dict
+ Keyword arguments sent to underlying solver.
ier : int
Integer error value from :scipydoc:`optimize.leastsq` (`leastsq` only).
lmdif_message : str
@@ -343,6 +354,8 @@ class MinimizerResult:
def _calculate_statistics(self):
"""Calculate the fitting statistics."""
self.nvarys = len(self.init_vals)
+ if not hasattr(self, 'residual'):
+ self.residual = -np.inf
if isinstance(self.residual, ndarray):
self.chisqr = (self.residual**2).sum()
self.ndata = len(self.residual)
@@ -353,6 +366,7 @@ class MinimizerResult:
self.nfree = 1
self.redchi = self.chisqr / max(1, self.nfree)
# this is -2*loglikelihood
+ self.chisqr = max(self.chisqr, 1.e-250*self.ndata)
_neg2_log_likel = self.ndata * np.log(self.chisqr / self.ndata)
self.aic = _neg2_log_likel + 2 * self.nvarys
self.bic = _neg2_log_likel + np.log(self.ndata) * self.nvarys
@@ -366,15 +380,15 @@ class MinimizerResult:
class Minimizer:
"""A general minimizer for curve fitting and optimization."""
- _err_nonparam = ("params must be a minimizer.Parameters() instance or list "
- "of Parameters()")
- _err_maxfev = ("Too many function calls (max set to %i)! Use:"
- " minimize(func, params, ..., maxfev=NNN)"
- "or set leastsq_kws['maxfev'] to increase this maximum.")
+ _err_nonparam = ("params must be a minimizer.Parameters() instance or"
+ " list of Parameters()")
+ _err_max_evals = ("Too many function calls (max set to %i)! Use:"
+ " minimize(func, params, ..., max_nfev=NNN)"
+ " to increase this maximum.")
def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
iter_cb=None, scale_covar=True, nan_policy='raise',
- reduce_fcn=None, calc_covar=True, **kws):
+ reduce_fcn=None, calc_covar=True, max_nfev=None, **kws):
"""
Parameters
----------
@@ -391,6 +405,9 @@ class Minimizer:
Positional arguments to pass to `userfcn`.
fcn_kws : dict, optional
Keyword arguments to pass to `userfcn`.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
iter_cb : callable, optional
Function to be called at each fit iteration. This function should
have the signature::
@@ -462,10 +479,17 @@ class Minimizer:
self.userkws = fcn_kws
if self.userkws is None:
self.userkws = {}
+ for maxnfev_alias in ('maxfev', 'maxiter'):
+ if maxnfev_alias in kws:
+ warnings.warn(MAXEVAL_Warning % (maxnfev_alias, 'Minimizer'),
+ RuntimeWarning)
+ kws.pop(maxnfev_alias)
+
self.kws = kws
self.iter_cb = iter_cb
self.calc_covar = calc_covar
self.scale_covar = scale_covar
+ self.max_nfev = max_nfev
self.nfev = 0
self.nfree = 0
self.ndata = 0
@@ -484,6 +508,19 @@ class Minimizer:
self.jacfcn = None
self.nan_policy = nan_policy
+ def set_max_nfev(self, max_nfev=None, default_value=100000):
+ """Set maximum number of function evaluations.
+
+ If `max_nfev` is None, use the provided `default_value`.
+
+ >>> self.set_max_nfev(max_nfev, 1000*(result.nvarys+1))
+
+ """
+ if max_nfev is not None:
+ self.max_nfev = max_nfev
+ elif self.max_nfev in (None, np.inf):
+ self.max_nfev = default_value
+
@property
def values(self):
"""Return Parameter values in a simple dictionary."""
@@ -525,7 +562,16 @@ class Minimizer:
params[name].value = val
params.update_constraints()
+ if self.max_nfev is None:
+ self.max_nfev = np.inf
+
self.result.nfev += 1
+ if self.result.nfev > self.max_nfev:
+ self.result.aborted = True
+ m = "number of function evaluations > %d" % self.max_nfev
+ self.result.message = "Fit aborted: %s" % m
+ self.result.success = False
+ raise AbortFitException("fit aborted: too many function evaluations (%d)." % self.max_nfev)
out = self.userfcn(params, *self.userargs, **self.userkws)
@@ -655,6 +701,7 @@ class Minimizer:
result.init_vals = []
result.params.update_constraints()
result.nfev = 0
+ result.call_kws = {}
result.errorbars = False
result.aborted = False
for name, par in self.result.params.items():
@@ -693,7 +740,6 @@ class Minimizer:
removes AST compilations of constraint expressions.
"""
- pass
def _calculate_covariance_matrix(self, fvars):
"""Calculate the covariance matrix.
@@ -721,7 +767,7 @@ class Minimizer:
nfev = deepcopy(self.result.nfev)
try:
- Hfun = ndt.Hessian(self.penalty)
+ Hfun = ndt.Hessian(self.penalty, step=1.e-4)
hessian_ndt = Hfun(fvars)
cov_x = inv(hessian_ndt) * 2.0
except (LinAlgError, ValueError):
@@ -803,7 +849,8 @@ class Minimizer:
for v, nam in zip(uvars, self.result.var_names):
self.result.params[nam].value = v.nominal_value
- def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
+ def scalar_minimize(self, method='Nelder-Mead', params=None, max_nfev=None,
+ **kws):
"""Scalar minimization using :scipydoc:`optimize.minimize`.
Perform fit with any of the scalar minimization algorithms supported by
@@ -843,7 +890,10 @@ class Minimizer:
- 'differential_evolution'
params : :class:`~lmfit.parameter.Parameters`, optional
- Parameters to use as starting point.
+ Parameters to use as starting point.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations. Defaults to 1000*(nvars+1),
+ where nvars is the number of variable parameters.
**kws : dict, optional
Minimizer options pass to :scipydoc:`optimize.minimize`.
@@ -855,7 +905,7 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
Notes
-----
@@ -875,9 +925,14 @@ class Minimizer:
variables = result.init_vals
params = result.params
- fmin_kws = dict(method=method,
- options={'maxiter': 1000 * (len(variables) + 1)})
+ self.set_max_nfev(max_nfev, 1000*(result.nvarys+1))
+ fmin_kws = dict(method=method, options={'maxiter': 2*self.max_nfev})
fmin_kws.update(self.kws)
+
+ if 'maxiter' in kws:
+ warnings.warn(MAXEVAL_Warning % ('maxiter', thisfuncname()),
+ RuntimeWarning)
+ kws.pop('maxiter')
fmin_kws.update(kws)
# hess supported only in some methods
@@ -921,12 +976,15 @@ class Minimizer:
if k in kwargs:
kwargs[k] = v
+ fmin_kws = kwargs
+ result.call_kws = fmin_kws
try:
- ret = differential_evolution(self.penalty, _bounds, **kwargs)
+ ret = differential_evolution(self.penalty, _bounds, **fmin_kws)
except AbortFitException:
pass
else:
+ result.call_kws = fmin_kws
try:
ret = scipy_minimize(self.penalty, variables, **fmin_kws)
except AbortFitException:
@@ -944,6 +1002,13 @@ class Minimizer:
result.x = np.atleast_1d(result.x)
result.residual = self.__residual(result.x)
result.nfev -= 1
+ else:
+ result.x = np.array([self.result.params[p].value
+ for p in self.result.var_names])
+ self.result.nfev -= 2
+ self._abort = False
+ result.residual = self.__residual(result.x)
+ result.nfev += 1
result._calculate_statistics()
@@ -1373,11 +1438,11 @@ class Minimizer:
result.errorbars = True
result.nvarys = len(result.var_names)
result.nfev = nwalkers*steps
+
try:
result.acor = self.sampler.get_autocorr_time()
except AutocorrError as e:
print(str(e))
- pass
result.acceptance_fraction = self.sampler.acceptance_fraction
# Calculate the residual with the "best fit" parameters
@@ -1415,7 +1480,7 @@ class Minimizer:
return result
- def least_squares(self, params=None, **kws):
+ def least_squares(self, params=None, max_nfev=None, **kws):
"""Least-squares minimization using :scipydoc:`optimize.least_squares`.
This method wraps :scipydoc:`optimize.least_squares`, which has inbuilt
@@ -1426,7 +1491,10 @@ class Minimizer:
Parameters
----------
params : :class:`~lmfit.parameter.Parameters`, optional
- Parameters to use as starting point.
+ Parameters to use as starting point.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations. Defaults to 1000*(nvars+1),
+ where nvars is the number of variable parameters.
**kws : dict, optional
Minimizer options to pass to :scipydoc:`optimize.least_squares`.
@@ -1438,13 +1506,14 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
"""
result = self.prepare_fit(params)
result.method = 'least_squares'
replace_none = lambda x, sign: sign*np.inf if x is None else x
+ self.set_max_nfev(max_nfev, 1000*(result.nvarys+1))
start_vals, lower_bounds, upper_bounds = [], [], []
for vname in result.var_names:
@@ -1453,21 +1522,22 @@ class Minimizer:
lower_bounds.append(replace_none(par.min, -1))
upper_bounds.append(replace_none(par.max, 1))
+ result.call_kws = kws
try:
ret = least_squares(self.__residual, start_vals,
bounds=(lower_bounds, upper_bounds),
kwargs=dict(apply_bounds_transformation=False),
- **kws)
+ max_nfev=2*self.max_nfev, **kws)
result.residual = ret.fun
except AbortFitException:
pass
# note: upstream least_squares is actually returning
- # "last evaluation", not "best result", but we do this
- # here for consistency, and assuming it will be fixed.
+ # "last evaluation", not "best result", do we do that
+ # here for consistency
if not result.aborted:
- result.residual = self.__residual(ret.x, False)
result.nfev -= 1
+ result.residual = self.__residual(ret.x, False)
result._calculate_statistics()
if not result.aborted:
@@ -1500,7 +1570,7 @@ class Minimizer:
return result
- def leastsq(self, params=None, **kws):
+ def leastsq(self, params=None, max_nfev=None, **kws):
"""Use Levenberg-Marquardt minimization to perform a fit.
It assumes that the input Parameters have been initialized, and
@@ -1520,15 +1590,16 @@ class Minimizer:
+------------------+----------------+------------------------------------------------------------+
| ftol | 1.e-7 | Relative error in the desired sum of squares |
+------------------+----------------+------------------------------------------------------------+
- | maxfev | 2000*(nvar+1) | Maximum number of function calls (nvar= # of variables) |
- +------------------+----------------+------------------------------------------------------------+
| Dfun | None | Function to call for Jacobian calculation |
+------------------+----------------+------------------------------------------------------------+
Parameters
----------
params : :class:`~lmfit.parameter.Parameters`, optional
- Parameters to use as starting point.
+ Parameters to use as starting point.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations. Defaults to 2000*(nvars+1),
+ where nvars is the number of variable parameters.
**kws : dict, optional
Minimizer options to pass to :scipydoc:`optimize.leastsq`.
@@ -1540,20 +1611,27 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
"""
result = self.prepare_fit(params=params)
result.method = 'leastsq'
result.nfev -= 2 # correct for "pre-fit" initialization/checks
variables = result.init_vals
- nvars = len(variables)
+
+ # note we set the max number of function evaluations here, and send twice that
+ # value to the solver so it essentially never stops on its own
+ self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
+
lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False,
- gtol=1.e-7, maxfev=2000*(nvars+1), Dfun=None)
+ gtol=1.e-7, maxfev=2*self.max_nfev, Dfun=None)
+ if 'maxfev' in kws:
+ warnings.warn(MAXEVAL_Warning % ('maxfev', thisfuncname()),
+ RuntimeWarning)
+ kws.pop('maxfev')
lskws.update(self.kws)
lskws.update(kws)
-
self.col_deriv = False
if lskws['Dfun'] is not None:
self.jacfcn = lskws['Dfun']
@@ -1563,20 +1641,30 @@ class Minimizer:
# suppress runtime warnings during fit and error analysis
orig_warn_settings = np.geterr()
np.seterr(all='ignore')
-
+ result.call_kws = lskws
try:
lsout = scipy_leastsq(self.__residual, variables, **lskws)
except AbortFitException:
pass
if not result.aborted:
- _best, _cov, infodict, errmsg, ier = lsout
+ _best, _cov, _infodict, errmsg, ier = lsout
+ else:
+ _best = np.array([self.result.params[p].value
+ for p in self.result.var_names])
+ _cov = None
+ ier = -1
+ errmsg = 'Fit aborted.'
+
+ result.nfev -= 1
+ if result.nfev >= self.max_nfev:
+ result.nfev = self.max_nfev - 1
+ self.result.nfev = result.nfev
+ try:
result.residual = self.__residual(_best)
- result.nfev -= 1
- result._calculate_statistics()
-
- if result.aborted:
- return result
+ result._calculate_statistics()
+ except AbortFitException:
+ pass
result.ier = ier
result.lmdif_message = errmsg
@@ -1590,7 +1678,7 @@ class Minimizer:
elif ier == 4:
result.message = 'One or more variable did not affect the fit.'
elif ier == 5:
- result.message = self._err_maxfev % lskws['maxfev']
+ result.message = self._err_max_evals % lskws['maxfev']
else:
result.message = 'Tolerance seems to be too small.'
@@ -1644,7 +1732,7 @@ class Minimizer:
basinhopping_kws.update(kws)
x0 = result.init_vals
-
+ result.call_kws = basinhopping_kws
try:
ret = scipy_basinhopping(self.penalty, x0, **basinhopping_kws)
except AbortFitException:
@@ -1727,7 +1815,7 @@ class Minimizer:
Notes
-----
- The :meth:`brute` method evalutes the function at each point of a
+ The :meth:`brute` method evaluates the function at each point of a
multidimensional grid of points. The grid points are generated from the
parameter ranges using `Ns` and (optional) `brute_step`.
The implementation in :scipydoc:`optimize.brute` requires finite bounds
@@ -1751,10 +1839,10 @@ class Minimizer:
result = self.prepare_fit(params=params)
result.method = 'brute'
- brute_kws = dict(full_output=1, finish=None, disp=False)
+ brute_kws = dict(full_output=1, finish=None, disp=False, Ns=Ns)
# keyword 'workers' is introduced in SciPy v1.3
# FIXME: remove this check after updating the requirement >= 1.3
- major, minor, micro = scipy_version.split('.', 2)
+ major, minor, _micro = scipy_version.split('.', 2)
if int(major) == 1 and int(minor) >= 3:
brute_kws.update({'workers': workers})
@@ -1790,9 +1878,9 @@ class Minimizer:
'least an initial value and brute_step for '
'parameter "{}".'.format(result.var_names[i]))
ranges.append(par_range)
-
+ result.call_kws = brute_kws
try:
- ret = scipy_brute(self.penalty, tuple(ranges), Ns=Ns, **brute_kws)
+ ret = scipy_brute(self.penalty, tuple(ranges), **brute_kws)
except AbortFitException:
pass
@@ -1828,14 +1916,15 @@ class Minimizer:
result.candidates.append(Candidate(params=pars, score=data[1]))
result.params = result.candidates[0].params
- result.residual = self.__residual(result.brute_x0, apply_bounds_transformation=False)
+ result.residual = self.__residual(result.brute_x0,
+ apply_bounds_transformation=False)
result.nfev = len(result.brute_Jout.ravel())
result._calculate_statistics()
return result
- def ampgo(self, params=None, **kws):
+ def ampgo(self, params=None, max_nfev=None, **kws):
"""Find the global minimum of a multivariate function using AMPGO.
AMPGO stands for 'Adaptive Memory Programming for Global Optimization'
@@ -1861,7 +1950,10 @@ class Minimizer:
Options to pass to the local minimizer.
maxfunevals: int (default is None)
Maximum number of function evaluations. If None, the optimization will stop
- after `totaliter` number of iterations.
+ after `totaliter` number of iterations. (deprecated: use `max_nfev`)
+ max_nfev: int (default is None)
+ Maximum number of total function evaluations. If None, the
+ optimization will stop after `totaliter` number of iterations.
totaliter: int (default is 20)
Maximum number of global iterations.
maxiter: int (default is 5)
@@ -1895,7 +1987,7 @@ class Minimizer:
Notes
- ----
+ -----
The Python implementation was written by Andrea Gavana in 2014
(http://infinity77.net/global_optimization/index.html).
@@ -1907,6 +1999,7 @@ class Minimizer:
"""
result = self.prepare_fit(params=params)
+ self.set_max_nfev(max_nfev, 1000000*(result.nvarys+1))
ampgo_kws = dict(local='L-BFGS-B', local_opts=None, maxfunevals=None,
totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02,
@@ -1917,7 +2010,7 @@ class Minimizer:
values = result.init_vals
result.method = "ampgo, with {} as local solver".format(ampgo_kws['local'])
-
+ result.call_kws = ampgo_kws
try:
ret = ampgo(self.penalty, values, **ampgo_kws)
except AbortFitException:
@@ -1948,7 +2041,7 @@ class Minimizer:
return result
- def shgo(self, params=None, **kws):
+ def shgo(self, params=None, max_nfev=None, **kws):
"""Use the `SHGO` algorithm to find the global minimum.
SHGO stands for "simplicial homology global optimization" and calls
@@ -1959,6 +2052,10 @@ class Minimizer:
params : :class:`~lmfit.parameter.Parameters`, optional
Contains the Parameters for the model. If None, then the
Parameters used to initialize the Minimizer object are used.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations. Defaults to
+ 1e6*(result.nvarys+1), where nvars is the number of variable
+ parameters.
**kws : dict, optional
Minimizer options to pass to the SHGO algorithm.
@@ -1977,6 +2074,8 @@ class Minimizer:
result = self.prepare_fit(params=params)
result.method = 'shgo'
+ self.set_max_nfev(max_nfev, 1000000*(result.nvarys+1))
+
shgo_kws = dict(constraints=None, n=100, iters=1, callback=None,
minimizer_kwargs=None, options=None,
sampling_method='simplicial')
@@ -1987,7 +2086,7 @@ class Minimizer:
varying = np.asarray([par.vary for par in self.params.values()])
bounds = np.asarray([(par.min, par.max) for par in
self.params.values()])[varying]
-
+ result.call_kws = shgo_kws
try:
ret = scipy_shgo(self.penalty, bounds, **shgo_kws)
except AbortFitException:
@@ -2014,7 +2113,7 @@ class Minimizer:
return result
- def dual_annealing(self, params=None, **kws):
+ def dual_annealing(self, params=None, max_nfev=None, **kws):
"""Use the `dual_annealing` algorithm to find the global minimum.
This method calls :scipydoc:`optimize.dual_annealing` using its
@@ -2025,6 +2124,8 @@ class Minimizer:
params : :class:`~lmfit.parameter.Parameters`, optional
Contains the Parameters for the model. If None, then the
Parameters used to initialize the Minimizer object are used.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations. Defaults to 1e7.
**kws : dict, optional
Minimizer options to pass to the dual_annealing algorithm.
@@ -2042,11 +2143,13 @@ class Minimizer:
"""
result = self.prepare_fit(params=params)
result.method = 'dual_annealing'
+ self.set_max_nfev(max_nfev, 1.e7)
da_kws = dict(maxiter=1000, local_search_options={},
initial_temp=5230.0, restart_temp_ratio=2e-05,
- visit=2.62, accept=-5.0, maxfun=10000000.0, seed=None,
- no_local_search=False, callback=None, x0=None)
+ visit=2.62, accept=-5.0, maxfun=2*self.max_nfev,
+ seed=None, no_local_search=False, callback=None,
+ x0=None)
da_kws.update(self.kws)
da_kws.update(kws)
@@ -2058,7 +2161,7 @@ class Minimizer:
if not np.all(np.isfinite(bounds)):
raise ValueError('dual_annealing requires finite bounds for all'
' varying parameters')
-
+ result.call_kws = da_kws
try:
ret = scipy_dual_annealing(self.penalty, bounds, **da_kws)
except AbortFitException:
@@ -2142,12 +2245,18 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
"""
function = self.leastsq
kwargs = {'params': params}
kwargs.update(self.kws)
+ for maxnfev_alias in ('maxfev', 'maxiter'):
+ if maxnfev_alias in kws:
+ warnings.warn(MAXEVAL_Warning % (maxnfev_alias, thisfuncname()),
+ RuntimeWarning)
+ kws.pop(maxnfev_alias)
+
kwargs.update(kws)
user_method = method.lower()
@@ -2242,7 +2351,7 @@ def _nan_policy(arr, nan_policy='raise', handle_inf=True):
with np.errstate(invalid='ignore'):
contains_nan = handler_func(np.sum(arr))
except TypeError:
- # If the check cannot be properly performed we fallback to omiting
+ # If the check cannot be properly performed we fallback to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
@@ -2260,7 +2369,7 @@ def _nan_policy(arr, nan_policy='raise', handle_inf=True):
def minimize(fcn, params, method='leastsq', args=None, kws=None, iter_cb=None,
scale_covar=True, nan_policy='raise', reduce_fcn=None,
- calc_covar=True, **fit_kws):
+ calc_covar=True, max_nfev=None, **fit_kws):
"""Perform a fit of a set of parameters by minimizing an objective (or
cost) function using one of the several available methods.
@@ -2343,6 +2452,9 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None, iter_cb=None,
Whether to calculate the covariance matrix (default is True) for
solvers other than `leastsq` and `least_squares`. Requires the
`numdifftools` package to be installed.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
**fit_kws : dict, optional
Options to pass to the minimizer being used.
@@ -2389,5 +2501,5 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None, iter_cb=None,
fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
iter_cb=iter_cb, scale_covar=scale_covar,
nan_policy=nan_policy, reduce_fcn=reduce_fcn,
- calc_covar=calc_covar, **fit_kws)
+ calc_covar=calc_covar, max_nfev=max_nfev, **fit_kws)
return fitter.minimize(method=method)
diff --git a/lmfit/model.py b/lmfit/model.py
index 64d4470..57c9fee 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -5,7 +5,6 @@ from functools import wraps
import inspect
import json
import operator
-import sys
import warnings
import numpy as np
@@ -37,7 +36,7 @@ def _align(var, mask, data):
try:
- import matplotlib # noqa: F401
+ from matplotlib import pyplot as plt
_HAS_MATPLOTLIB = True
except Exception:
_HAS_MATPLOTLIB = False
@@ -353,9 +352,8 @@ class Model:
Returns
-------
- None or int
- Return value from `fp.write()`. None for Python 2.7 and the
- number of characters written in Python 3.
+ int
+ Return value from `fp.write()`: the number of characters written.
See Also
--------
@@ -437,7 +435,6 @@ class Model:
def _set_paramhints_prefix(self):
"""Reset parameter hints for prefix: intended to be overwritten."""
- pass
@property
def param_names(self):
@@ -468,7 +465,7 @@ class Model:
for name, defval in self.func.kwargs:
kw_args[name] = defval
# 2. modern, best-practice approach: use inspect.signature
- elif sys.version_info > (3, 4):
+ else:
pos_args = []
kw_args = {}
keywords_ = None
@@ -483,15 +480,6 @@ class Model:
kw_args[fnam] = fpar.default
elif fpar.kind == fpar.VAR_POSITIONAL:
raise ValueError("varargs '*%s' is not supported" % fnam)
- # 3. Py2 compatible approach
- else:
- argspec = inspect.getargspec(self.func)
- keywords_ = argspec.keywords
- pos_args = argspec.args
- kw_args = {}
- if argspec.defaults is not None:
- for val in reversed(argspec.defaults):
- kw_args[pos_args.pop()] = val
# inspection done
self._func_haskeywords = keywords_ is not None
@@ -876,7 +864,7 @@ class Model:
def fit(self, data, params=None, weights=None, method='leastsq',
iter_cb=None, scale_covar=True, verbose=False, fit_kws=None,
- nan_policy=None, calc_covar=True, **kwargs):
+ nan_policy=None, calc_covar=True, max_nfev=None, **kwargs):
"""Fit the model to the data using the supplied Parameters.
Parameters
@@ -906,6 +894,9 @@ class Model:
Whether to calculate the covariance matrix (default is True) for
solvers other than `leastsq` and `least_squares`. Requires the
`numdifftools` package to be installed.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
**kwargs: optional
Arguments to pass to the model function, possibly overriding
params.
@@ -1018,7 +1009,7 @@ class Model:
output = ModelResult(self, params, method=method, iter_cb=iter_cb,
scale_covar=scale_covar, fcn_kws=kwargs,
nan_policy=self.nan_policy, calc_covar=calc_covar,
- **fit_kws)
+ max_nfev=max_nfev, **fit_kws)
output.fit(data=data, weights=weights)
output.components = self.components
return output
@@ -1289,7 +1280,7 @@ class ModelResult(Minimizer):
def __init__(self, model, params, data=None, weights=None,
method='leastsq', fcn_args=None, fcn_kws=None,
iter_cb=None, scale_covar=True, nan_policy='raise',
- calc_covar=True, **fit_kws):
+ calc_covar=True, max_nfev=None, **fit_kws):
"""
Parameters
----------
@@ -1317,6 +1308,9 @@ class ModelResult(Minimizer):
Whether to calculate the covariance matrix (default is True) for
solvers other than `leastsq` and `least_squares`. Requires the
`numdifftools` package to be installed.
+ max_nfev: int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
**fit_kws : optional
Keyword arguments to send to minimization routine.
@@ -1328,9 +1322,11 @@ class ModelResult(Minimizer):
self.ci_out = None
self.user_options = None
self.init_params = deepcopy(params)
- Minimizer.__init__(self, model._residual, params, fcn_args=fcn_args,
- fcn_kws=fcn_kws, iter_cb=iter_cb, nan_policy=nan_policy,
- scale_covar=scale_covar, calc_covar=calc_covar, **fit_kws)
+ Minimizer.__init__(self, model._residual, params,
+ fcn_args=fcn_args, fcn_kws=fcn_kws,
+ iter_cb=iter_cb, nan_policy=nan_policy,
+ scale_covar=scale_covar, calc_covar=calc_covar,
+ max_nfev=max_nfev, **fit_kws)
def fit(self, data=None, params=None, weights=None, method=None,
nan_policy=None, **kwargs):
@@ -1479,6 +1475,8 @@ class ModelResult(Minimizer):
covar = self.covar
fjac = np.zeros((nvarys, ndata))
df2 = np.zeros(ndata)
+ if any([p.stderr is None for p in self.params.values()]):
+ return df2
# find derivative by hand!
pars = self.params.copy()
@@ -1619,7 +1617,10 @@ class ModelResult(Minimizer):
'nfree', 'nvarys', 'redchi', 'scale_covar', 'calc_covar',
'success', 'userargs', 'userkws', 'values', 'var_names',
'weights', 'user_options'):
- val = getattr(self, attr)
+ try:
+ val = getattr(self, attr)
+ except AttributeError:
+ continue
if isinstance(val, np.bool_):
val = bool(val)
out[attr] = encode4js(val)
@@ -1637,9 +1638,8 @@ class ModelResult(Minimizer):
Returns
-------
- None or int
- Return value from `fp.write()`. None for Python 2.7 and the
- number of characters written in Python 3.
+ int
+ Return value from `fp.write()`: the number of characters written.
See Also
--------
@@ -2045,7 +2045,6 @@ class ModelResult(Minimizer):
ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
"""
- from matplotlib import pyplot as plt
if data_kws is None:
data_kws = {}
if fit_kws is None:
diff --git a/lmfit/models.py b/lmfit/models.py
index 49775bd..1822432 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -5,11 +5,12 @@ from asteval import Interpreter, get_ast_names
import numpy as np
from . import lineshapes
-from .lineshapes import (breit_wigner, damped_oscillator, dho, donaich,
+from .lineshapes import (breit_wigner, damped_oscillator, dho, doniach,
expgaussian, exponential, gaussian, linear, lognormal,
lorentzian, moffat, parabolic, pearson7, powerlaw,
pvoigt, rectangle, skewed_gaussian, skewed_voigt,
- split_lorentzian, step, students_t, voigt)
+ split_lorentzian, step, students_t,
+ thermal_distribution, voigt)
from .model import Model
tiny = np.finfo(np.float).eps
@@ -18,8 +19,6 @@ tiny = np.finfo(np.float).eps
class DimensionalError(Exception):
"""Raise exception when number of independent variables is not one."""
- pass
-
def _validate_1d(independent_vars):
if len(independent_vars) != 1:
@@ -27,13 +26,6 @@ def _validate_1d(independent_vars):
"This model requires exactly one independent variable.")
-def index_of(arr, val):
- """Return index of array nearest to a value."""
- if val < min(arr):
- return 0
- return np.abs(arr-val).argmin()
-
-
def fwhm_expr(model):
"""Return constraint expression for fwhm."""
fmt = "{factor:.7f}*{prefix:s}sigma"
@@ -50,22 +42,27 @@ def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
"""Estimate amp, cen, sigma for a peak, create params."""
if x is None:
return 1.0, 0.0, 1.0
+
+ sort_increasing = np.argsort(x)
+ x = x[sort_increasing]
+ y = y[sort_increasing]
+
maxy, miny = max(y), min(y)
maxx, minx = max(x), min(x)
- imaxy = index_of(y, maxy)
- cen = x[imaxy]
- amp = (maxy - miny)*3.0
+ cen = x[np.argmax(y)]
+ height = (maxy - miny)*3.0
sig = (maxx-minx)/6.0
- halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
+ # the explicit conversion to a NumPy array is to make sure that the
+ # indexing on line 65 also works if the data is supplied as pandas.Series
+ x_halfmax = np.array(x[y > (maxy+miny)/2.0])
if negative:
- imaxy = index_of(y, miny)
- amp = -(maxy - miny)*3.0
- halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
- if len(halfmax_vals) > 2:
- sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
- cen = x[halfmax_vals].mean()
- amp = amp*sig*ampscale
+ height = -(maxy - miny)*3.0
+ x_halfmax = x[y < (maxy+miny)/2.0]
+ if len(x_halfmax) > 2:
+ sig = (x_halfmax[-1] - x_halfmax[0])/2.0
+ cen = x_halfmax.mean()
+ amp = height*sig*ampscale
sig = sig*sigscale
pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
@@ -250,7 +247,7 @@ ParabolicModel = QuadraticModel
class PolynomialModel(Model):
- r"""A polynomial model with up to 7 Parameters, specfied by ``degree``.
+ r"""A polynomial model with up to 7 Parameters, specified by ``degree``.
.. math::
@@ -589,7 +586,8 @@ class Pearson7Model(Model):
with four parameters: ``amplitude`` (:math:`A`), ``center``
(:math:`\mu`), ``sigma`` (:math:`\sigma`), and ``exponent`` (:math:`m`).
In addition, parameters ``fwhm`` and ``height`` are included as constraints
- to report full width at half maximum and maximum peak height, respectively.
+ to report estimates for the full width at half maximum and maximum peak height,
+ respectively.
.. math::
@@ -674,8 +672,6 @@ class BreitWignerModel(Model):
https://en.wikipedia.org/wiki/Fano_resonance), with four Parameters:
``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
``sigma`` (:math:`\sigma`), and ``q`` (:math:`q`).
- In addition, parameters ``fwhm`` and ``height`` are included as constraints
- to report full width at half maximum and maximum peak height, respectively.
.. math::
@@ -692,11 +688,6 @@ class BreitWignerModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0.0)
- fmt = ("{prefix:s}amplitude*{prefix:s}q**2")
- self.set_param_hint('height', expr=fmt.format(prefix=self.prefix))
- fmt = ("2*(sqrt({prefix:s}q**2*{prefix:s}sigma**2*({prefix:s}q**2+2))/"
- "max({0}, 2*({prefix:s}q**2)-2))")
- self.set_param_hint('fwhm', expr=fmt.format(tiny, prefix=self.prefix))
def guess(self, data, x=None, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -714,7 +705,8 @@ class LognormalModel(Model):
``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma``
(:math:`\sigma`).
In addition, parameters ``fwhm`` and ``height`` are included as constraints
- to report full width at half maximum and maximum peak height, respectively.
+ to report estimates of full width at half maximum and maximum peak height,
+ respectively.
.. math::
@@ -757,8 +749,8 @@ class DampedOscillatorModel(Model):
(see https://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part), with
three Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
``sigma`` (:math:`\sigma`).
- In addition, parameters ``fwhm`` and ``height`` are included as constraints
- to report full width at half maximum and maximum peak height, respectively.
+ In addition, the parameter ``height`` is included as a constraint
+ to report the maximum peak height.
.. math::
@@ -778,13 +770,6 @@ class DampedOscillatorModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
self.set_param_hint('height', expr=height_expr(self))
- fmt = ("sqrt(abs({prefix:s}center**2*(1-2*{prefix:s}sigma**2)+"
- "(2*sqrt({prefix:s}center**4*{prefix:s}sigma**2*"
- "({prefix:s}sigma**2+3)))))-"
- "sqrt(abs({prefix:s}center**2*(1-2*{prefix:s}sigma**2)-"
- "(2*sqrt({prefix:s}center**4*{prefix:s}sigma**2*"
- "({prefix:s}sigma**2+3)))))")
- self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
def guess(self, data, x=None, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -803,7 +788,8 @@ class DampedHarmonicOscillatorModel(Model):
four Parameters: ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`).
In addition, parameters ``fwhm`` and ``height`` are included as constraints
- to report full width at half maximum and maximum peak height, respectively.
+ to report estimates for full width at half maximum and maximum peak height,
+ respectively.
.. math::
@@ -850,8 +836,6 @@ class ExponentialGaussianModel(Model):
(see https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution) with
four Parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
``sigma`` (:math:`\sigma`), and ``gamma`` (:math:`\gamma`).
- In addition, parameters ``fwhm`` and ``height`` are included as constraints
- to report full width at half maximum and maximum peak height, respectively.
.. math::
@@ -863,9 +847,6 @@ class ExponentialGaussianModel(Model):
where :func:`erfc` is the complementary error function.
"""
-
- fwhm_factor = 2*np.sqrt(2*np.log(2))
-
def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
@@ -876,11 +857,6 @@ class ExponentialGaussianModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', min=0, max=20)
- fmt = ("{prefix:s}amplitude*{prefix:s}gamma/2*"
- "exp({prefix:s}gamma**2*{prefix:s}sigma**2/2)*"
- "erfc({prefix:s}gamma*{prefix:s}sigma/sqrt(2))")
- self.set_param_hint('height', expr=fmt.format(prefix=self.prefix))
- self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -896,8 +872,6 @@ class SkewedGaussianModel(Model):
(see https://en.wikipedia.org/wiki/Skew_normal_distribution), with Parameters
``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma`` (:math:`\sigma`),
and ``gamma`` (:math:`\gamma`).
- In addition, parameters ``fwhm`` and ``height`` are included as constraints
- to report full width at half maximum and maximum peak height, respectively.
.. math::
@@ -911,9 +885,6 @@ class SkewedGaussianModel(Model):
"""
- fwhm_factor = 2*np.sqrt(2*np.log(2))
- height_factor = 1./np.sqrt(2*np.pi)
-
def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
@@ -923,8 +894,6 @@ class SkewedGaussianModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
- self.set_param_hint('height', expr=height_expr(self))
- self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -941,10 +910,7 @@ class SkewedVoigtModel(Model):
https://en.wikipedia.org/wiki/Voigt_distribution). It has Parameters
``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), ``sigma``
(:math:`\sigma`), and ``gamma`` (:math:`\gamma`), as usual for a
- Voigt distribution, and add a Parameter ``skew``. In addition,
- parameters ``fwhm`` and ``height`` are included as constraints to
- report full width at half maximum and maximum peak height, of the
- Voigt distribution, respectively.
+ Voigt distribution, and add a Parameter ``skew``.
.. math::
@@ -958,9 +924,6 @@ class SkewedVoigtModel(Model):
"""
- fwhm_factor = 3.60131
- height_factor = 1./np.sqrt(2*np.pi)
-
def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
@@ -971,8 +934,6 @@ class SkewedVoigtModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
- self.set_param_hint('height', expr=height_expr(self))
- self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
@@ -983,7 +944,62 @@ class SkewedVoigtModel(Model):
guess.__doc__ = COMMON_GUESS_DOC
-class DonaichModel(Model):
+class ThermalDistributionModel(Model):
+ r"""Return a thermal distribution function.
+
+ Variable ``form`` defines
+ the kind of distribution as below with parameters ``amplitude``
+ (:math:`A`), ``center`` (:math:`x_0`) and ``kt`` (:math:`kt`). The
+ following distributions are available:
+
+ - ``bose`` (the default) Bose-Einstein distribution
+ - ``maxwell`` Maxwell-Boltzmann distribution
+ - ``fermi`` Fermi-Dirac distribution
+
+ The functional forms are defined as:
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ & f(x; A, x_0, kt, {\mathrm{form={}'bose{}'}}) & = \frac{1}{A \exp(\frac{x - x_0}{kt}) - 1} \\
+ & f(x; A, x_0, kt, {\mathrm{form={}'maxwell{}'}}) & = \frac{1}{A \exp(\frac{x - x_0}{kt})} \\
+ & f(x; A, x_0, kt, {\mathrm{form={}'fermi{}'}}) & = \frac{1}{A \exp(\frac{x - x_0}{kt}) + 1} ]
+ \end{eqnarray*}
+
+ see http://hyperphysics.phy-astr.gsu.edu/hbase/quantum/disfcn.html
+
+ Comments:
+
+ - ``kt`` should be defined in the same units as ``x`` (:math:`k_B = 8.617\times10^{-5}` eV/K).
+ - set :math:`kt<0` to implement the energy loss convention common in
+ scattering research.
+
+ """
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ form='bose', **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'form': form, 'independent_vars': independent_vars})
+ super().__init__(thermal_distribution, **kwargs)
+ self._set_paramhints_prefix()
+
+ def guess(self, data, x=None, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ if x is None:
+ center = 0
+ kt = 1
+ else:
+ center = np.mean(x)
+ kt = (max(x) - min(x))/10
+
+ pars = self.make_params()
+ return update_param_vals(pars, self.prefix, center=center, kt=kt)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class DoniachModel(Model):
r"""A model of an Doniach Sunjic asymmetric lineshape
(see https://www.casaxps.com/help_manual/line_shapes.htm), used in
photo-emission, with four Parameters ``amplitude`` (:math:`A`),
@@ -1003,7 +1019,7 @@ class DonaichModel(Model):
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
- super().__init__(donaich, **kwargs)
+ super().__init__(doniach, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
@@ -1020,6 +1036,9 @@ class DonaichModel(Model):
guess.__doc__ = COMMON_GUESS_DOC
+DonaichModel = DoniachModel # for back-compat
+
+
class PowerLawModel(Model):
r"""A model based on a Power Law (see https://en.wikipedia.org/wiki/Power_law),
with two Parameters: ``amplitude`` (:math:`A`), and ``exponent`` (:math:`k`), in:
@@ -1307,4 +1326,3 @@ class ExpressionModel(Model):
This prevents normal parsing of function for parameter names.
"""
- pass
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 1e64665..384f83e 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -2,14 +2,11 @@
from collections import OrderedDict
from copy import deepcopy
-import importlib
import json
-import warnings
from asteval import Interpreter, get_ast_names, valid_symbol_name
-from numpy import arcsin, array, cos, inf, isclose, nan, sin, sqrt
+from numpy import arcsin, array, cos, inf, isclose, sin, sqrt
import scipy.special
-import uncertainties
from .jsonutils import decode4js, encode4js
from .printfuncs import params_html_table
@@ -190,22 +187,12 @@ class Parameters(OrderedDict):
# an Error. Another way of doing this would be to remove all the expr
# from the Parameter instances before they get added, then to restore
# them.
- # self._asteval.symtable.update(state['unique_symbols'])
symtab = self._asteval.symtable
for key, val in state['unique_symbols'].items():
if key not in symtab:
- if isinstance(val, dict):
- value = val.get('__name__', None)
- symname = val.get('__name__', None)
- importer = val.get('importer', None)
- if value is None and symname is not None and importer is not None:
- _mod = importlib.import_module(importer)
- value = getattr(_mod, symname, None)
- if value is not None:
- symtab[key] = value
- else:
- symtab[key] = val
+ symtab[key] = val
+
# then add all the parameters
self.add_many(*state['params'])
@@ -466,9 +453,11 @@ class Parameters(OrderedDict):
"""
self.clear()
- tmp = decode4js(json.loads(s, **kws))
- state = {'unique_symbols': tmp['unique_symbols'],
- 'params': []}
+ tmp = json.loads(s, **kws)
+ unique_symbols = {key: decode4js(tmp['unique_symbols'][key]) for key
+ in tmp['unique_symbols']}
+
+ state = {'unique_symbols': unique_symbols, 'params': []}
for parstate in tmp['params']:
_par = Parameter(name='')
_par.__setstate__(parstate)
@@ -488,9 +477,8 @@ class Parameters(OrderedDict):
Returns
-------
- None or int
- Return value from `fp.write()`. None for Python 2.7 and the
- number of characters written in Python 3.
+ int
+ Return value from `fp.write()`: the number of characters written.
See Also
--------
@@ -533,7 +521,7 @@ class Parameter:
placed on the Parameter's value by setting its `min` and/or `max`
attributes. A Parameter can also have its value determined by a
mathematical expression of other Parameter values held in the `expr`
- attrribute. Additional attributes include `brute_step` used as the step
+ attribute. Additional attributes include `brute_step` used as the step
size in a brute-force minimization, and `user_data` reserved
exclusively for user's need.
@@ -643,10 +631,6 @@ class Parameter:
par.set(brute_step=0) # removes brute_step
"""
- if value is not None:
- self.value = value
- self.__set_expression('')
-
if vary is not None:
self.vary = vary
if vary:
@@ -658,6 +642,12 @@ class Parameter:
if max is not None:
self.max = max
+ # need to set this after min and max, so that it will use new
+ # bounds in the setter for value
+ if value is not None:
+ self.value = value
+ self.__set_expression("")
+
if expr is not None:
self.__set_expression(expr)
@@ -789,17 +779,6 @@ class Parameter:
# _expr_eval.symtable is kept up-to-date.
# If you just assign to self._val then _expr_eval.symtable[self.name]
# becomes stale if parameter.expr is not None.
- if (isinstance(self._val, uncertainties.core.Variable) and
- self._val is not nan):
- msg = ("Please make sure that the Parameter value is a number, "
- "not an instance of 'uncertainties.core.Variable'. This "
- "automatic conversion will be removed in the next release.")
- warnings.warn(FutureWarning(msg))
- try:
- self.value = self._val.nominal_value
- except AttributeError:
- pass
-
if self._expr is not None:
if self._expr_ast is None:
self.__set_expression(self._expr)
@@ -809,10 +788,6 @@ class Parameter:
check_ast_errors(self._expr_eval)
return self._val
- def set_expr_eval(self, evaluator):
- """Set expression evaluator instance."""
- self._expr_eval = evaluator
-
@property
def value(self):
"""Return the numerical value of the Parameter, with bounds applied."""
@@ -988,11 +963,3 @@ class Parameter:
def __rsub__(self, other):
"""- (right)"""
return other - self._getval()
-
-
-def isParameter(x):
- """Test for Parameter-ness."""
- msg = 'The isParameter function will be removed in the next release.'
- warnings.warn(FutureWarning(msg))
- return (isinstance(x, Parameter) or
- x.__class__.__name__ == 'Parameter')
diff --git a/lmfit/ui/__init__.py b/lmfit/ui/__init__.py
deleted file mode 100644
index ae23d19..0000000
--- a/lmfit/ui/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# These variables are used at the end of the module to decide
-# which BaseFitter subclass the Fitter will point to.
-import warnings
-
-from .basefitter import BaseFitter
-
-has_ipython, has_matplotlib = False, False
-
-try:
- import matplotlib
-except ImportError:
- pass
-else:
- has_matplotlib = True
-
-try:
- import IPython
-except ImportError:
- warnings.warn("lmfit.Fitter will use basic mode, not IPython: need matplotlib")
-else:
- _ipy_msg1 = "lmfit.Fitter will use basic mode, not IPython: need IPython2."
- _ipy_msg2 = "lmfit.Fitter will use basic mode, not IPython: could not get IPython version"
- _ipy_msg3 = "lmfit.Fitter will use basic mode, not IPython: need ipywidgets."
- try:
- major_version = IPython.release.version_info[0]
- if major_version < 2:
- warnings.warn(_ipy_msg1)
- elif major_version > 3:
- # After IPython 3, widgets were moved to a separate package.
- # There is a shim to allow the old import, but the package has to be
- # installed for that to work.
- try:
- import ipywidgets
- except ImportError:
- warnings.warn(_ipy_msg3)
- else:
- # has_ipython = iPython installed and we are in an IPython session.
- has_ipython = IPython.get_ipython() is not None
- except Exception as e:
- warnings.warn(_ipy_msg2)
-
-Fitter = BaseFitter
-if has_matplotlib:
- from .basefitter import MPLFitter
- Fitter = MPLFitter
-
-if has_ipython:
- from .ipy_fitter import NotebookFitter
- Fitter = NotebookFitter
diff --git a/lmfit/ui/basefitter.py b/lmfit/ui/basefitter.py
deleted file mode 100644
index 9b573af..0000000
--- a/lmfit/ui/basefitter.py
+++ /dev/null
@@ -1,322 +0,0 @@
-import warnings # noqa: F401
-
-from asteval import Interpreter
-from asteval.astutils import NameFinder
-import numpy as np # noqa: F401
-
-from ..model import Model # noqa: F401
-from ..models import ExponentialModel # arbitrary default
-from ..parameter import check_ast_errors
-
-_COMMON_DOC = """
- This an interactive container for fitting models to particular data.
-
- It maintains the attributes `current_params` and `current_result`. When
- its fit() method is called, the best fit becomes the new `current_params`.
- The most basic usage is iteratively fitting data, taking advantage of
- this stateful memory that keep the parameters between each fit.
-"""
-
-_COMMON_EXAMPLES_DOC = """
-
- Examples
- --------
- >>> fitter = Fitter(data, model=SomeModel, x=x)
-
- >>> fitter.model
- # This property can be changed, to try different models on the same
- # data with the same independent vars.
- # (This is especially handy in the notebook.)
-
- >>> fitter.current_params
- # This copy of the model's Parameters is updated after each fit.
-
- >>> fitter.fit()
- # Perform a fit using fitter.current_params as a guess.
- # Optionally, pass a params argument or individual keyword arguments
- # to override current_params.
-
- >>> fitter.current_result
- # This is the result of the latest fit. It contain the usual
- # copies of the Parameters, in the attributes params and init_params.
-
- >>> fitter.data = new_data
- # If this property is updated, the `current_params` are retained an used
- # as an initial guess if fit() is called again.
- """
-
-
-class BaseFitter:
- __doc__ = _COMMON_DOC + """
-
- Parameters
- ----------
- data : array-like
- model : lmfit.Model
- optional initial Model to use, maybe be set or changed later
- """ + _COMMON_EXAMPLES_DOC
-
- def __init__(self, data, model=None, **kwargs):
- self._data = data
- self.kwargs = kwargs
-
- # GUI-based subclasses need a default value for the menu of models,
- # and so an arbitrary default is applied here, for uniformity
- # among the subclasses.
- if model is None:
- model = ExponentialModel
- self.model = model
-
- def _on_model_value_change(self, name, value):
- self.model = value
-
- def _on_fit_button_click(self, b):
- self.fit()
-
- def _on_guess_button_click(self, b):
- self.guess()
-
- @property
- def data(self):
- return self._data
-
- @data.setter
- def data(self, value):
- self._data = value
-
- @property
- def model(self):
- return self._model
-
- @model.setter
- def model(self, value):
- if callable(value):
- model = value()
- else:
- model = value
- self._model = model
- self.current_result = None
- self._current_params = model.make_params()
-
- # Use these to evaluate any Parameters that use expressions.
- self.asteval = Interpreter()
- self.namefinder = NameFinder()
-
- self._finalize_model(value)
-
- self.guess()
-
- def _finalize_model(self, value):
- # subclasses optionally override to update display here
- pass
-
- @property
- def current_params(self):
- """Each time fit() is called, these will be updated to reflect
- the latest best params. They will be used as the initial guess
- for the next fit, unless overridden by arguments to fit()."""
- return self._current_params
-
- @current_params.setter
- def current_params(self, new_params):
- # Copy contents, but retain original params objects.
- for name, par in new_params.items():
- self._current_params[name].value = par.value
- self._current_params[name].expr = par.expr
- self._current_params[name].vary = par.vary
- self._current_params[name].min = par.min
- self._current_params[name].max = par.max
-
- # Compute values for expression-based Parameters.
- self.__assign_deps(self._current_params)
- for _, par in self._current_params.items():
- if par.value is None:
- self.__update_paramval(self._current_params, par.name)
-
- self._finalize_params()
-
- def _finalize_params(self):
- # subclasses can override this to pass params to display
- pass
-
- def guess(self):
- count_indep_vars = len(self.model.independent_vars)
- guessing_successful = True
- try:
- if count_indep_vars == 0:
- guess = self.model.guess(self._data)
- elif count_indep_vars == 1:
- key = self.model.independent_vars[0]
- val = self.kwargs[key]
- d = {key: val}
- guess = self.model.guess(self._data, **d)
- self.current_params = guess
- except NotImplementedError:
- guessing_successful = False
- return guessing_successful
-
- def __assign_deps(self, params):
- # N.B. This does not use self.current_params but rather
- # new Parameters that are being built by self.guess().
- for name, par in params.items():
- if par.expr is not None:
- par.ast = self.asteval.parse(par.expr)
- check_ast_errors(self.asteval.error)
- par.deps = []
- self.namefinder.names = []
- self.namefinder.generic_visit(par.ast)
- for symname in self.namefinder.names:
- if (symname in self.current_params and symname not in
- par.deps):
- par.deps.append(symname)
- self.asteval.symtable[name] = par.value
- if par.name is None:
- par.name = name
-
- def __update_paramval(self, params, name):
- # N.B. This does not use self.current_params but rather
- # new Parameters that are being built by self.guess().
- par = params[name]
- if getattr(par, 'expr', None) is not None:
- if getattr(par, 'ast', None) is None:
- par.ast = self.asteval.parse(par.expr)
- if par.deps is not None:
- for dep in par.deps:
- self.__update_paramval(params, dep)
- par.value = self.asteval.run(par.ast)
- out = check_ast_errors(self.asteval.error)
- if out is not None:
- self.asteval.raise_exception(None)
- self.asteval.symtable[name] = par.value
-
- def fit(self, *args, **kwargs):
- "Use current_params unless overridden by arguments passed here."
- guess = dict(self.current_params)
- guess.update(self.kwargs) # from __init__, e.g. x=x
- guess.update(kwargs)
- self.current_result = self.model.fit(self._data, *args, **guess)
- self.current_params = self.current_result.params
-
-
-class MPLFitter(BaseFitter):
- # This is a small elaboration on BaseModel; it adds a plot()
- # method that depends on matplotlib. It adds several plot-
- # styling arguments to the signature.
- __doc__ = _COMMON_DOC + """
-
- Parameters
- ----------
- data : array-like
- model : lmfit.Model
- optional initial Model to use, maybe be set or changed later
-
- Additional Parameters
- ---------------------
- axes_style : dictionary representing style keyword arguments to be
- passed through to `Axes.set(...)`
- data_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the data points
- init_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the initial fit
- line
- best_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the best fit
- line
- **kwargs : independent variables or extra arguments, passed like `x=x`
- """ + _COMMON_EXAMPLES_DOC
-
- def __init__(self, data, model=None, axes_style={}, data_style={},
- init_style={}, best_style={}, **kwargs):
- self.axes_style = axes_style
- self.data_style = data_style
- self.init_style = init_style
- self.best_style = best_style
- super().__init__(data, model, **kwargs)
-
- def plot(self, axes_style={}, data_style={}, init_style={}, best_style={},
- ax=None):
- """Plot data, initial guess fit, and best fit.
-
- Optional style arguments pass keyword dictionaries through to their
- respective components of the matplotlib plot.
-
- Precedence is:
- 1. arguments passed to this function, plot()
- 2. arguments passed to the Fitter when it was first declared
- 3. hard-coded defaults
-
- Parameters
- ---------------------
- axes_style : dictionary representing style keyword arguments to be
- passed through to `Axes.set(...)`
- data_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the data points
- init_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the initial fit
- line
- best_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the best fit
- line
- ax : matplotlib.Axes
- optional `Axes` object. Axes will be generated if not provided.
- """
- try:
- import matplotlib.pyplot as plt
- except ImportError:
- raise ImportError("Matplotlib is required to use this Fitter. "
- "Use BaseFitter or a subclass thereof "
- "that does not depend on matplotlib.")
-
- # Configure style
- _axes_style = {} # none, but this is here for possible future use
- _axes_style.update(self.axes_style)
- _axes_style.update(axes_style)
- _data_style = dict(color='blue', marker='o', linestyle='none')
- _data_style.update(**_normalize_kwargs(self.data_style, 'line2d'))
- _data_style.update(**_normalize_kwargs(data_style, 'line2d'))
- _init_style = dict(color='gray')
- _init_style.update(**_normalize_kwargs(self.init_style, 'line2d'))
- _init_style.update(**_normalize_kwargs(init_style, 'line2d'))
- _best_style = dict(color='red')
- _best_style.update(**_normalize_kwargs(self.best_style, 'line2d'))
- _best_style.update(**_normalize_kwargs(best_style, 'line2d'))
-
- if ax is None:
- fig, ax = plt.subplots()
- count_indep_vars = len(self.model.independent_vars)
- if count_indep_vars == 0:
- ax.plot(self._data, **_data_style)
- elif count_indep_vars == 1:
- indep_var = self.kwargs[self.model.independent_vars[0]]
- ax.plot(indep_var, self._data, **_data_style)
- else:
- raise NotImplementedError("Cannot plot models with more than one "
- "indepedent variable.")
- result = self.current_result # alias for brevity
- if not result:
- ax.set(**_axes_style)
- return # short-circuit the rest of the plotting
- if count_indep_vars == 0:
- ax.plot(result.init_fit, **_init_style)
- ax.plot(result.best_fit, **_best_style)
- elif count_indep_vars == 1:
- ax.plot(indep_var, result.init_fit, **_init_style)
- ax.plot(indep_var, result.best_fit, **_best_style)
- ax.set(**_axes_style)
-
-
-def _normalize_kwargs(kwargs, kind='patch'):
- """Convert matplotlib keywords from short to long form."""
- # Source:
- # github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
- if kind == 'line2d':
- long_names = dict(c='color', ls='linestyle', lw='linewidth',
- mec='markeredgecolor', mew='markeredgewidth',
- mfc='markerfacecolor', ms='markersize',)
- elif kind == 'patch':
- long_names = dict(c='color', ls='linestyle', lw='linewidth',
- ec='edgecolor', fc='facecolor',)
- for short_name in long_names:
- if short_name in kwargs:
- kwargs[long_names[short_name]] = kwargs.pop(short_name)
- return kwargs
diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py
deleted file mode 100644
index 46b5129..0000000
--- a/lmfit/ui/ipy_fitter.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import IPython
-from IPython.display import clear_output, display
-import numpy as np
-
-from ..model import Model
-from .basefitter import _COMMON_DOC, _COMMON_EXAMPLES_DOC, MPLFitter
-
-# Note: If IPython is not available of the version is < 2,
-# this module will not be imported, and a different Fitter.
-
-# Widgets were only experimental in IPython 2.x, but this does work there.
-# Handle the change in naming from 2.x to 3.x.
-IPY2 = IPython.release.version_info[0] == 2
-IPY3 = IPython.release.version_info[0] == 3
-if IPY2:
- from IPython.html.widgets import DropdownWidget as Dropdown
- from IPython.html.widgets import ButtonWidget as Button
- from IPython.html.widgets import ContainerWidget
- from IPython.html.widgets import FloatTextWidget as FloatText
- from IPython.html.widgets import CheckboxWidget as Checkbox
-
- class HBox(ContainerWidget):
-
- def __init__(self, *args, **kwargs):
- self.add_class('hbox')
- super(self, ContainerWidget).__init__(*args, **kwargs)
-elif IPY3:
- # as of IPython 3.x:
- from IPython.html.widgets import Dropdown
- from IPython.html.widgets import Button
- from IPython.html.widgets import HBox
- from IPython.html.widgets import FloatText
- from IPython.html.widgets import Checkbox
-else:
- # as of IPython 4.x+:
- from ipywidgets import Dropdown
- from ipywidgets import Button
- from ipywidgets import HBox
- from ipywidgets import FloatText
- from ipywidgets import Checkbox
-
-
-class ParameterWidgetGroup:
- """Construct several widgets that together represent a Parameter.
-
- This will only be used if IPython is available."""
- def __init__(self, par):
- self.par = par
-
- # Define widgets.
- self.value_text = FloatText(description=par.name,
- min=self.par.min, max=self.par.max)
- self.value_text.width = 100
- self.min_text = FloatText(description='min', max=self.par.max)
- self.min_text.width = 100
- self.max_text = FloatText(description='max', min=self.par.min)
- self.max_text.width = 100
- self.min_checkbox = Checkbox(description='min')
- self.max_checkbox = Checkbox(description='max')
- self.vary_checkbox = Checkbox(description='vary')
-
- # Set widget values and visibility.
- if par.value is not None:
- self.value_text.value = self.par.value
- min_unset = self.par.min is None or self.par.min == -np.inf
- max_unset = self.par.max is None or self.par.max == np.inf
- self.min_checkbox.value = not min_unset
- self.min_text.visible = not min_unset
- self.min_text.value = self.par.min
- self.max_checkbox.value = not max_unset
- self.max_text.visible = not max_unset
- self.max_text.value = self.par.max
- self.vary_checkbox.value = self.par.vary
-
- # Configure widgets to sync with par attributes.
- self.value_text.on_trait_change(self._on_value_change, 'value')
- self.min_text.on_trait_change(self._on_min_value_change, 'value')
- self.max_text.on_trait_change(self._on_max_value_change, 'value')
- self.min_checkbox.on_trait_change(self._on_min_checkbox_change,
- 'value')
- self.max_checkbox.on_trait_change(self._on_max_checkbox_change,
- 'value')
- self.vary_checkbox.on_trait_change(self._on_vary_change, 'value')
-
- def _on_value_change(self, name, value):
- self.par.value = value
-
- def _on_min_checkbox_change(self, name, value):
- self.min_text.visible = value
- if value:
- # -np.inf does not play well with a numerical text field,
- # so set min to -1 if activated (and back to -inf if deactivated).
- self.min_text.value = -1
- self.par.min = self.min_text.value
- self.value_text.min = self.min_text.value
- else:
- self.par.min = None
-
- def _on_max_checkbox_change(self, name, value):
- self.max_text.visible = value
- if value:
- # np.inf does not play well with a numerical text field,
- # so set max to 1 if activated (and back to inf if deactivated).
- self.max_text.value = 1
- self.par.max = self.max_text.value
- self.value_text.max = self.max_text.value
- else:
- self.par.max = None
-
- def _on_min_value_change(self, name, value):
- self.par.min = value
- self.value_text.min = value
- self.max_text.min = value
-
- def _on_max_value_change(self, name, value):
- self.par.max = value
- self.value_text.max = value
- self.min_text.max = value
-
- def _on_vary_change(self, name, value):
- self.par.vary = value
- # self.value_text.disabled = not value
-
- def close(self):
- # one convenience method to close (i.e., hide and disconnect) all
- # widgets in this group
- self.value_text.close()
- self.min_text.close()
- self.max_text.close()
- self.vary_checkbox.close()
- self.min_checkbox.close()
- self.max_checkbox.close()
-
- def _repr_html_(self):
- box = HBox()
- box.children = [self.value_text, self.vary_checkbox,
- self.min_checkbox, self.min_text,
- self.max_checkbox, self.max_text]
- display(box)
-
- # Make it easy to set the widget attributes directly.
- @property
- def value(self):
- return self.value_text.value
-
- @value.setter
- def value(self, value):
- self.value_text.value = value
-
- @property
- def vary(self):
- return self.vary_checkbox.value
-
- @vary.setter
- def vary(self, value):
- self.vary_checkbox.value = value
-
- @property
- def min(self):
- return self.min_text.value
-
- @min.setter
- def min(self, value):
- self.min_text.value = value
-
- @property
- def max(self):
- return self.max_text.value
-
- @max.setter
- def max(self, value):
- self.max_text.value = value
-
- @property
- def name(self):
- return self.par.name
-
-
-class NotebookFitter(MPLFitter):
- __doc__ = _COMMON_DOC + """
- If IPython is available, it uses the IPython notebook's rich display
- to fit data interactively in a web-based GUI. The Parameters are
- represented in a web-based form that is kept in sync with `current_params`.
- All subclasses to Model, including user-defined ones, are shown in a
- drop-down menu.
-
- Clicking the "Fit" button updates a plot, as above, and updates the
- Parameters in the form to reflect the best fit.
-
- Parameters
- ----------
- data : array-like
- model : lmfit.Model
- optional initial Model to use, maybe be set or changed later
- all_models : list
- optional list of Models to populate drop-down menu, by default
- all built-in and user-defined subclasses of Model are used
-
- Additional Parameters
- ---------------------
- axes_style : dictionary representing style keyword arguments to be
- passed through to `Axes.set(...)`
- data_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the data points
- init_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the initial fit
- line
- best_style : dictionary representing style keyword arguments to be passed
- through to the matplotlib `plot()` command the plots the best fit
- line
- **kwargs : independent variables or extra arguments, passed like `x=x`
- """ + _COMMON_EXAMPLES_DOC
-
- def __init__(self, data, model=None, all_models=None, axes_style={},
- data_style={}, init_style={}, best_style={}, **kwargs):
- # Dropdown menu of all subclasses of Model, incl. user-defined.
- self.models_menu = Dropdown()
- # Dropbox API is very different between IPy 2.x and 3.x.
- if IPY2:
- if all_models is None:
- all_models = {m.__name__: m for m in Model.__subclasses__()}
- self.models_menu.values = all_models
- else:
- if all_models is None:
- all_models = [(m.__name__, m) for m in Model.__subclasses__()]
- self.models_menu.options = all_models
- self.models_menu.on_trait_change(self._on_model_value_change, 'value')
- # Button to trigger fitting.
- self.fit_button = Button(description='Fit')
- self.fit_button.on_click(self._on_fit_button_click)
-
- # Button to trigger guessing.
- self.guess_button = Button(description='Auto-Guess')
- self.guess_button.on_click(self._on_guess_button_click)
-
- # Parameter widgets are not built here. They are (re-)built when
- # the model is (re-)set.
- super().__init__(data, model, axes_style, data_style, init_style,
- best_style, **kwargs)
-
- def _repr_html_(self):
- display(self.models_menu)
- button_box = HBox()
- button_box.children = [self.fit_button, self.guess_button]
- display(button_box)
- for pw in self.param_widgets:
- display(pw)
- self.plot()
-
- def guess(self):
- guessing_successful = super().guess()
- self.guess_button.disabled = not guessing_successful
-
- def _finalize_model(self, value):
- first_run = not hasattr(self, 'param_widgets')
- if not first_run:
- # Remove all Parameter widgets, and replace them with widgets
- # for the new model.
- for pw in self.param_widgets:
- pw.close()
- self.models_menu.value = value
- self.param_widgets = [ParameterWidgetGroup(p)
- for _, p in self._current_params.items()]
- if not first_run:
- for pw in self.param_widgets:
- display(pw)
-
- def _finalize_params(self):
- for pw in self.param_widgets:
- pw.value = self._current_params[pw.name].value
- pw.min = self._current_params[pw.name].min
- pw.max = self._current_params[pw.name].max
- pw.vary = self._current_params[pw.name].vary
-
- def plot(self):
- clear_output(wait=True)
- super().plot()
-
- def fit(self):
- super().fit()
- self.plot()
diff --git a/setup.cfg b/setup.cfg
index 185b942..fdd1a69 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,7 +8,7 @@ parentdir_prefix = lmfit-
[isort]
skip = versioneer.py,lmfit/_version.py,lmfit/__init__.py,doc/conf.py
-known_third_party = asteval,dill,emcee,IPython,matplotlib,numdifftools,numpy,NISTModels,pandas,pytest,scipy,six,uncertainties
+known_third_party = asteval,dill,emcee,IPython,matplotlib,numdifftools,numpy,NISTModels,pandas,pytest,scipy,uncertainties
known_first_party = lmfit
force_sort_within_sections = True
@@ -20,7 +20,7 @@ ignore_directives = autoclass,autodoc,autofunction,automethod,jupyter-execute
[flake8]
ignore = E121,E123,E126,E226,W503,W504,E501,E731
-exclude = doc/conf.py, versioneer.py, lmfit/__init__.py, lmfit/ui/__init__.py
+exclude = doc/conf.py, versioneer.py, lmfit/__init__.py
[egg_info]
tag_build =
diff --git a/setup.py b/setup.py
index d7dbede..b2843f0 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
algorithm, and provides estimated standard errors and correlations between
varied Parameters. Other minimization methods, including Nelder-Mead's
downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
-others are also supported. Bounds and contraints can be placed on
+others are also supported. Bounds and constraints can be placed on
Parameters for all of these methods.
In addition, methods for explicitly calculating confidence intervals are
@@ -51,5 +51,5 @@ setup(name='lmfit',
keywords='curve-fitting, least-squares minimization',
tests_require=['pytest'],
package_dir={'lmfit': 'lmfit'},
- packages=['lmfit', 'lmfit.ui'],
+ packages=['lmfit'],
)
diff --git a/tests/NISTModels.py b/tests/NISTModels.py
index 06e812c..17da186 100644
--- a/tests/NISTModels.py
+++ b/tests/NISTModels.py
@@ -171,7 +171,7 @@ def ReadNistData(dataset):
"""NIST STRD data is in a simple, fixed format with
line numbers being significant!
"""
- finp = open(os.path.join(NIST_DIR, "%s.dat" % dataset), 'r')
+ finp = open(os.path.join(NIST_DIR, "%s.dat" % dataset))
lines = [l[:-1] for l in finp.readlines()]
finp.close()
ModelLines = lines[30:39]
diff --git a/tests/lmfit_testutils.py b/tests/lmfit_testutils.py
index 0e1d22e..79b27a2 100644
--- a/tests/lmfit_testutils.py
+++ b/tests/lmfit_testutils.py
@@ -5,8 +5,7 @@ from lmfit import Parameter
def assert_paramval(param, val, tol=1.e-3):
"""assert that a named parameter's value is close to expected value"""
-
- assert(isinstance(param, Parameter))
+ assert isinstance(param, Parameter)
pval = param.value
assert_allclose([pval], [val], rtol=tol, atol=tol,
@@ -15,20 +14,18 @@ def assert_paramval(param, val, tol=1.e-3):
def assert_paramattr(param, attr, val):
"""assert that a named parameter's value is a value"""
- assert(isinstance(param, Parameter))
- assert(hasattr(param, attr))
- assert(getattr(param, attr) == val)
+ assert isinstance(param, Parameter)
+ assert hasattr(param, attr)
+ assert getattr(param, attr) == val
def assert_between(val, minval, maxval):
"""assert that a value is between minval and maxval"""
- assert(val >= minval)
- assert(val <= maxval)
+ assert val >= minval
+ assert val <= maxval
def assert_param_between(param, minval, maxval):
- """assert that a named parameter's value is
- between minval and maxval"""
-
- assert(isinstance(param, Parameter))
+ """assert that a named parameter's value is between minval and maxval"""
+ assert isinstance(param, Parameter)
assert_between(param.value, minval, maxval)
diff --git a/tests/test_1variable.py b/tests/test_1variable.py
index 6ae2d12..12be3ff 100644
--- a/tests/test_1variable.py
+++ b/tests/test_1variable.py
@@ -43,7 +43,7 @@ def test_1var():
out = lmfit.minimize(linear_chisq, params, args=(x, y))
assert_allclose(params['m'].value, 1.025, rtol=0.02, atol=0.02)
- assert(len(params) == 2)
- assert(out.nvarys == 1)
- assert(out.chisqr > 0.01)
- assert(out.chisqr < 5.00)
+ assert len(params) == 2
+ assert out.nvarys == 1
+ assert out.chisqr > 0.01
+ assert out.chisqr < 5.00
diff --git a/tests/test_ampgo.py b/tests/test_ampgo.py
index 83cf0dd..36e1746 100644
--- a/tests/test_ampgo.py
+++ b/tests/test_ampgo.py
@@ -6,6 +6,7 @@ from numpy.testing import assert_allclose
import pytest
import lmfit
+from lmfit._ampgo import ampgo, tunnel
# correct result for Alpine02 function
global_optimum = [7.91705268, 4.81584232]
@@ -103,6 +104,26 @@ def test_ampgo_local_opts(minimizer_Alpine02):
with pytest.raises(TypeError):
minimizer_Alpine02.minimize(method='ampgo', **kws)
- # for coverage: make sure that both occurences are reached
+ # for coverage: make sure that both occurrences are reached
kws = {'local_opts': {'maxiter': 10}, 'maxfunevals': 50}
minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+
+def test_ampgo_incorrect_length_for_bounds():
+ """Test for ValueError when bounds are given for only some parameters."""
+ def func(x):
+ return x[0]**2 + 10.0*x[1]
+
+ msg = r'length of x0 != length of bounds'
+ with pytest.raises(ValueError, match=msg):
+ ampgo(func, x0=[0, 1], bounds=[(-10, 10)])
+
+
+def test_ampgo_tunnel_more_than_three_arguments():
+ """Test AMPGO tunnel function with more than three arguments."""
+ def func(x, val):
+ return x[0]**2 + val*x[1]
+
+ args = [func, 0.1, np.array([1, 2]), 5.0]
+ out = tunnel(np.array([10, 5]), *args)
+ assert_allclose(out, 185.386275588)
diff --git a/tests/test_basicfit.py b/tests/test_basicfit.py
index 3593201..5df9980 100644
--- a/tests/test_basicfit.py
+++ b/tests/test_basicfit.py
@@ -31,9 +31,9 @@ def test_basic():
# do fit, here with leastsq model
result = minimize(fcn2min, params, args=(x, data))
- assert(result.nfev > 5)
- assert(result.nfev < 500)
- assert(result.chisqr > 1)
- assert(result.nvarys == 4)
+ assert result.nfev > 5
+ assert result.nfev < 500
+ assert result.chisqr > 1
+ assert result.nvarys == 4
assert_paramval(result.params['amp'], 5.03, tol=0.05)
assert_paramval(result.params['omega'], 2.0, tol=0.05)
diff --git a/tests/test_bounded_jacobian.py b/tests/test_bounded_jacobian.py
index 7af6c10..ab6361b 100644
--- a/tests/test_bounded_jacobian.py
+++ b/tests/test_bounded_jacobian.py
@@ -28,10 +28,10 @@ def test_bounded_jacobian():
assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
assert_paramval(out0.params['x1'], 1.5000, tol=0.02)
- assert(jac_count == 0)
+ assert jac_count == 0
out1 = minimize(resid, pars, Dfun=jac)
assert_paramval(out1.params['x0'], 1.2243, tol=0.02)
assert_paramval(out1.params['x1'], 1.5000, tol=0.02)
- assert(jac_count > 5)
+ assert jac_count > 5
diff --git a/tests/test_bounds.py b/tests/test_bounds.py
index f830925..e9b23c0 100644
--- a/tests/test_bounds.py
+++ b/tests/test_bounds.py
@@ -23,7 +23,7 @@ def test_bounds():
model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
if data is None:
return model
- return (model - data)
+ return model - data
n = 1500
xmin = 0.
@@ -41,9 +41,9 @@ def test_bounds():
out = minimize(residual, fit_params, args=(x,), kws={'data': data})
- assert(out.nfev > 10)
- assert(out.nfree > 50)
- assert(out.chisqr > 1.0)
+ assert out.nfev > 10
+ assert out.nfree > 50
+ assert out.chisqr > 1.0
assert_paramval(out.params['decay'], 0.01, tol=1.e-2)
assert_paramval(out.params['shift'], 0.123, tol=1.e-2)
diff --git a/tests/test_lineshapes_models.py b/tests/test_builtin_models.py
index 9ad9ef1..09e4558 100644
--- a/tests/test_lineshapes_models.py
+++ b/tests/test_builtin_models.py
@@ -1,20 +1,13 @@
-"""Tests for lineshape functions and built-in models."""
+"""Tests for built-in models."""
import inspect
-import sys
import numpy as np
from numpy.testing import assert_allclose
-import pytest
from scipy.optimize import fsolve
from lmfit import lineshapes, models
-if sys.version_info[0] == 2:
- inspect_args = inspect.getargspec
-elif sys.version_info[0] == 3:
- inspect_args = inspect.getfullargspec
-
def check_height_fwhm(x, y, lineshape, model):
"""Check height and fwhm parameters."""
@@ -29,8 +22,9 @@ def check_height_fwhm(x, y, lineshape, model):
cen = mu
# get arguments for lineshape
- args = {key: out.best_values[key] for key in
- inspect_args(lineshape)[0] if key != 'x'}
+ sig = inspect.signature(lineshape)
+ args = {key: out.best_values[key] for key in sig.parameters.keys()
+ if key != 'x'}
# output format for assertion errors
fmt = ("Program calculated values and real values do not match!\n"
@@ -82,41 +76,11 @@ def test_height_fwhm_calculation(peakdata):
models.ExponentialGaussianModel())
check_height_fwhm(x, y, lineshapes.skewed_gaussian,
models.SkewedGaussianModel())
- check_height_fwhm(x, y, lineshapes.donaich, models.DonaichModel())
+ check_height_fwhm(x, y, lineshapes.doniach, models.DoniachModel())
x = x-9 # Lognormal will only fit peaks with centers < 1
check_height_fwhm(x, y, lineshapes.lognormal, models.LognormalModel())
-@pytest.mark.parametrize("lineshape", lineshapes.functions)
-def test_finite_output_lineshape(lineshape):
- """Test for finite output of lineshape functions."""
- x = np.linspace(0, 100)
-
- # no need to test the lineshapes below
- if lineshape in ['linear', 'exponential', 'sine', 'expsine', 'powerlaw',
- 'parabolic', 'erf', 'erfc', 'wofz', 'gamma', 'gammaln']:
- return None
-
- elif lineshape in ['gaussian', 'lorentzian', 'damped_oscillator',
- 'logistic', 'lognormal', 'students_t']:
- func_args = (x, 1.0, x.size/2.0, 0.0)
- elif lineshape in ['split_lorentzian', 'voigt', 'pvoigt', 'dho',
- 'expgaussian', 'donaich', 'skewed_gaussian']:
- func_args = (x, 1.0, x.size/2.0, 0.0, 0.0)
- elif lineshape in ['moffat', 'pearson7', 'breit_wigner']:
- func_args = (x, 1.0, x.size/2.0, 0.0, 1.0)
- elif lineshape in ['skewed_voigt']:
- func_args = (x, 1.0, x.size/2.0, 0.0, 0.0, 0.0)
- elif lineshape == 'step':
- func_args = (x, 1.0, x.size/2.0, 0.0, 'linear')
- elif lineshape == 'rectangle':
- func_args = (x, 1.0, x.size/2.0, 0.0, x.size/2.0, 0.0, 'linear')
-
- ls = getattr(lineshapes, lineshape)
- out = ls(*func_args)
- assert np.all(np.isfinite(out))
-
-
def test_height_and_fwhm_expression_evalution_in_builtin_models():
"""Assert models do not throw an ZeroDivisionError."""
mod = models.GaussianModel()
@@ -180,7 +144,7 @@ def test_height_and_fwhm_expression_evalution_in_builtin_models():
skew=0.0)
params.update_constraints()
- mod = models.DonaichModel()
+ mod = models.DoniachModel()
params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)
params.update_constraints()
@@ -197,6 +161,7 @@ def test_height_and_fwhm_expression_evalution_in_builtin_models():
def test_guess_modelparams():
+ """Tests for the 'guess' function of built-in models."""
x = np.linspace(-10, 10, 501)
mod = models.ConstantModel()
@@ -261,6 +226,7 @@ def test_guess_modelparams():
def test_splitlorentzian_prefix():
+ """Regression test for SplitLorentzian model (see GH #566)."""
mod1 = models.SplitLorentzianModel()
par1 = mod1.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.3)
par1.update_constraints()
@@ -268,3 +234,22 @@ def test_splitlorentzian_prefix():
mod2 = models.SplitLorentzianModel(prefix='prefix_')
par2 = mod2.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.3)
par2.update_constraints()
+
+
+def test_guess_from_peak():
+ """Regression test for guess_from_peak function (see GH #627)."""
+ x = np.linspace(-5, 5)
+ amplitude = 0.8
+ center = 1.7
+ sigma = 0.3
+ y = lineshapes.lorentzian(x, amplitude=amplitude, center=center, sigma=sigma)
+
+ model = models.LorentzianModel()
+ guess_increasing_x = model.guess(y, x=x)
+ guess_decreasing_x = model.guess(y[::-1], x=x[::-1])
+
+ assert guess_increasing_x == guess_decreasing_x
+
+ for param, value in zip(['amplitude', 'center', 'sigma'],
+ [amplitude, center, sigma]):
+ assert np.abs((guess_increasing_x[param].value - value)/value) < 0.5
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
index 804cc02..25d2ca1 100644
--- a/tests/test_confidence.py
+++ b/tests/test_confidence.py
@@ -1,4 +1,6 @@
"""Tests for the calculation of confidence intervals."""
+import copy
+
import numpy as np
from numpy.testing import assert_allclose
import pytest
@@ -27,7 +29,61 @@ def pars():
def residual(params, x, data):
"""Define objective function for the minimization."""
- return data - 1.0 / (params['a'] * x) + params['b']
+ model = 1.0 / (params['a'] * x) + params['b']
+ return data - model
+
+
+def test_default_f_compare(data, pars):
+ """Test the default f_compare function: F-test."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
+ out = minimizer.leastsq()
+
+ # "fixing" a parameter, keeping the chisqr the same
+ out2 = copy.deepcopy(out)
+ out2.nvarys = 1
+ prob = lmfit.confidence.f_compare(out, out2)
+ assert_allclose(prob, 0.0)
+
+ # "fixing" a parameter, increasing the chisqr
+ out2.chisqr = 1.0015*out.chisqr
+ prob = lmfit.confidence.f_compare(out, out2)
+ assert_allclose(prob, 0.2977506)
+
+
+def test_copy_and_restore_vals(data, pars):
+ """Test functions to save and restore parameter values and stderrs."""
+ # test copy_vals without/with stderr present
+ copy_pars = lmfit.confidence.copy_vals(pars)
+
+ assert isinstance(copy_pars, dict)
+ for _, par in enumerate(pars):
+ assert_allclose(pars[par].value, copy_pars[par][0])
+ assert copy_pars[par][1] is None # no stderr present
+
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
+ out = minimizer.leastsq()
+ copy_pars_out = lmfit.confidence.copy_vals(out.params)
+
+ assert isinstance(copy_pars_out, dict)
+ for _, par in enumerate(out.params):
+ assert_allclose(out.params[par].value, copy_pars_out[par][0])
+ assert_allclose(out.params[par].stderr, copy_pars_out[par][1])
+
+ # test restore_vals to the original parameter set after changing them first
+ pars['a'].set(value=1.0)
+ pars['b'].set(value=10)
+ lmfit.confidence.restore_vals(copy_pars, pars)
+
+ assert isinstance(pars, lmfit.parameter.Parameters)
+ assert_allclose(pars['a'].value, 0.1)
+ assert_allclose(pars['b'].value, 1.0)
+ assert pars['a'].stderr is None
+ assert pars['b'].stderr is None
+
+ lmfit.confidence.restore_vals(copy_pars_out, pars)
+ for _, par in enumerate(pars):
+ assert_allclose(pars[par].value, out.params[par].value)
+ assert_allclose(pars[par].stderr, out.params[par].stderr)
@pytest.mark.parametrize("verbose", [False, True])
@@ -40,15 +96,15 @@ def test_confidence_leastsq(data, pars, verbose, capsys):
assert out.chisqr < 3.0
assert out.nvarys == 2
assert_paramval(out.params['a'], 0.1, tol=0.1)
- assert_paramval(out.params['b'], -2.0, tol=0.1)
+ assert_paramval(out.params['b'], 2.0, tol=0.1)
ci = lmfit.conf_interval(minimizer, out, verbose=verbose)
assert_allclose(ci['b'][0][0], 0.997, rtol=0.01)
- assert_allclose(ci['b'][0][1], -2.022, rtol=0.01)
+ assert_allclose(ci['b'][0][1], 1.947, rtol=0.01)
assert_allclose(ci['b'][2][0], 0.683, rtol=0.01)
- assert_allclose(ci['b'][2][1], -1.997, rtol=0.01)
+ assert_allclose(ci['b'][2][1], 1.972, rtol=0.01)
assert_allclose(ci['b'][5][0], 0.95, rtol=0.01)
- assert_allclose(ci['b'][5][1], -1.96, rtol=0.01)
+ assert_allclose(ci['b'][5][1], 2.01, rtol=0.01)
if verbose:
captured = capsys.readouterr()
@@ -61,7 +117,7 @@ def test_confidence_pnames(data, pars):
out = minimizer.leastsq()
assert_paramval(out.params['a'], 0.1, tol=0.1)
- assert_paramval(out.params['b'], -2.0, tol=0.1)
+ assert_paramval(out.params['b'], 2.0, tol=0.1)
ci = lmfit.conf_interval(minimizer, out, p_names=['a'])
assert 'a' in ci
@@ -78,7 +134,7 @@ def test_confidence_bounds_reached(data, pars):
out.params['a'].stderr = 1
lmfit.conf_interval(minimizer, out, verbose=True)
- # Should warn
+ # Should warn (i.e,. limit < para.min)
pars['b'].max = 2.03
pars['b'].min = 1.97
minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
@@ -88,6 +144,11 @@ def test_confidence_bounds_reached(data, pars):
with pytest.warns(UserWarning, match="Bound reached"):
lmfit.conf_interval(minimizer, out, verbose=True)
+ # Should warn (i.e,. limit > para.max)
+ out.params['b'].stderr = 0.1
+ with pytest.warns(UserWarning, match="Bound reached"):
+ lmfit.conf_interval(minimizer, out, verbose=True)
+
def test_confidence_sigma_vs_prob(data, pars):
"""Calculate confidence by specifying sigma or probability."""
@@ -96,8 +157,7 @@ def test_confidence_sigma_vs_prob(data, pars):
ci_sigmas = lmfit.conf_interval(minimizer, out, sigmas=[1, 2, 3])
ci_1sigma = lmfit.conf_interval(minimizer, out, sigmas=[1])
- ci_probs = lmfit.conf_interval(minimizer,
- out,
+ ci_probs = lmfit.conf_interval(minimizer, out,
sigmas=[0.68269, 0.9545, 0.9973])
assert_allclose(ci_sigmas['a'][0][1], ci_probs['a'][0][1], rtol=0.01)
@@ -108,9 +168,7 @@ def test_confidence_sigma_vs_prob(data, pars):
def test_confidence_exceptions(data, pars):
"""Make sure the proper exceptions are raised when needed."""
- minimizer = lmfit.Minimizer(residual,
- pars,
- calc_covar=False,
+ minimizer = lmfit.Minimizer(residual, pars, calc_covar=False,
fcn_args=data)
out = minimizer.minimize(method='nelder')
out_lsq = minimizer.minimize(params=out.params, method='leastsq')
@@ -188,11 +246,15 @@ def test_confidence_prob_func(data, pars):
"""Test conf_interval with alternate prob_func."""
minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
out = minimizer.minimize(method='leastsq')
+ called = 0
def my_f_compare(best_fit, new_fit):
+ nonlocal called
+ called += 1
nfree = best_fit.nfree
nfix = best_fit.nfree - new_fit.nfree
dchi = new_fit.chisqr / best_fit.chisqr - 1.0
return f.cdf(dchi * nfree / nfix, nfix, nfree)
lmfit.conf_interval(minimizer, out, sigmas=[1], prob_func=my_f_compare)
+ assert called > 10
diff --git a/tests/test_copy_params.py b/tests/test_copy_params.py
deleted file mode 100644
index e86304e..0000000
--- a/tests/test_copy_params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import numpy as np
-
-from lmfit import Parameters, minimize
-
-
-def get_data():
- x = np.arange(0, 1, 0.01)
- y1 = 1.5*np.exp(0.9*x) + np.random.normal(scale=0.001, size=len(x))
- y2 = 2.0 + x + 1/2.*x**2 + 1/3.*x**3
- y2 = y2 + np.random.normal(scale=0.001, size=len(x))
- return x, y1, y2
-
-
-def residual(params, x, data):
- model = params['a']*np.exp(params['b']*x)
- return (data-model)
-
-
-def test_copy_params():
- x, y1, y2 = get_data()
-
- params = Parameters()
- params.add('a', value=2.0)
- params.add('b', value=2.0)
-
- # fit to first data set
- out1 = minimize(residual, params, args=(x, y1))
-
- # fit to second data set
- out2 = minimize(residual, params, args=(x, y2))
-
- adiff = out1.params['a'].value - out2.params['a'].value
- bdiff = out1.params['b'].value - out2.params['b'].value
-
- assert(abs(adiff) > 1.e-2)
- assert(abs(bdiff) > 1.e-2)
diff --git a/tests/test_covariance_matrix.py b/tests/test_covariance_matrix.py
index f5bc900..94fb0f6 100644
--- a/tests/test_covariance_matrix.py
+++ b/tests/test_covariance_matrix.py
@@ -12,7 +12,7 @@ from lmfit.models import ExponentialModel, VoigtModel
def check(para, real_val, sig=3):
err = abs(para.value - real_val)
- assert(err < sig * para.stderr)
+ assert err < sig * para.stderr
def test_bounded_parameters():
diff --git a/tests/test_custom_independentvar.py b/tests/test_custom_independentvar.py
index ae39963..4be2f53 100644
--- a/tests/test_custom_independentvar.py
+++ b/tests/test_custom_independentvar.py
@@ -33,13 +33,13 @@ def test_custom_independentvar():
params = gmod.make_params(amplitude=2, center=5, sigma=8)
out = gmod.fit(y, params, obj=obj)
- assert(out.nvarys == 3)
- assert(out.nfev > 10)
- assert(out.chisqr > 1)
- assert(out.chisqr < 100)
- assert(out.params['sigma'].value < 3)
- assert(out.params['sigma'].value > 2)
- assert(out.params['center'].value > xmin)
- assert(out.params['center'].value < xmax)
- assert(out.params['amplitude'].value > 1)
- assert(out.params['amplitude'].value < 5)
+ assert out.nvarys == 3
+ assert out.nfev > 10
+ assert out.chisqr > 1
+ assert out.chisqr < 100
+ assert out.params['sigma'].value < 3
+ assert out.params['sigma'].value > 2
+ assert out.params['center'].value > xmin
+ assert out.params['center'].value < xmax
+ assert out.params['amplitude'].value > 1
+ assert out.params['amplitude'].value < 5
diff --git a/tests/test_default_kws.py b/tests/test_default_kws.py
index 4a9a335..8ca90ff 100644
--- a/tests/test_default_kws.py
+++ b/tests/test_default_kws.py
@@ -13,12 +13,12 @@ def test_default_inputs_gauss():
g = GaussianModel()
- fit_option1 = {'maxfev': 5000, 'xtol': 1e-2}
+ fit_option1 = {'xtol': 1e-2}
result1 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
- fit_kws=fit_option1)
+ max_nfev=5000, fit_kws=fit_option1)
- fit_option2 = {'maxfev': 5000, 'xtol': 1e-6}
+ fit_option2 = {'xtol': 1e-6}
result2 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
- fit_kws=fit_option2)
+ max_nfev=5000, fit_kws=fit_option2)
- assert(result1.values != result2.values)
+ assert result1.values != result2.values
diff --git a/tests/test_itercb.py b/tests/test_itercb.py
index a4e5e12..884c6cf 100644
--- a/tests/test_itercb.py
+++ b/tests/test_itercb.py
@@ -1,4 +1,5 @@
"""Tests for the Iteration Callback Function."""
+
import numpy as np
import pytest
@@ -29,9 +30,9 @@ pars['peak_center'].set(min=5, max=10)
pars['peak_sigma'].set(min=0.5, max=2)
-def per_iteration(pars, iter, resid, *args, **kws):
+def per_iteration(pars, iteration, resid, *args, **kws):
"""Iteration callback, will abort at iteration 23."""
- return iter == 23
+ return iteration == 23
@pytest.mark.parametrize("calc_covar", calc_covar_options)
diff --git a/tests/test_jsonutils.py b/tests/test_jsonutils.py
index 19d0225..0642a60 100644
--- a/tests/test_jsonutils.py
+++ b/tests/test_jsonutils.py
@@ -18,6 +18,8 @@ def test_import_from(obj):
(BuiltinFunctionType, FunctionType))
+# test-case missing for string object that causes a UnicodeError; cannot find
+# a way to trigger that exception (perhaps not needed in PY3 anymore?)
objects = [('test_string', (str,)),
(np.array([7.0]), np.ndarray),
(np.array([1.0+2.0j]), np.ndarray),
@@ -28,7 +30,8 @@ objects = [('test_string', (str,)),
(['a', 'b', 'c'], list),
(('a', 'b', 'c'), tuple),
({'a': 1.0, 'b': 2.0, 'c': 3.0}, dict),
- (lmfit.lineshapes.gaussian, FunctionType)]
+ (lmfit.lineshapes.gaussian, FunctionType),
+ (np.array(['a', np.array([1, 2, 3])], dtype=object), np.ndarray)]
@pytest.mark.parametrize('obj, obj_type', objects)
@@ -37,7 +40,12 @@ def test_encode_decode(obj, obj_type):
encoded = encode4js(obj)
decoded = decode4js(encoded)
- assert decoded == obj
+ if isinstance(obj, np.ndarray) and obj.dtype == 'object':
+ assert decoded[0] == obj[0]
+ assert np.all(decoded[1] == obj[1])
+ else:
+ assert decoded == obj
+
assert isinstance(decoded, obj_type)
diff --git a/tests/test_lineshapes.py b/tests/test_lineshapes.py
new file mode 100644
index 0000000..8c440f4
--- /dev/null
+++ b/tests/test_lineshapes.py
@@ -0,0 +1,124 @@
+"""Tests for lineshape functions."""
+
+import inspect
+
+import numpy as np
+import pytest
+
+import lmfit
+
+
+@pytest.mark.parametrize("lineshape", lmfit.lineshapes.functions)
+def test_no_ZeroDivisionError_and_finite_output(lineshape):
+ """Tests for finite output and ZeroDivisionError is not raised."""
+ xvals = np.linspace(0, 10, 100)
+
+ func = getattr(lmfit.lineshapes, lineshape)
+ assert callable(func)
+ sig = inspect.signature(func)
+
+ # set the following function arguments:
+ # x = xvals
+ # center = 0.5*(max(xvals)-min(xvals))
+ # center1 = 0.25*(max(xvals)-min(xvals))
+ # center2 = 0.75*(max(xvals)-min(xvals))
+ # form = default value (i.e., 'linear' or 'bose')
+ xvals_mid_range = xvals.mean()
+ zero_pars = [par_name for par_name in sig.parameters.keys() if par_name
+ not in ('x', 'form')]
+
+ for par_zero in zero_pars:
+ fnc_args = []
+ for par in sig.parameters.keys():
+ if par == 'x':
+ fnc_args.append(xvals)
+ elif par == 'center':
+ fnc_args.append(0.5*xvals_mid_range)
+ elif par == 'center1':
+ fnc_args.append(0.25*xvals_mid_range)
+ elif par == 'center2':
+ fnc_args.append(0.75*xvals_mid_range)
+ elif par == par_zero:
+ fnc_args.append(0.0)
+ else:
+ fnc_args.append(sig.parameters[par].default)
+
+ fnc_output = func(*fnc_args)
+ assert len(xvals) == len(fnc_output)
+ assert np.all(np.isfinite(fnc_output))
+
+
+@pytest.mark.parametrize("lineshape", lmfit.lineshapes.functions)
+def test_x_float_value(lineshape):
+ """Test lineshapes when x is not an array but a float."""
+ xval = 7.0
+
+ func = getattr(lmfit.lineshapes, lineshape)
+ sig = inspect.signature(func)
+
+ fnc_args = [xval]
+
+ for par in [par_name for par_name in sig.parameters.keys()
+ if par_name != 'x']:
+ fnc_args.append(sig.parameters[par].default)
+
+ if lineshape in ('step', 'rectangle'):
+ msg = r"'float' object does not support item assignment"
+ with pytest.raises(TypeError, match=msg):
+ fnc_output = func(*fnc_args)
+ else:
+ fnc_output = func(*fnc_args)
+ assert isinstance(fnc_output, float)
+
+
+rising_form = ['erf', 'logistic', 'atan', 'arctan', 'linear', 'unknown']
+@pytest.mark.parametrize("form", rising_form)
+@pytest.mark.parametrize("lineshape", ['step', 'rectangle'])
+def test_form_argument_step_rectangle(form, lineshape):
+ """Test 'form' argument for step- and rectangle-functions."""
+ xvals = np.linspace(0, 10, 100)
+
+ func = getattr(lmfit.lineshapes, lineshape)
+ sig = inspect.signature(func)
+
+ fnc_args = [xvals]
+ for par in [par_name for par_name in sig.parameters.keys()
+ if par_name != 'x']:
+ if par == 'form':
+ fnc_args.append(form)
+ else:
+ fnc_args.append(sig.parameters[par].default)
+
+ if form == 'unknown':
+ msg = r"Invalid value .* for argument .*; should be one of .*"
+ with pytest.raises(ValueError, match=msg):
+ func(*fnc_args)
+ else:
+ fnc_output = func(*fnc_args)
+ assert len(fnc_output) == len(xvals)
+
+
+thermal_form = ['bose', 'maxwell', 'fermi', 'Bose-Einstein', 'unknown']
+@pytest.mark.parametrize("form", thermal_form)
+def test_form_argument_thermal_distribution(form):
+ """Test 'form' argument for thermal_distribution function."""
+ xvals = np.linspace(0, 10, 100)
+
+ func = lmfit.lineshapes.thermal_distribution
+ sig = inspect.signature(lmfit.lineshapes.thermal_distribution)
+
+ fnc_args = [xvals]
+ for par in [par_name for par_name in sig.parameters.keys()
+ if par_name != 'x']:
+ if par == 'form':
+ fnc_args.append(form)
+ else:
+ fnc_args.append(sig.parameters[par].default)
+
+ if form == 'unknown':
+ msg = r"Invalid value .* for argument .*; should be one of .*"
+ with pytest.raises(ValueError, match=msg):
+ func(*fnc_args)
+ else:
+ fnc_output = func(*fnc_args)
+ assert len(fnc_output) == len(xvals)
diff --git a/tests/test_manypeaks_speed.py b/tests/test_manypeaks_speed.py
index ee1382c..9734d35 100644
--- a/tests/test_manypeaks_speed.py
+++ b/tests/test_manypeaks_speed.py
@@ -25,11 +25,11 @@ def test_manypeaks_speed():
t1 = time.time()
pars = model.make_params()
t2 = time.time()
- cpars = deepcopy(pars) # noqa: F841
+ _cpars = deepcopy(pars) # noqa: F841
t3 = time.time()
# these are very conservative tests that
# should be satisfied on nearly any machine
- assert((t3-t2) < 0.5)
- assert((t2-t1) < 0.5)
- assert((t1-t0) < 5.0)
+ assert (t3-t2) < 0.5
+ assert (t2-t1) < 0.5
+ assert (t1-t0) < 5.0
diff --git a/tests/test_max_nfev.py b/tests/test_max_nfev.py
new file mode 100644
index 0000000..2657306
--- /dev/null
+++ b/tests/test_max_nfev.py
@@ -0,0 +1,105 @@
+"""Tests for maximum number of function evaluations (max_nfev)."""
+
+import numpy as np
+import pytest
+
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel, LinearModel
+from lmfit.minimizer import Minimizer
+
+
+nvarys = 5
+methods = ['leastsq', 'least_squares', 'nelder', 'brute', 'ampgo',
+           'basinhopping', 'differential_evolution', 'shgo', 'dual_annealing']
+
+
+@pytest.fixture
+def modelGaussian():
+ """Return data, parameters and Model class for Gaussian + Linear model."""
+ # generate data with random noise added
+ np.random.seed(7)
+ x = np.linspace(0, 20, 401)
+ y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+ y -= 0.20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))
+
+ mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+ # make parameters and set bounds
+ pars = mod.make_params(peak_amplitude=21.0, peak_center=7.0,
+ peak_sigma=2.0, bkg_intercept=2, bkg_slope=0.0)
+
+ pars['bkg_intercept'].set(min=0, max=10, brute_step=5.0)
+ pars['bkg_slope'].set(min=-5, max=5, brute_step=5.0)
+ pars['peak_amplitude'].set(min=20, max=25, brute_step=2.5)
+ pars['peak_center'].set(min=5, max=10, brute_step=2.5)
+ pars['peak_sigma'].set(min=0.5, max=2, brute_step=0.5)
+
+ return x, y, mod, pars
+
+
+@pytest.fixture
+def minimizerGaussian(modelGaussian):
+    """Return a Minimizer class for the Gaussian + Linear model."""
+ x, y, _, pars = modelGaussian
+
+ def residual(params, x, y):
+ pars = params.valuesdict()
+ model = (gaussian(x, pars['peak_amplitude'], pars['peak_center'],
+ pars['peak_sigma']) +
+ pars['bkg_intercept'] + x*pars['bkg_slope'])
+ return y - model
+
+ mini = Minimizer(residual, pars, fcn_args=(x, y))
+
+ return mini
+
+
+@pytest.mark.parametrize("method", methods)
+def test_max_nfev_Minimizer(minimizerGaussian, method):
+ """Test the max_nfev argument for all solvers using Minimizer interface."""
+ if method in ('brute', 'basinhopping'):
+ pytest.xfail('max_nfev not yet supported in {}'.format(method)) # FIXME
+
+ result = minimizerGaussian.minimize(method=method, max_nfev=10)
+ assert minimizerGaussian.max_nfev == 10
+ assert result.nfev < 15
+ assert result.aborted
+ assert not result.errorbars
+ assert not result.success
+
+
+@pytest.mark.parametrize("method", methods)
+def test_max_nfev_Model(modelGaussian, minimizerGaussian, method):
+    """Test the max_nfev argument for all solvers using Model interface."""
+ x, y, mod, pars = modelGaussian
+ out = mod.fit(y, pars, x=x, method=method, max_nfev=10)
+
+ assert out.max_nfev == 10
+ assert out.nfev < 15
+ assert out.aborted
+ assert not out.errorbars
+ assert not out.success
+
+
+@pytest.mark.parametrize("method, default_max_nfev",
+ [('leastsq', 2000*(nvarys+1)),
+ ('least_squares', 1000*(nvarys+1)),
+ ('nelder', 1000*(nvarys+1)),
+ ('brute', np.inf),
+ ('ampgo', 1000000*(nvarys+1)),
+ ('basinhopping', 2000*(nvarys+1)),
+ ('differential_evolution', 1000*(nvarys+1)),
+ ('shgo', 1000000*(nvarys+1)),
+ ('dual_annealing', 1.e7)])
+def test_default_max_nfev(modelGaussian, minimizerGaussian, method,
+ default_max_nfev):
+ """Test the default values when setting max_nfev=None."""
+ if method in ('brute', 'basinhopping'):
+ pytest.xfail('max_nfev not yet supported in {}'.format(method)) # FIXME
+
+ x, y, mod, pars = modelGaussian
+ result = mod.fit(y, pars, x=x, method=method, max_nfev=None)
+ assert result.max_nfev == default_max_nfev
+
+ _ = minimizerGaussian.minimize(method=method, max_nfev=None)
+ assert minimizerGaussian.max_nfev == default_max_nfev
diff --git a/tests/test_model.py b/tests/test_model.py
index ba9e8dc..24de3fa 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -47,7 +47,7 @@ class CommonTests:
def setUp(self):
np.random.seed(1)
- self.noise = 0.0001*np.random.randn(*self.x.shape)
+ self.noise = 0.0001*np.random.randn(self.x.size)
# Some Models need args (e.g., polynomial order), and others don't.
try:
args = self.args
@@ -234,10 +234,6 @@ class CommonTests:
class TestUserDefiniedModel(CommonTests, unittest.TestCase):
# mainly aimed at checking that the API does what it says it does
# and raises the right exceptions or warnings when things are not right
- import six
- if six.PY2:
- from six import assertRaisesRegex
-
def setUp(self):
self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
self.guess = lambda: dict(amplitude=5, center=2, sigma=4)
@@ -347,7 +343,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
set_prefix_failed = False
except AttributeError:
set_prefix_failed = True
- except: # noqa: E722
+ except Exception:
set_prefix_failed = None
self.assertFalse(set_prefix_failed)
@@ -495,7 +491,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
self.assertTrue(abs(result.params['g1_center'].value - 1.1) < 0.2)
self.assertTrue(abs(result.params['g2_center'].value - 2.5) < 0.2)
- for name, par in pars.items():
+ for _, par in pars.items():
assert len(repr(par)) > 5
def test_composite_plotting(self):
@@ -561,7 +557,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
m2 = models.GaussianModel(prefix='m2_')
params.update(m2.make_params())
- m = m1 + m2 # noqa: F841
+ _m = m1 + m2 # noqa: F841
param_values = {name: p.value for name, p in params.items()}
self.assertTrue(param_values['m1_intercept'] < -0.0)
diff --git a/tests/test_model_uncertainties.py b/tests/test_model_uncertainties.py
index 92d540c..c6f2ecd 100644
--- a/tests/test_model_uncertainties.py
+++ b/tests/test_model_uncertainties.py
@@ -74,8 +74,8 @@ def test_gauss_sigmalevel():
dely_sigma2 = ret.eval_uncertainty(sigma=2)
dely_sigma3 = ret.eval_uncertainty(sigma=3)
- assert(dely_sigma3.mean() > 1.5*dely_sigma2.mean())
- assert(dely_sigma2.mean() > 1.5*dely_sigma1.mean())
+ assert dely_sigma3.mean() > 1.5*dely_sigma2.mean()
+ assert dely_sigma2.mean() > 1.5*dely_sigma1.mean()
def test_gauss_noiselevel():
diff --git a/tests/test_multidatasets.py b/tests/test_multidatasets.py
index 13d6f9a..fe69e8a 100644
--- a/tests/test_multidatasets.py
+++ b/tests/test_multidatasets.py
@@ -18,7 +18,7 @@ def gauss_dataset(params, i, x):
def objective(params, x, data):
""" calculate total residual for fits to several data sets held
in a 2-D array, and modeled by Gaussian functions"""
- ndata, nx = data.shape
+ ndata, _ = data.shape
resid = 0.0*data[:]
# make residual per data set
for i in range(ndata):
@@ -31,7 +31,7 @@ def test_multidatasets():
# create 5 datasets
x = np.linspace(-1, 2, 151)
data = []
- for i in np.arange(5):
+ for _ in np.arange(5):
amp = 2.60 + 1.50*np.random.rand()
cen = -0.20 + 1.50*np.random.rand()
sig = 0.25 + 0.03*np.random.rand()
@@ -41,11 +41,11 @@ def test_multidatasets():
# data has shape (5, 151)
data = np.array(data)
- assert(data.shape) == (5, 151)
+ assert data.shape == (5, 151)
# create 5 sets of parameters, one per data set
pars = Parameters()
- for iy, y in enumerate(data):
+ for iy, _ in enumerate(data):
pars.add('amp_%i' % (iy+1), value=0.5, min=0.0, max=200)
pars.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0)
pars.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)
@@ -58,10 +58,10 @@ def test_multidatasets():
# run the global fit to all the data sets
out = minimize(objective, pars, args=(x, data))
- assert(len(pars) == 15)
- assert(out.nvarys == 11)
- assert(out.nfev > 15)
- assert(out.chisqr > 1.0)
- assert(pars['amp_1'].value > 0.1)
- assert(pars['sig_1'].value > 0.1)
- assert(pars['sig_2'].value == pars['sig_1'].value)
+ assert len(pars) == 15
+ assert out.nvarys == 11
+ assert out.nfev > 15
+ assert out.chisqr > 1.0
+ assert pars['amp_1'].value > 0.1
+ assert pars['sig_1'].value > 0.1
+ assert pars['sig_2'].value == pars['sig_1'].value
diff --git a/tests/test_nose.py b/tests/test_nose.py
index d3b482a..096a5ae 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -2,8 +2,8 @@ import unittest
import numpy as np
from numpy import pi
-from numpy.testing import (assert_, assert_allclose, assert_almost_equal,
- assert_equal, dec)
+from numpy.testing import (assert_allclose, assert_almost_equal, assert_equal,
+ dec)
import pytest
from uncertainties import ufloat
@@ -12,15 +12,21 @@ from lmfit.lineshapes import gaussian
from lmfit.minimizer import (HAS_EMCEE, SCALAR_METHODS, MinimizerResult,
_nan_policy)
+try:
+ import numdifftools # noqa: F401
+ HAS_NUMDIFFTOOLS = True
+except ImportError:
+ HAS_NUMDIFFTOOLS = False
+
def check(para, real_val, sig=3):
err = abs(para.value - real_val)
- assert(err < sig * para.stderr)
+ assert err < sig * para.stderr
def check_wo_stderr(para, real_val, sig=0.1):
err = abs(para.value - real_val)
- assert(err < sig)
+ assert err < sig
def check_paras(para_fit, para_real, sig=3):
@@ -79,7 +85,7 @@ def test_lbfgsb():
model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
if data is None:
return model
- return (model - data)
+ return model - data
n = 2500
xmin = 0.
@@ -150,7 +156,7 @@ def test_peakfit():
model = g1 + g2
if data is None:
return model
- return (model - data)
+ return model - data
n = 601
xmin = 0.
@@ -219,18 +225,15 @@ def test_scalar_minimize_has_no_uncertainties():
mini = Minimizer(fcn2min, params, fcn_args=(x, data))
out = mini.minimize()
- assert_(np.isfinite(out.params['amp'].stderr))
+ assert np.isfinite(out.params['amp'].stderr)
assert out.errorbars
out2 = mini.minimize(method='nelder-mead')
- assert_(out2.params['amp'].stderr is None)
- assert_(out2.params['decay'].stderr is None)
- assert_(out2.params['shift'].stderr is None)
- assert_(out2.params['omega'].stderr is None)
- assert_(out2.params['amp'].correl is None)
- assert_(out2.params['decay'].correl is None)
- assert_(out2.params['shift'].correl is None)
- assert_(out2.params['omega'].correl is None)
- assert not out2.errorbars
+
+ for par in ('amp', 'decay', 'shift', 'omega'):
+ assert HAS_NUMDIFFTOOLS == (out2.params[par].stderr is not None)
+ assert HAS_NUMDIFFTOOLS == (out2.params[par].correl is not None)
+
+ assert HAS_NUMDIFFTOOLS == out2.errorbars
def test_scalar_minimize_reduce_fcn():
@@ -282,7 +285,7 @@ def test_multidimensional_fit_GH205():
np.cos(yv * lambda2))
data = f(xv, yv, 0.3, 3)
- assert_(data.ndim, 2)
+    assert data.ndim == 2
def fcn2min(params, xv, yv, data):
"""model decaying sine wave, subtract data"""
@@ -414,12 +417,12 @@ class CommonMinimizerTest(unittest.TestCase):
def test_nan_policy_function(self):
a = np.array([0, 1, 2, 3, np.nan])
pytest.raises(ValueError, _nan_policy, a)
- assert_(np.isnan(_nan_policy(a, nan_policy='propagate')[-1]))
+ assert np.isnan(_nan_policy(a, nan_policy='propagate')[-1])
assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
a[-1] = np.inf
pytest.raises(ValueError, _nan_policy, a)
- assert_(np.isposinf(_nan_policy(a, nan_policy='propagate')[-1]))
+ assert np.isposinf(_nan_policy(a, nan_policy='propagate')[-1])
assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
assert_equal(_nan_policy(a, handle_inf=False), a)
@@ -516,11 +519,11 @@ class CommonMinimizerTest(unittest.TestCase):
# if you've run the sampler the Minimizer object should have a _lastpos
# attribute
- assert_(hasattr(self.mini, '_lastpos'))
+ assert hasattr(self.mini, '_lastpos')
# now try and re-use sampler
out2 = self.mini.emcee(steps=10, reuse_sampler=True)
- assert_(out2.chain.shape == (35, 20, 4))
+ assert out2.chain.shape == (35, 20, 4)
# you shouldn't be able to reuse the sampler if nvarys has changed.
self.mini.params['amp'].vary = False
@@ -563,23 +566,23 @@ class CommonMinimizerTest(unittest.TestCase):
except ImportError:
return True
out = self.mini.emcee(nwalkers=10, steps=20, burn=5, thin=2)
- assert_(isinstance(out, MinimizerResult))
- assert_(isinstance(out.flatchain, DataFrame))
+ assert isinstance(out, MinimizerResult)
+ assert isinstance(out.flatchain, DataFrame)
# check that we can access the chains via parameter name
# print( out.flatchain['amp'].shape[0], 200)
- assert_(out.flatchain['amp'].shape[0] == 70)
+ assert out.flatchain['amp'].shape[0] == 70
assert out.errorbars
- assert_(np.isfinite(out.params['amp'].correl['period']))
+ assert np.isfinite(out.params['amp'].correl['period'])
# the lnprob array should be the same as the chain size
- assert_(np.size(out.chain)//out.nvarys == np.size(out.lnprob))
+ assert np.size(out.chain)//out.nvarys == np.size(out.lnprob)
# test chain output shapes
print(out.lnprob.shape, out.chain.shape, out.flatchain.shape)
- assert_(out.lnprob.shape == (7, 10))
- assert_(out.chain.shape == (7, 10, 4))
- assert_(out.flatchain.shape == (70, 4))
+ assert out.lnprob.shape == (7, 10)
+ assert out.chain.shape == (7, 10, 4)
+ assert out.flatchain.shape == (70, 4)
@dec.slow
def test_emcee_float(self):
@@ -642,4 +645,4 @@ def residual_for_multiprocessing(pars, x, data=None):
model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
if data is None:
return model
- return (model - data)
+ return model - data
diff --git a/tests/test_pandas.py b/tests/test_pandas.py
new file mode 100644
index 0000000..b89db6a
--- /dev/null
+++ b/tests/test_pandas.py
@@ -0,0 +1,24 @@
+"""Tests for using data in pandas.[DataFrame|Series]."""
+import os
+
+import numpy as np
+import pytest
+
+import lmfit
+
+pandas = pytest.importorskip('pandas')
+
+
+def test_pandas_guess_from_peak():
+ """Regression test for failure in guess_from_peak with pandas (GH #629)."""
+ data = pandas.read_csv(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'peak.csv'))
+ xdat, ydat = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'peak.csv'),
+ unpack=True, skiprows=1, delimiter=',')
+
+ model = lmfit.models.LorentzianModel()
+ guess_pd = model.guess(data['y'], x=data['x'])
+ guess = model.guess(ydat, x=xdat)
+
+ assert guess_pd == guess
diff --git a/tests/test_parameter.py b/tests/test_parameter.py
index 5c2e5c5..1e9105a 100644
--- a/tests/test_parameter.py
+++ b/tests/test_parameter.py
@@ -1,16 +1,33 @@
"""Tests for the Parameter class."""
from math import trunc
+import pickle
import numpy as np
from numpy.testing import assert_allclose
import pytest
-import uncertainties as un
import lmfit
@pytest.fixture
+def parameters():
+ """Initialize a Parameters class for tests."""
+ pars = lmfit.Parameters()
+ pars.add(lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0,
+ max=100.0, expr=None, brute_step=5.0,
+ user_data=1))
+ pars.add(lmfit.Parameter(name='b', value=0.0, vary=True, min=-250.0,
+ max=250.0, expr="2.0*a", brute_step=25.0,
+ user_data=2.5))
+ exp_attr_values_A = ('a', 10.0, True, -100.0, 100.0, None, 5.0, 1)
+ exp_attr_values_B = ('b', 20.0, False, -250.0, 250.0, "2.0*a", 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+ return pars, exp_attr_values_A, exp_attr_values_B
+
+
+@pytest.fixture
def parameter():
"""Initialize parameter for tests."""
param = lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0,
@@ -108,6 +125,11 @@ def test_parameter_set_value(parameter):
changed_attribute_values = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1)
assert_parameter_attributes(par, changed_attribute_values)
+ # check if set value works with new bounds, see issue#636
+ par.set(value=500.0, min=400, max=600)
+ changed_attribute_values2 = ('a', 500.0, True, 400.0, 600.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values2)
+
def test_parameter_set_vary(parameter):
"""Test the Parameter.set() function with vary."""
@@ -149,8 +171,8 @@ def test_parameter_set_expr(parameter):
"""Test the Parameter.set() function with expr.
Of note, this only tests for setting/removal of the expression; nothing
- else gets evaluated here.... More specific tests will be present in the
- Parameters class.
+ else gets evaluated here... More specific tests that require a Parameters
+ class can be found below.
"""
par, _ = parameter
@@ -168,6 +190,60 @@ def test_parameter_set_expr(parameter):
assert_parameter_attributes(par, changed_attribute_values)
+def test_parameters_set_value_with_expr(parameters):
+ """Test the Parameter.set() function with value in presence of expr."""
+ pars, _, _ = parameters
+
+ pars['a'].set(value=5.0)
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_A = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1)
+ changed_attr_values_B = ('b', 10.0, False, -250.0, 250.0, "2.0*a", 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], changed_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+ # with expression present, setting a value works and will leave vary=False
+ pars['b'].set(value=1.0)
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_A = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1)
+ changed_attr_values_B = ('b', 1.0, False, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], changed_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+
+def test_parameters_set_vary_with_expr(parameters):
+ """Test the Parameter.set() function with vary in presence of expr."""
+ pars, init_attr_values_A, _ = parameters
+
+ pars['b'].set(vary=True) # expression should get cleared
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_B = ('b', 20.0, True, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], init_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+
+def test_parameters_set_expr(parameters):
+ """Test the Parameter.set() function with expr."""
+ pars, init_attr_values_A, init_attr_values_B = parameters
+
+ pars['b'].set(expr=None) # nothing should change
+ pars.update_constraints() # update constraints/expressions
+ assert_parameter_attributes(pars['a'], init_attr_values_A)
+ assert_parameter_attributes(pars['b'], init_attr_values_B)
+
+ pars['b'].set(expr='') # expression should get cleared, vary still False
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_B = ('b', 20.0, False, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], init_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+ pars['a'].set(expr="b/4.0") # expression should be set, vary --> False
+ pars.update_constraints()
+ changed_attr_values_A = ('a', 5.0, False, -100.0, 100.0, "b/4.0", 5.0, 1)
+ changed_attr_values_B = ('b', 20.0, False, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], changed_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+
def test_parameter_set_brute_step(parameter):
"""Test the Parameter.set() function with brute_step."""
par, initial_attribute_values = parameter
@@ -204,6 +280,15 @@ def test_setstate(parameter):
assert_parameter_attributes(par_new, initial_attribute_values)
+def test_parameter_pickle_(parameter):
+ """Test that we can pickle a Parameter."""
+ par, _ = parameter
+ pkl = pickle.dumps(par)
+ loaded_par = pickle.loads(pkl)
+
+ assert loaded_par == par
+
+
def test_repr():
"""Tests for the __repr__ method."""
par = lmfit.Parameter(name='test', value=10.0, min=0.0, max=20.0)
@@ -264,32 +349,18 @@ def test_setup_bounds_and_scale_gradient_methods():
-20.976788, rtol=1.e-6)
-def test__getval(parameter):
- """Test _getval function."""
- par, _ = parameter
-
- # test uncertainties.core.Variable in _getval [deprecated]
- par.set(value=un.ufloat(5.0, 0.2))
- with pytest.warns(FutureWarning, match='removed in the next release'):
- val = par.value
- assert_allclose(val, 5.0)
-
-
def test_value_setter(parameter):
"""Tests for the value setter."""
par, initial_attribute_values = parameter
assert_parameter_attributes(par, initial_attribute_values)
- par.set(value=200.0) # above maximum
+ par.value = 200.0 # above maximum
assert_allclose(par.value, 100.0)
- par.set(value=-200.0) # below minimum
+ par.value = -200.0 # below minimum
assert_allclose(par.value, -100.0)
-# TODO: add tests for setter/getter methods for VALUE, EXPR
-
-
# Tests for magic methods of the Parameter class
def test__array__(parameter):
"""Test the __array__ magic method."""
@@ -411,45 +482,45 @@ def test__pow__(parameter):
def test__gt__(parameter):
"""Test the __gt__ magic method."""
par, _ = parameter
- assert 11 > par
- assert not 10 > par
+ assert par < 11
+ assert not par < 10
def test__ge__(parameter):
"""Test the __ge__ magic method."""
par, _ = parameter
- assert 11 >= par
- assert 10 >= par
- assert not 9 >= par
+ assert par <= 11
+ assert par <= 10
+ assert not par <= 9
def test__le__(parameter):
"""Test the __le__ magic method."""
par, _ = parameter
- assert 9 <= par
- assert 10 <= par
- assert not 11 <= par
+ assert par >= 9
+ assert par >= 10
+ assert not par >= 11
def test__lt__(parameter):
"""Test the __lt__ magic method."""
par, _ = parameter
- assert 9 < par
- assert not 10 < par
+ assert par > 9
+ assert not par > 10
def test__eq__(parameter):
"""Test the __eq__ magic method."""
par, _ = parameter
- assert 10 == par
- assert not 9 == par
+ assert par == 10
+ assert not par == 9
def test__ne__(parameter):
"""Test the __ne__ magic method."""
par, _ = parameter
- assert 9 != par
- assert not 10 != par
+ assert par != 9
+ assert not par != 10
def test__radd__(parameter):
@@ -502,13 +573,3 @@ def test__rsub__(parameter):
"""Test the __rsub__ magic method."""
par, _ = parameter
assert_allclose(5.25 - par, -4.75)
-
-
-def test_isParameter(parameter):
- """Test function to check whether something is a Paramter [deprecated]."""
- # TODO: this function isn't used anywhere in the codebase; useful at all?
- par, _ = parameter
- assert lmfit.parameter.isParameter(par)
- assert not lmfit.parameter.isParameter('test')
- with pytest.warns(FutureWarning, match='removed in the next release'):
- lmfit.parameter.isParameter(par)
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index 67f6c41..37cad6f 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -1,319 +1,578 @@
+"""Tests for the Parameters class."""
+
+
from copy import copy, deepcopy
import pickle
-import unittest
+import asteval
import numpy as np
-from numpy.testing import assert_, assert_almost_equal, assert_equal
-
-from lmfit import Model, Parameter, Parameters
-from lmfit.printfuncs import params_html_table
-
-
-class TestParameters(unittest.TestCase):
-
- def setUp(self):
- self.params = Parameters()
- self.params.add_many(('a', 1., True, None, None, None),
- ('b', 2., True, None, None, None),
- ('c', 3., True, None, None, '2. * a'))
-
- def test_expr_was_evaluated(self):
- self.params.update_constraints()
- assert_almost_equal(self.params['c'].value,
- 2 * self.params['a'].value)
-
- def test_copy(self):
- # check simple Parameters.copy() does not fail
- # on non-trivial Parameters
- p1 = Parameters()
- p1.add('t', 2.0, min=0.0, max=5.0)
- p1.add('x', 10.0)
- p1.add('y', expr='x*t + sqrt(t)/3.0')
-
- p2 = p1.copy()
- assert isinstance(p2, Parameters)
- assert 't' in p2
- assert 'y' in p2
- assert p2['t'].max < 6.0
- assert np.isinf(p2['x'].max) and p2['x'].max > 0
- assert np.isinf(p2['x'].min) and p2['x'].min < 0
- assert 'sqrt(t)' in p2['y'].expr
- assert p2._asteval is not None
- assert p2._asteval.symtable is not None
- assert (p2['y'].value > 20) and (p2['y'].value < 21)
-
- def test_copy_function(self):
- # check copy(Parameters) does not fail
- p1 = Parameters()
- p1.add('t', 2.0, min=0.0, max=5.0)
- p1.add('x', 10.0)
- p1.add('y', expr='x*t + sqrt(t)/3.0')
-
- p2 = copy(p1)
- assert isinstance(p2, Parameters)
-
- # change the 'x' value in the original
- p1['x'].value = 4.0
-
- assert p2['x'].value > 9.8
- assert p2['x'].value < 10.2
- assert np.isinf(p2['x'].max) and p2['x'].max > 0
-
- assert 't' in p2
- assert 'y' in p2
- assert p2['t'].max < 6.0
-
- assert np.isinf(p2['x'].min) and p2['x'].min < 0
- assert 'sqrt(t)' in p2['y'].expr
- assert p2._asteval is not None
- assert p2._asteval.symtable is not None
- assert (p2['y'].value > 20) and (p2['y'].value < 21)
-
- assert p1['y'].value < 10
-
- def test_deepcopy(self):
- # check that a simple copy works
- b = deepcopy(self.params)
- assert_(self.params == b)
-
- # check that we can add a symbol to the interpreter
- self.params['b'].expr = 'sin(1)'
- self.params['b'].value = 10
- assert_almost_equal(self.params['b'].value, np.sin(1))
- assert_almost_equal(self.params._asteval.symtable['b'], np.sin(1))
-
- # check that the symbols in the interpreter are still the same after
- # deepcopying
- b = deepcopy(self.params)
-
- unique_symbols_params = self.params._asteval.user_defined_symbols()
- unique_symbols_b = self.params._asteval.user_defined_symbols()
- assert_(unique_symbols_b == unique_symbols_params)
- for unique_symbol in unique_symbols_b:
- if self.params._asteval.symtable[unique_symbol] is np.nan:
- continue
-
- assert_(self.params._asteval.symtable[unique_symbol]
- ==
- b._asteval.symtable[unique_symbol])
-
- def test_add_many_params(self):
- # test that we can add many parameters, but only parameters are added.
- a = Parameter('a', 1)
- b = Parameter('b', 2)
-
- p = Parameters()
- p.add_many(a, b)
-
- assert_(list(p.keys()) == ['a', 'b'])
-
- def test_expr_and_constraints_GH265(self):
- # test that parameters are reevaluated if they have bounds and expr
- # see GH265
- p = Parameters()
-
- p['a'] = Parameter('a', 10, True)
- p['b'] = Parameter('b', 10, True, 0, 20)
-
- assert_equal(p['b'].min, 0)
- assert_equal(p['b'].max, 20)
-
- p['a'].expr = '2 * b'
- assert_almost_equal(p['a'].value, 20)
-
- p['b'].value = 15
- assert_almost_equal(p['b'].value, 15)
- assert_almost_equal(p['a'].value, 30)
-
- p['b'].value = 30
- assert_almost_equal(p['b'].value, 20)
- assert_almost_equal(p['a'].value, 40)
-
- def test_pickle_parameter(self):
- # test that we can pickle a Parameter
- p = Parameter('a', 10, True, 0, 1)
- pkl = pickle.dumps(p)
-
- q = pickle.loads(pkl)
-
- assert_(p == q)
-
- def test_pickle_parameters(self):
- # test that we can pickle a Parameters object
- p = Parameters()
- p.add('a', 10, True, 0, 100)
- p.add('b', 10, True, 0, 100, 'a * sin(1)')
- p.update_constraints()
- p._asteval.symtable['abc'] = '2 * 3.142'
-
- pkl = pickle.dumps(p, -1)
- q = pickle.loads(pkl)
-
- q.update_constraints()
- assert_(p == q)
- assert_(p is not q)
-
- # now test if the asteval machinery survived
- assert_(q._asteval.symtable['abc'] == '2 * 3.142')
-
- # check that unpickling of Parameters is not affected by expr that
- # refer to Parameter that are added later on. In the following
- # example var_0.expr refers to var_1, which is a Parameter later
- # on in the Parameters OrderedDict.
- p = Parameters()
- p.add('var_0', value=1)
- p.add('var_1', value=2)
- p['var_0'].expr = 'var_1'
- pkl = pickle.dumps(p)
- q = pickle.loads(pkl)
-
- def test_params_usersyms(self):
- # test passing usersymes to Parameters()
- def myfun(x):
- return x**3
-
- params = Parameters(usersyms={"myfun": myfun})
- params.add("a", value=2.3)
- params.add("b", expr="myfun(a)")
-
- xx = np.linspace(0, 1, 10)
- yy = 3 * xx + np.random.normal(scale=0.002, size=len(xx))
-
- model = Model(lambda x, a: a * x)
- result = model.fit(yy, params=params, x=xx)
- assert_(np.isclose(result.params['a'].value, 3.0, rtol=0.025))
- assert_(result.nfev > 3)
- assert_(result.nfev < 300)
-
- def test_set_symtable(self):
- # test that we use Parameter.set(value=XXX) and have
- # that new value be used in constraint expressions
- pars = Parameters()
- pars.add('x', value=1.0)
- pars.add('y', expr='x + 1')
-
- assert_(np.isclose(pars['y'].value, 2.0))
- pars['x'].set(value=3.0)
- assert_(np.isclose(pars['y'].value, 4.0))
-
- def test_dumps_loads_parameters(self):
- # test that we can dumps() and then loads() a Parameters
- pars = Parameters()
- pars.add('x', value=1.0)
- pars.add('y', value=2.0)
- pars['x'].expr = 'y / 2.0'
-
- dumps = pars.dumps()
-
- newpars = Parameters().loads(dumps)
- newpars['y'].value = 100.0
- assert_(np.isclose(newpars['x'].value, 50.0))
-
- def test_isclose(self):
- assert_(np.isclose(1., 1+1e-5, atol=1e-4, rtol=0))
- assert_(not np.isclose(1., 1+1e-5, atol=1e-6, rtol=0))
- assert_(np.isclose(1e10, 1.00001e10, rtol=1e-5, atol=1e-8))
- assert_(not np.isclose(0, np.inf))
- assert_(not np.isclose(-np.inf, np.inf))
- assert_(np.isclose(np.inf, np.inf))
- assert_(not np.isclose(np.nan, np.nan))
-
- def test_expr_with_bounds(self):
- "test an expression with bounds, without value"
- pars = Parameters()
- pars.add('c1', value=0.2)
- pars.add('c2', value=0.2)
- pars.add('c3', value=0.2)
- pars.add('csum', value=0.8)
- # this should not raise TypeError:
- pars.add('c4', expr='csum-c1-c2-c3', min=0, max=1)
- assert_(np.isclose(pars['c4'].value, 0.2))
-
- def test_invalid_expr_exceptions(self):
- "test if an exception is raised for invalid expressions (GH486)"""
- p1 = Parameters()
- p1.add('t', 2.0, min=0.0, max=5.0)
- p1.add('x', 10.0)
- with self.assertRaises(SyntaxError):
- p1.add('y', expr='x*t + sqrt(t)/')
- assert len(p1['y']._expr_eval.error) > 0
- p1.add('y', expr='x*t + sqrt(t)/3.0')
- p1['y'].set(expr='x*3.0 + t**2')
- assert 'x*3' in p1['y'].expr
- assert len(p1['y']._expr_eval.error) == 0
- with self.assertRaises(SyntaxError):
- p1['y'].set(expr='t+')
- assert len(p1['y']._expr_eval.error) > 0
- assert_almost_equal(p1['y'].value, 34.0)
-
- def test_eval(self):
- # check that eval() works with usersyms and parameter values
- def myfun(x):
- return 2.0 * x
- p = Parameters(usersyms={"myfun": myfun})
- p.add("a", value=4.0)
- p.add("b", value=3.0)
- assert_almost_equal(p.eval("myfun(2.0) * a"), 16)
- assert_almost_equal(p.eval("b / myfun(3.0)"), 0.5)
-
- def test_params_html_table(self):
- p1 = Parameters()
- p1.add('t', 2.0, min=0.0, max=5.0)
- p1.add('x', 0.0, )
-
- html = params_html_table(p1)
- self.assertIsInstance(html, str)
-
- def test_add_params_expr_outoforder(self):
- params1 = Parameters()
- params1.add("a", value=1.0)
-
- params2 = Parameters()
- params2.add("b", value=1.0)
- params2.add("c", value=2.0)
- params2['b'].expr = 'c/2'
-
- params = params1 + params2
- assert 'b' in params
- assert_almost_equal(params['b'].value, 1.0)
-
- def test_params_prints(self):
- params = Parameters()
- params.add("a", value=1.0, vary=True)
- params.add("b", value=8.5, min=0, vary=True)
- params.add("c", expr='a + sqrt(b)')
-
- repr_full = params.pretty_repr()
- repr_one = params.pretty_repr(oneline=True)
-
- out = []
- for key, val in params.items():
- out.append("%s: %s" % (key, repr(val)))
- out = '\n'.join(out)
-
- assert repr_full.count('\n') > 4
- assert repr_one.count('\n') < 2
- assert len(repr_full) > 150
- assert len(repr_one) > 150
- assert len(out) > 150
-
- def test_add_with_symtable(self):
- pars1 = Parameters()
- pars1.add("a", value=1.0, vary=True)
-
- def half(x):
- return 0.5*x
-
- pars2 = Parameters(usersyms={"half": half})
- pars2.add("b", value=3.0)
- pars2.add("c", expr="half(b)")
-
- params = pars1 + pars2
- assert_almost_equal(params['c'].value, 1.5)
-
- params = pars2 + pars1
- assert_almost_equal(params['c'].value, 1.5)
-
- params = deepcopy(pars1)
- params.update(pars2)
- assert_almost_equal(params['c'].value, 1.5)
+from numpy.testing import assert_allclose
+import pytest
+
+import lmfit
+
+
+@pytest.fixture
+def parameters():
+ """Initialize a Parameters class for tests."""
+ pars = lmfit.Parameters()
+ pars.add(lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0,
+ max=100.0, expr=None, brute_step=5.0,
+ user_data=1))
+ pars.add(lmfit.Parameter(name='b', value=0.0, vary=True, min=-250.0,
+ max=250.0, expr="2.0*a", brute_step=25.0,
+ user_data=2.5))
+ exp_attr_values_A = ('a', 10.0, True, -100.0, 100.0, None, 5.0, 1)
+ exp_attr_values_B = ('b', 20.0, False, -250.0, 250.0, "2.0*a", 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+ return pars, exp_attr_values_A, exp_attr_values_B
+
+
+def assert_parameter_attributes(par, expected):
+ """Assert that parameter attributes have the expected values."""
+ par_attr_values = (par.name, par._val, par.vary, par.min, par.max,
+ par._expr, par.brute_step, par.user_data)
+ assert par_attr_values == expected
+
+
+def test_check_ast_errors():
+ """Assert that an exception is raised upon AST errors."""
+ pars = lmfit.Parameters()
+
+ msg = r"at expr='<_ast.Module object at"
+ with pytest.raises(NameError, match=msg):
+ pars.add('par1', expr='2.0*par2')
+
+
+def test_parameters_init():
+    """Test for initialization of the Parameters class."""
+ ast_int = asteval.Interpreter()
+ pars = lmfit.Parameters(asteval=ast_int, usersyms={'test': np.sin})
+ assert pars._asteval == ast_int
+ assert 'test' in pars._asteval.symtable
+
+
+def test_parameters_copy(parameters):
+ """Tests for copying a Parameters class; all use the __deepcopy__ method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ copy_pars = copy(pars)
+ pars_copy = pars.copy()
+ pars__copy__ = pars.__copy__()
+
+ pars['a'].set(value=100)
+
+ for copied in [copy_pars, pars_copy, pars__copy__]:
+ assert isinstance(copied, lmfit.Parameters)
+ assert copied != pars
+ assert copied._asteval is not None
+ assert copied._asteval.symtable is not None
+ assert_parameter_attributes(copied['a'], exp_attr_values_A)
+ assert_parameter_attributes(copied['b'], exp_attr_values_B)
+
+
+def test_parameters_deepcopy(parameters):
+ """Tests for deepcopy of a Parameters class."""
+ pars, _, _ = parameters
+
+ deepcopy_pars = deepcopy(pars)
+ assert isinstance(deepcopy_pars, lmfit.Parameters)
+ assert deepcopy_pars == pars
+
+ # check that we can add a symbol to the interpreter
+ pars['b'].expr = 'sin(1)'
+ pars['b'].value = 10
+ assert_allclose(pars['b'].value, np.sin(1))
+ assert_allclose(pars._asteval.symtable['b'], np.sin(1))
+
+ # check that the symbols in the interpreter are still the same after
+ # deepcopying
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+ deepcopy_pars = deepcopy(pars)
+
+ unique_symbols_pars = pars._asteval.user_defined_symbols()
+ unique_symbols_copied = deepcopy_pars._asteval.user_defined_symbols()
+ assert unique_symbols_copied == unique_symbols_pars
+
+ for unique_symbol in unique_symbols_copied:
+ if pars._asteval.symtable[unique_symbol] is not np.nan:
+ assert (pars._asteval.symtable[unique_symbol] ==
+ deepcopy_pars._asteval.symtable[unique_symbol])
+
+
+def test_parameters_update(parameters):
+ """Tests for updating a Parameters class."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ msg = r"'test' is not a Parameters object"
+ with pytest.raises(ValueError, match=msg):
+ pars.update('test')
+
+ pars2 = lmfit.Parameters()
+ pars2.add(lmfit.Parameter(name='c', value=7.0, vary=True, min=-70.0,
+ max=70.0, expr=None, brute_step=0.7,
+ user_data=7))
+ exp_attr_values_C = ('c', 7.0, True, -70.0, 70.0, None, 0.7, 7)
+
+ pars_updated = pars.update(pars2)
+
+ assert_parameter_attributes(pars_updated['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars_updated['b'], exp_attr_values_B)
+ assert_parameter_attributes(pars_updated['c'], exp_attr_values_C)
+
+
+def test_parameters__setitem__(parameters):
+ """Tests for __setitem__ method of a Parameters class."""
+ pars, _, exp_attr_values_B = parameters
+
+ msg = r"'10' is not a valid Parameters name"
+ with pytest.raises(KeyError, match=msg):
+ pars.__setitem__('10', None)
+
+ msg = r"'not_a_parameter' is not a Parameter"
+ with pytest.raises(ValueError, match=msg):
+ pars.__setitem__('a', 'not_a_parameter')
+
+ par = lmfit.Parameter('b', value=10, min=-25.0, brute_step=1)
+ pars.__setitem__('b', par)
+
+ exp_attr_values_B = ('b', 10, True, -25.0, np.inf, None, 1, None)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+
+
+def test_parameters__add__(parameters):
+ """Test the __add__ magic method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ msg = r"'other' is not a Parameters object"
+ with pytest.raises(ValueError, match=msg):
+ _ = pars + 'other'
+
+ pars2 = lmfit.Parameters()
+ pars2.add_many(('c', 1., True, None, None, None),
+ ('d', 2., True, None, None, None))
+ exp_attr_values_C = ('c', 1, True, -np.inf, np.inf, None, None, None)
+ exp_attr_values_D = ('d', 2, True, -np.inf, np.inf, None, None, None)
+
+ pars_added = pars + pars2
+
+ assert_parameter_attributes(pars_added['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars_added['b'], exp_attr_values_B)
+ assert_parameter_attributes(pars_added['c'], exp_attr_values_C)
+ assert_parameter_attributes(pars_added['d'], exp_attr_values_D)
+
+
+def test_parameters__iadd__(parameters):
+ """Test the __iadd__ magic method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ msg = r"'other' is not a Parameters object"
+ with pytest.raises(ValueError, match=msg):
+ pars += 'other'
+
+ pars2 = lmfit.Parameters()
+ pars2.add_many(('c', 1., True, None, None, None),
+ ('d', 2., True, None, None, None))
+ exp_attr_values_C = ('c', 1, True, -np.inf, np.inf, None, None, None)
+ exp_attr_values_D = ('d', 2, True, -np.inf, np.inf, None, None, None)
+
+ pars += pars2
+
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+ assert_parameter_attributes(pars['c'], exp_attr_values_C)
+ assert_parameter_attributes(pars['d'], exp_attr_values_D)
+
+
+def test_parameters_add_with_symtable():
+ """Regression test for GitHub Issue 607."""
+ pars1 = lmfit.Parameters()
+ pars1.add('a', value=1.0)
+
+ def half(x):
+ return 0.5*x
+
+ pars2 = lmfit.Parameters(usersyms={"half": half})
+ pars2.add("b", value=3.0)
+ pars2.add("c", expr="half(b)")
+
+ params = pars1 + pars2
+ assert_allclose(params['c'].value, 1.5)
+
+ params = pars2 + pars1
+ assert_allclose(params['c'].value, 1.5)
+
+ params = deepcopy(pars1)
+ params.update(pars2)
+ assert_allclose(params['c'].value, 1.5)
+
+ pars1 += pars2
+ assert_allclose(params['c'].value, 1.5)
+
+
+def test_parameters__array__(parameters):
+ """Test the __array__ magic method."""
+ pars, _, _ = parameters
+
+ assert_allclose(np.array(pars), np.array([10.0, 20.0]))
+
+
+def test_parameters__reduce__(parameters):
+ """Test the __reduce__ magic method."""
+ pars, _, _ = parameters
+ reduced = pars.__reduce__()
+
+ assert isinstance(reduced[2], dict)
+ assert 'unique_symbols' in reduced[2].keys()
+ assert reduced[2]['unique_symbols']['b'] == 20
+ assert 'params' in reduced[2].keys()
+ assert isinstance(reduced[2]['params'][0], lmfit.Parameter)
+
+
+def test_parameters__setstate__(parameters):
+ """Test the __setstate__ magic method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+ reduced = pars.__reduce__()
+
+ pars_setstate = lmfit.Parameters()
+ pars_setstate.__setstate__(reduced[2])
+
+ assert isinstance(pars_setstate, lmfit.Parameters)
+ assert_parameter_attributes(pars_setstate['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars_setstate['b'], exp_attr_values_B)
+
+
+def test_pickle_parameters():
+ """Test that we can pickle a Parameters object."""
+ p = lmfit.Parameters()
+ p.add('a', 10, True, 0, 100)
+ p.add('b', 10, True, 0, 100, 'a * sin(1)')
+ p.update_constraints()
+ p._asteval.symtable['abc'] = '2 * 3.142'
+
+ pkl = pickle.dumps(p, -1)
+ q = pickle.loads(pkl)
+
+ q.update_constraints()
+ assert p == q
+ assert p is not q
+
+ # now test if the asteval machinery survived
+ assert q._asteval.symtable['abc'] == '2 * 3.142'
+
+ # check that unpickling of Parameters is not affected by expr that
+ # refer to Parameter that are added later on. In the following
+ # example var_0.expr refers to var_1, which is a Parameter later
+ # on in the Parameters OrderedDict.
+ p = lmfit.Parameters()
+ p.add('var_0', value=1)
+ p.add('var_1', value=2)
+ p['var_0'].expr = 'var_1'
+ pkl = pickle.dumps(p)
+ q = pickle.loads(pkl)
+
+
+def test_parameters_eval(parameters):
+ """Test the eval method."""
+ pars, _, _ = parameters
+ evaluated = pars.eval('10.0*a+b')
+ assert_allclose(evaluated, 120)
+
+ # check that eval() works with usersyms and parameter values
+ def myfun(x):
+ return 2.0 * x
+
+ pars2 = lmfit.Parameters(usersyms={"myfun": myfun})
+ pars2.add('a', value=4.0)
+ pars2.add('b', value=3.0)
+ assert_allclose(pars2.eval('myfun(2.0) * a'), 16)
+ assert_allclose(pars2.eval('b / myfun(3.0)'), 0.5)
+
+
+def test_parameters_pretty_repr(parameters):
+ """Test the pretty_repr method."""
+ pars, _, _ = parameters
+ output = pars.pretty_repr()
+ output_oneline = pars.pretty_repr(oneline=True)
+
+ split_output = output.split('\n')
+ assert len(split_output) == 5
+ assert 'Parameters' in split_output[0]
+ assert "Parameter 'a'" in split_output[1]
+ assert "Parameter 'b'" in split_output[2]
+
+ oneliner = ("Parameters([('a', <Parameter 'a', value=10.0, "
+ "bounds=[-100.0:100.0], brute_step=5.0>), ('b', <Parameter "
+ "'b', value=20.0, bounds=[-250.0:250.0], expr='2.0*a', "
+ "brute_step=25.0>)])")
+ assert output_oneline == oneliner
+
+
+def test_parameters_pretty_print(parameters, capsys):
+ """Test the pretty_print method."""
+ pars, _, _ = parameters
+
+ # oneliner
+ pars.pretty_print(oneline=True)
+ captured = capsys.readouterr()
+ oneliner = ("Parameters([('a', <Parameter 'a', value=10.0, "
+ "bounds=[-100.0:100.0], brute_step=5.0>), ('b', <Parameter "
+ "'b', value=20.0, bounds=[-250.0:250.0], expr='2.0*a', "
+ "brute_step=25.0>)])")
+ assert oneliner in captured.out
+
+ # default
+ pars.pretty_print()
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert len(captured_split) == 4
+ header = ('Name Value Min Max Stderr Vary '
+ 'Expr Brute_Step')
+ assert captured_split[0] == header
+
+ # specify columnwidth
+ pars.pretty_print(colwidth=12)
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ header = ('Name Value Min Max Stderr '
+ ' Vary Expr Brute_Step')
+ assert captured_split[0] == header
+
+ # specify columns
+ pars['a'].stderr = 0.01
+ pars.pretty_print(columns=['value', 'min', 'max', 'stderr'])
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert captured_split[0] == 'Name Value Min Max Stderr'
+ assert captured_split[1] == 'a 10 -100 100 0.01'
+ assert captured_split[2] == 'b 20 -250 250 None'
+
+ # specify fmt
+ pars.pretty_print(fmt='e', columns=['value', 'min', 'max'])
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert captured_split[0] == 'Name Value Min Max'
+ assert captured_split[1] == 'a 1.0000e+01 -1.0000e+02 1.0000e+02'
+ assert captured_split[2] == 'b 2.0000e+01 -2.5000e+02 2.5000e+02'
+
+ # specify precision
+ pars.pretty_print(precision=2, fmt='e', columns=['value', 'min', 'max'])
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert captured_split[0] == 'Name Value Min Max'
+ assert captured_split[1] == 'a 1.00e+01 -1.00e+02 1.00e+02'
+ assert captured_split[2] == 'b 2.00e+01 -2.50e+02 2.50e+02'
+
+
+def test_parameters__repr_html_(parameters):
+    """Test _repr_html_ method to generate HTML table for Parameters class."""
+ pars, _, _ = parameters
+ repr_html = pars._repr_html_()
+
+ assert isinstance(repr_html, str)
+ assert '<table><tr><th> name </th><th> value </th>' in repr_html
+
+
+def test_parameters_add():
+ """Tests for adding a Parameter to the Parameters class."""
+ pars = lmfit.Parameters()
+ pars_from_par = lmfit.Parameters()
+
+ pars.add('a')
+ exp_attr_values_A = ('a', -np.inf, True, -np.inf, np.inf, None, None, None)
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+
+ pars_from_par.add(lmfit.Parameter('a'))
+ assert pars_from_par == pars
+
+ pars.add('b', value=1, vary=False, min=-5.0, max=5.0, brute_step=0.1)
+ exp_attr_values_B = ('b', 1.0, False, -5.0, 5.0, None, 0.1, None)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+
+ pars_from_par.add(lmfit.Parameter('b', value=1, vary=False, min=-5.0,
+ max=5.0, brute_step=0.1))
+ assert pars_from_par == pars
+
+
+def test_add_params_expr_outoforder():
+ """Regression test for GitHub Issue 560."""
+ params1 = lmfit.Parameters()
+ params1.add("a", value=1.0)
+
+ params2 = lmfit.Parameters()
+ params2.add("b", value=1.0)
+ params2.add("c", value=2.0)
+ params2['b'].expr = 'c/2'
+
+ params = params1 + params2
+ assert 'b' in params
+ assert_allclose(params['b'].value, 1.0)
+
+
+def test_parameters_add_many():
+ """Tests for add_many method."""
+ a = lmfit.Parameter('a', 1)
+ b = lmfit.Parameter('b', 2)
+
+ par = lmfit.Parameters()
+ par.add_many(a, b)
+
+ par_with_tuples = lmfit.Parameters()
+ par_with_tuples.add_many(('a', 1), ('b', 2))
+
+ assert list(par.keys()) == ['a', 'b']
+ assert par == par_with_tuples
+
+
+def test_parameters_valuesdict(parameters):
+ """Test for valuesdict method."""
+ pars, _, _ = parameters
+ vals_dict = pars.valuesdict()
+
+ assert isinstance(vals_dict, dict)
+ assert_allclose(vals_dict['a'], pars['a'].value)
+ assert_allclose(vals_dict['b'], pars['b'].value)
+
+
+def test_dumps_loads_parameters(parameters):
+ """Test for dumps and loads methods for a Parameters class."""
+ pars, _, _ = parameters
+
+ dumps = pars.dumps()
+ assert isinstance(dumps, str)
+ newpars = lmfit.Parameters().loads(dumps)
+ assert newpars == pars
+
+ newpars['a'].value = 100.0
+ assert_allclose(newpars['b'].value, 200.0)
+
+
+def test_dump_load_parameters(parameters):
+ """Test for dump and load methods for a Parameters class."""
+ pars, _, _ = parameters
+
+ with open('parameters.sav', 'w') as outfile:
+ pars.dump(outfile)
+
+ with open('parameters.sav') as infile:
+ newpars = pars.load(infile)
+
+ assert newpars == pars
+ newpars['a'].value = 100.0
+ assert_allclose(newpars['b'].value, 200.0)
+
+
+def test_dumps_loads_parameters_usersyms():
+ """Test for dumps/loads methods for a Parameters class with usersyms."""
+ def half(x):
+ return 0.5*x
+
+ pars = lmfit.Parameters(usersyms={"half": half, 'my_func': np.sqrt})
+ pars.add(lmfit.Parameter(name='a', value=9.0, min=-100.0, max=100.0))
+ pars.add(lmfit.Parameter(name='b', value=100.0, min=-250.0, max=250.0))
+ pars.add("c", expr="half(b) + my_func(a)")
+
+ dumps = pars.dumps()
+ assert isinstance(dumps, str)
+ assert '"half": {' in dumps
+ assert '"my_func": {' in dumps
+
+ newpars = lmfit.Parameters().loads(dumps)
+ assert 'half' in newpars._asteval.symtable
+ assert 'my_func' in newpars._asteval.symtable
+ assert_allclose(newpars['a'].value, 9.0)
+ assert_allclose(newpars['b'].value, 100.0)
+
+    # within the py.test environment the encoding of the function 'half' does
+    # not work correctly, as it is changed from "<function half at 0x?????????>"
+    # to "<function test_dumps_loads_parameters_usersyms.<locals>.half at 0x?????????>".
+    # This results in the "importer" being set to None, so the final "decode4js"
+    # does not do the correct thing.
+ #
+ # Of note, this is only an issue within the py.test framework and it DOES
+ # work correctly in a normal Python interpreter. Also, it isn't an issue
+ # when DILL is used, so in that case the two asserts below will pass.
+ if lmfit.jsonutils.HAS_DILL:
+ assert newpars == pars
+ assert_allclose(newpars['c'].value, 53.0)
+
+
+def test_parameters_expr_and_constraints():
+    """Regression test for GitHub Issue #265. Test that parameters are re-
+ evaluated if they have bounds and expr.
+
+ """
+ p = lmfit.Parameters()
+ p.add(lmfit.Parameter('a', 10, True))
+ p.add(lmfit.Parameter('b', 10, True, 0, 20))
+
+ assert_allclose(p['b'].min, 0)
+ assert_allclose(p['b'].max, 20)
+
+ p['a'].expr = '2 * b'
+ assert_allclose(p['a'].value, 20)
+
+ p['b'].value = 15
+ assert_allclose(p['b'].value, 15)
+ assert_allclose(p['a'].value, 30)
+
+ p['b'].value = 30
+ assert_allclose(p['b'].value, 20)
+ assert_allclose(p['a'].value, 40)
+
+
+def test_parameters_usersyms():
+ """Test for passing usersyms to Parameters()."""
+ def myfun(x):
+ return x**3
+
+ params = lmfit.Parameters(usersyms={"myfun": myfun})
+ params.add("a", value=2.3)
+ params.add("b", expr="myfun(a)")
+
+ np.random.seed(2020)
+ xx = np.linspace(0, 1, 10)
+ yy = 3 * xx + np.random.normal(scale=0.002, size=xx.size)
+
+ model = lmfit.Model(lambda x, a: a * x)
+ result = model.fit(yy, params=params, x=xx)
+ assert_allclose(result.params['a'].value, 3.0, rtol=1e-3)
+ assert (result.nfev > 3 and result.nfev < 300)
+
+
+def test_parameters_expr_with_bounds():
+ """Test Parameters using an expression with bounds, without value."""
+ pars = lmfit.Parameters()
+ pars.add('c1', value=0.2)
+ pars.add('c2', value=0.2)
+ pars.add('c3', value=0.2)
+ pars.add('csum', value=0.8)
+
+ # this should not raise TypeError:
+ pars.add('c4', expr='csum-c1-c2-c3', min=0, max=1)
+ assert_allclose(pars['c4'].value, 0.2)
+
+
+def test_invalid_expr_exceptions():
+ """Regression test for GitHub Issue #486: check that an exception is
+ raised for invalid expressions.
+
+ """
+ p1 = lmfit.Parameters()
+ p1.add('t', 2.0, min=0.0, max=5.0)
+ p1.add('x', 10.0)
+
+ with pytest.raises(SyntaxError):
+ p1.add('y', expr='x*t + sqrt(t)/')
+ assert len(p1['y']._expr_eval.error) > 0
+
+ p1.add('y', expr='x*t + sqrt(t)/3.0')
+ p1['y'].set(expr='x*3.0 + t**2')
+ assert 'x*3' in p1['y'].expr
+ assert len(p1['y']._expr_eval.error) == 0
+
+ with pytest.raises(SyntaxError):
+ p1['y'].set(expr='t+')
+ assert len(p1['y']._expr_eval.error) > 0
+ assert_allclose(p1['y'].value, 34.0)
diff --git a/tests/test_params_set.py b/tests/test_params_set.py
deleted file mode 100644
index cc8af09..0000000
--- a/tests/test_params_set.py
+++ /dev/null
@@ -1,186 +0,0 @@
-import numpy as np
-from numpy.testing import assert_allclose
-
-from lmfit.lineshapes import gaussian
-from lmfit.models import VoigtModel
-
-
-def test_param_set():
- np.random.seed(2015)
- x = np.arange(0, 20, 0.05)
- y = gaussian(x, amplitude=15.43, center=4.5, sigma=2.13)
- y = y + 0.05 - 0.01*x + np.random.normal(scale=0.03, size=len(x))
-
- model = VoigtModel()
- params = model.guess(y, x=x)
-
- # test #1: gamma is constrained to equal sigma
- assert(params['gamma'].expr == 'sigma')
- params.update_constraints()
- sigval = params['sigma'].value
- assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True)
-
- # test #2: explicitly setting a param value should work, even when
- # it had been an expression. The value will be left as fixed
- gamval = 0.87543
- params['gamma'].set(value=gamval)
- assert(params['gamma'].expr is None)
- assert(not params['gamma'].vary)
- assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
-
- # test #3: explicitly setting an expression should work
- # Note, the only way to ensure that **ALL** constraints are up to date
- # is to call params.update_constraints(). This is because the constraint
- # may have multiple dependencies.
- params['gamma'].set(expr='sigma/2.0')
- assert(params['gamma'].expr is not None)
- assert(not params['gamma'].vary)
- params.update_constraints()
- assert_allclose(params['gamma'].value, sigval/2.0, 1e-4, 1e-4, '', True)
-
- # test #4: explicitly setting a param value WITH vary=True
- # will set it to be variable
- gamval = 0.7777
- params['gamma'].set(value=gamval, vary=True)
- assert(params['gamma'].expr is None)
- assert(params['gamma'].vary)
- assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
-
- # test 5: make sure issue #389 is fixed: set boundaries and make sure
- # they are kept when changing the value
- amplitude_vary = params['amplitude'].vary
- amplitude_expr = params['amplitude'].expr
- params['amplitude'].set(min=0.0, max=100.0)
- params.update_constraints()
- assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
- params['amplitude'].set(value=40.0)
- params.update_constraints()
- assert_allclose(params['amplitude'].value, 40.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
- assert(params['amplitude'].expr == amplitude_expr)
- assert(params['amplitude'].vary == amplitude_vary)
- assert(not params['amplitude'].brute_step)
-
- # test for possible regressions of this fix (without 'expr'):
- # the set function should only change the requested attribute(s)
- params['amplitude'].set(value=35.0)
- params.update_constraints()
- assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].min, 0.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
- assert(params['amplitude'].vary == amplitude_vary)
- assert(params['amplitude'].expr == amplitude_expr)
- assert(not params['amplitude'].brute_step)
-
- # set minimum
- params['amplitude'].set(min=10.0)
- params.update_constraints()
- assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].max, 100.0, 1e-4, 1e-4, '', True)
- assert(params['amplitude'].vary == amplitude_vary)
- assert(params['amplitude'].expr == amplitude_expr)
- assert(not params['amplitude'].brute_step)
-
- # set maximum
- params['amplitude'].set(max=110.0)
- params.update_constraints()
- assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
- assert(params['amplitude'].vary == amplitude_vary)
- assert(params['amplitude'].expr == amplitude_expr)
- assert(not params['amplitude'].brute_step)
-
- # set vary
- params['amplitude'].set(vary=False)
- params.update_constraints()
- assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
- assert(params['amplitude'].vary is False)
- assert(params['amplitude'].expr == amplitude_expr)
- assert(not params['amplitude'].brute_step)
-
- # set brute_step
- params['amplitude'].set(brute_step=0.1)
- params.update_constraints()
- assert_allclose(params['amplitude'].value, 35.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].min, 10.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['amplitude'].max, 110.0, 1e-4, 1e-4, '', True)
- assert(params['amplitude'].vary is False)
- assert(params['amplitude'].expr == amplitude_expr)
- assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True)
-
- # test for possible regressions of this fix for variables WITH 'expr':
- height_value = params['height'].value
- height_min = params['height'].min
- height_max = params['height'].max
- height_vary = params['height'].vary
- height_expr = params['height'].expr
- height_brute_step = params['height'].brute_step
-
- # set vary=True should remove expression
- params['height'].set(vary=True)
- params.update_constraints()
- assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
- assert(params['height'].vary is True)
- assert(params['height'].expr is None)
- assert(params['height'].brute_step == height_brute_step)
-
- # setting an expression should set vary=False
- params['height'].set(expr=height_expr)
- params.update_constraints()
- assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].min, height_min, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
- assert(params['height'].vary is False)
- assert(params['height'].expr == height_expr)
- assert(params['height'].brute_step == height_brute_step)
-
- # changing min/max should not remove expression
- params['height'].set(min=0)
- params.update_constraints()
- assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
- assert(params['height'].vary == height_vary)
- assert(params['height'].expr == height_expr)
- assert(params['height'].brute_step == height_brute_step)
-
- # changing brute_step should not remove expression
- params['height'].set(brute_step=0.1)
- params.update_constraints()
- assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
- assert(params['height'].vary == height_vary)
- assert(params['height'].expr == height_expr)
- assert_allclose(params['amplitude'].brute_step, 0.1, 1e-4, 1e-4, '', True)
-
- # changing the value should remove expression and keep vary=False
- params['height'].set(brute_step=0)
- params['height'].set(value=10.0)
- params.update_constraints()
- assert_allclose(params['height'].value, 10.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
- assert(params['height'].vary is False)
- assert(params['height'].expr is None)
- assert(params['height'].brute_step == height_brute_step)
-
- # passing expr='' should only remove the expression
- params['height'].set(expr=height_expr) # first restore the original expr
- params.update_constraints()
- params['height'].set(expr='')
- params.update_constraints()
- assert_allclose(params['height'].value, height_value, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].min, 0.0, 1e-4, 1e-4, '', True)
- assert_allclose(params['height'].max, height_max, 1e-4, 1e-4, '', True)
- assert(params['height'].vary is False)
- assert(params['height'].expr is None)
- assert(params['height'].brute_step == height_brute_step)
diff --git a/tests/test_printfuncs.py b/tests/test_printfuncs.py
index 2aad889..4c2aeb3 100644
--- a/tests/test_printfuncs.py
+++ b/tests/test_printfuncs.py
@@ -374,7 +374,7 @@ def test_ci_report_with_offset(confidence_interval):
@pytest.mark.parametrize("ndigits", [3, 5, 7])
def test_ci_report_with_ndigits(confidence_interval, ndigits):
- """Verify output of CI report when specifiying ndigits."""
+ """Verify output of CI report when specifying ndigits."""
report_split = ci_report(confidence_interval, ndigits=ndigits).split('\n')
period_values = [val for val in report_split[2].split()[2:]]
length = [len(val.split('.')[-1]) for val in period_values]
diff --git a/tests/test_saveload.py b/tests/test_saveload.py
index cb4e026..392558a 100644
--- a/tests/test_saveload.py
+++ b/tests/test_saveload.py
@@ -100,7 +100,7 @@ def test_save_load_model(dill):
file_exists = wait_for_file(SAVE_MODEL, timeout=10)
assert file_exists
- with open(SAVE_MODEL, 'r') as fh:
+ with open(SAVE_MODEL) as fh:
text = fh.read()
assert 1000 < len(text) < 2500
@@ -141,7 +141,7 @@ def test_save_load_modelresult(dill):
assert file_exists
text = ''
- with open(SAVE_MODELRESULT, 'r') as fh:
+ with open(SAVE_MODELRESULT) as fh:
text = fh.read()
assert_between(len(text), 8000, 25000)
@@ -185,18 +185,24 @@ def test_saveload_modelresult_exception():
clear_savefile(SAVE_MODEL)
-def test_saveload_modelresult_roundtrip():
+@pytest.mark.parametrize("method", ['leastsq', 'nelder', 'powell', 'cobyla',
+ 'bfgsb', 'differential_evolution', 'brute',
+ 'basinhopping', 'ampgo', 'shgo',
+ 'dual_annealing'])
+def test_saveload_modelresult_roundtrip(method):
"""Test for modelresult.loads()/dumps() and repeating that"""
def mfunc(x, a, b):
return a * (x-b)
model = Model(mfunc)
- params = model.make_params(a=0.0, b=3.0)
+ params = model.make_params(a=0.1, b=3.0)
+ params['a'].set(min=.01, max=1, brute_step=0.01)
+ params['b'].set(min=.01, max=3.1, brute_step=0.01)
xx = np.linspace(-5, 5, 201)
yy = 0.5 * (xx - 0.22) + np.random.normal(scale=0.01, size=len(xx))
- result1 = model.fit(yy, params, x=xx)
+ result1 = model.fit(yy, params=params, x=xx, method=method)
result2 = ModelResult(model, Parameters())
result2.loads(result1.dumps(), funcdefs={'mfunc': mfunc})
diff --git a/tests/test_stepmodel.py b/tests/test_stepmodel.py
index 1d16935..0ab8a3b 100644
--- a/tests/test_stepmodel.py
+++ b/tests/test_stepmodel.py
@@ -23,15 +23,15 @@ def test_stepmodel_linear():
out = mod.fit(y, pars, x=x)
- assert(out.nfev > 5)
- assert(out.nvarys == 4)
- assert(out.chisqr > 1)
- assert(out.params['c'].value > 3)
- assert(out.params['center'].value > 1)
- assert(out.params['center'].value < 4)
- assert(out.params['sigma'].value > 0.5)
- assert(out.params['sigma'].value < 3.5)
- assert(out.params['amplitude'].value > 50)
+ assert out.nfev > 5
+ assert out.nvarys == 4
+ assert out.chisqr > 1
+ assert out.params['c'].value > 3
+ assert out.params['center'].value > 1
+ assert out.params['center'].value < 4
+ assert out.params['sigma'].value > 0.5
+ assert out.params['sigma'].value < 3.5
+ assert out.params['amplitude'].value > 50
def test_stepmodel_erf():
@@ -44,12 +44,12 @@ def test_stepmodel_erf():
out = mod.fit(y, pars, x=x)
- assert(out.nfev > 5)
- assert(out.nvarys == 4)
- assert(out.chisqr > 1)
- assert(out.params['c'].value > 3)
- assert(out.params['center'].value > 1)
- assert(out.params['center'].value < 4)
- assert(out.params['amplitude'].value > 50)
- assert(out.params['sigma'].value > 0.2)
- assert(out.params['sigma'].value < 1.5)
+ assert out.nfev > 5
+ assert out.nvarys == 4
+ assert out.chisqr > 1
+ assert out.params['c'].value > 3
+ assert out.params['center'].value > 1
+ assert out.params['center'].value < 4
+ assert out.params['amplitude'].value > 50
+ assert out.params['sigma'].value > 0.2
+ assert out.params['sigma'].value < 1.5