author    Drew Parsons <dparsons@debian.org>    2022-12-30 14:06:45 +0100
committer Drew Parsons <dparsons@debian.org>    2022-12-30 14:06:45 +0100
commit    982032378ea7e5228977ccd9977e6ae848e132e4 (patch)
tree      2e9262c05afabb49e57a119804cb1f20a865022a
parent    26c2fea5bb57cef6a72763a1cbded6b88e6ee2f5 (diff)
New upstream version 1.1.0
-rw-r--r--  .codecov.yml | 9
-rw-r--r--  .github/CONTRIBUTING.md | 55
-rw-r--r--  .github/ISSUE_TEMPLATE.md | 32
-rw-r--r-- [-rwxr-xr-x]  .gitignore | 4
-rw-r--r--  .pre-commit-config.yaml | 29
-rw-r--r--  AUTHORS.txt (renamed from THANKS.txt) | 11
-rw-r--r--  INSTALL | 17
-rw-r--r--  LICENSE | 2
-rw-r--r--  MANIFEST.in | 16
-rw-r--r--  PKG-INFO | 154
-rw-r--r--  README.rst | 9
-rw-r--r--  azure-pipelines.yml | 233
-rw-r--r--  conda-recipe/bld.bat | 8
-rw-r--r--  conda-recipe/build.sh | 9
-rw-r--r--  conda-recipe/meta.yaml | 67
-rw-r--r--  doc/Makefile | 3
-rw-r--r--  doc/builtin_models.rst | 345
-rw-r--r--  doc/conf.py | 21
-rw-r--r--  doc/confidence.rst | 2
-rw-r--r--  doc/constraints.rst | 2
-rwxr-xr-x  doc/doc_examples_to_gallery.py | 7
-rw-r--r--  doc/extensions.py | 11
-rw-r--r--  doc/faq.rst | 47
-rw-r--r--  doc/fitting.rst | 17
-rw-r--r--  doc/installation.rst | 60
-rw-r--r--  doc/intro.rst | 21
-rw-r--r--  doc/model.rst | 138
-rw-r--r--  doc/parameters.rst | 10
-rw-r--r--  doc/sphinx/theme/sphinx13/basic_layout.html | 21
-rw-r--r--  doc/whatsnew.rst | 83
-rw-r--r--  examples/doc_builtinmodels_nistgauss.py | 18
-rw-r--r--  examples/doc_builtinmodels_nistgauss2.py | 10
-rw-r--r--  examples/doc_builtinmodels_peakmodels.py | 24
-rw-r--r--  examples/doc_builtinmodels_splinemodel.py | 63
-rw-r--r--  examples/doc_builtinmodels_stepmodel.py | 8
-rw-r--r--  examples/doc_confidence_advanced.py | 7
-rw-r--r--  examples/doc_fitting_emcee.py | 17
-rw-r--r--  examples/doc_model_composite.py | 4
-rw-r--r--  examples/doc_model_gaussian.py | 8
-rw-r--r--  examples/doc_model_loadmodel.py | 4
-rw-r--r--  examples/doc_model_loadmodelresult.py | 4
-rw-r--r--  examples/doc_model_loadmodelresult2.py | 4
-rw-r--r--  examples/doc_model_two_components.py | 8
-rw-r--r--  examples/doc_model_uncertainty.py | 8
-rw-r--r--  examples/doc_model_uncertainty2.py | 84
-rw-r--r--  examples/doc_model_with_iter_callback.py | 11
-rw-r--r--  examples/doc_model_with_nan_policy.py | 8
-rw-r--r--  examples/doc_parameters_basic.py | 5
-rw-r--r--  examples/doc_parameters_valuesdict.py | 5
-rw-r--r--  examples/example_Model_interface.py | 3
-rw-r--r--  examples/example_brute.py | 64
-rw-r--r--  examples/example_complex_resonator_model.py | 39
-rw-r--r--  examples/example_confidence_interval.py | 37
-rw-r--r--  examples/example_detect_outliers.py | 34
-rw-r--r--  examples/example_diffev.py | 41
-rw-r--r--  examples/example_emcee_Model_interface.py | 48
-rw-r--r--  examples/example_expression_model.py | 13
-rw-r--r--  examples/example_fit_multi_datasets.py | 25
-rw-r--r--  examples/example_fit_with_algebraic_constraint.py | 20
-rw-r--r--  examples/example_fit_with_bounds.py | 26
-rw-r--r--  examples/example_fit_with_derivfunc.py | 45
-rw-r--r--  examples/example_fit_with_inequality.py | 9
-rw-r--r--  examples/example_reduce_fcn.py | 56
-rw-r--r--  examples/example_sympy.py | 58
-rw-r--r--  examples/example_two_dimensional_peak.py | 1
-rw-r--r--  examples/example_use_pandas.py | 16
-rw-r--r--  examples/lmfit_emcee_model_selection.py | 10
-rw-r--r--  examples/test_splinepeak.dat | 504
-rw-r--r--  lmfit.egg-info/PKG-INFO | 154
-rw-r--r--  lmfit.egg-info/SOURCES.txt | 22
-rw-r--r--  lmfit.egg-info/requires.txt | 66
-rw-r--r--  lmfit/__init__.py | 11
-rw-r--r--  lmfit/_ampgo.py | 10
-rw-r--r--  lmfit/_version.py | 21
-rw-r--r--  lmfit/confidence.py | 70
-rw-r--r--  lmfit/jsonutils.py | 36
-rw-r--r--  lmfit/lineshapes.py | 89
-rw-r--r--  lmfit/minimizer.py | 232
-rw-r--r--  lmfit/model.py | 325
-rw-r--r--  lmfit/models.py | 367
-rw-r--r--  lmfit/parameter.py | 91
-rw-r--r--  lmfit/printfuncs.py | 98
-rw-r--r--  lmfit/version.py | 5
-rwxr-xr-x  publish_docs.sh | 2
-rw-r--r--  pyproject.toml | 7
-rw-r--r--  requirements-dev.txt | 18
-rw-r--r--  requirements.txt | 4
-rw-r--r--  setup.cfg | 93
-rw-r--r--  setup.py | 55
-rw-r--r--  tests/NISTModels.py | 2
-rw-r--r--  tests/__init__.py | 0
-rw-r--r--  tests/o.py | 11
-rw-r--r--  tests/t_enso.py | 40
-rw-r--r--  tests/test_1variable.py | 2
-rw-r--r--  tests/test_NIST_Strd.py | 88
-rw-r--r--  tests/test_ampgo.py | 10
-rw-r--r--  tests/test_basinhopping.py | 6
-rw-r--r--  tests/test_builtin_models.py | 30
-rw-r--r--  tests/test_confidence.py | 5
-rw-r--r--  tests/test_covariance_matrix.py | 8
-rw-r--r--  tests/test_dual_annealing.py | 12
-rw-r--r--  tests/test_itercb.py | 92
-rw-r--r--  tests/test_jsonutils.py | 19
-rw-r--r--  tests/test_least_squares.py | 2
-rw-r--r--  tests/test_lineshapes.py | 32
-rw-r--r--  tests/test_manypeaks_speed.py | 8
-rw-r--r--  tests/test_model.py | 318
-rw-r--r--  tests/test_model_saveload.py (renamed from tests/test_saveload.py) | 20
-rw-r--r--  tests/test_model_uncertainties.py | 36
-rw-r--r--  tests/test_models.py | 22
-rw-r--r--  tests/test_multidatasets.py | 14
-rw-r--r--  tests/test_nose.py | 14
-rw-r--r--  tests/test_pandas.py | 11
-rw-r--r--  tests/test_parameter.py | 2
-rw-r--r--  tests/test_parameters.py | 29
-rw-r--r--  tests/test_printfuncs.py | 30
-rw-r--r--  tests/test_shgo.py | 26
-rw-r--r--  tests/test_stepmodel.py | 1
-rw-r--r--  versioneer.py | 1855
119 files changed, 3696 insertions, 3617 deletions
diff --git a/.codecov.yml b/.codecov.yml
index 56317e6..51c4885 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -1,10 +1,17 @@
+codecov:
+ token: 11c28e95-6e64-4829-9887-04e4cd661bf6
+ comment:
+ after_n_builds: 9
+
coverage:
status:
project:
default:
target: auto
- threshold: 0.5
+ threshold: 0.5%
+ informational: true
patch:
default:
target: auto
threshold: 10%
+ informational: true
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 5ee2d93..9d4065f 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -6,33 +6,38 @@ the Python coding style conventions (i.e., [PEP 8](https://www.python.org/dev/pe
closely. Additionally, we really want comprehensive docstrings that follow
[PEP 257](https://www.python.org/dev/peps/pep-0257/) using the
[numpydoc style](https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard),
-usable offline documentation, and good unit tests for the pytest framework. A
+usable offline documentation, and good unit tests for the ``pytest`` framework. A
good contribution includes all of these. To ensure compliance with our coding
style, we make use of the [pre-commit](https://pre-commit.com/) framework to run
several *hooks* when committing code. Please follow the instructions below if
you intend to contribute to the lmfit repository:
-- clone the GitHub repository:
- ``git clone https://github.com/lmfit/lmfit-py.git``
+- fork the GitHub repository
+- clone your forked GitHub repository:
+ ``git clone https://github.com/<your-name>/lmfit-py.git``
- install all (optional) dependencies either using ``pip`` or ``conda``:
- ``pip -r install requirements-dev.txt`` or
- ``conda install <packages in requirements-dev.txt>``
-- initialize ``pre-commit`` running ``pre-commit install`` in the lmfit directory
+ ``pip install lmfit[all]`` or
+ ``conda install <all packages listed in setup.cfg>``
+- initialize ``pre-commit`` by running ``pre-commit install`` in the lmfit directory
- create a new branch: ``git checkout -b <awesome_new_feature>``
- start coding
+- install the latest version of your code using the PEP517/PEP518 way (``python -m build && pip install .``)
- make sure the test-suite passes locally: run ``pytest`` in the lmfit directory
+- make sure the documentation builds locally: run ``make`` in the doc directory
- push to your fork: ``git push origin``
-- open a Pull Request on https://github.com/lmfit/lmfit-py/pulls
+- open a Pull Request on [the lmfit GitHub repository](https://github.com/lmfit/lmfit-py/pulls)
If you need any additional help, please send a message to the
-[mailing list](https://groups.google.com/group/lmfit-py)!
-
+[mailing list](https://groups.google.com/group/lmfit-py) or use the
+[GitHub discussions page](https://github.com/lmfit/lmfit-py/discussions).
## Using the Mailing List versus GitHub Issues
-If you have ***questions, comments, or suggestions*** for lmfit, please use the
-[mailing list](https://groups.google.com/group/lmfit-py). This provides an
-online conversation that is archived and can be searched easily.
+If you have ***questions, comments, or suggestions*** for lmfit, please use
+the [mailing list](https://groups.google.com/group/lmfit-py) or
+[GitHub discussions page](https://github.com/lmfit/lmfit-py/discussions).
+These provide online conversations that are archived and can be searched
+easily.
If you find a ***bug with the code or documentation***, please use
[GitHub Issues](https://github.com/lmfit/lmfit-py/issues) to submit a bug report.
@@ -40,16 +45,15 @@ If you have an idea for how to solve the problem and are familiar with Python
and GitHub, submitting a [Pull Request](https://github.com/lmfit/lmfit-py/pulls)
would be greatly appreciated (see above).
-**If you are at all unsure whether to use the mailing list or open an Issue,
-please start a conversation on the mailing list.**
-
-Starting the conversation with "How do I do this?" or "Why didn't this work?"
-instead of "This doesn't work" is preferred, and will better help others with
-similar questions. No posting about fitting data is inappropriate for the
-mailing list, but many questions are not Issues. We will try our best to engage
-in all discussions, but we may simply close GitHub Issues that are actually
-questions.
+**If you are at all unsure whether to open an Issue, please start a
+conversation on the discussions page or mailing list.**
+Starting the conversation with "How do I do this?" or "Why didn't this work
+the way I expected?" instead of "This doesn't work" is preferred, and will
+better help others with similar questions. No posting about fitting data is
+inappropriate for the mailing list, but many questions are not Issues. We
+will try our best to engage in all discussions, but we may simply close
+GitHub Issues that are actually questions.
## Providing an Example with GitHub Issues
@@ -72,13 +76,11 @@ version and installed dependencies. You can paste the code below in your
Python shell to get this information:
```python
-import sys, lmfit, numpy, scipy, asteval, uncertainties, six
-print('Python: {}\n\nlmfit: {}, scipy: {}, numpy: {}, asteval: {}, uncertainties: {}, six: {}'\
- .format(sys.version, lmfit.__version__, scipy.__version__, numpy.__version__, \
- asteval.__version__, uncertainties.__version__, six.__version__))
+import sys, lmfit, numpy, scipy, asteval, uncertainties
+print(f"Python: {sys.version}\n\nlmfit: {lmfit.__version__}, scipy: {scipy.__version__}, numpy: {numpy.__version__},"
+ f"asteval: {asteval.__version__}, uncertainties: {uncertainties.__version__}")
```
-
## Using IPython Notebooks to Show Examples
IPython Notebooks are very useful for showing code snippets and outcomes,
@@ -86,7 +88,6 @@ and are a good way to demonstrate a question or raise an issue. Please
see the above about providing examples. The notebook you provide will be
*read*, but will probably not be run.
-
## Secret Code for First Time Issues
If you have not done so in the past, and are going to submit a GitHub Issue,
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 6729f57..e73a264 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -8,14 +8,17 @@ If you have not submitted a GitHub Issue to lmfit before, read
***DO NOT USE GitHub Issues for questions, it is only for bugs!***
If you **think** something is an Issue, it probably is not an Issue.
-Getting a "bad fit" does NOT qualify as an Issue! Issues here are
-concerned with errors or problems in the lmfit code.
-
-Use the [mailing list](https://groups.google.com/group/lmfit-py) for
-questions about lmfit or things you think might be problems. If you ignore
-this advice and post a question as a GitHub Issue anyway, your Issue will
-be closed and not answered. If you have any doubt, start with the mailing
-list.
+Getting a "bad fit" definitely does NOT qualify as an Issue! Issues here
+are concerned with errors or problems in the lmfit code.
+
+Use the [mailing list](https://groups.google.com/group/lmfit-py) or
+[GitHub discussions page](https://github.com/lmfit/lmfit-py/discussions) for
+questions about lmfit or things you think might be problems. We don't feel
+obligated to spend our free time helping people who do not respect our
+chosen work processes, so if you ignore this advice and post a question as
+a GitHub Issue anyway, it is quite likely that your Issue will be closed
+and not answered. If you have any doubt, start with a discussion either on
+the mailing list or discussions page.
To submit an Issue, you MUST provide ALL of the following information. If
you delete any of these sections, your Issue may be closed. If you think one
@@ -28,7 +31,7 @@ of the sections does not apply to your Issue, state that explicitly.
<!-- Provide a short description of the issue, describe the expected outcome, and give the actual result -->
###### A Minimal, Complete, and Verifiable example
-<!-- see, for example, https://stackoverflow.com/help/mcve on how to do this. -->
+<!-- see, for example, https://stackoverflow.com/help/mcve on how to do this -->
###### Error message:
<!-- If any, paste the *full* error message inside a code block (starting from line Traceback) -->
@@ -42,12 +45,9 @@ Traceback (most recent call last):
###### Version information
<!-- Generate version information with this command in the Python shell and copy the output here:
import sys, lmfit, numpy, scipy, asteval, uncertainties
-print('Python: {}\n\nlmfit: {}, scipy: {}, numpy: {}, asteval: {}, uncertainties: {}'\
- .format(sys.version, lmfit.__version__, scipy.__version__, numpy.__version__, \
- asteval.__version__, uncertainties.__version__))
- -->
+print(f"Python: {sys.version}\n\nlmfit: {lmfit.__version__}, scipy: {scipy.__version__}, numpy: {numpy.__version__},"
+ f"asteval: {asteval.__version__}, uncertainties: {uncertainties.__version__}")
+-->
###### Link(s)
-<!--If you started a discussion on the lmfit mailing list or Stack Overflow, please provide
- the relevant link(s).
--->
+<!-- If you started a discussion on the lmfit mailing list, discussion page, or Stack Overflow, please provide the relevant link(s) -->
diff --git a/.gitignore b/.gitignore
index 4b76490..4ae0ccb 100755..100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,7 @@ sandbox/
doc/html
*.sav
.pytest*
+*.coverage*
+*htmlcov/*
+.eggs
+lmfit/version.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a76743c..0a1047b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,35 +1,35 @@
-exclude: 'versioneer.py|lmfit/_version|doc/conf.py'
+exclude: 'doc/conf.py'
repos:
- repo: https://github.com/asottile/pyupgrade
- rev: v2.9.0
+ rev: v3.2.2
hooks:
- id: pyupgrade
- # for now don't force to change from %-operator to {}
- args: [--keep-percent-format, --py36-plus]
+ args: [--py37-plus]
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v3.4.0
+ rev: v4.4.0
hooks:
- id: check-ast
- id: check-builtin-literals
+ - id: check-case-conflict
- id: check-merge-conflict
+ - id: check-toml
- id: debug-statements
- id: end-of-file-fixer
- id: mixed-line-ending
- id: trailing-whitespace
- - id: requirements-txt-fixer
- id: fix-encoding-pragma
args: [--remove]
-- repo: https://gitlab.com/pycqa/flake8
- rev: 3.8.4
+- repo: https://github.com/PyCQA/flake8
+ rev: 6.0.0
hooks:
- id: flake8
additional_dependencies: [flake8-deprecated, flake8-mutable]
- repo: https://github.com/PyCQA/isort/
- rev: 5.7.0
+ rev: 5.10.1
hooks:
- id: isort
@@ -37,29 +37,30 @@ repos:
hooks:
- id: rstcheck
name: rstcheck
- entry: rstcheck
+ entry: rstcheck --report-level WARNING
files: '.rst'
language: python
additional_dependencies: [rstcheck, sphinx]
- repo: https://github.com/pre-commit/pygrep-hooks
- rev: v1.7.1
+ rev: v1.9.0
hooks:
- id: rst-backticks
- id: rst-directive-colons
- id: rst-inline-touching-normal
+ - id: python-check-blanket-noqa
- repo: https://github.com/codespell-project/codespell
- rev: v2.0.0
+ rev: v2.2.2
hooks:
- id: codespell
files: '.py|.rst'
exclude: 'doc/doc_examples_to_gallery.py'
# escaped characters currently do not work correctly
# so \nnumber is considered a spelling error....
- args: [-L nnumber]
+ args: ["-L nnumber", "-L mone"]
- repo: https://github.com/asottile/yesqa
- rev: v1.2.2
+ rev: v1.4.0
hooks:
- id: yesqa
diff --git a/THANKS.txt b/AUTHORS.txt
index 2a5fd39..7ad1d47 100644
--- a/THANKS.txt
+++ b/AUTHORS.txt
@@ -1,6 +1,6 @@
-Many people have contributed to lmfit. The attribution of credit in a
+Many people have contributed to lmfit. The attribution of credit in a
project such as this is difficult to get perfect, and there are no doubt
-important contributions that are missing or under-represented here. Please
+important contributions that are missing or under-represented here. Please
consider this file as part of the code and documentation that may have bugs
that need fixing.
@@ -34,7 +34,7 @@ of size of the contribution to the existing code) are from:
The method used for placing bounds on parameters was derived from the
clear description in the MINUIT documentation, and adapted from
- J. J. Helmus's python implementation in leastsqbounds.py.
+ J. J. Helmus's Python implementation in leastsqbounds.py.
E. O. Le Bigot wrote the uncertainties package, a version of which was
used by lmfit for many years, and is now an external dependency.
@@ -54,8 +54,9 @@ nmearl, Gustavo Pasquevich, Clemens Prescher, LiCode, Ben Gamari, Yoav
Roam, Alexander Stark, Alexandre Beelen, Andrey Aristov, Nicholas Zobrist,
Ethan Welty, Julius Zimmermann, Mark Dean, Arun Persaud, Ray Osborn, @lneuhaus,
Marcel Stimberg, Yoshiera Huang, Leon Foks, Sebastian Weigand, Florian LB,
-Michael Hudson-Doyle, Ruben Verweij, @jedzill4, and many others.
+Michael Hudson-Doyle, Ruben Verweij, @jedzill4, @spalato, Jens Hedegaard Nielsen,
+Martin Majli, Kristian Meyer, @azelcer, Ivan Usov, and many others.
The lmfit code obviously depends on, and owes a very large debt to the code
-in scipy.optimize. Several discussions on the SciPy-user and lmfit mailing
+in scipy.optimize. Several discussions on the SciPy-user and lmfit mailing
lists have also led to improvements in this code.
diff --git a/INSTALL b/INSTALL
deleted file mode 100644
index fbca32c..0000000
--- a/INSTALL
+++ /dev/null
@@ -1,17 +0,0 @@
-Installation instructions for LMFIT-py
-========================================
-
-To install the lmfit Python module, use::
-
- python setup.py build
- python setup.py install
-
-For lmfit 1.0.X, the following versions are required:
- Python: 3.6 or higher
- NumPy: 1.18 or higher
- SciPy: 1.3 or higher
- asteval: 0.9.21 or higher
- uncertainties: 3.0.1 or higher
-
-Matt Newville <newville@cars.uchicago.edu>
-Last Update: 2020-December-16
diff --git a/LICENSE b/LICENSE
index 401e039..13eb336 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
BSD-3
-Copyright 2021 Matthew Newville, The University of Chicago
+Copyright 2022 Matthew Newville, The University of Chicago
Renee Otten, Brandeis University
Till Stensitzki, Freie Universitat Berlin
A. R. J. Nelson, Australian Nuclear Science and Technology Organisation
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 4f373cc..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,16 +0,0 @@
-include README.txt INSTALL LICENSE MANIFEST.in PKG-INFO THANKS.txt
-include setup.py publish_docs.sh
-include requirements.txt
-exclude *.pyc core.* *~ *.pdf
-recursive-include lmfit *.py
-recursive-include tests *.py *.dat
-recursive-include NIST_STRD *.dat
-recursive-include examples *.py *.csv *.dat
-recursive-exclude examples/documentation *
-recursive-include doc *
-recursive-exclude doc/_build *
-recursive-exclude doc/examples *
-recursive-exclude doc *.pdf *.csv *.dat *.sav
-include versioneer.py
-include lmfit/_version.py
-global-exclude __pycache__/*
diff --git a/PKG-INFO b/PKG-INFO
index 2242d82..3693dc6 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,40 +1,138 @@
-Metadata-Version: 1.2
+Metadata-Version: 2.1
Name: lmfit
-Version: 1.0.2
+Version: 1.1.0
Summary: Least-Squares Minimization with Bounds and Constraints
-Home-page: https://lmfit.github.io/lmfit-py/
+Home-page: https://lmfit.github.io//lmfit-py/
Author: LMFit Development Team
Author-email: matt.newville@gmail.com
-License: BSD-3
-Download-URL: https://lmfit.github.io//lmfit-py/
-Description: A library for least-squares minimization and data fitting in
- Python. Built on top of scipy.optimize, lmfit provides a Parameter object
- which can be set as fixed or free, can have upper and/or lower bounds, or
- can be written in terms of algebraic constraints of other Parameters. The
- user writes a function to be minimized as a function of these Parameters,
- and the scipy.optimize methods are used to find the optimal values for the
- Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
- algorithm, and provides estimated standard errors and correlations between
- varied Parameters. Other minimization methods, including Nelder-Mead's
- downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
- others are also supported. Bounds and constraints can be placed on
- Parameters for all of these methods.
-
- In addition, methods for explicitly calculating confidence intervals are
- provided for exploring minmization problems where the approximation of
- estimating Parameter uncertainties from the covariance matrix is
- questionable.
+License: BSD 3-Clause
+Project-URL: Source, https://github.com/lmfit/lmfit-py
+Project-URL: Changelog, https://lmfit.github.io/lmfit-py/whatsnew.html
+Project-URL: Documentation, https://lmfit.github.io/lmfit-py/
+Project-URL: Tracker, https://github.com/lmfit/lmfit-py/issues
Keywords: curve-fitting,least-squares minimization
-Platform: Windows
-Platform: Linux
-Platform: Mac OS X
+Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Science/Research
+Classifier: Topic :: Scientific/Engineering
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
-Classifier: Topic :: Scientific/Engineering
-Requires-Python: >=3.6
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+Provides-Extra: dev
+Provides-Extra: doc
+Provides-Extra: test
+Provides-Extra: all
+License-File: LICENSE
+License-File: AUTHORS.txt
+
+LMfit-py
+========
+
+.. image:: https://dev.azure.com/lmfit/lmfit-py/_apis/build/status/lmfit.lmfit-py?branchName=master
+ :target: https://dev.azure.com/lmfit/lmfit-py/_build/latest?definitionId=1&branchName=master
+
+.. image:: https://codecov.io/gh/lmfit/lmfit-py/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/lmfit/lmfit-py
+
+.. image:: https://img.shields.io/pypi/v/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/pypi/dm/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/badge/docs-read-brightgreen
+ :target: https://lmfit.github.io/lmfit-py/
+
+.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg
+ :target: https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py
+
+.. _LMfit mailing list: https://groups.google.com/group/lmfit-py
+
+
+Overview
+---------
+
+LMfit-py provides a Least-Squares Minimization routine and class with a simple,
+flexible approach to parameterizing a model for fitting to data.
+
+LMfit is a pure Python package, and so easy to install from source or with
+``pip install lmfit``.
+
+For questions, comments, and suggestions, please use the `LMfit mailing list`_.
+Using the bug tracking software in GitHub Issues is encouraged for known
+problems and bug reports. Please read
+`Contributing.md <.github/CONTRIBUTING.md>`_ before creating an Issue.
+
+
+Parameters and Fitting
+-------------------------
+
+LMfit-py provides a Least-Squares Minimization routine and class with a simple,
+flexible approach to parameterizing a model for fitting to data. Named
+Parameters can be held fixed or freely adjusted in the fit, or held between
+lower and upper bounds. In addition, parameters can be constrained as a simple
+mathematical expression of other Parameters.
+
+To do this, the programmer defines a Parameters object, an enhanced dictionary,
+containing named parameters::
+
+ fit_params = Parameters()
+ fit_params['amp'] = Parameter(value=1.2, min=0.1, max=1000)
+ fit_params['cen'] = Parameter(value=40.0, vary=False)
+ fit_params['wid'] = Parameter(value=4, min=0)
+
+or using the equivalent::
+
+ fit_params = Parameters()
+ fit_params.add('amp', value=1.2, min=0.1, max=1000)
+ fit_params.add('cen', value=40.0, vary=False)
+ fit_params.add('wid', value=4, min=0)
+
+The programmer will also write a function to be minimized (in the least-squares
+sense) with its first argument being this Parameters object, and additional
+positional and keyword arguments as desired::
+
+ def myfunc(params, x, data, someflag=True):
+ amp = params['amp'].value
+ cen = params['cen'].value
+ wid = params['wid'].value
+ ...
+ return residual_array
+
+For each call of this function, the values for the ``params`` may have changed,
+subject to the bounds and constraint settings for each Parameter. The function
+should return the residual (i.e., ``data-model``) array to be minimized.
+
+The advantage here is that the function to be minimized does not have to be
+changed if different bounds or constraints are placed on the fitting Parameters.
+The fitting model (as described in myfunc) is instead written in terms of
+physical parameters of the system, and remains independent of what is
+actually varied in the fit. In addition, which parameters are adjusted and which
+are fixed happens at run-time, so that what is varied and what constraints
+are placed on the parameters can easily be changed by the user during
+real-time data analysis.
+
+To perform the fit, the user calls::
+
+ result = minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
+
+After the fit, a ``MinimizerResult`` object is returned that holds the results
+of the fit (e.g., fitting statistics and optimized parameters). The dictionary
+``result.params`` contains the best-fit values, estimated standard deviations,
+and correlations with other variables in the fit.
+
+By default, the underlying fit algorithm is the Levenberg-Marquardt algorithm
+with numerically-calculated derivatives from MINPACK's lmdif function, as used
+by ``scipy.optimize.leastsq``. Most other solvers that are present in ``scipy``
+(e.g., Nelder-Mead, differential_evolution, basinhopping, etcetera) are also
+supported.
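
Editor's note, not part of the upstream diff: as a minimal, hedged sketch of the
workflow the PKG-INFO text above describes, the pieces fit together roughly like
this. The Gaussian model and synthetic data here are invented for illustration:

```python
import numpy as np
from lmfit import Parameters, minimize


def myfunc(params, x, data):
    """Return the residual (data - model) for a simple Gaussian model."""
    amp = params['amp'].value
    cen = params['cen'].value
    wid = params['wid'].value
    model = amp * np.exp(-(x - cen)**2 / (2 * wid**2))
    return data - model


# synthetic data, invented for this sketch
x = np.linspace(0, 80, 201)
rng = np.random.default_rng(0)
data = 10 * np.exp(-(x - 40)**2 / (2 * 4.0**2)) + rng.normal(0, 0.2, x.size)

# named Parameters: bounded, fixed, or freely varying
fit_params = Parameters()
fit_params.add('amp', value=1.2, min=0.1, max=1000)
fit_params.add('cen', value=40.0, vary=False)
fit_params.add('wid', value=4, min=0)

result = minimize(myfunc, fit_params, args=(x, data))
print(result.params['amp'].value, result.params['amp'].stderr)
```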
diff --git a/README.rst b/README.rst
index 1a173c7..54777ef 100644
--- a/README.rst
+++ b/README.rst
@@ -7,7 +7,7 @@ LMfit-py
.. image:: https://codecov.io/gh/lmfit/lmfit-py/branch/master/graph/badge.svg
:target: https://codecov.io/gh/lmfit/lmfit-py
-.. image:: https://img.shields.io/pypi/v/lmfit.svg
+.. image:: https://img.shields.io/pypi/v/lmfit.svg
:target: https://pypi.org/project/lmfit
.. image:: https://img.shields.io/pypi/dm/lmfit.svg
@@ -19,7 +19,6 @@ LMfit-py
.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg
:target: https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py
-
.. _LMfit mailing list: https://groups.google.com/group/lmfit-py
@@ -73,9 +72,9 @@ positional and keyword arguments as desired::
...
return residual_array
-For each call of this function, the values for the params may have changed,
+For each call of this function, the values for the ``params`` may have changed,
subject to the bounds and constraint settings for each Parameter. The function
-should return the residual (i.e., data-model) array to be minimized.
+should return the residual (i.e., ``data-model``) array to be minimized.
The advantage here is that the function to be minimized does not have to be
changed if different bounds or constraints are placed on the fitting Parameters.
@@ -95,7 +94,7 @@ the fit (e.g., fitting statistics and optimized parameters). The dictionary
``result.params`` contains the best-fit values, estimated standard deviations,
and correlations with other variables in the fit.
-By default, the underlying fit algorithm is the Levenberg-Marquart algorithm
+By default, the underlying fit algorithm is the Levenberg-Marquardt algorithm
with numerically-calculated derivatives from MINPACK's lmdif function, as used
by ``scipy.optimize.leastsq``. Most other solvers that are present in ``scipy``
(e.g., Nelder-Mead, differential_evolution, basinhopping, etcetera) are also
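
Editor's note, not part of the upstream diff: since the README states that most
solvers present in ``scipy`` are supported, a small hedged sketch (with invented
data) of selecting an alternative solver through the ``method`` argument of
``minimize`` might look like this:

```python
import numpy as np
from lmfit import Parameters, minimize


def residual(params, x, data):
    # exponential-decay model; return data - model
    return data - params['amp'].value * np.exp(-x / params['decay'].value)


x = np.linspace(0, 10, 101)
data = 5.0 * np.exp(-x / 2.5) + np.random.default_rng(1).normal(0, 0.05, x.size)

params = Parameters()
params.add('amp', value=1, min=0)
params.add('decay', value=1, min=0)

# use the Nelder-Mead simplex solver instead of the default Levenberg-Marquardt
result = minimize(residual, params, args=(x, data), method='nelder')
result.params.pretty_print()
```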
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index c670a99..b94eef3 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -14,16 +14,27 @@ stages:
steps:
- task: UsePythonVersion@0
inputs:
- versionSpec: '3.9'
+ versionSpec: '3.11'
- script: |
- python -m pip install --upgrade pip setuptools wheel
- pip install pre-commit
- displayName: 'Install Python dependencies'
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install Python build tools and dependencies'
- script: |
- pre-commit autoupdate ; pre-commit run --all-files
+ python -m build
+ python -m pip install '.[dev]'
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ pre-commit install ; pre-commit run --all-files
displayName: 'Run pre-commit hooks'
+ - script: |
+ check-wheel-contents dist/*.whl
+ displayName: 'Run check-wheel-contents'
+ - script: |
+ twine check dist/*
+ displayName: "Run twine check"
- stage: build_documentation
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
jobs:
- job: build_documentation
pool:
@@ -32,37 +43,34 @@ stages:
steps:
- task: UsePythonVersion@0
inputs:
- versionSpec: '3.9'
+ versionSpec: '3.11'
- script: |
- python -m pip install --upgrade pip setuptools wheel
- pip install -r requirements-dev.txt
- displayName: 'Install Python dependencies'
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install Python build tools'
- script: |
- python setup.py install
- displayName: 'Install lmfit'
+ python -m build
+ python -m pip install '.[doc]'
+ displayName: 'Build wheel/sdist and install lmfit'
- script: |
- sudo apt install -qq -y texlive-latex-extra latexmk
+ sudo apt-get update && sudo apt-get install -qq -y texlive-latex-extra latexmk
displayName: 'Install TeX Live'
- script: |
- cd doc ; make all
+ cd doc ; make all
displayName: 'Build the documentation'
- stage: tests_minimum_dependencies
dependsOn: check_codestyle
+ condition: succeededOrFailed()
jobs:
- job:
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
- Python36:
- python.version: '3.6'
Python37:
python.version: '3.7'
Python38:
python.version: '3.8'
- Python39:
- python.version: '3.9'
steps:
- task: UsePythonVersion@0
@@ -70,45 +78,57 @@ stages:
versionSpec: '$(python.version)'
displayName: 'Use Python $(python.version)'
- script: |
- sudo apt update && sudo apt install -yq --no-install-suggests --no-install-recommends \
+ sudo apt-get update && sudo apt-get install -yq --no-install-suggests --no-install-recommends \
libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache \
swig libmpc-dev
displayName: 'Install dependencies'
- script: |
- python -m pip install --upgrade pip setuptools wheel
- pip install asteval==0.9.21 numpy==1.18 scipy==1.3.2 uncertainties==3.0.1 pytest coverage codecov
+ python -m pip install --upgrade build pip wheel
+ python -m pip install asteval==0.9.28 numpy==1.19.0 scipy==1.6.0 uncertainties==3.1.4
displayName: 'Install minimum required version of dependencies'
- script: |
- python setup.py install
- displayName: 'Install lmfit'
+ python -m build
+ python -m pip install ".[test]"
+ displayName: 'Build wheel/sdist and install lmfit'
- script: |
- pip list
+ python -m pip list
displayName: 'List installed Python packages'
- script: |
- pip install pytest-azurepipelines pytest-cov
- coverage run --source=lmfit -m pytest
- coverage report -m
+ python -m pip install pytest-azurepipelines
+ pytest
displayName: 'Run test-suite and collect coverage'
- script: |
- bash <(curl -s https://codecov.io/bash)
+ curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --import # One-time step
+ curl -Os https://uploader.codecov.io/latest/linux/codecov
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
+ gpg --verify codecov.SHA256SUM.sig codecov.SHA256SUM
+ shasum -a 256 -c codecov.SHA256SUM
+ chmod +x codecov
+ displayName: 'Download and verify codecov uploader'
+ - script: |
+ ./codecov -v -f "coverage.xml"
displayName: 'Upload to codecov.io'
- stage: tests_latest_dependencies
dependsOn: check_codestyle
+ condition: succeededOrFailed()
jobs:
- job:
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
- Python36:
- python.version: '3.6'
Python37:
python.version: '3.7'
Python38:
python.version: '3.8'
Python39:
python.version: '3.9'
+ Python310:
+ python.version: '3.10'
+ Python311:
+ python.version: '3.11'
steps:
- task: UsePythonVersion@0
@@ -116,88 +136,165 @@ stages:
versionSpec: '$(python.version)'
displayName: 'Use Python $(python.version)'
- script: |
- sudo apt update && sudo apt install -yq --no-install-suggests --no-install-recommends \
+ sudo apt-get update && sudo apt-get install -yq --no-install-suggests --no-install-recommends \
libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache \
swig libmpc-dev
displayName: 'Install dependencies'
- script: |
- python -m pip install --upgrade pip setuptools wheel
- pip install -r requirements-dev.txt
- # temporarily install asteval from master branch to avoid NumPy v1.20 DeprecationWarnings
- pip install git+https://github.com/newville/asteval@master
+ python -m pip install --upgrade build pip setuptools wheel
displayName: 'Install latest available version of Python dependencies'
- script: |
- python setup.py install
- displayName: 'Install lmfit'
+ python -m build
+ python -m pip install '.[all]'
+ displayName: 'Build wheel/sdist and install lmfit'
- script: |
- pip list
+ python -m pip list
displayName: 'List installed Python packages'
- script: |
- pip install pytest-azurepipelines pytest-cov
- coverage run --source=lmfit -m pytest
- coverage report -m
+ python -m pip install pytest-azurepipelines
+ pytest
displayName: 'Run test-suite and collect coverage'
- script: |
- bash <(curl -s https://codecov.io/bash)
+ curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --import # One-time step
+ curl -Os https://uploader.codecov.io/latest/linux/codecov
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
+ gpg --verify codecov.SHA256SUM.sig codecov.SHA256SUM
+ shasum -a 256 -c codecov.SHA256SUM
+ chmod +x codecov
+ displayName: 'Download and verify codecov uploader'
+ - script: |
+ ./codecov -v -f "coverage.xml"
displayName: 'Upload to codecov.io'
+# Python 3.11 on Windows currently fails to build pycairo
+- stage: test_Windows_latest
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job:
+ pool:
+ vmImage: 'windows-latest'
+ strategy:
+ matrix:
+ Python310:
+ python.version: '3.10'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '$(python.version)'
+ displayName: 'Use Python $(python.version)'
+ - script: |
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install latest available version of Python dependencies'
+ - script: |
+ python -m build
+ python -m pip install .[all]
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ python -m pip list
+ displayName: 'List installed Python packages'
+ - script: |
+ python -m pip install pytest-azurepipelines
+ pytest
+ displayName: 'Run test-suite'
+ - powershell:
+ cd doc ; .\make.bat html
+ displayName: 'Build the HTML documentation'
+
+- stage: test_macOS_latest
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job:
+ pool:
+ vmImage: 'macos-latest'
+ strategy:
+ matrix:
+ Python311:
+ python.version: '3.11'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '$(python.version)'
+ displayName: 'Use Python $(python.version)'
+ - script: |
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install latest available version of Python dependencies'
+ - script: |
+ python -m build
+ python -m pip install '.[all]'
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ python -m pip list
+ displayName: 'List installed Python packages'
+ - script: |
+ python -m pip install pytest-azurepipelines
+ pytest
+ displayName: 'Run test-suite and collect coverage'
+
- stage: development_version
dependsOn: check_codestyle
+ condition: succeededOrFailed()
jobs:
- - job: Python310_dev
+ - job: Python312_dev
pool:
vmImage: 'ubuntu-latest'
steps:
- script: |
sudo add-apt-repository ppa:deadsnakes/nightly
- sudo apt-get update
- sudo apt-get install -y --no-install-recommends python3.10-dev python3.10-distutils
+ sudo apt-get update && sudo apt-get install -y --no-install-recommends python3.12-dev python3.12-venv
displayName: Install Python development version from the deadsnakes PPA
- script: |
- sudo apt update && sudo apt install -yq --no-install-suggests --no-install-recommends \
+ sudo apt-get update && sudo apt-get install -yq --no-install-suggests --no-install-recommends \
libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache \
swig libmpc-dev
displayName: 'Install dependencies'
- script: |
export PATH=/home/vsts/.local/bin:$PATH
- curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
- python3.10 get-pip.py --user
- pip3.10 install -U pip setuptools wheel pybind11
- # install Cython from GitHub to fix "undefined symbol: _PyGen_Send" error
- pip3.10 install git+https://github.com/cython/cython@0.29.x
- displayName: 'Install pip, setuptools, wheel, pybind11 cython'
+ ##curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+ ##python3.12 get-pip.py --user
+ python3.12 -m ensurepip --upgrade
+ pip3.12 install -U build pip setuptools wheel pybind11 cython || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Install build, pip, setuptools, wheel, pybind11, and cython'
- script: |
export PATH=/home/vsts/.local/bin:$PATH
- wget https://github.com/numpy/numpy/releases/download/v1.20.0/numpy-1.20.0.tar.gz
- tar xzvf numpy-1.20.0.tar.gz
- cd numpy-1.20.0
- python3.10 setup.py install --user
+ export numpy_version=1.23.5
+ wget https://github.com/numpy/numpy/releases/download/v${numpy_version}/numpy-${numpy_version}.tar.gz
+ tar xzvf numpy-${numpy_version}.tar.gz
+ cd numpy-${numpy_version}
+ python3.12 setup.py install --user || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
displayName: 'Install latest available version of NumPy'
- script: |
export PATH=/home/vsts/.local/bin:$PATH
- wget https://github.com/scipy/scipy/releases/download/v1.6.0/scipy-1.6.0.tar.gz
- tar xzvf scipy-1.6.0.tar.gz
- cd scipy-1.6.0
- # force Cythonizing code in attempt to avoid "undefined symbol: _PyGen_Send" error
- rm -f PKG-INFO cythonize.dat
- python3.10 setup.py install --user
+ pip3.12 install -U pythran || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Install pythran'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ export scipy_version=1.9.3
+ wget https://github.com/scipy/scipy/releases/download/v${scipy_version}/scipy-${scipy_version}.tar.gz
+ tar xzvf scipy-${scipy_version}.tar.gz
+ cd scipy-${scipy_version}
+ python3.12 setup.py install --user || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
displayName: 'Install latest available version of SciPy'
- script: |
export PATH=/home/vsts/.local/bin:$PATH
- pip3.10 install asteval uncertainties dill emcee numdifftools
- # temporarily install asteval from master branch to avoid NumPy v1.20 DeprecationWarnings
- pip3.10 install git+https://github.com/newville/asteval@master
+      # remove numdifftools for now as it pulls in statsmodels, which wants to build with NumPy 1.14.5
+ pip3.12 install asteval uncertainties dill emcee flaky pytest pytest-cov || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
displayName: 'Install latest available version of Python dependencies'
- script: |
- python3.10 setup.py install --user
- displayName: 'Install lmfit'
+ python3.12 -m build
+ python3.12 -m pip install '.[test]' --user || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Build wheel/sdist and install lmfit'
- script: |
export PATH=/home/vsts/.local/bin:$PATH
- pip3.10 list
+ pip3.12 list || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
displayName: 'List installed Python packages'
- script: |
export PATH=/home/vsts/.local/bin:$PATH
- pip3.10 install pytest pytest-azurepipelines
+ pip3.12 install pytest-azurepipelines
cd $(Agent.BuildDirectory)/s/tests
pytest || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
displayName: 'Run test-suite'
diff --git a/conda-recipe/bld.bat b/conda-recipe/bld.bat
deleted file mode 100644
index 87b1481..0000000
--- a/conda-recipe/bld.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-"%PYTHON%" setup.py install
-if errorlevel 1 exit 1
-
-:: Add more build steps here, if they are necessary.
-
-:: See
-:: http://docs.continuum.io/conda/build.html
-:: for a list of environment variables that are set during the build process.
diff --git a/conda-recipe/build.sh b/conda-recipe/build.sh
deleted file mode 100644
index 4d7fc03..0000000
--- a/conda-recipe/build.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-$PYTHON setup.py install
-
-# Add more build steps here, if they are necessary.
-
-# See
-# http://docs.continuum.io/conda/build.html
-# for a list of environment variables that are set during the build process.
diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml
deleted file mode 100644
index 7bfb963..0000000
--- a/conda-recipe/meta.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-package:
- name: lmfit
- version: "0.9.1"
-
-source:
- fn: lmfit-0.9.1.tar.gz
- url: https://pypi.python.org/packages/source/l/lmfit/lmfit-0.9.1.tar.gz
- md5: 61c7ec515d324663b9fb6ed59d0568e3
-# patches:
- # List any patch files here
- # - fix.patch
-
-# build:
- # noarch_python: True
- # preserve_egg_dir: True
- # entry_points:
- # Put any entry points (scripts to be generated automatically) here. The
- # syntax is module:function. For example
- #
- # - lmfit = lmfit:main
- #
- # Would create an entry point called lmfit that calls lmfit.main()
-
-
- # If this is a new build for the same version, increment the build
- # number. If you do not include this key, it defaults to 0.
- # number: 1
-
-requirements:
- build:
- - python
- - setuptools
- - numpy
- - scipy
-
- run:
- - python
- - numpy
- - scipy
-
-test:
- # Python imports
- imports:
- - lmfit
- - lmfit.ui
- - lmfit.uncertainties
-
- # commands:
- # You can put test commands to be run here. Use this to test that the
- # entry points work.
-
-
- # You can also put a file called run_test.py in the recipe that will be run
- # at test time.
-
- # requires:
- # Put any additional test requirements here. For example
- # - nose
-
-about:
- home: http://lmfit.github.io/lmfit-py/
- license: BSD
- summary: 'Least-Squares Minimization with Bounds and Constraints'
-
-# See
-# http://docs.continuum.io/conda/build.html for
-# more information about meta.yaml
diff --git a/doc/Makefile b/doc/Makefile
index 8b82021..dca8b3b 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -69,11 +69,12 @@ help:
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
- -rm -rf $(BUILDDIR)/*
+ -rm -rf $(BUILDDIR)
-rm -f extensions.py
-rm -f *.dat *.sav *.csv
-rm -rf examples/*
-rm -rf ../examples/documentation
+ -rm -rf __pycache__
dirhtml: gallery
$(SPHINXBUILD) -b dirhtml $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/dirhtml
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index fc05319..0aaa172 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -97,6 +97,11 @@ of 0 on the value of ``sigma``.
.. autoclass:: MoffatModel
+:class:`Pearson4Model`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Pearson4Model
+
:class:`Pearson7Model`
~~~~~~~~~~~~~~~~~~~~~~
@@ -159,7 +164,9 @@ Linear and Polynomial Models
These models correspond to polynomials of some degree. Of course, lmfit is
a very inefficient way to do linear regression (see :numpydoc:`polyfit`
or :scipydoc:`stats.linregress`), but these models may be useful as one
-of many components of a composite model.
+of many components of a composite model. The SplineModel below corresponds
+to a cubic spline.
+
:class:`ConstantModel`
~~~~~~~~~~~~~~~~~~~~~~
@@ -181,6 +188,12 @@ of many components of a composite model.
.. autoclass:: PolynomialModel
+:class:`SplineModel`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SplineModel
+
+
Periodic Models
---------------
@@ -312,9 +325,9 @@ could define this in a script:
script = """
def mycurve(x, amp, cen, sig):
- loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
- gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
- return log(loren) * gradient(gauss) / gradient(x)
+ loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
+ gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
+ return log(loren) * gradient(gauss) / gradient(x)
"""
and then use this with :class:`ExpressionModel` as:
@@ -322,7 +335,7 @@ and then use this with :class:`ExpressionModel` as:
.. jupyter-execute::
mod = ExpressionModel('mycurve(x, height, mid, wid)', init_script=script,
- independent_vars=['x'])
+ independent_vars=['x'])
As above, this will interpret the parameter names to be ``height``, ``mid``,
and ``wid``, and build a model that can be used to fit data.
@@ -331,7 +344,7 @@ and ``wid``, and build a model that can be used to fit data.
Example 1: Fit Peak data to Gaussian, Lorentzian, and Voigt profiles
--------------------------------------------------------------------
-Here, we will fit data to three similar line shapes, in order to decide which
+Here, we will fit data to three similar lineshapes, in order to decide which
might be the better model. We will start with a Gaussian profile, as in
the previous chapter, but use the built-in :class:`GaussianModel` instead
of writing one ourselves. This is a slightly different version from the
@@ -378,9 +391,9 @@ are pretty good. A plot of the fit:
%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
- plt.plot(x, y, 'b-')
- plt.plot(x, out.best_fit, 'r-', label='Gaussian Model')
- plt.legend(loc='best')
+ plt.plot(x, y, '-')
+ plt.plot(x, out.best_fit, '-', label='Gaussian Model')
+ plt.legend()
plt.show()
shows a decent match to the data -- the fit worked with no explicit setting
@@ -410,9 +423,9 @@ and also by visual inspection of the fit to the data (figure below).
.. jupyter-execute::
:hide-code:
- plt.plot(x, y, 'b-')
- plt.plot(x, out.best_fit, 'r-', label='Lorentzian Model')
- plt.legend(loc='best')
+ plt.plot(x, y, '-')
+ plt.plot(x, out.best_fit, '-', label='Lorentzian Model')
+ plt.legend()
plt.show()
The tails are now too big, and the value for :math:`\chi^2` almost doubled.
@@ -440,15 +453,15 @@ in the figure below (left).
:hide-code:
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
- axes[0].plot(x, y, 'b-')
- axes[0].plot(x, out.best_fit, 'r-', label='Voigt Model\ngamma constrained')
- axes[0].legend(loc='best')
+ axes[0].plot(x, y, '-')
+ axes[0].plot(x, out.best_fit, '-', label='Voigt Model\ngamma constrained')
+ axes[0].legend()
# free gamma parameter
pars['gamma'].set(value=0.7, vary=True, expr='')
out_gamma = mod.fit(y, pars, x=x)
- axes[1].plot(x, y, 'b-')
- axes[1].plot(x, out_gamma.best_fit, 'r-', label='Voigt Model\ngamma unconstrained')
- axes[1].legend(loc='best')
+ axes[1].plot(x, y, '-')
+ axes[1].plot(x, out_gamma.best_fit, '-', label='Voigt Model\ngamma unconstrained')
+ axes[1].legend()
plt.show()
Fit to peak with Voigt model (left) and Voigt model with ``gamma``
@@ -525,10 +538,10 @@ with a plot of
.. jupyter-execute::
:hide-code:
- plt.plot(x, y, 'b')
- plt.plot(x, out.init_fit, 'k--', label='initial fit')
- plt.plot(x, out.best_fit, 'r-', label='best fit')
- plt.legend(loc='best')
+ plt.plot(x, y)
+ plt.plot(x, out.init_fit, '--', label='initial fit')
+ plt.plot(x, out.best_fit, '-', label='best fit')
+ plt.legend()
plt.show()
@@ -576,17 +589,17 @@ components displayed on the right:
:hide-code:
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
- axes[0].plot(x, y, 'b')
- axes[0].plot(x, init, 'k--', label='initial fit')
- axes[0].plot(x, out.best_fit, 'r-', label='best fit')
- axes[0].legend(loc='best')
+ axes[0].plot(x, y)
+ axes[0].plot(x, init, '--', label='initial fit')
+ axes[0].plot(x, out.best_fit, '-', label='best fit')
+ axes[0].legend()
comps = out.eval_components(x=x)
- axes[1].plot(x, y, 'b')
- axes[1].plot(x, comps['g1_'], 'g--', label='Gaussian component 1')
- axes[1].plot(x, comps['g2_'], 'm--', label='Gaussian component 2')
- axes[1].plot(x, comps['exp_'], 'k--', label='Exponential component')
- axes[1].legend(loc='best')
+ axes[1].plot(x, y)
+ axes[1].plot(x, comps['g1_'], '--', label='Gaussian component 1')
+ axes[1].plot(x, comps['g2_'], '--', label='Gaussian component 2')
+ axes[1].plot(x, comps['exp_'], '--', label='Exponential component')
+ axes[1].legend()
plt.show()
@@ -599,10 +612,10 @@ this, and by defining an :func:`index_of` function to limit the data range.
That is, with::
def index_of(arrval, value):
- """Return index of array *at or below* value."""
- if value < min(arrval):
- return 0
- return max(np.where(arrval <= value)[0])
+ """Return index of array *at or below* value."""
+ if value < min(arrval):
+ return 0
+ return max(np.where(arrval <= value)[0])
ix1 = index_of(x, 75)
ix2 = index_of(x, 135)
@@ -622,10 +635,10 @@ we can get a better initial estimate (see below).
.. jupyter-execute::
:hide-code:
- plt.plot(x, y, 'b')
- plt.plot(x, out.init_fit, 'k--', label='initial fit')
- plt.plot(x, out.best_fit, 'r-', label='best fit')
- plt.legend(loc='best')
+ plt.plot(x, y)
+ plt.plot(x, out.init_fit, '--', label='initial fit')
+ plt.plot(x, out.best_fit, '-', label='best fit')
+ plt.legend()
plt.show()
@@ -640,3 +653,257 @@ and without any bounds on parameters at all:
This script is in the file ``doc_builtinmodels_nistgauss2.py`` in the examples folder,
and the figure above shows an improved initial estimate of the data.
+
+
+Example 4: Using a Spline Model
+--------------------------------
+
+In the example above, the two peaks might represent the interesting part of
+the data, and the exponential decay could be viewed as a "background" which
+might be due to other physical effects or part of some response of the
+instrumentation used to make the measurement. That is, the background
+might be well understood to be modeled as an exponential decay, as in the
+example above, and so easily included in the full analysis. As the results
+above show, there is some -- but not huge -- correlation of the parameters
+between the peak amplitudes and the decay of the exponential function.
+That means that it is helpful to include all of those components in a
+single fit, as the uncertainties in the peak amplitudes (which would be
+interpreted as "line strength" or "area") will reflect some of the
+uncertainty in how well we modeled the background.
+
+Sometimes a background is more complex or at least has a less obvious
+functional form. In these cases, it can be useful to use a *spline* to
+model part of the curve. Just for completeness, a spline is a piecewise
+continuous polynomial function (typically made of cubic polynomials) that
+has a series of ``x`` values known as "knots" at which the highest order
+derivative is allowed to be discontinuous. By adding more knots, the
+spline function has more flexibility to follow a particular function.
+
+As an example (see the example file "doc_builtinmodels_splinemodel.py"), we
+start with data with a single peak and a background that is hard to
+characterize clearly as a simple decay or oscillatory structure.
+
+.. jupyter-execute::
+ :hide-output:
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from lmfit.models import SplineModel, GaussianModel
+
+ data = np.loadtxt('test_splinepeak.dat')
+ x = data[:, 0]
+ y = data[:, 1]
+
+ plt.plot(x, y, label='data')
+ plt.legend()
+ plt.show()
+
+which shows (figure below):
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, label='data')
+ plt.legend()
+ plt.show()
+
+
+There is definitely a peak there, so we could start with building a model
+for a Gaussian peak, say with:
+
+.. jupyter-execute::
+ :hide-output:
+
+ model = GaussianModel(prefix='peak_')
+ params = model.make_params(amplitude=8, center=16, sigma=1)
+
+
+To account for that changing background, we'll use a spline, but need to
+know where to put the "knots". Picking points away from the peak makes
+sense -- we don't want to fit the peak -- but we want it to have some
+flexibility near the peak. Let's try spacing knot points at ``x=1, 3, ...,
+13``, then skip over the peak at around ``x=16`` and then pick up knots points
+at ``x=19, 21, 23, 25``.
+
+.. jupyter-execute::
+ :hide-output:
+
+ knot_xvals = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25])
+
+ bkg = SplineModel(prefix='bkg_', xknots=knot_xvals)
+ params.update(bkg.guess(y, x))
+
+
+Note that we used ``bkg.guess()`` to guess the initial values of the spline
+parameters and then updated the ``params`` Parameters object with these 11
+parameters to account for the spline. These will be very close to the ``y``
+values at the knot ``x`` values. The precise definition of the spline knot
+parameters is not "the y-values through which the resulting spline curve
+goes", but these values are pretty good estimates for the resulting spline
+values. You'll see below that these initial values are close.
+
+With a spline background defined, we can create a composite model, and run
+a fit.
+
+.. jupyter-execute::
+ :hide-output:
+
+ model = model + bkg
+
+ params['peak_amplitude'].min = 0
+ params['peak_center'].min = 10
+ params['peak_center'].max = 20
+
+ out = model.fit(y, params, x=x)
+ print(out.fit_report(min_correl=0.3))
+
+You'll see that we first set some "sanity bounds" on the peak parameters to
+prevent the peak from going completely wrong. This really is not necessary
+in this case, but it is often a reasonable thing to do; the general advice
+is to be generous with the bounds, not overly restrictive.
+
+This fit will print out a report of
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(out.fit_report(min_correl=0.3))
+
+
+From this we can make a few observations. First, the correlation between
+the "spline" parameters and the "peak" parameters is noticeable, but not
+extremely high -- that's good, and the estimated uncertainties do account
+for this correlation. The spline components are correlated with each other
+(especially with the neighboring N-1 and N+1 spline parameters). Second, we can see
+that the initial values for the background spline parameters are pretty
+good.
+
+We can plot the results and fit components with
+
+.. jupyter-execute::
+ :hide-output:
+
+ comps = out.eval_components()
+ plt.plot(x, out.best_fit, label='best fit')
+ plt.plot(x, comps['bkg_'], label='background')
+ plt.plot(x, comps['peak_'], label='peak')
+ plt.legend()
+
+which will generate the plot shown below:
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, label='data')
+ plt.plot(x, out.best_fit, label='best fit')
+ plt.plot(x, comps['bkg_'], label='background')
+ plt.plot(x, comps['peak_'], label='peak')
+ plt.legend()
+ plt.show()
+
+
+If you're interested in seeing the locations of the knots, you might do
+
+.. jupyter-execute::
+ :hide-output:
+
+ knot_yvals = np.array([o.value for o in out.params.values() if o.name.startswith('bkg')])
+ plt.plot(knot_xvals, knot_yvals, 'o', color='black', label='spline knots values')
+
+which will be shown as
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, label='data')
+ plt.plot(x, out.best_fit, label='best fit')
+ plt.plot(x, comps['bkg_'], label='background')
+ plt.plot(x, comps['peak_'], label='peak')
+ knot_yvals = np.array([o.value for o in out.params.values() if o.name.startswith('bkg')])
+ plt.plot(knot_xvals, knot_yvals, 'o', color='black', label='spline knots values')
+
+ plt.legend()
+ plt.show()
+
+
+You might be interested in assessing what impact the selection of knots
+has on the resulting peak intensity. For example, you might try the
+following sets of knot values:
+
+.. jupyter-execute::
+ :hide-output:
+
+ knot_xvals1 = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25])
+ knot_xvals2 = np.array([1, 3, 5, 7, 9, 11, 13, 16, 19, 21, 23, 25])
+ knot_xvals3 = np.array([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25])
+
+
+and re-run the fit with these different sets of knot points. The results
+are shown in the table below.
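+
+One compact way to run this comparison (a sketch added here, assuming
+``GaussianModel`` and ``SplineModel`` are imported as above) is to loop
+over the candidate knot sets:
+
+.. jupyter-execute::
+    :hide-output:
+
+    for knots in (knot_xvals1, knot_xvals2, knot_xvals3):
+        peak = GaussianModel(prefix='peak_')
+        pars = peak.make_params(amplitude=8, center=16, sigma=1)
+        bkg = SplineModel(prefix='bkg_', xknots=knots)
+        pars.update(bkg.guess(y, x))
+        res = (peak + bkg).fit(y, pars, x=x)
+        amp = res.params['peak_amplitude']
+        print(f'{len(knots)} knots: amplitude={amp.value:.3f} ({amp.stderr:.3f})')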
+
+
+.. _models_spline_results-table:
+
+ Table of Peak amplitudes with varying spline points
+
+ +-------------------+------+----------------------------------------+
+ | spline x points | N | Peak amplitude value and uncertainty |
+ +===================+======+========================================+
+ | knot_xvals1 | 11 | 12.223 (0.295) |
+ +-------------------+------+----------------------------------------+
+ | knot_xvals2 | 12 | 11.746 (0.594) |
+ +-------------------+------+----------------------------------------+
+ | knot_xvals3 | 13 | 12.052 (0.872) |
+ +-------------------+------+----------------------------------------+
+
+Adding more spline points, especially near the peak center around ``x=16.4``,
+can shift the measured amplitude, but the uncertainty increases
+dramatically enough that the estimates still mostly cover the same range
+of values. This is an interesting case of adding more parameters to a fit
+and having the uncertainties in the fitted parameters get worse. The
+interested reader is encouraged to explore the fit reports and plot these
+different cases.
+
+
+Finally, the basic case above used 11 spline points to fit the baseline.
+In fact, it would be reasonable to ask whether that is enough parameters
+to fit the full spectrum. Imposing that there is also a Gaussian peak
+nearby makes the spline fit only the background, but without the
+Gaussian, the spline could fit the full curve. By way of example, we'll
+try increasing the number of spline points used to fit this data:
+
+.. jupyter-execute::
+ :hide-output:
+
+ plt.plot(x, y, 'o', label='data')
+ for nknots in (10, 15, 20, 25):
+ model = SplineModel(prefix='bkg_', xknots=np.linspace(0, 25, nknots))
+ params = model.guess(y, x)
+ out = model.fit(y, params, x=x)
+ plt.plot(x, out.best_fit, label=f'best-fit ({nknots} knots)')
+
+ plt.legend()
+ plt.show()
+
+
+
+which will show the fits below:
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, 'o', label='data')
+ for nknots in (10, 15, 20, 25):
+ model = SplineModel(prefix='bkg_', xknots=np.linspace(0, 25, nknots))
+ params = model.guess(y, x)
+ out = model.fit(y, params, x=x)
+ plt.plot(x, out.best_fit, label=f'best-fit ({nknots} knots)')
+
+ plt.legend()
+ plt.show()
+
+
+By itself, 10 knots does not give a very good fit, but 25 knots or more
+does give a very good fit, even to the peak itself. This should give some
+confidence that the fit with 11 parameters for the background spline is
+acceptable, but it also gives some reason to be careful in selecting the
+number of spline points to use.
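+
+One quantitative way to guide that choice (a sketch added here, using the
+``aic`` and ``bic`` statistics that every lmfit ``ModelResult`` provides)
+is to compare information criteria as the number of knots grows:
+
+.. jupyter-execute::
+    :hide-output:
+
+    for nknots in (10, 15, 20, 25):
+        model = SplineModel(prefix='bkg_', xknots=np.linspace(0, 25, nknots))
+        out = model.fit(y, model.guess(y, x), x=x)
+        print(f'{nknots} knots: aic={out.aic:.1f}, bic={out.bic:.1f}')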
diff --git a/doc/conf.py b/doc/conf.py
index f115750..93969c6 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -35,15 +35,17 @@ autoclass_content = 'both'
# sphinx.ext.intersphinx settings
intersphinx_mapping = {'py': ('https://docs.python.org/3', None),
- 'numpy': ('https://numpy.org/doc/stable', None),
- 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
- 'matplotlib': ('https://matplotlib.org', None),
+ 'numpy': ('https://numpy.org/doc/stable/', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy/', None),
+ 'matplotlib': ('https://matplotlib.org/stable/', None),
+ 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
+ 'sympy': ('https://docs.sympy.org/latest/', None),
}
# sphinx.ext.extlinks settings
extlinks = {
- 'scipydoc': ('https://docs.scipy.org/doc/scipy/reference/generated/scipy.%s.html', 'scipy.'),
- 'numpydoc': ('https://docs.scipy.org/doc/numpy/reference/generated/numpy.%s.html', 'numpy.'),
+ 'scipydoc': ('https://docs.scipy.org/doc/scipy/reference/generated/scipy.%s.html', 'scipy.%s'),
+ 'numpydoc': ('https://docs.scipy.org/doc/numpy/reference/generated/numpy.%s.html', 'numpy.%s'),
}
# sphinx.ext.imgmath settings
@@ -63,11 +65,11 @@ master_doc = 'index'
# General information about the project.
project = u'lmfit'
-copyright = u'{}, Matthew Newville, Till Stensitzki, Renee Otten, and others'.format(date.today().year)
+copyright = f'{date.today().year}, Matthew Newville, Till Stensitzki, Renee Otten, and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
-version = release = lmfit.__version__.split('+', 1)[0]
+version = release = lmfit.__version__.split('.post')[0]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
@@ -167,7 +169,12 @@ sphinx_gallery_conf = {
'filename_pattern': r'(\\|/)documentation|(\\|/)example_',
'ignore_pattern': r'(\\|/)doc_',
'ignore_repr_types': r'matplotlib',
+ 'image_srcset': ["3x"],
}
+# remove certain RuntimeWarnings from examples
+warnings.filterwarnings("ignore", category=RuntimeWarning,
+ message="overflow encountered")
+
# Suppress "WARNING: unknown mimetype for _static/empty
suppress_warnings = ['epub.unknown_project_files']
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 008b9e2..cdedacc 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -83,7 +83,7 @@ and parameter values that are at the varying confidence levels given by
steps in :math:`\sigma`. As we can see, the estimated error is almost the
same, and the uncertainties are well behaved: Going from 1-:math:`\sigma`
(68% confidence) to 3-:math:`\sigma` (99.7% confidence) uncertainties is
-fairly linear. It can also be seen that the errors are fairy symmetric
+fairly linear. It can also be seen that the errors are fairly symmetric
around the best fit value. For this problem, it is not necessary to
calculate confidence intervals, and the estimates of the uncertainties from
the covariance matrix are sufficient.
diff --git a/doc/constraints.rst b/doc/constraints.rst
index dbe439e..cc82bb9 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -13,7 +13,7 @@ highly desirable to place mathematical constraints on parameter values.
For example, one might want to require that two Gaussian peaks have the
same width, or have amplitudes that are constrained to add to some value.
Of course, one could rewrite the objective or model function to place such
-requirements, but this is somewhat error prone, and limits the flexibility
+requirements, but this is somewhat error-prone, and limits the flexibility
so that exploring constraints becomes laborious.
To simplify the setting of constraints, Parameters can be assigned a
diff --git a/doc/doc_examples_to_gallery.py b/doc/doc_examples_to_gallery.py
index 1ecd2ec..4cfeb5b 100755
--- a/doc/doc_examples_to_gallery.py
+++ b/doc/doc_examples_to_gallery.py
@@ -51,11 +51,8 @@ for fn in files:
gallery_file = examples_documentation_dir / fn.name[4:]
msg = "" # add optional message f
- gallery_file.write_text(
- '"""\n{}\n{}\n\n{}\n"""\n{}'.format(
- fn.name, "=" * len(fn.name), msg, script_text
- )
- )
+ gallery_file.write_text(f'"""\n{fn.name}\n{"=" * len(fn.name)}\n\n'
+ f'{msg}\n"""\n{script_text}')
# make sure the saved Models and ModelResult are available
if "save" in fn.name:
diff --git a/doc/extensions.py b/doc/extensions.py
deleted file mode 100644
index bd91142..0000000
--- a/doc/extensions.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Sphinx extensions for generating HTML output
-
-extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.extlinks',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.napoleon',
- 'sphinx.ext.todo',
- 'IPython.sphinxext.ipython_console_highlighting',
- 'jupyter_sphinx',
- 'sphinx_gallery.gen_gallery']
diff --git a/doc/faq.rst b/doc/faq.rst
index ab519a8..6b92543 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -30,7 +30,6 @@ If you see something like::
then you need to install the ``ipywidgets`` package, try: ``pip install ipywidgets``.
-
How can I fit multi-dimensional data?
=====================================
@@ -44,6 +43,7 @@ do this is to use :numpydoc:`ndarray.flatten`, for example::
resid = calculate_multidim_residual()
return resid.flatten()
+
How can I fit multiple data sets?
=================================
@@ -66,7 +66,6 @@ different arrays. As a bonus, the two lines share the 'offset' parameter::
return np.concatenate((resid1, resid2))
-
How can I fit complex data?
===========================
@@ -90,12 +89,12 @@ is that you also get access to the plot routines from the ModelResult
class, which are also complex-aware.
-
How should I cite LMFIT?
========================
See https://dx.doi.org/10.5281/zenodo.11813
+
I get errors from NaN in my fit. What can I do?
================================================
@@ -113,6 +112,7 @@ function (if using ``Model``) generates a NaN, the fit will stop
immediately. If your objective or model function generates a NaN, you
really must handle that.
+
``nan_policy``
~~~~~~~~~~~~~~
@@ -161,13 +161,14 @@ you probably won't ever have values greater than 1.e308 and can therefore
(usually) safely clip the argument passed to ``exp()`` to be smaller than
about 700.
+
.. _faq_params_stuck:
-Why are Parameter Values sometime stuck at initial values?
+Why are Parameter values sometimes stuck at initial values?
===========================================================
In order for a Parameter to be optimized in a fit, changing its value must
-have an impact on the fit residual (`data-model` when curve fitting, for
+have an impact on the fit residual (``data-model`` when curve fitting, for
example). If a fit has not changed one or more of the Parameters, it means
that changing those Parameters did not change the fit residual.
@@ -208,6 +209,7 @@ not at the boundary values.
Finally, one reason for a Parameter to not change is that they are actually
used as discrete values. This is discussed below in :ref:`faq_discrete_params`.
+
.. _faq_params_no_uncertainties:
Why are uncertainties in Parameters sometimes not determined?
@@ -224,7 +226,6 @@ not change from their initial values.
Can Parameters be used for Array Indices or Discrete Values?
=============================================================
-
The short answer is "No": variables in all of the fitting methods used in
``lmfit`` (and all of those available in ``scipy.optimize``) are treated as
continuous values, and represented as double precision floating point
@@ -248,20 +249,23 @@ That you implement with a model function and use to fit data like this:
.. jupyter-execute::
import numpy as np
+
import lmfit
+
def quad_off(x, x0, a, b, c):
- model = a + b*x**2
- model[np.where(x<x0)] = c
+ model = a + b * x**2
+ model[np.where(x < x0)] = c
return model
+
x0 = 19
b = 0.02
a = 2.0
xdat = np.linspace(0, 100, 101)
- ydat = a + b*xdat**2
+ ydat = a + b * xdat**2
ydat[np.where(xdat < x0)] = a + b * x0**2
- ydat += np.random.normal(scale=0.1, size=len(xdat))
+ ydat += np.random.normal(scale=0.1, size=xdat.size)
mod = lmfit.Model(quad_off)
pars = mod.make_params(x0=22, a=1, b=1, c=1)
@@ -271,41 +275,44 @@ That you implement with a model function and use to fit data like this:
This will not result in a very good fit, as the value for ``x0`` cannot be
found by making a small change in its value. Specifically,
-``model[np.where(x<x0)]`` will give the same result for ``x0=22`` and
+``model[np.where(x < x0)]`` will give the same result for ``x0=22`` and
``x0=22.001``, and so that value is not changed during the fit.
-There are a couple ways around this problems. First, you may be able to
+There are a couple ways around this problem. First, you may be able to
make the fit depend on ``x0`` in a way that is not just discrete. That
-depends on your model function. A second option is treat the break not as a
+depends on your model function. A second option is to treat the break not as a
hard break but as a more gentle transition with a sigmoidal function, such
as an error function. Like the break-point, these will go from 0 to 1, but
more gently and with some finite value leaking into neighboring points.
The amount of leakage or width of the step can also be adjusted.
-A simple modification of the above would to use an error function would
+A simple modification of the above to use an error function would
look like this and give better fit results:
.. jupyter-execute::
import numpy as np
- import lmfit
from scipy.special import erf
+ import lmfit
+
+
def quad_off(x, x0, a, b, c):
- m1 = a + b*x**2
+ m1 = a + b * x**2
m2 = c * np.ones(len(x))
- # step up from 0 to 1 at x0: (erf(x-x0)+1)/2
+ # step up from 0 to 1 at x0: (erf(x-x0)+1)/2
# step down from 1 to 0 at x0: (1-erf(x-x0))/2
- model = m1 * (erf(x-x0)+1)/2 + m2*(1-erf(x-x0))/2
+ model = m1 * (erf(x-x0)+1)/2 + m2 * (1-erf(x-x0))/2
return model
+
x0 = 19
b = 0.02
a = 2.0
xdat = np.linspace(0, 100, 101)
- ydat = a + b*xdat**2
+ ydat = a + b * xdat**2
ydat[np.where(xdat < x0)] = a + b * x0**2
- ydat += np.random.normal(scale=0.1, size=len(xdat))
+ ydat += np.random.normal(scale=0.1, size=xdat.size)
mod = lmfit.Model(quad_off)
pars = mod.make_params(x0=22, a=1, b=1, c=1)
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 1045653..50a05c8 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -129,7 +129,7 @@ Choosing Different Fitting Methods
By default, the `Levenberg-Marquardt
<https://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm>`_ algorithm is
used for fitting. While often criticized, including the fact it finds a
-*local* minima, this approach has some distinct advantages. These include
+*local* minimum, this approach has some distinct advantages. These include
being fast, and well-behaved for most curve-fitting needs, and making it
easy to estimate uncertainties for and correlations between pairs of fit
variables, as discussed in :ref:`fit-results-label`.
@@ -449,7 +449,7 @@ and standard errors could be done as
print('-------------------------------')
print('Parameter Value Stderr')
for name, param in out.params.items():
- print('{:7s} {:11.5f} {:11.5f}'.format(name, param.value, param.stderr))
+ print(f'{name:7s} {param.value:11.5f} {param.stderr:11.5f}')
.. _fit-itercb-label:
@@ -476,7 +476,7 @@ be used to abort a fit.
:type resid: numpy.ndarray
:param args: Positional arguments. Must match ``args`` argument to :func:`minimize`
:param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`
- :return: Residual array (generally ``data-model``) to be minimized in the least-squares sense.
+ :return: Iteration abort flag.
:rtype: None for normal behavior, any value like ``True`` to abort the fit.
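+
+A minimal callback along these lines (a sketch added here; ``minimize``,
+``residual``, and the data arrays are assumed to be defined as in the
+surrounding examples) could abort a fit after a fixed number of
+iterations::
+
+    def watcher(params, iter, resid, *args, **kws):
+        """Abort the fit once 100 iterations have been used."""
+        return iter > 100
+
+    out = minimize(residual, params, args=(x, data), iter_cb=watcher)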
@@ -575,8 +575,6 @@ parameters, which is a similar goal to the one here.
x = np.linspace(1, 10, 250)
np.random.seed(0)
y = 3.0 * np.exp(-x / 2) - 5.0 * np.exp(-(x - 0.1) / 10.) + 0.1 * np.random.randn(x.size)
- plt.plot(x, y, 'b')
- plt.show()
Create a Parameter set for the initial guesses:
@@ -603,9 +601,9 @@ and plotting the fit using the Maximum Likelihood solution gives the graph below
.. jupyter-execute::
- plt.plot(x, y, 'b')
- plt.plot(x, residual(mi.params) + y, 'r', label='best fit')
- plt.legend(loc='best')
+ plt.plot(x, y, 'o')
+ plt.plot(x, residual(mi.params) + y, label='best fit')
+ plt.legend()
plt.show()
Note that the fit here (for which the ``numdifftools`` package is installed)
@@ -656,9 +654,10 @@ worked as intended (as a rule of thumb the value should be between 0.2 and
.. jupyter-execute::
- plt.plot(res.acceptance_fraction, 'b')
+ plt.plot(res.acceptance_fraction, 'o')
plt.xlabel('walker')
plt.ylabel('acceptance fraction')
+ plt.show()
With the results from ``emcee``, we can visualize the posterior distributions
for the parameters using the ``corner`` package:
diff --git a/doc/installation.rst b/doc/installation.rst
index 0d38aee..3951fa4 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -5,8 +5,9 @@ Downloading and Installation
.. _lmfit github repository: https://github.com/lmfit/lmfit-py
.. _python: https://python.org
.. _scipy: https://scipy.org/scipylib/index.html
-.. _numpy: http://numpy.org/
+.. _numpy: https://numpy.org/
.. _pytest: https://pytest.org/
+.. _pytest-cov: https://github.com/pytest-dev/pytest-cov
.. _emcee: https://emcee.readthedocs.io/
.. _pandas: https://pandas.pydata.org/
.. _jupyter: https://jupyter.org/
@@ -22,35 +23,47 @@ Downloading and Installation
.. _sphinxcontrib-svg2pdfconverter: https://github.com/missinglinkelectronics/sphinxcontrib-svg2pdfconverter
.. _cairosvg: https://cairosvg.org/
.. _Pillow: https://python-pillow.org/
-
+.. _sphinx-gallery: https://sphinx-gallery.github.io/stable/index.html
+.. _flaky: https://github.com/box/flaky
+.. _SymPy: https://www.sympy.org/
+.. _Latexmk: https://ctan.org/pkg/latexmk/
Prerequisites
~~~~~~~~~~~~~
-Lmfit works with `Python`_ versions 3.6 and higher. Version
+Lmfit works with `Python`_ versions 3.7 and higher. Version
0.9.15 is the final version to support Python 2.7.
Lmfit requires the following Python packages, with versions given:
- * `NumPy`_ version 1.18 or higher.
- * `SciPy`_ version 1.3 or higher.
- * `asteval`_ version 0.9.21 or higher.
- * `uncertainties`_ version 3.0.1 or higher.
+ * `NumPy`_ version 1.19 or higher.
+ * `SciPy`_ version 1.6 or higher.
+ * `asteval`_ version 0.9.28 or higher.
+ * `uncertainties`_ version 3.1.4 or higher.
-All of these are readily available on PyPI, and should be installed
+All of these are readily available on PyPI, and are installed
automatically if installing with ``pip install lmfit``.
-In order to run the test suite, the `pytest`_ package is required. Some
-functionality requires the `emcee`_ (version 3+), `corner`_, `pandas`_, `Jupyter`_,
-`matplotlib`_, `dill`_, or `numdifftools`_ packages. These are not installed
-automatically, but we highly recommend each of these packages.
+In order to run the test suite, the `pytest`_, `pytest-cov`_, and `flaky`_
+packages are required. Some functionality requires the `emcee`_ (version 3+),
+`corner`_, `pandas`_, `Jupyter`_, `matplotlib`_, `dill`_, or `numdifftools`_
+packages. These are not installed automatically, but we highly recommend each
+of them.
+
+For building the documentation and generating the examples gallery, `matplotlib`_,
+`emcee`_ (version 3+), `corner`_, `Sphinx`_, `sphinx-gallery`_, `jupyter_sphinx`_,
+`Pillow`_, and `SymPy`_ are required. For generating the PDF documentation, the Python
+packages `sphinxcontrib-svg2pdfconverter`_ and `cairosvg`_ are also required,
+as well as the LaTeX package `Latexmk`_ (which is included by default in some
+LaTeX distributions).
-For building the documentation and generating the examples gallery,
-`matplotlib`_, `emcee`_ (version 3+), `corner`_, `Sphinx`_,
-`jupyter_sphinx`_, `Pillow`_, `sphinxcontrib-svg2pdfconverter`_, and `cairosvg`_
-are required (the latter two only when generating the PDF document).
+Please refer to ``setup.cfg`` under ``options.extras_require`` for a list of all
+dependencies that are needed if you want to participate in the development of lmfit.
+You can install all these dependencies automatically by doing ``pip install lmfit[all]``,
+or select only a subset (e.g., ``dev``, ``doc``, or ``test``).
-Please refer to ``requirements-dev.txt`` for a list of all dependencies that
-are needed if you want to participate in the development of lmfit.
+Please note: the "original" ``python setup.py install`` is deprecated, but we will
+provide a shim ``setup.py`` file for as long as ``Python`` and/or ``setuptools``
+allow the use of this legacy command.
Downloads
~~~~~~~~~
@@ -81,7 +94,14 @@ To get the latest development version from the `lmfit GitHub repository`_, use::
and install using::
- python setup.py install
+ pip install --upgrade build pip setuptools wheel
+
+to install the required build dependencies and then do::
+
+ python -m build
+ pip install ".[all]'
+
+to generate the wheel and install ``lmfit`` with all its dependencies.
We welcome all contributions to lmfit! If you cloned the repository for this
purpose, please read `CONTRIBUTING.md`_ for more detailed instructions.
@@ -102,7 +122,7 @@ that `matplotlib`_ has been installed and is working correctly.
Acknowledgements
~~~~~~~~~~~~~~~~
-.. literalinclude:: ../THANKS.txt
+.. literalinclude:: ../AUTHORS.txt
:language: none
diff --git a/doc/intro.rst b/doc/intro.rst
index 0b77d4c..49ae3ad 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -44,7 +44,7 @@ sine wave, and so write an objective function like this:
from numpy import exp, sin
- def residual(variables, x, data, eps_data):
+ def residual(variables, x, data, uncertainty):
"""Model a decaying sine wave and subtract data."""
amp = variables[0]
phaseshift = variables[1]
@@ -53,7 +53,7 @@ sine wave, and so write an objective function like this:
model = amp * sin(x*freq + phaseshift) * exp(-x*x*decay)
- return (data-model) / eps_data
+ return (data-model) / uncertainty
To perform the minimization with :mod:`scipy.optimize`, one would do this:
@@ -64,11 +64,14 @@ To perform the minimization with :mod:`scipy.optimize`, one would do this:
# generate synthetic data with noise
x = linspace(0, 100)
- eps_data = random.normal(size=x.size, scale=0.2)
- data = 7.5 * sin(x*0.22 + 2.5) * exp(-x*x*0.01) + eps_data
+ noise = random.normal(size=x.size, scale=0.2)
+ data = 7.5 * sin(x*0.22 + 2.5) * exp(-x*x*0.01) + noise
+
+ # generate experimental uncertainties
+ uncertainty = abs(0.16 + random.normal(size=x.size, scale=0.05))
variables = [10.0, 0.2, 3.0, 0.007]
- out = leastsq(residual, variables, args=(x, data, eps_data))
+ out = leastsq(residual, variables, args=(x, data, uncertainty))
Though it is wonderful to be able to use Python for such optimization
problems, and the SciPy library is robust and easy to use, the approach
@@ -122,7 +125,7 @@ for the decaying sine wave as:
from lmfit import minimize, Parameters
- def residual(params, x, data, eps_data):
+ def residual(params, x, data, uncertainty):
amp = params['amp']
phaseshift = params['phase']
freq = params['frequency']
@@ -130,7 +133,7 @@ for the decaying sine wave as:
model = amp * sin(x*freq + phaseshift) * exp(-x*x*decay)
- return (data-model) / eps_data
+ return (data-model) / uncertainty
params = Parameters()
@@ -139,7 +142,7 @@ for the decaying sine wave as:
params.add('phase', value=0.2)
params.add('frequency', value=3.0)
- out = minimize(residual, params, args=(x, data, eps_data))
+ out = minimize(residual, params, args=(x, data, uncertainty))
At first look, we simply replaced a list of values with a dictionary,
accessed by name -- not a huge improvement. But each of the named
@@ -166,7 +169,7 @@ created:
params['decay'].min = 0.10
Importantly, our objective function remains unchanged. This means the
-objective function can simply express the parameterized phenomenon to be
+objective function can simply express the parametrized phenomenon to be
modeled, and is separate from the choice of parameters to be varied in the
fit.
diff --git a/doc/model.rst b/doc/model.rst
index 21b899f..d6f5928 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -28,7 +28,7 @@ different from :scipydoc:`optimize.curve_fit`, for example in that it uses
important advantages.
In addition to allowing you to turn any model function into a curve-fitting
-method, lmfit also provides canonical definitions for many known line shapes
+method, lmfit also provides canonical definitions for many known lineshapes
such as Gaussian or Lorentzian peaks and Exponential decays that are widely
used in many scientific domains. These are available in the :mod:`models`
module that will be discussed in more detail in the next chapter
@@ -91,8 +91,8 @@ signature itself:
from lmfit import Model
gmodel = Model(gaussian)
- print('parameter names: {}'.format(gmodel.param_names))
- print('independent variables: {}'.format(gmodel.independent_vars))
+ print(f'parameter names: {gmodel.param_names}')
+ print(f'independent variables: {gmodel.independent_vars}')
As you can see, the Model ``gmodel`` determined the names of the parameters
and the independent variables. By default, the first argument of the
@@ -186,14 +186,14 @@ plot:
.. jupyter-execute::
:hide-code:
- plt.plot(x, y, 'bo')
- plt.plot(x, result.init_fit, 'k--', label='initial fit')
- plt.plot(x, result.best_fit, 'r-', label='best fit')
- plt.legend(loc='best')
+ plt.plot(x, y, 'o')
+ plt.plot(x, result.init_fit, '--', label='initial fit')
+ plt.plot(x, result.best_fit, '-', label='best fit')
+ plt.legend()
plt.show()
-which shows the data in blue dots, the best fit as a solid red line, and
-the initial fit as a dashed black line.
+which shows the data in blue dots, the best fit as a solid green line, and
+the initial fit as a dashed orange line.
Note that the model fitting was really performed with:
@@ -329,7 +329,7 @@ function is fairly easy. Let's try another one:
decay_model = Model(decay)
- print('independent variables: {}'.format(decay_model.independent_vars))
+ print(f'independent variables: {decay_model.independent_vars}')
params = decay_model.make_params()
print('\nParameters:')
@@ -346,7 +346,7 @@ you can say so:
.. jupyter-execute::
decay_model = Model(decay, independent_vars=['tau'])
- print('independent variables: {}'.format(decay_model.independent_vars))
+ print(f'independent variables: {decay_model.independent_vars}')
params = decay_model.make_params()
print('\nParameters:')
@@ -378,7 +378,7 @@ Parameters if the supplied default value was a valid number (but not
.. jupyter-execute::
def decay2(t, tau, N=10, check_positive=False):
- if check_small:
+ if check_positive:
arg = abs(t)/max(1.e-9, abs(tau))
else:
arg = t/tau
@@ -589,15 +589,22 @@ emphasized that if you are willing to save or reuse the definition of the
model function as Python code, then saving the Parameters and rest of the
components that make up a model presents no problem.
-If the ``dill`` package is installed, the model function will be saved using
-it. But because saving the model function is not always reliable, saving a
-model will always save the *name* of the model function. The :func:`load_model`
-takes an optional :attr:`funcdefs` argument that can contain a dictionary of
-function definitions with the function names as keys and function objects as
-values. If one of the dictionary keys matches the saved name, the
-corresponding function object will be used as the model function. With this
-approach, if you save a model and can provide the code used for the model
-function, the model can be saved and reliably reloaded and used.
+If the ``dill`` package is installed, the model function will also be saved
+using it. But because saving the model function is not always reliable,
+saving a model will always save the *name* of the model function. The
+:func:`load_model` takes an optional :attr:`funcdefs` argument that can
+contain a dictionary of function definitions with the function names as
+keys and function objects as values. If one of the dictionary keys matches
+the saved name, the corresponding function object will be used as the model
+function. If it is not found by name, and if ``dill`` was used to save
+the model and is available at run-time, the ``dill``-encoded function
+will be used instead. Note that this approach will generally allow
+you to save a model that can be used by another installation of the
+same version of Python, but may not work across Python versions. For preserving
+fits for extended periods of time (say, archiving for documentation of
+scientific results), we strongly encourage you to save the full Python code
+used for the model function and fit process.
+
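+As a minimal sketch of that pattern (the file name ``mygauss.sav`` and the
+function ``gaussian`` here are hypothetical, shown only to illustrate the
+``funcdefs`` argument)::
+
+    import numpy as np
+
+    from lmfit.model import load_model
+
+    def gaussian(x, amp, cen, wid):
+        """Model function originally used to build the saved model."""
+        return amp * np.exp(-(x-cen)**2 / (2*wid**2))
+
+    model = load_model('mygauss.sav', funcdefs={'gaussian': gaussian})
+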
.. autofunction:: save_model
@@ -707,6 +714,17 @@ and ``bic``.
numpy.ndarray of data to compare to model.
+.. attribute:: dely
+
+ numpy.ndarray of estimated uncertainties in the ``y`` values of the model
+ from :meth:`ModelResult.eval_uncertainty` (see :ref:`eval_uncertainty_sec`).
+
+.. attribute:: dely_comps
+
+ a dictionary of estimated uncertainties in the ``y`` values of the model
+ components, from :meth:`ModelResult.eval_uncertainty` (see
+ :ref:`eval_uncertainty_sec`).
+
.. attribute:: errorbars
Boolean for whether error bars were estimated by fit.
@@ -789,6 +807,17 @@ and ``bic``.
numpy.ndarray for residual.
+.. attribute:: rsquared
+
+ Floating point :math:`R^2` statistic, defined for data :math:`y` and best-fit model :math:`f` as
+
+.. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ R^2 &=& 1 - \frac{\sum_i (y_i - f_i)^2}{\sum_i (y_i - \bar{y})^2}
+ \end{eqnarray*}
+
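+ After a successful fit, this is available directly, for example with
+ ``print(f'R-squared = {result.rsquared:.4f}')`` (a usage sketch, where
+ ``result`` is the ``ModelResult`` returned by ``Model.fit``).
+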
.. attribute:: scale_covar
Boolean flag for whether to automatically scale covariance matrix.
@@ -804,6 +833,8 @@ and ``bic``.
array, so that ``weights*(data - fit)`` is minimized in the
least-squares sense.
+.. _eval_uncertainty_sec:
+
Calculating uncertainties in the model function
-----------------------------------------------
@@ -834,15 +865,30 @@ figure below.
.. jupyter-execute::
:hide-code:
- plt.plot(x, y, 'bo')
- plt.plot(x, result.init_fit, 'k--', label='initial fit')
- plt.plot(x, result.best_fit, 'r-', label='best fit')
+ plt.plot(x, y, 'o')
+ plt.plot(x, result.init_fit, '--', label='initial fit')
+ plt.plot(x, result.best_fit, '-', label='best fit')
plt.fill_between(x, result.best_fit-dely, result.best_fit+dely, color="#ABABAB",
label='3-$\sigma$ uncertainty band')
- plt.legend(loc='best')
+ plt.legend()
plt.show()
+.. versionadded:: 1.0.4
+
+If the model is a composite built from multiple components, the
+:meth:`ModelResult.eval_uncertainty` method will evaluate the uncertainty of
+both the full model (often the sum of multiple components) as well as the
+uncertainty in each component. The uncertainty of the full model will be held in
+``result.dely``, and the uncertainties for each component will be held in the dictionary
+``result.dely_comps``, with keys that are the component prefixes.
+
+An example script shows how the uncertainties in components of a composite
+model can be calculated and used:
+
+.. jupyter-execute:: ../examples/doc_model_uncertainty2.py
+
+
.. _modelresult_saveload_sec:
Saving and Loading ModelResults
@@ -949,31 +995,31 @@ and shows the plot on the left.
:hide-code:
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
- axes[0].plot(x, y, 'bo')
- axes[0].plot(x, result.init_fit, 'k--', label='initial fit')
- axes[0].plot(x, result.best_fit, 'r-', label='best fit')
- axes[0].legend(loc='best')
+ axes[0].plot(x, y, 'o')
+ axes[0].plot(x, result.init_fit, '--', label='initial fit')
+ axes[0].plot(x, result.best_fit, '-', label='best fit')
+ axes[0].legend()
comps = result.eval_components()
- axes[1].plot(x, y, 'bo')
- axes[1].plot(x, comps['gaussian'], 'k--', label='Gaussian component')
- axes[1].plot(x, comps['line'], 'r--', label='Line component')
- axes[1].legend(loc='best')
+ axes[1].plot(x, y, 'o')
+ axes[1].plot(x, comps['gaussian'], '--', label='Gaussian component')
+ axes[1].plot(x, comps['line'], '--', label='Line component')
+ axes[1].legend()
plt.show()
On the left, data is shown in blue dots, the total fit is shown in solid
-red line, and the initial fit is shown as a black dashed line. The figure
+green line, and the initial fit is shown as an orange dashed line. The figure
on the right shows again the data in blue dots, the Gaussian component as
-a black dashed line and the linear component as a red dashed line. It is
+an orange dashed line and the linear component as a green dashed line. It is
created using the following code:
.. jupyter-execute::
:hide-output:
comps = result.eval_components()
- plt.plot(x, y, 'bo')
- plt.plot(x, comps['gaussian'], 'k--', label='Gaussian component')
- plt.plot(x, comps['line'], 'r--', label='Line component')
+ plt.plot(x, y, 'o')
+ plt.plot(x, comps['gaussian'], '--', label='Gaussian component')
+ plt.plot(x, comps['line'], '--', label='Line component')
The components were generated after the fit using the
:meth:`ModelResult.eval_components` method of the ``result``, which returns
@@ -1066,14 +1112,14 @@ and shows the plots:
:hide-code:
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
- axes[0].plot(x, y, 'bo')
- axes[0].plot(x, result.init_fit, 'k--', label='initial fit')
- axes[0].plot(x, result.best_fit, 'r-', label='best fit')
- axes[0].legend(loc='best')
- axes[1].plot(x, y, 'bo')
- axes[1].plot(x, 10*comps['jump'], 'k--', label='Jump component')
- axes[1].plot(x, 10*comps['gaussian'], 'r-', label='Gaussian component')
- axes[1].legend(loc='best')
+ axes[0].plot(x, y, 'o')
+ axes[0].plot(x, result.init_fit, '--', label='initial fit')
+ axes[0].plot(x, result.best_fit, '-', label='best fit')
+ axes[0].legend()
+ axes[1].plot(x, y, 'o')
+ axes[1].plot(x, 10*comps['jump'], '--', label='Jump component')
+ axes[1].plot(x, 10*comps['gaussian'], '-', label='Gaussian component')
+ axes[1].legend()
plt.show()
Using composite models with built-in or custom operators allows you to
diff --git a/doc/parameters.rst b/doc/parameters.rst
index 0838568..ae82762 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -81,6 +81,16 @@ The :class:`Parameters` class
.. automethod:: load
+.. _dumpload_warning:
+
+.. warning::
+
+ Parameters saved with user-added functions in the ``_asteval``
+ interpreter using :meth:`dump` and :meth:`dumps` may not be easily
+ recovered with :meth:`load` and :meth:`loads`. See
+ :ref:`model_saveload_sec` for further discussion.
+
+
Simple Example
==============
diff --git a/doc/sphinx/theme/sphinx13/basic_layout.html b/doc/sphinx/theme/sphinx13/basic_layout.html
index 8bfb459..b2a6453 100644
--- a/doc/sphinx/theme/sphinx13/basic_layout.html
+++ b/doc/sphinx/theme/sphinx13/basic_layout.html
@@ -4,7 +4,7 @@
Master layout template for Sphinx themes.
- :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
+ :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
{%- block doctype -%}{%- if html5_doctype %}
@@ -17,9 +17,7 @@
{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
(sidebars != []) %}
-{%- set url_root = pathto('', 1) %}
-{# XXX necessary? #}
-{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
+{# URL root should never be #, then all links are fragments #}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " &#8212; "|safe + docstitle|e %}
{%- else %}
@@ -37,7 +35,7 @@
{%- if not loop.first %}{{ reldelim2 }}{% endif %}</li>
{%- endfor %}
{%- block rootrellink %}
- <li class="nav-item nav-item-0"><a href="{{ pathto(master_doc)|e }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
+ <li class="nav-item nav-item-0"><a href="{{ pathto(root_doc)|e }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
{%- endblock %}
{%- for parent in parents %}
<li class="nav-item nav-item-{{ loop.index }}"><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li>
@@ -52,9 +50,9 @@
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
{%- block sidebarlogo %}
- {%- if logo %}
- <p class="logo"><a href="{{ pathto(master_doc)|e }}">
- <img class="logo" src="{{ pathto('_static/' + logo, 1)|e }}" alt="Logo"/>
+ {%- if logo_url %}
+ <p class="logo"><a href="{{ pathto(root_doc)|e }}">
+ <img class="logo" src="{{ logo_url|e }}" alt="Logo"/>
</a></p>
{%- endif %}
{%- endblock %}
@@ -87,15 +85,12 @@
{%- endmacro %}
{%- macro script() %}
- <script id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
{%- for js in script_files %}
{{ js_tag(js) }}
{%- endfor %}
{%- endmacro %}
{%- macro css() %}
- <link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
- <link rel="stylesheet" href="{{ pathto('_static/' + style, 1)|e }}" type="text/css" />
{%- for css in css_files %}
{%- if css|attr("filename") %}
{{ css_tag(css) }}
@@ -139,8 +134,8 @@
title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
href="{{ pathto('_static/opensearch.xml', 1) }}"/>
{%- endif %}
- {%- if favicon %}
- <link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1)|e }}"/>
+ {%- if favicon_url %}
+ <link rel="shortcut icon" href="{{ favicon_url|e }}"/>
{%- endif %}
{%- endif %}
{%- block linktags %}
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
index 7cfc6e2..428598e 100644
--- a/doc/whatsnew.rst
+++ b/doc/whatsnew.rst
@@ -11,11 +11,88 @@ significant to the use and behavior of the library. This is not meant
to be a comprehensive list of changes. For such a complete record,
consult the `lmfit GitHub repository`_.
+.. _whatsnew_110_label:
+
+Version 1.1.0 Release Notes (November 27, 2022)
+=================================================
+
+New features:
+
+- add ``Pearson4Model`` (@lellid; PR #800)
+- add ``SplineModel`` (PR #804)
+- add R^2 ``rsquared`` statistic to fit outputs and reports for Model fits (Issue #803; PR #810)
+- add calculation of ``dely`` for model components of composite models (Issue #761; PR #826)
+
+Bug fixes/enhancements:
+
+- make sure variable ``spercent`` is always defined in ``params_html_table`` functions (reported by @MySlientWind; Issue #768, PR #770)
+- always initialize the variables ``success`` and ``covar`` in the ``MinimizerResult`` (reported by Marc W. Pound; PR #771)
+- build package following PEP517/PEP518; use ``pyproject.toml`` and ``setup.cfg``; leave ``setup.py`` for now (PR #777)
+- components used to create a ``CompositeModel`` can now have different independent variables (@Julian-Hochhaus; Discussion #787; PR #788)
+- fixed function definition for ``StepModel(form='linear')``, was not consistent with the other ones (@matpompili; PR #794)
+- fixed height factor for ``Gaussian2dModel``, was not correct (@matpompili; PR #795)
+- for covariances with negative diagonal elements, we set the covariance to ``None`` (PR #813)
+- fixed linear mode for ``RectangleModel`` (@arunpersaud; Issue #815; PR #816)
+- report correct initial values for parameters with bounds (Issue #820; PR #821)
+- allow recalculation of confidence intervals (@jagerber48; PR #798)
+- include 'residual' in JSON output of ModelResult.dumps (@mac01021; PR #830)
+- supports and is tested against Python 3.11; updated minimum required version of SciPy, NumPy, and asteval (PR #832)
+
+Deprecations:
+
+- remove support for Python 3.6 which reached EOL on 2021-12-23 (PR #790)
+
+
+.. _whatsnew_103_label:
+
+Version 1.0.3 Release Notes (October 14, 2021)
+==============================================
+
+Potentially breaking change:
+
+- argument ``x`` is now required for the ``guess`` method of Models (Issue #747; PR #748)
+
+To get reasonable estimates for starting values one should always supply both ``x`` and ``y`` values; previously it would sometimes work
+when only providing ``data`` (i.e., y-values). With the change above, ``x`` is now required in the ``guess`` method call, so scripts might
+need to be updated to supply ``x`` explicitly.
+
+Bug fixes/enhancements:
+
+- do not overwrite user-specified figure titles in Model.plot() functions and allow setting with ``title`` keyword argument (PR #711)
+- preserve Parameters subclass in deepcopy (@jenshnielsen; PR #719)
+- coerce ``data`` and ``independent_vars`` to NumPy array with ``dtype=float64`` or ``dtype=complex128`` where applicable (Issues #723 and #728)
+- fix collision between parameter names in built-in models and user-specified parameters (Issue #710 and PR #732)
+- correct error message in PolynomialModel (@kremeyer; PR #737)
+- improved handling of altered JSON data (Issue #739; PR #740, reported by Matthew Giammar)
+- map ``max_nfev`` to ``maxiter`` when using ``differential_evolution`` (PR #749, reported by Olivier B.)
+- correct use of noise versus experimental uncertainty in the documentation (PR #751, reported by Andrés Zelcer)
+- specify return type of ``eval`` method more precisely and allow for plotting of (Complex)ConstantModel by coercing their
+ ``float``, ``int``, or ``complex`` return value to a ``numpy.ndarray`` (Issue #684 and PR #754)
+- fix ``dho`` (Damped Harmonic Oscillator) lineshape (PR #755; @rayosborn)
+- reset ``Minimizer._abort`` to ``False`` before starting a new fit (Issue #756 and PR #757; @azelcer)
+- fix typo in ``guess_from_peak2d`` (@ivan-usovl; PR #758)
+
+Various:
+
+- update asteval dependency to >= 0.9.22 to avoid DeprecationWarnings from NumPy v1.20.0 (PR #707)
+- remove incorrectly spelled ``DonaichModel`` and ``donaich`` lineshape, deprecated in version 1.0.1 (PR #707)
+- remove occurrences of OrderedDict throughout the code; dict is order-preserving since Python 3.6 (PR #713)
+- update the contributing instructions (PR #718; @martin-majlis)
+- (again) defer import of matplotlib to when it is needed (@zobristnicholas; PR #721)
+- fix description of ``name`` argument in ``Parameters.add`` (@kristianmeyerr; PR #725)
+- update dependencies, make sure a functional development environment is installed on Windows (Issue #712)
+- use ``setuptools_scm`` for version info instead of ``versioneer`` (PR #729)
+- transition to using ``f-strings`` (PR #730)
+- mark ``test_manypeaks_speed.py`` as flaky to avoid intermittent test failures (repeat up to 5 times; PR #745)
+- update scipy dependency to >= 1.4.0 (PR #751)
+- improvement to output of examples in sphinx-gallery and use higher resolution figures (PR #753)
+- remove deprecated functions ``lmfit.printfuncs.report_errors`` and ``asteval`` argument in ``Parameters`` class (PR #759)
+
.. _whatsnew_102_label:
-Version 1.0.2 Release Notes
-===========================
+Version 1.0.2 Release Notes (February 7, 2021)
+==============================================
Version 1.0.2 officially supports Python 3.9 and has dropped support for Python 3.5. The minimum version
of the following dependencies were updated: asteval>=0.9.21, numpy>=1.18, and scipy>=1.3.
@@ -67,7 +144,7 @@ require 3.6+ so that we can use formatting-strings and rely on dictionaries bein
New features:
- added thermal distribution model and lineshape (PR #620; @mpmdean)
-- introduced a new argument ``max_nfev`` to uniformly specify the maximum number of function evalutions (PR #610)
+- introduced a new argument ``max_nfev`` to uniformly specify the maximum number of function evaluations (PR #610)
**Please note: all other arguments (e.g., ``maxfev``, ``maxiter``, ...) will no longer be passed to the underlying
solver. A warning will be emitted stating that one should use ``max_nfev``.**
- the attribute ``call_kws`` was added to the ``MinimizerResult`` class and contains the keyword arguments that are
diff --git a/examples/doc_builtinmodels_nistgauss.py b/examples/doc_builtinmodels_nistgauss.py
index d5e38e6..56e6eb1 100644
--- a/examples/doc_builtinmodels_nistgauss.py
+++ b/examples/doc_builtinmodels_nistgauss.py
@@ -33,17 +33,17 @@ out = mod.fit(y, pars, x=x)
print(out.fit_report(min_correl=0.5))
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
-axes[0].plot(x, y, 'b')
-axes[0].plot(x, init, 'k--', label='initial fit')
-axes[0].plot(x, out.best_fit, 'r-', label='best fit')
-axes[0].legend(loc='best')
+axes[0].plot(x, y)
+axes[0].plot(x, init, '--', label='initial fit')
+axes[0].plot(x, out.best_fit, '-', label='best fit')
+axes[0].legend()
comps = out.eval_components(x=x)
-axes[1].plot(x, y, 'b')
-axes[1].plot(x, comps['g1_'], 'g--', label='Gaussian component 1')
-axes[1].plot(x, comps['g2_'], 'm--', label='Gaussian component 2')
-axes[1].plot(x, comps['exp_'], 'k--', label='Exponential component')
-axes[1].legend(loc='best')
+axes[1].plot(x, y)
+axes[1].plot(x, comps['g1_'], '--', label='Gaussian component 1')
+axes[1].plot(x, comps['g2_'], '--', label='Gaussian component 2')
+axes[1].plot(x, comps['exp_'], '--', label='Exponential component')
+axes[1].legend()
plt.show()
# <end examples/doc_builtinmodels_nistgauss.py>
diff --git a/examples/doc_builtinmodels_nistgauss2.py b/examples/doc_builtinmodels_nistgauss2.py
index cbfe98f..80ce2f5 100644
--- a/examples/doc_builtinmodels_nistgauss2.py
+++ b/examples/doc_builtinmodels_nistgauss2.py
@@ -14,7 +14,7 @@ gauss2 = GaussianModel(prefix='g2_')
def index_of(arrval, value):
- """return index of array *at or below* value """
+ """Return index of array *at or below* value."""
if value < min(arrval):
return 0
return max(np.where(arrval <= value)[0])
@@ -35,9 +35,9 @@ out = mod.fit(y, pars, x=x)
print(out.fit_report(min_correl=0.5))
-plt.plot(x, y, 'b')
-plt.plot(x, out.init_fit, 'k--', label='initial fit')
-plt.plot(x, out.best_fit, 'r-', label='best fit')
-plt.legend(loc='best')
+plt.plot(x, y)
+plt.plot(x, out.init_fit, '--', label='initial fit')
+plt.plot(x, out.best_fit, '-', label='best fit')
+plt.legend()
plt.show()
# <end examples/doc_nistgauss2.py>
diff --git a/examples/doc_builtinmodels_peakmodels.py b/examples/doc_builtinmodels_peakmodels.py
index 108ad53..0708a25 100644
--- a/examples/doc_builtinmodels_peakmodels.py
+++ b/examples/doc_builtinmodels_peakmodels.py
@@ -16,9 +16,9 @@ out = mod.fit(y, pars, x=x)
print(out.fit_report(min_correl=0.25))
-plt.plot(x, y, 'b-')
-plt.plot(x, out.best_fit, 'r-', label='Gaussian Model')
-plt.legend(loc='best')
+plt.plot(x, y)
+plt.plot(x, out.best_fit, '-', label='Gaussian Model')
+plt.legend()
plt.show()
@@ -30,9 +30,9 @@ out = mod.fit(y, pars, x=x)
print(out.fit_report(min_correl=0.25))
plt.figure()
-plt.plot(x, y, 'b-')
-plt.plot(x, out.best_fit, 'r-', label='Lorentzian Model')
-plt.legend(loc='best')
+plt.plot(x, y, '-')
+plt.plot(x, out.best_fit, '-', label='Lorentzian Model')
+plt.legend()
plt.show()
@@ -45,16 +45,16 @@ print(out.fit_report(min_correl=0.25))
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
-axes[0].plot(x, y, 'b-')
-axes[0].plot(x, out.best_fit, 'r-', label='Voigt Model\ngamma constrained')
-axes[0].legend(loc='best')
+axes[0].plot(x, y, '-')
+axes[0].plot(x, out.best_fit, '-', label='Voigt Model\ngamma constrained')
+axes[0].legend()
# free gamma parameter
pars['gamma'].set(value=0.7, vary=True, expr='')
out_gamma = mod.fit(y, pars, x=x)
-axes[1].plot(x, y, 'b-')
-axes[1].plot(x, out_gamma.best_fit, 'r-', label='Voigt Model\ngamma unconstrained')
-axes[1].legend(loc='best')
+axes[1].plot(x, y, '-')
+axes[1].plot(x, out_gamma.best_fit, '-', label='Voigt Model\ngamma unconstrained')
+axes[1].legend()
plt.show()
# <end examples/doc_builtinmodels_peakmodels.py>
diff --git a/examples/doc_builtinmodels_splinemodel.py b/examples/doc_builtinmodels_splinemodel.py
new file mode 100644
index 0000000..a47ebea
--- /dev/null
+++ b/examples/doc_builtinmodels_splinemodel.py
@@ -0,0 +1,63 @@
+# <examples/doc_builtinmodels_splinemodel.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import GaussianModel, SplineModel
+
+data = np.loadtxt('test_splinepeak.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+plt.plot(x, y, label='data')
+
+model = GaussianModel(prefix='peak_')
+params = model.make_params(amplitude=8, center=16, sigma=1)
+
+# make a background spline with knots evenly spaced over the background,
+# but sort of skipping over where the peak is
+knot_xvals3 = np.array([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25])
+knot_xvals2 = np.array([1, 3, 5, 7, 9, 11, 13, 16, 19, 21, 23, 25]) # noqa: E241
+knot_xvals1 = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25]) # noqa: E241
+
+bkg = SplineModel(prefix='bkg_', xknots=knot_xvals1)
+params.update(bkg.guess(y, x))
+
+model = model + bkg
+
+params['peak_center'].min = 10
+params['peak_center'].max = 20
+params['peak_amplitude'].min = 0
+
+plt.plot(x, model.eval(params, x=x), label='initial')
+
+out = model.fit(y, params, x=x)
+print(out.fit_report(min_correl=0.3))
+comps = out.eval_components()
+
+plt.plot(x, out.best_fit, label='best fit')
+plt.plot(x, comps['bkg_'], label='background')
+plt.plot(x, comps['peak_'], label='peak')
+
+knot_yvals = np.array([o.value for o in out.params.values() if o.name.startswith('bkg')])
+plt.plot(knot_xvals1, knot_yvals, 'o', color='black', label='spline knots values')
+plt.legend()
+plt.show()
+
+
+# knot positions | peak amplitude
+# 11, 13, 19, 21 | 12.223 0.295
+# 11, 13, 16, 19, 21 | 11.746 0.594
+# 11, 13, 15, 17, 19, 21 | 12.052 0.872
+
+
+plt.plot(x, y, 'o', label='data')
+
+for nknots in (10, 15, 20, 25, 30):
+ model = SplineModel(prefix='bkg_', xknots=np.linspace(0, 25, nknots))
+ params = model.guess(y, x)
+ out = model.fit(y, params, x=x)
+ plt.plot(x, out.best_fit, label=f'best-fit ({nknots} knots)')
+plt.legend()
+plt.show()
+
+# <end examples/doc_builtinmodels_splinemodel.py>
diff --git a/examples/doc_builtinmodels_stepmodel.py b/examples/doc_builtinmodels_stepmodel.py
index ef214a5..8f4f716 100644
--- a/examples/doc_builtinmodels_stepmodel.py
+++ b/examples/doc_builtinmodels_stepmodel.py
@@ -22,9 +22,9 @@ out = mod.fit(y, pars, x=x)
print(out.fit_report())
-plt.plot(x, y, 'b')
-plt.plot(x, out.init_fit, 'k--', label='initial fit')
-plt.plot(x, out.best_fit, 'r-', label='best fit')
-plt.legend(loc='best')
+plt.plot(x, y)
+plt.plot(x, out.init_fit, '--', label='initial fit')
+plt.plot(x, out.best_fit, '-', label='best fit')
+plt.legend()
plt.show()
# <end examples/doc_builtinmodels_stepmodel.py>
diff --git a/examples/doc_confidence_advanced.py b/examples/doc_confidence_advanced.py
index 39e173c..af43063 100644
--- a/examples/doc_confidence_advanced.py
+++ b/examples/doc_confidence_advanced.py
@@ -33,8 +33,9 @@ lmfit.printfuncs.report_ci(ci)
# plot data and best fit
plt.figure()
-plt.plot(x, y, 'b')
-plt.plot(x, residual(out2.params) + y, 'r-')
+plt.plot(x, y)
+plt.plot(x, residual(out2.params) + y, '-')
+plt.show()
# plot confidence intervals (a1 vs t2 and a2 vs t2)
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
@@ -49,6 +50,7 @@ ctp = axes[1].contourf(cx, cy, grid, np.linspace(0, 1, 11))
fig.colorbar(ctp, ax=axes[1])
axes[1].set_xlabel('a2')
axes[1].set_ylabel('t2')
+plt.show()
# plot dependence between two parameters
fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
@@ -62,6 +64,5 @@ axes[0].set_ylabel('t2')
axes[1].scatter(cx2, cy2, c=prob2, s=30)
axes[1].set_xlabel('t2')
axes[1].set_ylabel('a1')
-
plt.show()
# <end examples/doc_confidence_advanced.py>
diff --git a/examples/doc_fitting_emcee.py b/examples/doc_fitting_emcee.py
index 419c543..94b6761 100644
--- a/examples/doc_fitting_emcee.py
+++ b/examples/doc_fitting_emcee.py
@@ -19,9 +19,6 @@ x = np.linspace(1, 10, 250)
np.random.seed(0)
y = (3.0*np.exp(-x/2) - 5.0*np.exp(-(x-0.1) / 10.) +
0.1*np.random.randn(x.size))
-if HASPYLAB:
- plt.plot(x, y, 'b')
- plt.show()
p = lmfit.Parameters()
p.add_many(('a1', 4), ('a2', 4), ('t1', 3), ('t2', 3., True))
@@ -36,9 +33,9 @@ mi = lmfit.minimize(residual, p, method='nelder', nan_policy='omit')
lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
if HASPYLAB:
plt.figure()
- plt.plot(x, y, 'b')
- plt.plot(x, residual(mi.params) + y, 'r', label='best fit')
- plt.legend(loc='best')
+ plt.plot(x, y, 'o')
+ plt.plot(x, residual(mi.params) + y, label='best fit')
+ plt.legend()
plt.show()
# Place bounds on the ln(sigma) parameter that emcee will automatically add
@@ -55,7 +52,7 @@ if HASPYLAB and HASCORNER:
plt.show()
if HASPYLAB:
- plt.plot(res.acceptance_fraction)
+ plt.plot(res.acceptance_fraction, 'o')
plt.xlabel('walker')
plt.ylabel('acceptance fraction')
plt.show()
@@ -88,9 +85,9 @@ for name, param in p.items():
if HASPYLAB:
plt.figure()
- plt.plot(x, y, 'b')
- plt.plot(x, residual(mi.params) + y, 'r', label='Nelder-Mead')
- plt.plot(x, residual(res.params) + y, 'k--', label='emcee')
+ plt.plot(x, y, 'o')
+ plt.plot(x, residual(mi.params) + y, label='Nelder-Mead')
+ plt.plot(x, residual(res.params) + y, '--', label='emcee')
plt.legend()
plt.show()
diff --git a/examples/doc_model_composite.py b/examples/doc_model_composite.py
index 3695d30..26be84f 100644
--- a/examples/doc_model_composite.py
+++ b/examples/doc_model_composite.py
@@ -52,12 +52,12 @@ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
axes[0].plot(x, y, 'bo')
axes[0].plot(x, result.init_fit, 'k--', label='initial fit')
axes[0].plot(x, result.best_fit, 'r-', label='best fit')
-axes[0].legend(loc='best')
+axes[0].legend()
axes[1].plot(x, y, 'bo')
axes[1].plot(x, 10*comps['jump'], 'k--', label='Jump component')
axes[1].plot(x, 10*comps['gaussian'], 'r-', label='Gaussian component')
-axes[1].legend(loc='best')
+axes[1].legend()
plt.show()
# <end examples/doc_model_composite.py>
diff --git a/examples/doc_model_gaussian.py b/examples/doc_model_gaussian.py
index a23c281..e06e631 100644
--- a/examples/doc_model_gaussian.py
+++ b/examples/doc_model_gaussian.py
@@ -19,9 +19,9 @@ result = gmodel.fit(y, x=x, amp=5, cen=5, wid=1)
print(result.fit_report())
-plt.plot(x, y, 'bo')
-plt.plot(x, result.init_fit, 'k--', label='initial fit')
-plt.plot(x, result.best_fit, 'r-', label='best fit')
-plt.legend(loc='best')
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.legend()
plt.show()
# <end examples/doc_model_gaussian.py>
diff --git a/examples/doc_model_loadmodel.py b/examples/doc_model_loadmodel.py
index 11b2996..3ac5653 100644
--- a/examples/doc_model_loadmodel.py
+++ b/examples/doc_model_loadmodel.py
@@ -22,7 +22,7 @@ params['amp'].min = 0.0
result = model.fit(y, params, x=x)
print(result.fit_report())
-plt.plot(x, y, 'bo')
-plt.plot(x, result.best_fit, 'r-')
+plt.plot(x, y, 'o')
+plt.plot(x, result.best_fit, '-')
plt.show()
# <end examples/doc_model_loadmodel.py>
diff --git a/examples/doc_model_loadmodelresult.py b/examples/doc_model_loadmodelresult.py
index 8eec5af..a286f16 100644
--- a/examples/doc_model_loadmodelresult.py
+++ b/examples/doc_model_loadmodelresult.py
@@ -11,7 +11,7 @@ y = data[:, 1]
result = load_modelresult('gauss_modelresult.sav')
print(result.fit_report())
-plt.plot(x, y, 'bo')
-plt.plot(x, result.best_fit, 'r-')
+plt.plot(x, y, 'o')
+plt.plot(x, result.best_fit, '-')
plt.show()
# <end examples/doc_model_loadmodelresult.py>
diff --git a/examples/doc_model_loadmodelresult2.py b/examples/doc_model_loadmodelresult2.py
index dc91e67..d8ac0f4 100644
--- a/examples/doc_model_loadmodelresult2.py
+++ b/examples/doc_model_loadmodelresult2.py
@@ -11,7 +11,7 @@ y = dat[:, 0]
result = load_modelresult('nistgauss_modelresult.sav')
print(result.fit_report())
-plt.plot(x, y, 'bo')
-plt.plot(x, result.best_fit, 'r-')
+plt.plot(x, y, 'o')
+plt.plot(x, result.best_fit, '-')
plt.show()
# <end examples/doc_model_loadmodelresult2.py>
diff --git a/examples/doc_model_two_components.py b/examples/doc_model_two_components.py
index 853f670..95088e0 100644
--- a/examples/doc_model_two_components.py
+++ b/examples/doc_model_two_components.py
@@ -26,9 +26,9 @@ result = mod.fit(y, pars, x=x)
print(result.fit_report())
-plt.plot(x, y, 'bo')
-plt.plot(x, result.init_fit, 'k--', label='initial fit')
-plt.plot(x, result.best_fit, 'r-', label='best fit')
-plt.legend(loc='best')
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.legend()
plt.show()
# <end examples/doc_model_two_components.py>
diff --git a/examples/doc_model_uncertainty.py b/examples/doc_model_uncertainty.py
index cc2c160..6aed14f 100644
--- a/examples/doc_model_uncertainty.py
+++ b/examples/doc_model_uncertainty.py
@@ -21,11 +21,11 @@ print(result.fit_report())
dely = result.eval_uncertainty(sigma=3)
-plt.plot(x, y, 'bo')
-plt.plot(x, result.init_fit, 'k--', label='initial fit')
-plt.plot(x, result.best_fit, 'r-', label='best fit')
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
plt.fill_between(x, result.best_fit-dely, result.best_fit+dely,
color="#ABABAB", label=r'3-$\sigma$ uncertainty band')
-plt.legend(loc='best')
+plt.legend()
plt.show()
# <end examples/doc_model_uncertainty.py>
diff --git a/examples/doc_model_uncertainty2.py b/examples/doc_model_uncertainty2.py
new file mode 100644
index 0000000..4cf7090
--- /dev/null
+++ b/examples/doc_model_uncertainty2.py
@@ -0,0 +1,84 @@
+# <examples/doc_model_uncertainty2.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import ExponentialModel, GaussianModel
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+model = (GaussianModel(prefix='g1_') +
+ GaussianModel(prefix='g2_') +
+ ExponentialModel(prefix='bkg_'))
+
+params = model.make_params(bkg_amplitude=100, bkg_decay=80,
+ g1_amplitude=3000,
+ g1_center=100,
+ g1_sigma=10,
+ g2_amplitude=3000,
+ g2_center=150,
+ g2_sigma=10)
+
+result = model.fit(y, params, x=x)
+
+print(result.fit_report(min_correl=0.5))
+
+
+comps = result.eval_components(x=x)
+dely = result.eval_uncertainty(sigma=3)
+
+
+fig, axes = plt.subplots(2, 2, figsize=(12.8, 9.6))
+
+axes[0][0].plot(x, y, 'o', color='#99002299', markersize=3, label='data')
+axes[0][0].plot(x, result.best_fit, '-', label='best fit')
+axes[0][0].plot(x, result.init_fit, '--', label='initial fit')
+axes[0][0].set_title('data, initial fit, and best-fit')
+axes[0][0].legend()
+
+axes[0][1].plot(x, y, 'o', color='#99002299', markersize=3, label='data')
+axes[0][1].plot(x, result.best_fit, '-', label='best fit')
+axes[0][1].fill_between(x, result.best_fit-dely, result.best_fit+dely,
+ color="#8A8A8A", label=r'3-$\sigma$ band')
+axes[0][1].set_title('data, best-fit, and uncertainty band')
+axes[0][1].legend()
+
+
+axes[1][0].plot(x, result.best_fit, '-', label=r'best fit, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ result.best_fit-result.dely,
+ result.best_fit+result.dely,
+ color="#8A8A8A")
+
+axes[1][0].plot(x, comps['bkg_'], label=r'background, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ comps['bkg_']-result.dely_comps['bkg_'],
+ comps['bkg_']+result.dely_comps['bkg_'],
+ color="#8A8A8A")
+
+axes[1][0].plot(x, comps['g1_'], label=r'Gaussian #1, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ comps['g1_']-result.dely_comps['g1_'],
+ comps['g1_']+result.dely_comps['g1_'],
+ color="#8A8A8A")
+
+axes[1][0].plot(x, comps['g2_'], label=r'Gaussian #2, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ comps['g2_']-result.dely_comps['g2_'],
+ comps['g2_']+result.dely_comps['g2_'],
+ color="#8A8A8A")
+axes[1][0].set_title('model components with uncertainty bands')
+axes[1][0].legend()
+#
+axes[1][1].plot(x, result.best_fit, '-', label='best fit')
+axes[1][1].plot(x, 10*result.dely, label=r'3-$\sigma$ total (x10)')
+axes[1][1].plot(x, 10*result.dely_comps['bkg_'], label=r'3-$\sigma$ background (x10)')
+axes[1][1].plot(x, 10*result.dely_comps['g1_'], label=r'3-$\sigma$ Gaussian #1 (x10)')
+axes[1][1].plot(x, 10*result.dely_comps['g2_'], label=r'3-$\sigma$ Gaussian #2 (x10)')
+axes[1][1].set_title('uncertainties for model components')
+axes[1][1].legend()
+
+plt.show()
+
+# <end examples/doc_model_uncertainty2.py>
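A detail worth spelling out for the new example above: calling ``ModelResult.eval_uncertainty`` not only returns the total uncertainty band but also stores it on the result, and for composite models it additionally fills ``result.dely_comps`` with one band per prefixed component. A minimal sketch of that pattern, assuming the ``result`` from this example:

dely = result.eval_uncertainty(sigma=3)   # also stored as result.dely
bkg_band = result.dely_comps['bkg_']      # per-component band (composite models only)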
diff --git a/examples/doc_model_with_iter_callback.py b/examples/doc_model_with_iter_callback.py
index 77d56c3..5318de5 100644
--- a/examples/doc_model_with_iter_callback.py
+++ b/examples/doc_model_with_iter_callback.py
@@ -7,11 +7,12 @@ from lmfit.models import GaussianModel, LinearModel
def per_iteration(pars, iteration, resid, *args, **kws):
- print(" ITER ", iteration, ["%.5f" % p for p in pars.values()])
+ print(" ITER ", iteration, [f"{p.name} = {p.value:.5f}" for p in pars.values()])
x = linspace(0., 20, 401)
y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+random.seed(2021)
y = y - .20*x + 3.333 + random.normal(scale=0.23, size=x.size)
mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
@@ -25,12 +26,12 @@ pars['bkg_slope'].value = 0.0
out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
-plt.plot(x, y, 'b--')
+plt.plot(x, y, '--')
-print('Nfev = ', out.nfev)
+print(f'Nfev = {out.nfev}')
print(out.fit_report())
-plt.plot(x, out.best_fit, 'k-', label='best fit')
-plt.legend(loc='best')
+plt.plot(x, out.best_fit, '-', label='best fit')
+plt.legend()
plt.show()
# <end examples/doc_with_itercb.py>
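The ``iter_cb`` hook used above receives the parameters, iteration number, and residual at every step, which makes it useful for more than printing: lmfit aborts the minimization when the callback returns True. A minimal sketch (the cut-off of 50 iterations is illustrative, not anything lmfit prescribes):

def per_iteration(pars, iteration, resid, *args, **kws):
    # returning True asks lmfit to abort the fit after this iteration
    return iteration > 50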
diff --git a/examples/doc_model_with_nan_policy.py b/examples/doc_model_with_nan_policy.py
index bb7b623..a58766c 100644
--- a/examples/doc_model_with_nan_policy.py
+++ b/examples/doc_model_with_nan_policy.py
@@ -25,9 +25,9 @@ print(result.fit_report())
x_ = x[np.where(np.isfinite(y))]
y_ = y[np.where(np.isfinite(y))]
-plt.plot(x_, y_, 'bo')
-plt.plot(x_, result.init_fit, 'k--', label='initial fit')
-plt.plot(x_, result.best_fit, 'r-', label='best fit')
-plt.legend(loc='best')
+plt.plot(x_, y_, 'o')
+plt.plot(x_, result.init_fit, '--', label='initial fit')
+plt.plot(x_, result.best_fit, '-', label='best fit')
+plt.legend()
plt.show()
# <end examples/doc_model_with_nan_policy.py>
diff --git a/examples/doc_parameters_basic.py b/examples/doc_parameters_basic.py
index ae182da..dc0c95b 100644
--- a/examples/doc_parameters_basic.py
+++ b/examples/doc_parameters_basic.py
@@ -5,6 +5,7 @@ from lmfit import Minimizer, Parameters, report_fit
# create data to be fitted
x = np.linspace(0, 15, 301)
+np.random.seed(2021)
data = (5.0 * np.sin(2.0*x - 0.1) * np.exp(-x*x*0.025) +
np.random.normal(size=x.size, scale=0.2))
@@ -40,8 +41,8 @@ report_fit(result)
# try to plot results
try:
import matplotlib.pyplot as plt
- plt.plot(x, data, 'k+')
- plt.plot(x, final, 'r')
+ plt.plot(x, data, '+')
+ plt.plot(x, final)
plt.show()
except ImportError:
pass
diff --git a/examples/doc_parameters_valuesdict.py b/examples/doc_parameters_valuesdict.py
index 702ffcc..7a98f82 100644
--- a/examples/doc_parameters_valuesdict.py
+++ b/examples/doc_parameters_valuesdict.py
@@ -5,6 +5,7 @@ from lmfit import Minimizer, Parameters, report_fit
# create data to be fitted
x = np.linspace(0, 15, 301)
+np.random.seed(2021)
data = (5.0 * np.sin(2.0*x - 0.1) * np.exp(-x*x*0.025) +
np.random.normal(size=x.size, scale=0.2))
@@ -38,8 +39,8 @@ report_fit(result)
# try to plot results
try:
import matplotlib.pyplot as plt
- plt.plot(x, data, 'k+')
- plt.plot(x, final, 'r')
+ plt.plot(x, data, '+')
+ plt.plot(x, final)
plt.show()
except ImportError:
pass
diff --git a/examples/example_Model_interface.py b/examples/example_Model_interface.py
index de05f0c..c72afdc 100644
--- a/examples/example_Model_interface.py
+++ b/examples/example_Model_interface.py
@@ -25,6 +25,7 @@ def decay(t, N, tau):
# The parameters are in no particular order. We'll need some example data. I
# will use ``N=7`` and ``tau=3``, and add a little noise.
t = np.linspace(0, 5, num=1000)
+np.random.seed(2021)
data = decay(t, 7, 3) + np.random.randn(t.size)
###############################################################################
@@ -148,7 +149,7 @@ report_fit(result.params)
# The default setting is ``nan_policy='raise'``, which does check for NaNs and
# raises an exception when present.
#
-# Null-chekcing relies on ``pandas.isnull`` if it is available. If pandas
+# Null-checking relies on ``pandas.isnull`` if it is available. If pandas
# cannot be imported, it silently falls back on ``numpy.isnan``.
###############################################################################
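The ``nan_policy`` behavior described above is chosen per Model. A minimal sketch using the ``decay`` function and data from this example (the injected NaN and the initial guesses are illustrative):

import numpy as np
import lmfit

model = lmfit.Model(decay, nan_policy='omit')   # drop NaN points instead of raising
data[100] = np.nan                              # illustrative missing observation
result = model.fit(data, t=t, N=10, tau=1)      # the default 'raise' would error here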
diff --git a/examples/example_brute.py b/examples/example_brute.py
index 0e183c8..743abc6 100644
--- a/examples/example_brute.py
+++ b/examples/example_brute.py
@@ -26,7 +26,7 @@ from lmfit import Minimizer, Parameters, fit_report
# "We illustrate the use of brute to seek the global minimum of a function of
# two variables that is given as the sum of a positive-definite quadratic and
# two deep “Gaussian-shaped” craters. Specifically, define the objective
-# function f as the sum of three other functions, ``f = f1 + f2 + f3``. We
+# function ``f`` as the sum of three other functions, ``f = f1 + f2 + f3``. We
# suppose each of these has a signature ``(z, *params)``, where ``z = (x, y)``,
# and params and the functions are as defined below."
#
@@ -93,7 +93,7 @@ result = fitter.minimize(method='brute')
###############################################################################
# , which will increment ``x`` and ``y`` from ``-4`` to ``4`` (not inclusive)
# in increments of ``0.25``.
-grid_x, grid_y = [np.unique(par.ravel()) for par in result.brute_grid]
+grid_x, grid_y = (np.unique(par.ravel()) for par in result.brute_grid)
print(grid_x)
###############################################################################
@@ -106,17 +106,17 @@ print(grid_x)
print(result.brute_x0)
###############################################################################
-# ``result.brute_fval`` -- Function value at the point x0.
+# ``result.brute_fval`` -- Function value at the point ``x0``.
print(result.brute_fval)
###############################################################################
# ``result.brute_grid`` -- Representation of the evaluation grid. It has the
-# same length as x0.
+# same length as ``x0``.
print(result.brute_grid)
###############################################################################
# ``result.brute_Jout`` -- Function values at each point of the evaluation
-# grid, i.e., Jout = func(\*grid).
+# grid, i.e., ``Jout = func(*grid)``.
print(result.brute_Jout)
###############################################################################
@@ -128,13 +128,14 @@ print(result.brute_Jout)
#
# In this example, we will explain some of the options of the algorithm.
#
-# We start off by generating some synthetic data with noise for a decaying
-# sine wave, define an objective function and create a Parameter set.
+# We start off by generating some synthetic data with noise for a decaying sine
+# wave, define an objective function, and create/initialize a Parameter set.
x = np.linspace(0, 15, 301)
np.random.seed(7)
noise = np.random.normal(size=x.size, scale=0.2)
data = (5. * np.sin(2*x - 0.1) * np.exp(-x*x*0.025) + noise)
-plt.plot(x, data, 'b')
+plt.plot(x, data, 'o')
+plt.show()
def fcn2min(params, x, data):
@@ -169,7 +170,7 @@ params['omega'].set(brute_step=0.25)
params.pretty_print()
###############################################################################
-# First, we initialize a Minimizer and perform the grid search:
+# First, we initialize a ``Minimizer`` and perform the grid search:
fitter = Minimizer(fcn2min, params, fcn_args=(x, data))
result_brute = fitter.minimize(method='brute', Ns=25, keep=25)
@@ -186,44 +187,45 @@ result_brute = fitter.minimize(method='brute', Ns=25, keep=25)
par_name = 'shift'
indx_shift = result_brute.var_names.index(par_name)
grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
-print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name,
- len(grid_shift),
- grid_shift))
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
###############################################################################
# If finite bounds are not set for a certain parameter then the user **must**
# specify ``brute_step`` - three more scenarios are considered here:
#
-# **(2)** lower bound (min) and brute_step are specified:
-# range = (min, min + Ns * brute_step, brute_step)
+# **(2)** lower bound (``min``) and ``brute_step`` are specified:
+# ``range = (min, min + Ns * brute_step, brute_step)``
par_name = 'amp'
indx_shift = result_brute.var_names.index(par_name)
grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
-print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name, len(grid_shift), grid_shift))
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
###############################################################################
-# **(3)** upper bound (max) and brute_step are specified:
-# range = (max - Ns * brute_step, max, brute_step)
+# **(3)** upper bound (``max``) and ``brute_step`` are specified:
+# ``range = (max - Ns * brute_step, max, brute_step)``
par_name = 'omega'
indx_shift = result_brute.var_names.index(par_name)
grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
-print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name, len(grid_shift), grid_shift))
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
###############################################################################
-# **(4)** numerical value (value) and brute_step are specified:
-# range = (value - (Ns//2) * brute_step, value + (Ns//2) * brute_step, brute_step)
+# **(4)** numerical value (``value``) and ``brute_step`` are specified:
+# ``range = (value - (Ns//2) * brute_step, value + (Ns//2) * brute_step, brute_step)``
par_name = 'decay'
indx_shift = result_brute.var_names.index(par_name)
grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
-print("parameter = {}\nnumber of steps = {}\ngrid = {}".format(par_name, len(grid_shift), grid_shift))
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
###############################################################################
# The ``MinimizerResult`` contains all the usual best-fit parameters and
# fitting statistics. For example, the optimal solution from the grid search
# is given below together with a plot:
print(fit_report(result_brute))
-plt.plot(x, data, 'b')
-plt.plot(x, data + fcn2min(result_brute.params, x, data), 'r--')
+
+###############################################################################
+plt.plot(x, data, 'o')
+plt.plot(x, data + fcn2min(result_brute.params, x, data), '--')
+plt.show()
###############################################################################
# We can see that this fit is already very good, which is what we should expect
@@ -233,12 +235,12 @@ plt.plot(x, data + fcn2min(result_brute.params, x, data), 'r--')
# In a more realistic, complicated example the ``brute`` method will be used
# to get reasonable values for the parameters and perform another minimization
# (e.g., using ``leastsq``) using those as starting values. That is where the
-# `keep`` parameter comes into play: it determines the "number of best
+# ``keep`` parameter comes into play: it determines the "number of best
# candidates from the brute force method that are stored in the ``candidates``
# attribute". In the example above we store the best-ranking 25 solutions (the
# default value is ``50`` and storing all the grid points can be accomplished
# by choosing ``all``). The ``candidates`` attribute contains the parameters
-# and ``chisqr`` from the brute force method as a namedtuple,
+# and ``chisqr`` from the brute force method as a ``namedtuple``,
# ``('Candidate', ['params', 'score'])``, sorted on the (lowest) ``chisqr``
# value. To access the values for a particular candidate one can use
# ``result.candidate[#].params`` or ``result.candidate[#].score``, where a
@@ -278,10 +280,10 @@ print(fit_report(best_result))
# As expected the parameters have not changed significantly as they were
# already very close to the "real" values, which can also be appreciated from
# the plots below.
-plt.plot(x, data, 'b')
-plt.plot(x, data + fcn2min(result_brute.params, x, data), 'r--',
+plt.plot(x, data, 'o')
+plt.plot(x, data + fcn2min(result_brute.params, x, data), '-',
label='brute')
-plt.plot(x, data + fcn2min(best_result.params, x, data), 'g--',
+plt.plot(x, data + fcn2min(best_result.params, x, data), '--',
label='brute followed by leastsq')
plt.legend()
@@ -338,7 +340,7 @@ def plot_results_brute(result, best_vals=True, varlabels=None,
if i == 0:
axes[0, 0].axis('off')
ax = axes[i, j+1]
- red_axis = tuple([a for a in range(npars) if a != i])
+ red_axis = tuple(a for a in range(npars) if a != i)
ax.plot(np.unique(result.brute_grid[i]),
np.minimum.reduce(result.brute_Jout, axis=red_axis),
'o', ms=3)
@@ -352,7 +354,7 @@ def plot_results_brute(result, best_vals=True, varlabels=None,
# parameter vs chi2 profile on the left
elif j == 0 and i > 0:
ax = axes[i, j]
- red_axis = tuple([a for a in range(npars) if a != i])
+ red_axis = tuple(a for a in range(npars) if a != i)
ax.plot(np.minimum.reduce(result.brute_Jout, axis=red_axis),
np.unique(result.brute_grid[i]), 'o', ms=3)
ax.invert_xaxis()
@@ -367,7 +369,7 @@ def plot_results_brute(result, best_vals=True, varlabels=None,
# contour plots for all combinations of two parameters
elif j > i:
ax = axes[j, i+1]
- red_axis = tuple([a for a in range(npars) if a not in (i, j)])
+ red_axis = tuple(a for a in range(npars) if a not in (i, j))
X, Y = np.meshgrid(np.unique(result.brute_grid[i]),
np.unique(result.brute_grid[j]))
lvls1 = np.linspace(result.brute_Jout.min(),
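As the ``keep``/``candidates`` discussion above suggests, the stored candidates are typically polished with a local solver. A minimal sketch, assuming ``fitter`` and ``result_brute`` from this example:

import copy

# refine every stored brute-force candidate with leastsq and keep the best
best_result = copy.deepcopy(result_brute)
for candidate in result_brute.candidates:
    trial = fitter.minimize(method='leastsq', params=candidate.params)
    if trial.chisqr < best_result.chisqr:
        best_result = trial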
diff --git a/examples/example_complex_resonator_model.py b/examples/example_complex_resonator_model.py
index 1663d7c..161e444 100644
--- a/examples/example_complex_resonator_model.py
+++ b/examples/example_complex_resonator_model.py
@@ -36,16 +36,15 @@ def linear_resonator(f, f_0, Q, Q_e_real, Q_e_imag):
###############################################################################
-# The standard practice of defining an ``lmfit`` model is as follows:
-
+# The standard practice of defining a ``lmfit`` model is as follows:
class ResonatorModel(lmfit.model.Model):
- __doc__ = "resonator model" + lmfit.models.COMMON_DOC
+ __doc__ = "resonator model" + lmfit.models.COMMON_INIT_DOC
def __init__(self, *args, **kwargs):
- # pass in the defining equation so the user doesn't have to later.
+ # pass in the defining equation so the user doesn't have to later
super().__init__(linear_resonator, *args, **kwargs)
- self.set_param_hint('Q', min=0) # Enforce Q is positive
+ self.set_param_hint('Q', min=0) # enforce Q is positive
def guess(self, data, f=None, **kwargs):
verbose = kwargs.pop('verbose', None)
@@ -55,24 +54,23 @@ class ResonatorModel(lmfit.model.Model):
fmin = f.min()
fmax = f.max()
f_0_guess = f[argmin_s21] # guess that the resonance is the lowest point
- Q_min = 0.1 * (f_0_guess/(fmax-fmin)) # assume the user isn't trying to fit just a small part of a resonance curve.
+ Q_min = 0.1 * (f_0_guess/(fmax-fmin)) # assume the user isn't trying to fit just a small part of a resonance curve
delta_f = np.diff(f) # assume f is sorted
min_delta_f = delta_f[delta_f > 0].min()
Q_max = f_0_guess/min_delta_f # assume data actually samples the resonance reasonably
Q_guess = np.sqrt(Q_min*Q_max) # geometric mean, why not?
Q_e_real_guess = Q_guess/(1-np.abs(data[argmin_s21]))
if verbose:
- print("fmin=", fmin, "fmax=", fmax, "f_0_guess=", f_0_guess)
- print("Qmin=", Q_min, "Q_max=", Q_max, "Q_guess=", Q_guess, "Q_e_real_guess=", Q_e_real_guess)
+ print(f"fmin={fmin}, fmax={fmax}, f_0_guess={f_0_guess}")
+ print(f"Qmin={Q_min}, Q_max={Q_max}, Q_guess={Q_guess}, Q_e_real_guess={Q_e_real_guess}")
params = self.make_params(Q=Q_guess, Q_e_real=Q_e_real_guess, Q_e_imag=0, f_0=f_0_guess)
- params['%sQ' % self.prefix].set(min=Q_min, max=Q_max)
- params['%sf_0' % self.prefix].set(min=fmin, max=fmax)
+ params[f'{self.prefix}Q'].set(min=Q_min, max=Q_max)
+ params[f'{self.prefix}f_0'].set(min=fmin, max=fmax)
return lmfit.models.update_param_vals(params, self.prefix, **kwargs)
###############################################################################
# Now let's use the model to generate some fake data:
-
resonator = ResonatorModel()
true_params = resonator.make_params(f_0=100, Q=10000, Q_e_real=9000, Q_e_imag=-9000)
@@ -82,20 +80,17 @@ noise_scale = 0.02
np.random.seed(123)
measured_s21 = true_s21 + noise_scale*(np.random.randn(100) + 1j*np.random.randn(100))
-plt.figure()
plt.plot(f, 20*np.log10(np.abs(measured_s21)))
plt.ylabel('|S21| (dB)')
plt.xlabel('MHz')
plt.title('simulated measurement')
###############################################################################
-# Try out the guess method we added:
-
+# Try out the ``guess`` method we added:
guess = resonator.guess(measured_s21, f=f, verbose=True)
##############################################################################
-# And now fit the data using the guess as a starting point:
-
+# And now fit the data using the values from ``guess`` as a starting point:
result = resonator.fit(measured_s21, params=guess, f=f, verbose=True)
print(result.fit_report() + '\n')
@@ -115,16 +110,16 @@ guess_s21 = resonator.eval(params=guess, f=f)
plt.figure()
plot_ri(measured_s21, '.')
-plot_ri(fit_s21, 'r.-', label='best fit')
-plot_ri(guess_s21, 'k--', label='inital fit')
-plt.legend(loc='best')
+plot_ri(fit_s21, '.-', label='best fit')
+plot_ri(guess_s21, '--', label='initial fit')
+plt.legend()
plt.xlabel('Re(S21)')
plt.ylabel('Im(S21)')
plt.figure()
plt.plot(f, 20*np.log10(np.abs(measured_s21)), '.')
-plt.plot(f, 20*np.log10(np.abs(fit_s21)), 'r.-', label='best fit')
-plt.plot(f, 20*np.log10(np.abs(guess_s21)), 'k--', label='initial fit')
-plt.legend(loc='best')
+plt.plot(f, 20*np.log10(np.abs(fit_s21)), '.-', label='best fit')
+plt.plot(f, 20*np.log10(np.abs(guess_s21)), '--', label='initial fit')
+plt.legend()
plt.ylabel('|S21| (dB)')
plt.xlabel('MHz')
diff --git a/examples/example_confidence_interval.py b/examples/example_confidence_interval.py
index 5e7be2d..baa792b 100644
--- a/examples/example_confidence_interval.py
+++ b/examples/example_confidence_interval.py
@@ -33,6 +33,7 @@ p_true.add('shift', value=0.123)
p_true.add('decay', value=0.010)
x = linspace(0.0, 250.0, 2500)
+random.seed(2021)
noise = random.normal(scale=0.7215, size=x.size)
data = residual(p_true, x) + noise
@@ -45,8 +46,8 @@ fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)
###############################################################################
-# Set-up the minimizer and perform the fit using leastsq algorithm, and show
-# the report:
+# Set-up the minimizer and perform the fit using ``leastsq`` algorithm, and
+# show the report:
mini = Minimizer(residual, fit_params, fcn_args=(x,), fcn_kws={'data': data})
out = mini.leastsq()
@@ -94,7 +95,7 @@ for fixed in names:
f = prob < 0.96
x, y = res[free], res[fixed]
- ax.scatter(x[f], y[f], c=1-prob[f], s=200*(1-prob[f]+0.5))
+ ax.scatter(x[f], y[f], c=1-prob[f], s=25*(1-prob[f]+0.5))
ax.autoscale(1, 1)
j += 1
i += 1
@@ -106,21 +107,34 @@ for fixed in names:
names = list(out.params.keys())
plt.figure()
-cm = plt.cm.coolwarm
for i in range(4):
for j in range(4):
- plt.subplot(4, 4, 16-j*4-i)
+ indx = 16-j*4-i
+ ax = plt.subplot(4, 4, indx)
+ ax.ticklabel_format(style='sci', scilimits=(-2, 2), axis='y')
+
+ # set-up labels and tick marks
+ ax.tick_params(labelleft=False, labelbottom=False)
+ if indx in (2, 5, 9, 13):
+ plt.ylabel(names[j])
+ ax.tick_params(labelleft=True)
+ if indx == 1:
+ ax.tick_params(labelleft=True)
+ if indx in (13, 14, 15, 16):
+ plt.xlabel(names[i])
+ ax.tick_params(labelbottom=True)
+ [label.set_rotation(45) for label in ax.get_xticklabels()]
+
if i != j:
x, y, m = conf_interval2d(mini, out, names[i], names[j], 20, 20)
- plt.contourf(x, y, m, linspace(0, 1, 10), cmap=cm)
- plt.xlabel(names[i])
- plt.ylabel(names[j])
+ plt.contourf(x, y, m, linspace(0, 1, 10))
x = tr[names[i]][names[i]]
y = tr[names[i]][names[j]]
pr = tr[names[i]]['prob']
s = argsort(x)
- plt.scatter(x[s], y[s], c=pr[s], s=30, lw=1, cmap=cm)
+ plt.scatter(x[s], y[s], c=pr[s], s=30, lw=1)
+
else:
x = tr[names[i]][names[i]]
y = tr[names[i]]['prob']
@@ -128,8 +142,9 @@ for i in range(4):
t, s = unique(x, True)
f = interp1d(t, y[s], 'slinear')
xn = linspace(x.min(), x.max(), 50)
- plt.plot(xn, f(xn), 'g', lw=1)
- plt.xlabel(names[i])
+ plt.plot(xn, f(xn), lw=1)
plt.ylabel('prob')
+ ax.tick_params(labelleft=True)
+plt.tight_layout()
plt.show()
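The 2-D probability maps above come from ``conf_interval2d``; the tabulated 1-D intervals (and the ``tr`` trace used for the scatter overlays) come from ``conf_interval``. A minimal sketch, assuming ``mini`` and ``out`` from this example:

from lmfit import conf_interval
from lmfit.printfuncs import report_ci

ci, tr = conf_interval(mini, out, sigmas=[1, 2], trace=True)
report_ci(ci)   # print the 1- and 2-sigma interval for each parameter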
diff --git a/examples/example_detect_outliers.py b/examples/example_detect_outliers.py
index cd997e8..08ae5f5 100644
--- a/examples/example_detect_outliers.py
+++ b/examples/example_detect_outliers.py
@@ -3,7 +3,7 @@ Outlier detection via leave-one-out
===================================
Outliers can sometimes be identified by assessing the influence of each
-datapoint. To assess the influence of one point, we fit the dataset while the
+datapoint. To assess the influence of one point, we fit the dataset without the
point and compare the result with the fit of the full dataset. The code below
shows how to do this with lmfit. Note that the presented method is very basic.
"""
@@ -14,10 +14,8 @@ import numpy as np
import lmfit
-plt.rcParams['figure.dpi'] = 130
-plt.rcParams['figure.autolayout'] = True
###############################################################################
-# Generate test data and model. Apply the model to the data
+# Generate test data and model:
x = np.linspace(0.3, 10, 100)
np.random.seed(1)
y = 1.0 / (0.1 * x) + 2.0 + 3 * np.random.randn(x.size)
@@ -30,25 +28,27 @@ def func(x, a, b):
return 1.0 / (a * x) + b
-# Make 5 points outliers
+###############################################################################
+# Make five points outliers:
idx = np.random.randint(0, x.size, 5)
y[idx] += 10 * np.random.randn(idx.size)
-# Fit the data
+###############################################################################
+# Fit the data:
model = lmfit.Model(func, independent_vars=['x'])
fit_result = model.fit(y, x=x, a=0.1, b=2)
###############################################################################
# and gives the plot and fitting results below:
-
fit_result.plot_fit()
-plt.plot(x[idx], y[idx], 'o', color='r', label='outliers')
+plt.plot(x[idx], y[idx], 'o', label='outliers')
plt.show()
-print(fit_result.fit_report())
###############################################################################
-# Fit the dataset while omitting one data point
+print(fit_result.fit_report())
+###############################################################################
+# Fit the dataset while omitting one data point:
best_vals = defaultdict(lambda: np.zeros(x.size))
stderrs = defaultdict(lambda: np.zeros(x.size))
chi_sq = np.zeros_like(x)
@@ -56,9 +56,7 @@ for i in range(x.size):
idx2 = np.arange(0, x.size)
idx2 = np.delete(idx2, i)
tmp_x = x[idx2]
- tmp = model.fit(y[idx2],
- x=tmp_x,
- a=fit_result.params['a'],
+ tmp = model.fit(y[idx2], x=tmp_x, a=fit_result.params['a'],
b=fit_result.params['b'])
chi_sq[i] = tmp.chisqr
for p in tmp.params:
@@ -67,21 +65,17 @@ for i in range(x.size):
stderrs[p][i] = (tpar.stderr / fit_result.params[p].stderr)
###############################################################################
-# Plot the influence on the red. chisqr of each point
-
+# Plot the influence on the red. chisqr of each point:
fig, ax = plt.subplots()
ax.plot(x, (fit_result.chisqr - chi_sq) / chi_sq)
-ax.scatter(x[idx],
- fit_result.chisqr / chi_sq[idx] - 1,
- color='r',
+ax.scatter(x[idx], fit_result.chisqr / chi_sq[idx] - 1, color='r',
label='outlier')
ax.set_ylabel(r'Relative red. $\chi^2$ change')
ax.set_xlabel('x')
ax.legend()
###############################################################################
-# Plot the influence on the parameter value and error of each point
-
+# Plot the influence on the parameter value and error of each point:
fig, axs = plt.subplots(4, figsize=(4, 7), sharex='col')
axs[0].plot(x, best_vals['a'])
axs[0].scatter(x[idx], best_vals['a'][idx], color='r', label='outlier')
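The influence curve plotted above can be turned into an automatic flag with one comparison. A minimal sketch, assuming ``fit_result`` and ``chi_sq`` from this example (the 0.2 cut-off is illustrative, not part of the method):

influence = fit_result.chisqr / chi_sq - 1    # large when dropping a point helps a lot
suspects = np.where(influence > 0.2)[0]       # illustrative threshold
print(f'candidate outlier indices: {suspects}')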
diff --git a/examples/example_diffev.py b/examples/example_diffev.py
index bb6d85b..31a3800 100644
--- a/examples/example_diffev.py
+++ b/examples/example_diffev.py
@@ -2,8 +2,8 @@
Fit Using differential_evolution Algorithm
==========================================
-This example compares the "leastsq" and "differential_evolution" algorithms on
-a fairly simple problem.
+This example compares the ``leastsq`` and ``differential_evolution`` algorithms
+on a fairly simple problem.
"""
import matplotlib.pyplot as plt
@@ -11,18 +11,6 @@ import numpy as np
import lmfit
-np.random.seed(2)
-x = np.linspace(0, 10, 101)
-
-# Setup example
-decay = 5
-offset = 1.0
-amp = 2.0
-omega = 4.0
-
-y = offset + amp*np.sin(omega*x) * np.exp(-x/decay)
-yn = y + np.random.normal(size=y.size, scale=0.450)
-
def resid(params, x, ydata):
decay = params['decay'].value
@@ -34,22 +22,37 @@ def resid(params, x, ydata):
return y_model - ydata
+###############################################################################
+# Generate synthetic data and set-up Parameters with initial values/boundaries:
+decay = 5
+offset = 1.0
+amp = 2.0
+omega = 4.0
+
+np.random.seed(2)
+x = np.linspace(0, 10, 101)
+y = offset + amp*np.sin(omega*x) * np.exp(-x/decay)
+yn = y + np.random.normal(size=y.size, scale=0.450)
+
params = lmfit.Parameters()
params.add('offset', 2.0, min=0, max=10.0)
params.add('omega', 3.3, min=0, max=10.0)
params.add('amp', 2.5, min=0, max=10.0)
params.add('decay', 1.0, min=0, max=10.0)
+###############################################################################
+# Perform the fits and show fitting results and plot:
o1 = lmfit.minimize(resid, params, args=(x, yn), method='leastsq')
print("# Fit using leastsq:")
lmfit.report_fit(o1)
+###############################################################################
o2 = lmfit.minimize(resid, params, args=(x, yn), method='differential_evolution')
print("\n\n# Fit using differential_evolution:")
lmfit.report_fit(o2)
-plt.plot(x, yn, 'ko', lw=2)
-plt.plot(x, yn+o1.residual, 'r-', lw=2)
-plt.plot(x, yn+o2.residual, 'b--', lw=2)
-plt.legend(['data', 'leastsq', 'diffev'], loc='upper left')
-plt.show()
+###############################################################################
+plt.plot(x, yn, 'o', label='data')
+plt.plot(x, yn+o1.residual, '-', label='leastsq')
+plt.plot(x, yn+o2.residual, '--', label='diffev')
+plt.legend()
diff --git a/examples/example_emcee_Model_interface.py b/examples/example_emcee_Model_interface.py
index 971b40b..60ff11f 100644
--- a/examples/example_emcee_Model_interface.py
+++ b/examples/example_emcee_Model_interface.py
@@ -11,7 +11,7 @@ import lmfit
###############################################################################
-# Set up a double-exponential function and create a Model
+# Set up a double-exponential function and create a Model:
def double_exp(x, a1, t1, a2, t2):
return a1*np.exp(-x/t1) + a2*np.exp(-(x-0.1) / t2)
@@ -19,14 +19,14 @@ def double_exp(x, a1, t1, a2, t2):
model = lmfit.Model(double_exp)
###############################################################################
-# Generate some fake data from the model with added noise
+# Generate some fake data from the model with added noise:
truths = (3.0, 2.0, -5.0, 10.0)
x = np.linspace(1, 10, 250)
np.random.seed(0)
y = double_exp(x, *truths)+0.1*np.random.randn(x.size)
###############################################################################
-# Create model parameters and give them initial values
+# Create model parameters and give them initial values:
p = model.make_params(a1=4, t1=3, a2=4, t2=3)
###############################################################################
@@ -37,13 +37,13 @@ lmfit.report_fit(result)
result.plot()
###############################################################################
-# Calculate parameter covariance using emcee:
+# Calculate parameter covariance using ``emcee``:
#
# - start the walkers out at the best-fit values
-# - set is_weighted to False to estimate the noise weights
+# - set ``is_weighted`` to ``False`` to estimate the noise weights
# - set some sensible priors on the uncertainty to keep the MCMC in check
-#
-emcee_kws = dict(steps=1000, burn=300, thin=20, is_weighted=False,
+
+emcee_kws = dict(steps=5000, burn=500, thin=20, is_weighted=False,
progress=False)
emcee_params = result.params.copy()
emcee_params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2.0))
@@ -53,48 +53,48 @@ emcee_params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2
result_emcee = model.fit(data=y, x=x, params=emcee_params, method='emcee',
nan_policy='omit', fit_kws=emcee_kws)
+###############################################################################
lmfit.report_fit(result_emcee)
-ax = plt.plot(x, model.eval(params=result.params, x=x), label='Nelder', zorder=100)
-result_emcee.plot_fit(ax=ax, data_kws=dict(color='gray', markersize=2))
-plt.show()
+###############################################################################
+result_emcee.plot_fit()
+plt.plot(x, model.eval(params=result.params, x=x), '--', label='Nelder')
+plt.legend()
###############################################################################
-# check the acceptance fraction to see whether emcee performed well
-plt.plot(result_emcee.acceptance_fraction)
+# Check the acceptance fraction to see whether ``emcee`` performed well:
+plt.plot(result_emcee.acceptance_fraction, 'o')
plt.xlabel('walker')
plt.ylabel('acceptance fraction')
-plt.show()
###############################################################################
-# try to compute the autocorrelation time
+# Try to compute the autocorrelation time:
if hasattr(result_emcee, "acor"):
print("Autocorrelation time for the parameters:")
print("----------------------------------------")
for i, p in enumerate(result.params):
- print(p, result.acor[i])
-
+ print(f'{p} = {result_emcee.acor[i]:.3f}')
###############################################################################
-# Plot the parameter covariances returned by emcee using corner
+# Plot the parameter covariances returned by ``emcee`` using ``corner``:
emcee_corner = corner.corner(result_emcee.flatchain, labels=result_emcee.var_names,
truths=list(result_emcee.params.valuesdict().values()))
###############################################################################
-#
print("\nmedian of posterior probability distribution")
print('--------------------------------------------')
lmfit.report_fit(result_emcee.params)
-# find the maximum likelihood solution
+###############################################################################
+# Find the maximum likelihood solution:
highest_prob = np.argmax(result_emcee.lnprob)
hp_loc = np.unravel_index(highest_prob, result_emcee.lnprob.shape)
mle_soln = result_emcee.chain[hp_loc]
-print("\nMaximum likelihood Estimation")
-print('-----------------------------')
+print("\nMaximum Likelihood Estimation (MLE):")
+print('----------------------------------')
for ix, param in enumerate(emcee_params):
- print(param + ': ' + str(mle_soln[ix]))
+ print(f"{param}: {mle_soln[ix]:.3f}")
quantiles = np.percentile(result_emcee.flatchain['t1'], [2.28, 15.9, 50, 84.2, 97.7])
-print("\n\n1 sigma spread", 0.5 * (quantiles[3] - quantiles[1]))
-print("2 sigma spread", 0.5 * (quantiles[4] - quantiles[0]))
+print(f"\n\n1 sigma spread = {0.5 * (quantiles[3] - quantiles[1]):.3f}")
+print(f"2 sigma spread = {0.5 * (quantiles[4] - quantiles[0]):.3f}")
diff --git a/examples/example_expression_model.py b/examples/example_expression_model.py
index 6398e89..834e6d0 100644
--- a/examples/example_expression_model.py
+++ b/examples/example_expression_model.py
@@ -17,10 +17,11 @@ x = np.linspace(-10, 10, 201)
amp, cen, wid = 3.4, 1.8, 0.5
y = amp * np.exp(-(x-cen)**2 / (2*wid**2)) / (np.sqrt(2*np.pi)*wid)
+np.random.seed(2021)
y = y + np.random.normal(size=x.size, scale=0.01)
###############################################################################
-# Define the ExpressionModel and perform the fit:
+# Define the ``ExpressionModel`` and perform the fit:
gmod = ExpressionModel("amp * exp(-(x-cen)**2 /(2*wid**2))/(sqrt(2*pi)*wid)")
result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
@@ -28,8 +29,8 @@ result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
# this results in the following output:
print(result.fit_report())
-plt.plot(x, y, 'bo')
-plt.plot(x, result.init_fit, 'k--', label='initial fit')
-plt.plot(x, result.best_fit, 'r-', label='best fit')
-plt.legend(loc='best')
-plt.show()
+###############################################################################
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.legend()
diff --git a/examples/example_fit_multi_datasets.py b/examples/example_fit_multi_datasets.py
index 2920f0d..587b9da 100644
--- a/examples/example_fit_multi_datasets.py
+++ b/examples/example_fit_multi_datasets.py
@@ -5,9 +5,9 @@ Fit Multiple Data Sets
Fitting multiple (simulated) Gaussian data sets simultaneously.
All minimizers require the residual array to be one-dimensional. Therefore, in
-the ``objective`` we need to ```flatten``` the array before returning it.
+the ``objective`` function we need to ``flatten`` the array before returning it.
-TODO: this should be using the Model interface / built-in models!
+TODO: this could/should be using the Model interface / built-in models!
"""
import matplotlib.pyplot as plt
@@ -23,9 +23,9 @@ def gauss(x, amp, cen, sigma):
def gauss_dataset(params, i, x):
"""Calculate Gaussian lineshape from parameters for data set."""
- amp = params['amp_%i' % (i+1)]
- cen = params['cen_%i' % (i+1)]
- sig = params['sig_%i' % (i+1)]
+ amp = params[f'amp_{i+1}']
+ cen = params[f'cen_{i+1}']
+ sig = params[f'sig_{i+1}']
return gauss(x, amp, cen, sig)
@@ -44,7 +44,7 @@ def objective(params, x, data):
###############################################################################
# Create five simulated Gaussian data sets
-
+np.random.seed(2021)
x = np.linspace(-1, 2, 151)
data = []
for _ in np.arange(5):
@@ -58,31 +58,26 @@ data = np.array(data)
###############################################################################
# Create five sets of fitting parameters, one per data set
-
fit_params = Parameters()
for iy, y in enumerate(data):
- fit_params.add('amp_%i' % (iy+1), value=0.5, min=0.0, max=200)
- fit_params.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0)
- fit_params.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)
+ fit_params.add(f'amp_{iy+1}', value=0.5, min=0.0, max=200)
+ fit_params.add(f'cen_{iy+1}', value=0.4, min=-2.0, max=2.0)
+ fit_params.add(f'sig_{iy+1}', value=0.3, min=0.01, max=3.0)
###############################################################################
# Constrain the values of sigma to be the same for all peaks by assigning
# sig_2, ..., sig_5 to be equal to sig_1.
-
for iy in (2, 3, 4, 5):
- fit_params['sig_%i' % iy].expr = 'sig_1'
+ fit_params[f'sig_{iy}'].expr = 'sig_1'
###############################################################################
# Run the global fit and show the fitting result
-
out = minimize(objective, fit_params, args=(x, data))
report_fit(out.params)
###############################################################################
# Plot the data sets and fits
-
plt.figure()
for i in range(5):
y_fit = gauss_dataset(out.params, i, x)
plt.plot(x, data[i, :], 'o', x, y_fit, '-')
-plt.show()
diff --git a/examples/example_fit_with_algebraic_constraint.py b/examples/example_fit_with_algebraic_constraint.py
index 20dcdea..2d8bc74 100644
--- a/examples/example_fit_with_algebraic_constraint.py
+++ b/examples/example_fit_with_algebraic_constraint.py
@@ -2,7 +2,6 @@
Fit with Algebraic Constraint
=============================
-
"""
import matplotlib.pyplot as plt
from numpy import linspace, random
@@ -32,9 +31,7 @@ x = linspace(0.0, 20.0, 601)
data = (gaussian(x, 21, 8.1, 1.2) +
lorentzian(x, 10, 9.6, 2.4) +
- random.normal(scale=0.23, size=x.size) +
- x*0.5)
-
+ random.normal(scale=0.23, size=x.size) + x*0.5)
pfit = Parameters()
pfit.add(name='amp_g', value=10)
@@ -49,9 +46,8 @@ pfit.add(name='line_off', value=0.0)
sigma = 0.021 # estimate of data error (for all data points)
-myfit = Minimizer(residual, pfit,
- fcn_args=(x,), fcn_kws={'sigma': sigma, 'data': data},
- scale_covar=True)
+myfit = Minimizer(residual, pfit, fcn_args=(x,),
+ fcn_kws={'sigma': sigma, 'data': data})
result = myfit.leastsq()
init = residual(pfit, x)
@@ -59,8 +55,8 @@ fit = residual(result.params, x)
report_fit(result)
-plt.plot(x, data, 'r+')
-plt.plot(x, init, 'b--', label='initial fit')
-plt.plot(x, fit, 'k-', label='best fit')
-plt.legend(loc='best')
-plt.show()
+###############################################################################
+plt.plot(x, data, '+')
+plt.plot(x, init, '--', label='initial fit')
+plt.plot(x, fit, '-', label='best fit')
+plt.legend()
diff --git a/examples/example_fit_with_bounds.py b/examples/example_fit_with_bounds.py
index df354e4..2446500 100644
--- a/examples/example_fit_with_bounds.py
+++ b/examples/example_fit_with_bounds.py
@@ -17,6 +17,8 @@ from numpy import exp, linspace, pi, random, sign, sin
from lmfit import Parameters, minimize
from lmfit.printfuncs import report_fit
+###############################################################################
+# Define the 'correct' Parameter values and residual function:
p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.4321)
@@ -35,28 +37,28 @@ def residual(pars, x, data=None):
return model - data
+###############################################################################
+# Generate synthetic data and initialize fitting Parameters:
random.seed(0)
x = linspace(0, 250, 1500)
-noise = random.normal(scale=2.80, size=x.size)
+noise = random.normal(scale=2.8, size=x.size)
data = residual(p_true, x) + noise
fit_params = Parameters()
-fit_params.add('amp', value=13.0, max=20, min=0.0)
+fit_params.add('amp', value=13, max=20, min=0)
fit_params.add('period', value=2, max=10)
-fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
-fit_params.add('decay', value=0.02, max=0.10, min=0.00)
+fit_params.add('shift', value=0, max=pi/2., min=-pi/2.)
+fit_params.add('decay', value=0.02, max=0.1, min=0)
+###############################################################################
+# Perform the fit and show the results:
out = minimize(residual, fit_params, args=(x,), kws={'data': data})
fit = residual(out.params, x)
###############################################################################
-# This gives the following fitting results:
-
-report_fit(out, show_correl=True, modelpars=p_true)
+report_fit(out, modelpars=p_true)
###############################################################################
-# and shows the plot below:
-#
-plt.plot(x, data, 'ro')
-plt.plot(x, fit, 'b')
-plt.show()
+plt.plot(x, data, 'o', label='data')
+plt.plot(x, fit, label='best fit')
+plt.legend()
diff --git a/examples/example_fit_with_derivfunc.py b/examples/example_fit_with_derivfunc.py
index 6c3eb54..1647705 100644
--- a/examples/example_fit_with_derivfunc.py
+++ b/examples/example_fit_with_derivfunc.py
@@ -38,44 +38,39 @@ params.add('c', value=10)
a, b, c = 2.5, 1.3, 0.8
x = np.linspace(0, 4, 50)
y = f([a, b, c], x)
+np.random.seed(2021)
data = y + 0.15*np.random.normal(size=x.size)
-# fit without analytic derivative
+###############################################################################
+# Fit without analytic derivative:
min1 = Minimizer(func, params, fcn_args=(x,), fcn_kws={'data': data})
out1 = min1.leastsq()
fit1 = func(out1.params, x)
-# fit with analytic derivative
+###############################################################################
+# Fit with analytic derivative:
min2 = Minimizer(func, params, fcn_args=(x,), fcn_kws={'data': data})
out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
fit2 = func(out2.params, x)
###############################################################################
# Comparison of fit to exponential decay with/without analytical derivatives
-# to model = a*exp(-b*x) + c
-print('''
-"true" parameters are: a = %.3f, b = %.3f, c = %.3f
-
-==============================================
-Statistic/Parameter| Without | With |
-----------------------------------------------
-N Function Calls | %3i | %3i |
-Chi-square | %.4f | %.4f |
- a | %.4f | %.4f |
- b | %.4f | %.4f |
- c | %.4f | %.4f |
-----------------------------------------------
-''' % (a, b, c,
- out1.nfev, out2.nfev,
- out1.chisqr, out2.chisqr,
- out1.params['a'], out2.params['a'],
- out1.params['b'], out2.params['b'],
- out1.params['c'], out2.params['c']))
+# to model = a*exp(-b*x) + c:
+print(f'"true" parameters are: a = {a:.3f}, b = {b:.3f}, c = {c:.3f}\n\n'
+ '|=========================================\n'
+ '| Statistic/Parameter | Without | With |\n'
+ '|-----------------------------------------\n'
+ f'| N Function Calls | {out1.nfev:d} | {out2.nfev:d} |\n'
+ f'| Chi-square | {out1.chisqr:.4f} | {out2.chisqr:.4f} |\n'
+ f"| a | {out1.params['a'].value:.4f} | {out2.params['a'].value:.4f} |\n"
+ f"| b | {out1.params['b'].value:.4f} | {out2.params['b'].value:.4f} |\n"
+ f"| c | {out1.params['c'].value:.4f} | {out2.params['c'].value:.4f} |\n"
+ '------------------------------------------')
###############################################################################
# and the best-fit to the synthetic data (with added noise) is the same for
# both methods:
-plt.plot(x, data, 'ro')
-plt.plot(x, fit1, 'b')
-plt.plot(x, fit2, 'k')
-plt.show()
+plt.plot(x, data, 'o', label='data')
+plt.plot(x, fit1, label='without analytical derivative')
+plt.plot(x, fit2, '--', label='with analytical derivative')
+plt.legend()
diff --git a/examples/example_fit_with_inequality.py b/examples/example_fit_with_inequality.py
index 6decf8d..170fd7e 100644
--- a/examples/example_fit_with_inequality.py
+++ b/examples/example_fit_with_inequality.py
@@ -23,7 +23,7 @@ def residual(pars, x, data):
###############################################################################
-# Generate the simulated data using a Gaussian and Lorentzian line shape:
+# Generate the simulated data using a Gaussian and Lorentzian lineshape:
np.random.seed(0)
x = np.linspace(0, 20.0, 601)
@@ -55,7 +55,6 @@ report_fit(out.params)
###############################################################################
# and figure:
-plt.plot(x, data, 'bo')
-plt.plot(x, best_fit, 'r--', label='best fit')
-plt.legend(loc='best')
-plt.show()
+plt.plot(x, data, 'o')
+plt.plot(x, best_fit, '--', label='best fit')
+plt.legend()
diff --git a/examples/example_reduce_fcn.py b/examples/example_reduce_fcn.py
index 64af016..d047f84 100644
--- a/examples/example_reduce_fcn.py
+++ b/examples/example_reduce_fcn.py
@@ -2,10 +2,10 @@
Fit Specifying Different Reduce Function
========================================
-The reduce_fcn specifies how to convert a residual array to a scalar value for
-the scalar minimizers. The default value is None (i.e., "sum of squares of
-residual") - alternatives are: 'negentropy' and 'neglogcauchy' or a
-user-specified "callable". For more information please refer to:
+The ``reduce_fcn`` specifies how to convert a residual array to a scalar value
+for the scalar minimizers. The default value is ``None`` (i.e., "sum of squares
+of residual"); alternatives are ``negentropy``, ``neglogcauchy``, or a
+user-specified ``callable``. For more information please refer to:
https://lmfit.github.io/lmfit-py/fitting.html#using-the-minimizer-class
Here, we use as an example the Student's t log-likelihood for robust fitting
@@ -17,56 +17,54 @@ import numpy as np
import lmfit
-np.random.seed(2)
-x = np.linspace(0, 10, 101)
-# Setup example
+def resid(params, x, ydata):
+ decay = params['decay'].value
+ offset = params['offset'].value
+ omega = params['omega'].value
+ amp = params['amp'].value
+
+ y_model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
+ return y_model - ydata
+
+
+###############################################################################
+# Generate synthetic data with noise/outliers and initialize fitting Parameters:
decay = 5
offset = 1.0
amp = 2.0
omega = 4.0
+np.random.seed(2)
+x = np.linspace(0, 10, 101)
y = offset + amp * np.sin(omega*x) * np.exp(-x/decay)
yn = y + np.random.normal(size=y.size, scale=0.250)
outliers = np.random.randint(int(len(x)/3.0), len(x), int(len(x)/12))
yn[outliers] += 5*np.random.random(len(outliers))
-
-def resid(params, x, ydata):
- decay = params['decay'].value
- offset = params['offset'].value
- omega = params['omega'].value
- amp = params['amp'].value
-
- y_model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
- return y_model - ydata
-
-
params = lmfit.Parameters()
-
params.add('offset', 2.0)
params.add('omega', 3.3)
params.add('amp', 2.5)
params.add('decay', 1.0, min=0)
+###############################################################################
+# Perform fits using the ``L-BFGS-B`` method with different ``reduce_fcn``:
method = 'L-BFGS-B'
-
o1 = lmfit.minimize(resid, params, args=(x, yn), method=method)
print("# Fit using sum of squares:\n")
lmfit.report_fit(o1)
+###############################################################################
o2 = lmfit.minimize(resid, params, args=(x, yn), method=method,
reduce_fcn='neglogcauchy')
print("\n\n# Robust Fit, using log-likelihood with Cauchy PDF:\n")
lmfit.report_fit(o2)
-plt.plot(x, y, 'ko', lw=2)
-plt.plot(x, yn, 'k--*', lw=1)
-plt.plot(x, yn+o1.residual, 'r-', lw=2)
-plt.plot(x, yn+o2.residual, 'b-', lw=2)
-plt.legend(['True function',
- 'with noise+outliers',
- 'sum of squares fit',
- 'robust fit'], loc='upper left')
-plt.show()
+###############################################################################
+plt.plot(x, y, 'o', label='true function')
+plt.plot(x, yn, '--*', label='with noise+outliers')
+plt.plot(x, yn+o1.residual, '-', label='sum of squares fit')
+plt.plot(x, yn+o2.residual, '-', label='robust fit')
+plt.legend()
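As noted in the docstring above, ``reduce_fcn`` also accepts a user-specified callable that maps the residual array to a scalar. A minimal sketch, assuming ``resid``, ``params``, ``x`` and ``yn`` from this example (the soft-L1 loss is illustrative):

import numpy as np
import lmfit

def soft_l1(residual):
    # illustrative robust loss: sum of 2*(sqrt(1 + r**2) - 1) over the residual array
    return np.sum(2.0*(np.sqrt(1.0 + residual**2) - 1.0))

o3 = lmfit.minimize(resid, params, args=(x, yn), method='L-BFGS-B',
                    reduce_fcn=soft_l1)
print("\n\n# Robust fit, using a user-specified reduce_fcn:\n")
lmfit.report_fit(o3)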
diff --git a/examples/example_sympy.py b/examples/example_sympy.py
index 688e85c..1039073 100644
--- a/examples/example_sympy.py
+++ b/examples/example_sympy.py
@@ -3,11 +3,11 @@ Building a lmfit model with SymPy
=================================
SymPy is a Python library for symbolic mathematics. It can be very useful to
-build a model with sympy and then use that apply that model to the data with
-lmfit. This example shows how to do that. Notice, that this example requires
-both sympy and matplotlib.
-"""
+build a model with SymPy and then apply that model to the data with lmfit.
+This example shows how to do that. Please note that this example requires
+both the sympy and matplotlib packages.
+"""
import matplotlib.pyplot as plt
import numpy as np
import sympy
@@ -15,11 +15,9 @@ from sympy.parsing import sympy_parser
import lmfit
-np.random.seed(1)
-
-# %%
-# Instead of creating the sympy-sybols explicitly and building an expression
-# with them, we will use the sympy parser.
+###############################################################################
+# Instead of creating the SymPy symbols explicitly and building an expression
+# with them, we will use the SymPy parser.
gauss_peak1 = sympy_parser.parse_expr('A1*exp(-(x-xc1)**2/(2*sigma1**2))')
gauss_peak2 = sympy_parser.parse_expr('A2*exp(-(x-xc2)**2/(2*sigma2**2))')
@@ -27,51 +25,49 @@ exp_back = sympy_parser.parse_expr('B*exp(-x/xw)')
model_list = sympy.Array((gauss_peak1, gauss_peak2, exp_back))
model = sum(model_list)
-model
+print(model)
-# %%
-# We are using sympys lambdify function to make a function from the model
-# expressions. We use these functions to generate some fake data.
+###############################################################################
+# We are using SymPy's lambdify function to make a function from the model
+# expressions. We then use these functions to generate some fake data.
model_list_func = sympy.lambdify(list(model_list.free_symbols), model_list)
model_func = sympy.lambdify(list(model.free_symbols), model)
+###############################################################################
+# Generate synthetic data with noise and plot the data.
+np.random.seed(1)
x = np.linspace(0, 10, 40)
-param_values = dict(x=x, A1=2, sigma1=1, sigma2=1, A2=3,
- xc1=2, xc2=5, xw=4, B=5)
+param_values = dict(x=x, A1=2, sigma1=1, sigma2=1, A2=3, xc1=2, xc2=5, xw=4, B=5)
y = model_func(**param_values)
yi = model_list_func(**param_values)
yn = y + np.random.randn(y.size)*0.4
-plt.plot(x, yn, 'o', zorder=1.9, ms=3)
-plt.plot(x, y, lw=3)
+plt.plot(x, yn, 'o')
+plt.plot(x, y)
for c in yi:
- plt.plot(x, c, lw=1, c='0.7')
+ plt.plot(x, c, color='0.7')
-
-# %%
+###############################################################################
# Next, we will just create a lmfit model from the function and fit the data.
-
lm_mod = lmfit.Model(model_func, independent_vars=('x'))
res = lm_mod.fit(data=yn, **param_values)
+
+###############################################################################
res.plot_fit()
plt.plot(x, y, label='true')
plt.legend()
-res
-# %%
-# The nice thing of using sympy is that we can easily modify our fit function.
-# Let's assume we know that the width of both gaussians is identical. Simliary,
-# we assume that the ratio between both gaussians is fixed to 3:2 for some
+###############################################################################
+# The nice thing about using SymPy is that we can easily modify our fit function.
+# Let's assume we know that the widths of both Gaussians are identical. Similarly,
+# we assume that the ratio between both Gaussians is fixed to 3:2 for some
# reason. Both can be expressed by just substituting the variables.
-
model2 = model.subs('sigma2', 'sigma1').subs('A2', '3/2*A1')
model2_func = sympy.lambdify(list(model2.free_symbols), model2)
lm_mod = lmfit.Model(model2_func, independent_vars=('x'))
-param2_values = dict(x=x, A1=2, sigma1=1, A2=3, xc1=2, xc2=5, xw=4, B=5)
-res2 = lm_mod.fit(data=yn, **param_values)
+param2_values = dict(x=x, A1=2, sigma1=1, xc1=2, xc2=5, xw=4, B=5)
+res2 = lm_mod.fit(data=yn, **param2_values)
res2.plot_fit()
plt.plot(x, y, label='true')
plt.legend()
-
-res2
diff --git a/examples/example_two_dimensional_peak.py b/examples/example_two_dimensional_peak.py
index 96612bd..8b732fc 100644
--- a/examples/example_two_dimensional_peak.py
+++ b/examples/example_two_dimensional_peak.py
@@ -19,6 +19,7 @@ from lmfit.lineshapes import gaussian2d, lorentzian
# depends on coordinates `(x, y)`. The most general case of experimental
# data will be irregularly sampled and noisy. Let's simulate some:
npoints = 10000
+np.random.seed(2021)
x = np.random.rand(npoints)*10 - 4
y = np.random.rand(npoints)*5 - 3
z = gaussian2d(x, y, amplitude=30, centerx=2, centery=-.5, sigmax=.6, sigmay=.8)
diff --git a/examples/example_use_pandas.py b/examples/example_use_pandas.py
index afb5f4b..d908014 100644
--- a/examples/example_use_pandas.py
+++ b/examples/example_use_pandas.py
@@ -2,17 +2,16 @@
Fit with Data in a pandas DataFrame
===================================
-Simple example demonstrating how to read in the data using pandas and supply
-the elements of the DataFrame from lmfit.
+Simple example demonstrating how to read in the data using ``pandas`` and
+supply the elements of the ``DataFrame`` to lmfit.
"""
-import matplotlib.pyplot as plt
import pandas as pd
from lmfit.models import LorentzianModel
###############################################################################
-# read the data into a pandas DataFrame, and use the 'x' and 'y' columns:
+# read the data into a pandas DataFrame, and use the ``x`` and ``y`` columns:
dframe = pd.read_csv('peak.csv')
model = LorentzianModel()
@@ -21,8 +20,9 @@ params = model.guess(dframe['y'], x=dframe['x'])
result = model.fit(dframe['y'], params, x=dframe['x'])
###############################################################################
-# and gives the plot and fitting results below:
-result.plot_fit()
-plt.show()
-
+# and gives the fitting results:
print(result.fit_report())
+
+###############################################################################
+# and plot below:
+result.plot_fit()
diff --git a/examples/lmfit_emcee_model_selection.py b/examples/lmfit_emcee_model_selection.py
index 0279a7f..5b8f55e 100644
--- a/examples/lmfit_emcee_model_selection.py
+++ b/examples/lmfit_emcee_model_selection.py
@@ -43,8 +43,8 @@ def residual(p, just_generative=False):
v = p.valuesdict()
generative = v['a'] + v['b'] * x
M = 0
- while 'a_max%d' % M in v:
- generative += gauss(x, v['a_max%d' % M], v['loc%d' % M], v['sd%d' % M])
+ while f'a_max{M}' in v:
+ generative += gauss(x, v[f'a_max{M}'], v[f'loc{M}'], v[f'sd{M}'])
M += 1
if just_generative:
@@ -70,9 +70,9 @@ def initial_peak_params(M):
p.add_many(('a', a, True, 0, 10), ('b', b, True, 1, 15))
for i in range(M):
- p.add_many(('a_max%d' % i, 0.5 * a_max, True, 10, a_max),
- ('loc%d' % i, loc, True, np.min(x), np.max(x)),
- ('sd%d' % i, sd, True, 0.1, np.max(x) - np.min(x)))
+ p.add_many((f'a_max{i}', 0.5 * a_max, True, 10, a_max),
+ (f'loc{i}', loc, True, np.min(x), np.max(x)),
+ (f'sd{i}', sd, True, 0.1, np.max(x) - np.min(x)))
return p
diff --git a/examples/test_splinepeak.dat b/examples/test_splinepeak.dat
new file mode 100644
index 0000000..48d1fff
--- /dev/null
+++ b/examples/test_splinepeak.dat
@@ -0,0 +1,504 @@
+# test data for spline + peak
+#---------------------------------
+# x y
+ 0.000 2.96659626314
+ 0.050 3.00493991014
+ 0.100 3.06123652247
+ 0.150 2.52252640704
+ 0.200 3.87613932061
+ 0.250 4.00216534666
+ 0.300 3.50572316777
+ 0.350 3.43342382697
+ 0.400 3.73272247102
+ 0.450 3.59902815254
+ 0.500 3.98899222373
+ 0.550 3.44264010167
+ 0.600 3.23902639219
+ 0.650 3.74484184457
+ 0.700 3.61591299694
+ 0.750 3.25767615382
+ 0.800 3.47569848627
+ 0.850 3.55767377006
+ 0.900 3.78963224708
+ 0.950 3.41651644442
+ 1.000 3.84201421917
+ 1.050 3.94339562855
+ 1.100 3.46389760925
+ 1.150 3.57238104938
+ 1.200 3.78684815453
+ 1.250 3.61931156950
+ 1.300 2.88997092351
+ 1.350 3.59552177983
+ 1.400 3.78583510228
+ 1.450 3.11958901418
+ 1.500 3.92621017016
+ 1.550 3.77612346521
+ 1.600 3.11182878182
+ 1.650 3.20534315458
+ 1.700 3.76012162528
+ 1.750 3.24997196038
+ 1.800 3.90360364053
+ 1.850 3.28102384929
+ 1.900 4.20259080948
+ 1.950 3.14915680186
+ 2.000 3.45753793299
+ 2.050 3.40378870206
+ 2.100 3.90539898346
+ 2.150 3.27699485343
+ 2.200 4.08801741844
+ 2.250 3.40091449389
+ 2.300 4.43561592129
+ 2.350 3.53720202703
+ 2.400 3.51340902548
+ 2.450 4.27899843739
+ 2.500 4.38777379643
+ 2.550 4.35138356487
+ 2.600 3.45138414045
+ 2.650 3.89606317148
+ 2.700 3.38078269422
+ 2.750 3.72757358484
+ 2.800 3.95344629899
+ 2.850 3.64031251260
+ 2.900 3.92633692392
+ 2.950 4.54903499496
+ 3.000 3.83500263553
+ 3.050 3.86787436371
+ 3.100 4.20560369029
+ 3.150 3.79452687040
+ 3.200 4.42391607348
+ 3.250 3.25791105383
+ 3.300 4.54395118612
+ 3.350 3.67373233023
+ 3.400 4.06495292232
+ 3.450 4.06471624329
+ 3.500 3.97053389404
+ 3.550 4.22092346672
+ 3.600 4.16692173295
+ 3.650 3.89074827421
+ 3.700 3.54698063028
+ 3.750 4.24222168533
+ 3.800 3.40519864458
+ 3.850 3.37131572450
+ 3.900 3.56771484953
+ 3.950 4.11322749571
+ 4.000 4.20772923214
+ 4.050 4.01716397254
+ 4.100 4.47782680015
+ 4.150 4.41509687464
+ 4.200 4.47090595323
+ 4.250 3.86313946103
+ 4.300 4.10546063443
+ 4.350 4.27900062014
+ 4.400 3.72291118630
+ 4.450 4.07444282978
+ 4.500 4.49688013792
+ 4.550 4.32224000187
+ 4.600 4.01213249893
+ 4.650 4.34527050803
+ 4.700 4.31885480690
+ 4.750 4.65528015751
+ 4.800 4.12477942263
+ 4.850 4.26700226032
+ 4.900 4.73315243845
+ 4.950 3.57777878796
+ 5.000 4.61624329502
+ 5.050 4.26130401694
+ 5.100 4.41044807874
+ 5.150 4.20565975364
+ 5.200 4.09490101037
+ 5.250 4.50943737680
+ 5.300 4.17516575948
+ 5.350 4.34499306419
+ 5.400 4.22560906354
+ 5.450 4.16916811170
+ 5.500 4.22005179025
+ 5.550 3.89140676445
+ 5.600 4.49734760696
+ 5.650 4.79573697402
+ 5.700 4.32159418798
+ 5.750 4.48087045377
+ 5.800 4.71087348808
+ 5.850 4.46962833927
+ 5.900 4.38868250013
+ 5.950 3.67409708909
+ 6.000 4.11391097790
+ 6.050 4.46659297510
+ 6.100 4.59009406285
+ 6.150 4.58796249430
+ 6.200 4.50791819418
+ 6.250 4.34311032610
+ 6.300 4.30969225891
+ 6.350 4.08985177839
+ 6.400 4.14675849182
+ 6.450 4.96318969822
+ 6.500 4.55756202498
+ 6.550 4.78740417795
+ 6.600 4.62448849670
+ 6.650 4.68109171567
+ 6.700 3.94105744714
+ 6.750 4.02574773487
+ 6.800 4.52507717940
+ 6.850 4.21922040040
+ 6.900 4.69394237402
+ 6.950 3.76093339774
+ 7.000 4.53351841031
+ 7.050 4.69681713690
+ 7.100 5.00416599327
+ 7.150 4.51989607590
+ 7.200 4.72095593713
+ 7.250 4.59173268396
+ 7.300 4.06150079948
+ 7.350 4.77379936788
+ 7.400 4.35851553657
+ 7.450 4.53657427644
+ 7.500 3.92579351613
+ 7.550 4.71415366995
+ 7.600 4.25663227483
+ 7.650 4.54277178651
+ 7.700 4.60273770589
+ 7.750 4.18383910866
+ 7.800 4.40643674830
+ 7.850 4.05832509380
+ 7.900 4.41109717440
+ 7.950 4.17361477709
+ 8.000 4.49864472516
+ 8.050 4.14943454404
+ 8.100 4.77850525292
+ 8.150 3.74482589060
+ 8.200 3.98576039677
+ 8.250 4.64204576206
+ 8.300 4.02517889099
+ 8.350 4.30844807599
+ 8.400 4.52955833524
+ 8.450 4.73266004495
+ 8.500 4.61393789944
+ 8.550 4.99148544903
+ 8.600 3.67392469964
+ 8.650 4.44633683024
+ 8.700 4.56004129726
+ 8.750 4.02613947662
+ 8.800 4.21234308684
+ 8.850 4.11412083941
+ 8.900 4.26464490846
+ 8.950 4.44003840779
+ 9.000 4.17024949358
+ 9.050 4.34281951934
+ 9.100 4.67817992527
+ 9.150 4.21599805736
+ 9.200 4.54681771875
+ 9.250 4.94611036787
+ 9.300 3.82217290114
+ 9.350 4.19721177412
+ 9.400 4.59405568559
+ 9.450 4.50726307379
+ 9.500 5.04938393753
+ 9.550 4.16524366757
+ 9.600 3.90645337751
+ 9.650 4.77177694766
+ 9.700 4.73709711425
+ 9.750 4.51350541878
+ 9.800 4.44276376029
+ 9.850 5.02961839971
+ 9.900 3.95135573308
+ 9.950 3.60165390921
+ 10.00 4.28961288113
+ 10.05 3.53050359494
+ 10.10 4.52907680271
+ 10.15 4.29287529925
+ 10.20 3.98331562037
+ 10.25 4.25374899010
+ 10.30 4.32529280063
+ 10.35 4.32232682041
+ 10.40 4.10806219744
+ 10.45 4.27337485226
+ 10.50 4.82499805164
+ 10.55 4.08836674752
+ 10.60 3.78034279797
+ 10.65 4.13907067891
+ 10.70 4.24076127260
+ 10.75 3.98116602080
+ 10.80 4.18492061335
+ 10.85 4.12632626001
+ 10.90 4.05426403615
+ 10.95 4.62204244130
+ 11.00 4.10339432701
+ 11.05 3.65529839865
+ 11.10 3.62558839284
+ 11.15 3.73918961761
+ 11.20 3.68158174982
+ 11.25 4.19252058029
+ 11.30 4.23233845989
+ 11.35 3.97458702150
+ 11.40 4.00994330502
+ 11.45 3.94082278387
+ 11.50 3.98429819877
+ 11.55 3.53044767475
+ 11.60 3.68498046799
+ 11.65 3.89628464108
+ 11.70 3.40746069225
+ 11.75 3.84699025044
+ 11.80 3.96691104951
+ 11.85 3.38337975679
+ 11.90 3.24154766087
+ 11.95 3.32962768199
+ 12.00 3.84556015350
+ 12.05 3.88669262940
+ 12.10 3.51917557918
+ 12.15 3.85087376839
+ 12.20 3.64596702791
+ 12.25 4.00322112622
+ 12.30 3.91683279654
+ 12.35 3.77869224060
+ 12.40 3.49129211425
+ 12.45 3.18950893506
+ 12.50 3.66108203033
+ 12.55 3.74916470116
+ 12.60 3.72885486815
+ 12.65 3.27357287085
+ 12.70 4.02808369797
+ 12.75 2.90391966989
+ 12.80 3.47147897927
+ 12.85 3.53376937142
+ 12.90 3.67614003452
+ 12.95 3.86581047355
+ 13.00 4.03268817472
+ 13.05 3.79538384868
+ 13.10 3.49862517721
+ 13.15 3.58068341046
+ 13.20 3.72218732962
+ 13.25 3.76383752518
+ 13.30 3.70826100345
+ 13.35 3.15528401222
+ 13.40 3.78343107537
+ 13.45 3.88942581080
+ 13.50 3.30649700834
+ 13.55 3.61346986635
+ 13.60 3.20095422077
+ 13.65 3.63449770192
+ 13.70 4.37128278661
+ 13.75 3.09475840948
+ 13.80 3.64559069249
+ 13.85 3.70920856273
+ 13.90 3.71343881083
+ 13.95 2.94883314831
+ 14.00 3.62391537217
+ 14.05 2.96134101806
+ 14.10 3.50318527074
+ 14.15 3.69812270471
+ 14.20 3.21170636813
+ 14.25 3.75489895972
+ 14.30 3.83295556149
+ 14.35 3.35411441152
+ 14.40 3.65377126898
+ 14.45 4.20179018192
+ 14.50 3.42700220111
+ 14.55 2.96089725781
+ 14.60 3.63963100704
+ 14.65 3.45143192242
+ 14.70 3.47312870863
+ 14.75 3.68702071489
+ 14.80 3.74068343607
+ 14.85 3.80376538961
+ 14.90 4.16365943041
+ 14.95 4.33154401598
+ 15.00 4.40066122642
+ 15.05 4.11934259358
+ 15.10 4.95355878648
+ 15.15 4.52072404063
+ 15.20 4.84582662392
+ 15.25 4.75949753235
+ 15.30 5.28780550686
+ 15.35 5.99012095403
+ 15.40 5.73524180180
+ 15.45 5.93297578991
+ 15.50 6.26632055195
+ 15.55 6.74191937829
+ 15.60 6.49209695411
+ 15.65 6.87162095663
+ 15.70 6.89944165666
+ 15.75 7.16294722525
+ 15.80 7.78296801835
+ 15.85 8.03245785545
+ 15.90 9.04872095901
+ 15.95 9.47017598572
+ 16.00 8.51494345125
+ 16.05 8.62786233062
+ 16.10 8.81794996882
+ 16.15 9.47806204831
+ 16.20 9.34318913312
+ 16.25 10.1293952874
+ 16.30 9.94059006806
+ 16.35 9.46882942068
+ 16.40 9.46750041992
+ 16.45 10.0778227245
+ 16.50 9.60290613840
+ 16.55 9.68542564727
+ 16.60 10.0580512248
+ 16.65 9.54613864279
+ 16.70 9.08379114524
+ 16.75 8.85952483325
+ 16.80 8.77578733506
+ 16.85 9.43018023157
+ 16.90 8.35733674481
+ 16.95 8.65147020522
+ 17.00 8.17052247013
+ 17.05 7.71834063615
+ 17.10 7.64063615191
+ 17.15 7.10331868336
+ 17.20 6.88917292959
+ 17.25 6.49250021047
+ 17.30 6.83830350059
+ 17.35 5.86882143800
+ 17.40 5.99296213029
+ 17.45 4.81643628678
+ 17.50 5.07760708618
+ 17.55 5.21167983715
+ 17.60 4.61888070923
+ 17.65 4.61020867356
+ 17.70 4.73219860417
+ 17.75 3.76703344159
+ 17.80 3.68532849169
+ 17.85 3.67919307630
+ 17.90 3.62080268098
+ 17.95 3.63788880924
+ 18.00 2.89503192888
+ 18.05 3.50520587540
+ 18.10 2.84731747670
+ 18.15 4.27734825573
+ 18.20 3.54931968124
+ 18.25 3.28803468974
+ 18.30 3.17606045637
+ 18.35 3.10697518720
+ 18.40 3.48227906531
+ 18.45 3.21774996564
+ 18.50 3.58005048248
+ 18.55 3.27289036433
+ 18.60 2.88835892253
+ 18.65 2.85267546528
+ 18.70 2.92606772123
+ 18.75 2.46493125067
+ 18.80 3.03768504662
+ 18.85 2.57488214976
+ 18.90 3.29665698032
+ 18.95 2.88975339506
+ 19.00 2.70839853213
+ 19.05 3.00727227103
+ 19.10 3.29396028863
+ 19.15 2.69856536118
+ 19.20 2.86223090733
+ 19.25 3.27945065920
+ 19.30 3.05698092032
+ 19.35 2.77889552950
+ 19.40 2.62209068612
+ 19.45 2.50767338025
+ 19.50 2.28450752744
+ 19.55 2.28284494498
+ 19.60 3.05169783614
+ 19.65 3.07706070518
+ 19.70 3.07713755192
+ 19.75 3.08181857658
+ 19.80 2.93746839618
+ 19.85 2.93867692906
+ 19.90 2.89353641833
+ 19.95 2.84229568098
+ 20.00 3.14929449847
+ 20.05 2.92304225785
+ 20.10 2.51691328490
+ 20.15 3.66548182308
+ 20.20 2.72562129136
+ 20.25 2.92151941234
+ 20.30 2.61179520432
+ 20.35 3.39507949935
+ 20.40 2.86993930461
+ 20.45 2.44626599977
+ 20.50 3.03537741348
+ 20.55 2.25431030457
+ 20.60 2.74131220408
+ 20.65 2.49859125103
+ 20.70 3.46647322323
+ 20.75 2.79881233180
+ 20.80 3.14435082063
+ 20.85 3.24721502910
+ 20.90 2.12722356266
+ 20.95 2.23445294938
+ 21.00 3.22000367656
+ 21.05 2.79143633941
+ 21.10 2.55544938709
+ 21.15 3.09754430899
+ 21.20 2.93071189433
+ 21.25 3.15992128024
+ 21.30 2.60689765856
+ 21.35 3.38911899380
+ 21.40 3.17429295653
+ 21.45 3.28631473944
+ 21.50 2.98695670582
+ 21.55 2.98607712077
+ 21.60 3.09986464921
+ 21.65 2.82480782016
+ 21.70 2.89843603470
+ 21.75 3.20075854402
+ 21.80 3.20954084532
+ 21.85 3.53507050197
+ 21.90 2.75657608164
+ 21.95 2.79758925161
+ 22.00 2.87013825451
+ 22.05 2.79369963948
+ 22.10 2.83919143936
+ 22.15 3.00376872657
+ 22.20 3.03732823723
+ 22.25 3.38912091248
+ 22.30 3.18724881855
+ 22.35 2.84844803639
+ 22.40 3.47473510025
+ 22.45 3.02060426317
+ 22.50 3.63189699986
+ 22.55 2.86578549981
+ 22.60 3.04485365263
+ 22.65 3.27402675418
+ 22.70 2.55563996962
+ 22.75 3.38776152248
+ 22.80 3.75540487159
+ 22.85 3.31664199957
+ 22.90 3.56987968004
+ 22.95 3.50800782027
+ 23.00 3.37994390077
+ 23.05 3.27316188615
+ 23.10 3.41300112675
+ 23.15 3.15286512579
+ 23.20 3.53386979609
+ 23.25 3.06783652450
+ 23.30 3.63534152792
+ 23.35 3.08136446967
+ 23.40 3.43013868764
+ 23.45 3.13085944234
+ 23.50 2.52523438290
+ 23.55 3.44669475069
+ 23.60 3.22765951756
+ 23.65 3.50896545125
+ 23.70 3.86470748598
+ 23.75 2.96775610362
+ 23.80 2.97248103417
+ 23.85 3.11419495505
+ 23.90 3.69486046720
+ 23.95 3.81093313552
+ 24.00 4.04869270397
+ 24.05 3.39149397968
+ 24.10 3.46063743255
+ 24.15 3.28606281454
+ 24.20 3.71658861188
+ 24.25 3.78921391295
+ 24.30 3.52836967541
+ 24.35 3.75802856378
+ 24.40 3.32036173561
+ 24.45 3.49811083392
+ 24.50 3.74123977672
+ 24.55 3.13512327376
+ 24.60 3.32137818900
+ 24.65 3.77393820599
+ 24.70 3.69256530273
+ 24.75 4.02144329738
+ 24.80 3.51055117275
+ 24.85 4.14278693963
+ 24.90 3.97479741857
+ 24.95 3.21220712914
+ 25.00 3.90788342338
diff --git a/lmfit.egg-info/PKG-INFO b/lmfit.egg-info/PKG-INFO
index 2242d82..3693dc6 100644
--- a/lmfit.egg-info/PKG-INFO
+++ b/lmfit.egg-info/PKG-INFO
@@ -1,40 +1,138 @@
-Metadata-Version: 1.2
+Metadata-Version: 2.1
Name: lmfit
-Version: 1.0.2
+Version: 1.1.0
Summary: Least-Squares Minimization with Bounds and Constraints
-Home-page: https://lmfit.github.io/lmfit-py/
+Home-page: https://lmfit.github.io/lmfit-py/
Author: LMFit Development Team
Author-email: matt.newville@gmail.com
-License: BSD-3
-Download-URL: https://lmfit.github.io//lmfit-py/
-Description: A library for least-squares minimization and data fitting in
- Python. Built on top of scipy.optimize, lmfit provides a Parameter object
- which can be set as fixed or free, can have upper and/or lower bounds, or
- can be written in terms of algebraic constraints of other Parameters. The
- user writes a function to be minimized as a function of these Parameters,
- and the scipy.optimize methods are used to find the optimal values for the
- Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
- algorithm, and provides estimated standard errors and correlations between
- varied Parameters. Other minimization methods, including Nelder-Mead's
- downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
- others are also supported. Bounds and constraints can be placed on
- Parameters for all of these methods.
-
- In addition, methods for explicitly calculating confidence intervals are
- provided for exploring minmization problems where the approximation of
- estimating Parameter uncertainties from the covariance matrix is
- questionable.
+License: BSD 3-Clause
+Project-URL: Source, https://github.com/lmfit/lmfit-py
+Project-URL: Changelog, https://lmfit.github.io/lmfit-py/whatsnew.html
+Project-URL: Documentation, https://lmfit.github.io/lmfit-py/
+Project-URL: Tracker, https://github.com/lmfit/lmfit-py/issues
Keywords: curve-fitting,least-squares minimization
-Platform: Windows
-Platform: Linux
-Platform: Mac OS X
+Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Science/Research
+Classifier: Topic :: Scientific/Engineering
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
-Classifier: Topic :: Scientific/Engineering
-Requires-Python: >=3.6
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+Provides-Extra: dev
+Provides-Extra: doc
+Provides-Extra: test
+Provides-Extra: all
+License-File: LICENSE
+License-File: AUTHORS.txt
+
+LMfit-py
+========
+
+.. image:: https://dev.azure.com/lmfit/lmfit-py/_apis/build/status/lmfit.lmfit-py?branchName=master
+ :target: https://dev.azure.com/lmfit/lmfit-py/_build/latest?definitionId=1&branchName=master
+
+.. image:: https://codecov.io/gh/lmfit/lmfit-py/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/lmfit/lmfit-py
+
+.. image:: https://img.shields.io/pypi/v/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/pypi/dm/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/badge/docs-read-brightgreen
+ :target: https://lmfit.github.io/lmfit-py/
+
+.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg
+ :target: https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py
+
+.. _LMfit mailing list: https://groups.google.com/group/lmfit-py
+
+
+Overview
+---------
+
+LMfit-py provides a Least-Squares Minimization routine and class with a simple,
+flexible approach to parameterizing a model for fitting to data.
+
+LMfit is a pure Python package, and so is easy to install from source or with
+``pip install lmfit``.
+
+For questions, comments, and suggestions, please use the `LMfit mailing list`_.
+For known problems and bug reports, please use GitHub Issues. Please read
+`Contributing.md <.github/CONTRIBUTING.md>`_ before creating an Issue.
+
+
+Parameters and Fitting
+-------------------------
+
+Named Parameters can be held fixed or freely adjusted in the fit, or held
+between lower and upper bounds. In addition, parameters can be constrained
+as a simple mathematical expression of other Parameters.
+
+To do this, the programmer defines a Parameters object, an enhanced dictionary,
+containing named parameters::
+
+ fit_params = Parameters()
+ fit_params['amp'] = Parameter(value=1.2, min=0.1, max=1000)
+ fit_params['cen'] = Parameter(value=40.0, vary=False)
+ fit_params['wid'] = Parameter(value=4, min=0)
+
+or using the equivalent::
+
+ fit_params = Parameters()
+ fit_params.add('amp', value=1.2, min=0.1, max=1000)
+ fit_params.add('cen', value=40.0, vary=False)
+ fit_params.add('wid', value=4, min=0)
+
+The programmer will also write a function to be minimized (in the least-squares
+sense) with its first argument being this Parameters object, and additional
+positional and keyword arguments as desired::
+
+ def myfunc(params, x, data, someflag=True):
+ amp = params['amp'].value
+ cen = params['cen'].value
+ wid = params['wid'].value
+ ...
+ return residual_array
+
+For each call of this function, the values in ``params`` may have changed,
+subject to the bounds and constraint settings for each Parameter. The function
+should return the residual (i.e., ``data-model``) array to be minimized.
+
+The advantage here is that the function to be minimized does not have to be
+changed if different bounds or constraints are placed on the fitting Parameters.
+The fitting model (as described in ``myfunc``) is instead written in terms of
+the physical parameters of the system, and remains independent of what is
+actually varied in the fit. In addition, which parameters are adjusted and
+which are fixed is decided at run-time, so the user can easily change what is
+varied and what constraints are placed on the parameters during real-time
+data analysis.
+
+To perform the fit, the user calls::
+
+ result = minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
+
+After the fit, a ``MinimizerResult`` instance is returned that holds the
+results of the fit (e.g., fitting statistics and optimized parameters). The
+dictionary ``result.params`` contains the best-fit values, estimated standard
+deviations, and correlations with other variables in the fit.
+
+By default, the underlying fit algorithm is the Levenberg-Marquardt algorithm
+with numerically-calculated derivatives from MINPACK's lmdif function, as used
+by ``scipy.optimize.leastsq``. Most other solvers that are present in ``scipy``
+(e.g., Nelder-Mead, differential_evolution, basinhopping, etcetera) are also
+supported.
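
The workflow described above fits in a short, self-contained script; a
minimal sketch using made-up exponential-decay data rather than anything from
a real application::

    import numpy as np

    from lmfit import Parameters, fit_report, minimize

    x = np.linspace(0, 10, 101)
    data = 3.0*np.exp(-x/2.0) + np.random.normal(scale=0.1, size=x.size)

    def residual(params, x, data):
        # the model is re-evaluated with the current parameter values
        model = params['amp'].value * np.exp(-x/params['decay'].value)
        return data - model

    params = Parameters()
    params.add('amp', value=1.0, min=0)
    params.add('decay', value=1.0, min=1.e-4)

    result = minimize(residual, params, args=(x, data))
    print(fit_report(result))
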
diff --git a/lmfit.egg-info/SOURCES.txt b/lmfit.egg-info/SOURCES.txt
index 96341bf..fec1d79 100644
--- a/lmfit.egg-info/SOURCES.txt
+++ b/lmfit.egg-info/SOURCES.txt
@@ -2,18 +2,14 @@
.gitattributes
.gitignore
.pre-commit-config.yaml
-INSTALL
+AUTHORS.txt
LICENSE
-MANIFEST.in
README.rst
-THANKS.txt
azure-pipelines.yml
publish_docs.sh
-requirements-dev.txt
-requirements.txt
+pyproject.toml
setup.cfg
setup.py
-versioneer.py
.github/CONTRIBUTING.md
.github/ISSUE_TEMPLATE.md
.github/PULL_REQUEST_TEMPLATE.md
@@ -51,9 +47,6 @@ asv_benchmarking/asv.conf.json
asv_benchmarking/run_benchmark_code.py
asv_benchmarking/benchmarks/__init__.py
asv_benchmarking/benchmarks/benchmarks.py
-conda-recipe/bld.bat
-conda-recipe/build.sh
-conda-recipe/meta.yaml
doc/Makefile
doc/bounds.rst
doc/builtin_models.rst
@@ -62,7 +55,6 @@ doc/confidence.rst
doc/constraints.rst
doc/contents.rst
doc/doc_examples_to_gallery.py
-doc/extensions.py
doc/faq.rst
doc/filter_spurious_link_from_html.py
doc/fitting.rst
@@ -93,6 +85,7 @@ examples/README.txt
examples/doc_builtinmodels_nistgauss.py
examples/doc_builtinmodels_nistgauss2.py
examples/doc_builtinmodels_peakmodels.py
+examples/doc_builtinmodels_splinemodel.py
examples/doc_builtinmodels_stepmodel.py
examples/doc_confidence_advanced.py
examples/doc_confidence_basic.py
@@ -108,6 +101,7 @@ examples/doc_model_savemodelresult.py
examples/doc_model_savemodelresult2.py
examples/doc_model_two_components.py
examples/doc_model_uncertainty.py
+examples/doc_model_uncertainty2.py
examples/doc_model_with_iter_callback.py
examples/doc_model_with_nan_policy.py
examples/doc_parameters_basic.py
@@ -134,9 +128,9 @@ examples/model1d_gauss.dat
examples/peak.csv
examples/sinedata.dat
examples/test_peak.dat
+examples/test_splinepeak.dat
lmfit/__init__.py
lmfit/_ampgo.py
-lmfit/_version.py
lmfit/confidence.py
lmfit/jsonutils.py
lmfit/lineshapes.py
@@ -145,15 +139,15 @@ lmfit/model.py
lmfit/models.py
lmfit/parameter.py
lmfit/printfuncs.py
+lmfit/version.py
lmfit.egg-info/PKG-INFO
lmfit.egg-info/SOURCES.txt
lmfit.egg-info/dependency_links.txt
lmfit.egg-info/requires.txt
lmfit.egg-info/top_level.txt
tests/NISTModels.py
+tests/__init__.py
tests/conftest.py
-tests/o.py
-tests/t_enso.py
tests/test_1variable.py
tests/test_NIST_Strd.py
tests/test_algebraic_constraint.py
@@ -177,6 +171,7 @@ tests/test_manypeaks_speed.py
tests/test_max_nfev.py
tests/test_minimizer.py
tests/test_model.py
+tests/test_model_saveload.py
tests/test_model_uncertainties.py
tests/test_models.py
tests/test_multidatasets.py
@@ -185,6 +180,5 @@ tests/test_pandas.py
tests/test_parameter.py
tests/test_parameters.py
tests/test_printfuncs.py
-tests/test_saveload.py
tests/test_shgo.py
tests/test_stepmodel.py \ No newline at end of file
diff --git a/lmfit.egg-info/requires.txt b/lmfit.egg-info/requires.txt
index 81149a4..e2f6e15 100644
--- a/lmfit.egg-info/requires.txt
+++ b/lmfit.egg-info/requires.txt
@@ -1,4 +1,62 @@
-asteval>=0.9.16
-numpy>=1.16
-scipy>=1.2
-uncertainties>=3.0.1
+asteval>=0.9.28
+numpy>=1.19
+scipy>=1.6
+uncertainties>=3.1.4
+
+[all]
+build
+check-wheel-contents
+pre-commit
+twine
+codecov
+coverage
+flaky
+pytest
+pytest-cov
+cairosvg
+corner
+dill
+emcee>=3.0.0
+jupyter_sphinx>=0.2.4
+matplotlib
+numdifftools
+pandas
+Pillow
+Sphinx
+sphinx-gallery>=0.10
+sphinxcontrib-svg2pdfconverter
+sympy
+
+[all:platform_system == "Windows"]
+pycairo
+
+[dev]
+build
+check-wheel-contents
+pre-commit
+twine
+
+[doc]
+cairosvg
+corner
+dill
+emcee>=3.0.0
+jupyter_sphinx>=0.2.4
+matplotlib
+numdifftools
+pandas
+Pillow
+Sphinx
+sphinx-gallery>=0.10
+sphinxcontrib-svg2pdfconverter
+sympy
+
+[doc:platform_system == "Windows"]
+pycairo
+
+[test]
+codecov
+coverage
+flaky
+pytest
+pytest-cov
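
These sections list the setuptools "extras" declared in the packaging
metadata: each optional dependency group can be requested at install time,
for example with ``pip install "lmfit[test]"`` or ``pip install "lmfit[all]"``.
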
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index 052f637..c3e190c 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -30,7 +30,7 @@ useful enhancements, including:
* Many built-in models for common lineshapes are included and ready
to use.
-Copyright (c) 2021 Lmfit Developers ; BSD-3 license ; see LICENSE
+Copyright (c) 2022 Lmfit Developers ; BSD-3 license ; see LICENSE
"""
from asteval import Interpreter
@@ -38,13 +38,8 @@ from asteval import Interpreter
from .confidence import conf_interval, conf_interval2d
from .minimizer import Minimizer, MinimizerException, minimize
from .parameter import Parameter, Parameters
-from .printfuncs import (ci_report, fit_report, report_ci, report_errors,
- report_fit)
+from .printfuncs import ci_report, fit_report, report_ci, report_fit
from .model import Model, CompositeModel
from . import lineshapes, models
-# versioneer code
-from ._version import get_versions
-
-__version__ = get_versions()['version']
-del get_versions
+from lmfit.version import version as __version__
diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py
index 5dec685..607c439 100644
--- a/lmfit/_ampgo.py
+++ b/lmfit/_ampgo.py
@@ -118,11 +118,11 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None,
maxfunevals = np.inf
if tabulistsize < 1:
- raise Exception('Invalid tabulistsize specified: {:d}. It should be '
- 'an integer greater than zero.'.format(tabulistsize))
+ raise Exception(f'Invalid tabulistsize specified: {tabulistsize}. '
+ 'It should be an integer greater than zero.')
if tabustrategy not in ['oldest', 'farthest']:
- raise Exception('Invalid tabustrategy specified: {:s}. It must be one '
- 'of "oldest" or "farthest".'.format(tabustrategy))
+ raise Exception(f'Invalid tabustrategy specified: {tabustrategy}. '
+ 'It must be one of "oldest" or "farthest".')
tabulist = []
best_f = np.inf
@@ -225,7 +225,7 @@ def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None,
if disp:
print('\n\n ==> Successful tunnelling phase. Reached new '
- 'local minimum: {:.5g} < {:.5g}\n'.format(yf, oldf))
+ f'local minimum: {yf:.5g} < {oldf:.5g}\n')
i += 1
diff --git a/lmfit/_version.py b/lmfit/_version.py
deleted file mode 100644
index 2617e1c..0000000
--- a/lmfit/_version.py
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by 'versioneer.py' (0.19) from
-# revision-control system data, or from the parent directory name of an
-# unpacked source archive. Distribution tarballs contain a pre-generated copy
-# of this file.
-
-import json
-
-version_json = '''
-{
- "date": "2021-02-07T16:30:42-0600",
- "dirty": false,
- "error": null,
- "full-revisionid": "ddf7d40e8eae540a0c6ff7387064ac800b94cc2a",
- "version": "1.0.2"
-}
-''' # END VERSION_JSON
-
-
-def get_versions():
- return json.loads(version_json)
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index 684d53d..6914a58 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -1,18 +1,17 @@
"""Contains functions to calculate confidence intervals."""
-from collections import OrderedDict
from warnings import warn
import numpy as np
-from scipy.optimize import brentq
+from scipy.optimize import root_scalar
from scipy.special import erf
from scipy.stats import f
from .minimizer import MinimizerException
CONF_ERR_GEN = 'Cannot determine Confidence Intervals'
-CONF_ERR_STDERR = '%s without sensible uncertainty estimates' % CONF_ERR_GEN
-CONF_ERR_NVARS = '%s with < 2 variables' % CONF_ERR_GEN
+CONF_ERR_STDERR = f'{CONF_ERR_GEN} without sensible uncertainty estimates'
+CONF_ERR_NVARS = f'{CONF_ERR_GEN} with < 2 variables'
def f_compare(best_fit, new_fit):
@@ -31,7 +30,7 @@ def f_compare(best_fit, new_fit):
Returns
-------
float
- Value of the calculated probality.
+ Value of the calculated probability.
"""
nfree = best_fit.nfree
@@ -213,7 +212,7 @@ class ConfidenceInterval:
def calc_all_ci(self):
"""Calculate all confidence intervals."""
- out = OrderedDict()
+ out = {}
for p in self.p_names:
out[p] = (self.calc_ci(p, -1)[::-1] +
@@ -234,14 +233,20 @@ class ConfidenceInterval:
para = self.params[para]
# function used to calculate the probability
- calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
+ cache = {}
+
+ def calc_prob(val, target_prob):
+ if val not in cache:
+ cache[val] = self.calc_prob(para, val, 0)
+ return cache[val] - target_prob
+
if self.trace:
x = [i.value for i in self.params.values()]
self.trace_dict[para.name].append(x + [0])
para.vary = False
limit, max_prob = self.find_limit(para, direction)
- start_val = a_limit = float(para.value)
+ a_limit = float(para.value)
ret = []
orig_warn_settings = np.geterr()
np.seterr(all='ignore')
@@ -250,17 +255,12 @@ class ConfidenceInterval:
ret.append((prob, direction*np.inf))
continue
- try:
- val = brentq(calc_prob, a_limit,
- limit, rtol=.5e-4, args=prob)
- except ValueError:
- self.reset_vals()
- try:
- val = brentq(calc_prob, start_val,
- limit, rtol=.5e-4, args=prob)
- except ValueError:
- val = np.nan
-
+ sol = root_scalar(calc_prob, method='toms748', bracket=sorted([limit, a_limit]), rtol=.5e-4, args=(prob,))
+ if sol.converged:
+ val = sol.root
+ else:
+ val = np.nan
+ break
a_limit = val
ret.append((prob, val))
@@ -276,7 +276,7 @@ class ConfidenceInterval:
def find_limit(self, para, direction):
"""Find a value for given parameter so that prob(val) > sigmas."""
if self.verbose:
- print('Calculating CI for ' + para.name)
+ print(f'Calculating CI for {para.name}')
self.reset_vals()
# determine starting step
@@ -294,7 +294,7 @@ class ConfidenceInterval:
max_prob = max(self.probs)
while old_prob < max_prob:
- i = i + 1
+ i += 1
limit += step * direction
if limit > para.max:
limit = para.max
@@ -307,30 +307,26 @@ class ConfidenceInterval:
rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1e-12)
old_prob = new_prob
if self.verbose:
- msg = "P({}={}) = {}, max. prob={}"
- print(msg.format(para.name, limit, new_prob, max_prob))
+ print(f'P({para.name}={limit}) = {new_prob}, '
+ f'max. prob={max_prob}')
# check for convergence
- if bound_reached:
- if new_prob < max(self.probs):
- errmsg = ("Bound reached with "
- "prob({}={}) = {} < max(sigmas)"
- ).format(para.name, limit, new_prob)
- warn(errmsg)
- break
+ if bound_reached and new_prob < max(self.probs):
+ errmsg = (f'Bound reached with prob({para.name}={limit}) '
+ f'= {new_prob} < max(sigmas)')
+ warn(errmsg)
+ break
if i > self.maxiter:
- errmsg = f"maxiter={self.maxiter} reached "
- errmsg += ("and prob({}={}) = {} < "
- "max(sigmas).".format(para.name, limit, new_prob))
+ errmsg = (f'maxiter={self.maxiter} reached and prob('
+ f'{para.name}={limit}) = {new_prob} < max(sigmas)')
warn(errmsg)
break
if rel_change < self.min_rel_change:
- errmsg = "rel_change={} < {} ".format(rel_change,
- self.min_rel_change)
- errmsg += ("at iteration {} and prob({}={}) = {} < max"
- "(sigmas).".format(i, para.name, limit, new_prob))
+ errmsg = (f'rel_change={rel_change} < {self.min_rel_change} '
+ f'at iteration {i} and prob({para.name}={limit}) = '
+ f'{new_prob} < max(sigmas)')
warn(errmsg)
break
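
The switch from ``brentq`` to ``root_scalar`` above keeps the same bracketing
requirement; a standalone sketch of the API being used, with a toy function::

    from scipy.optimize import root_scalar

    def f(x, offset):
        return x**2 - offset

    # 'toms748' needs a bracket [a, b] over which f changes sign, just as
    # brentq did; extra arguments are passed through `args`
    sol = root_scalar(f, method='toms748', bracket=[0.0, 3.0], args=(4.0,))
    print(sol.converged, sol.root)   # True 2.0
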
diff --git a/lmfit/jsonutils.py b/lmfit/jsonutils.py
index 96c73be..7e91afa 100644
--- a/lmfit/jsonutils.py
+++ b/lmfit/jsonutils.py
@@ -2,6 +2,7 @@
from base64 import b64decode, b64encode
import sys
+import warnings
import numpy as np
@@ -18,6 +19,9 @@ except ImportError:
read_json = None
+pyvers = f'{sys.version_info.major}.{sys.version_info.minor}'
+
+
def find_importer(obj):
"""Find importer of an object."""
oname = obj.__name__
@@ -85,16 +89,10 @@ def encode4js(obj):
out[encode4js(key)] = encode4js(val)
return out
if callable(obj):
- val, importer = None, None
- pyvers = "%d.%d" % (sys.version_info.major,
- sys.version_info.minor)
- if HAS_DILL:
- val = str(b64encode(dill.dumps(obj)), 'utf-8')
- else:
- val = None
- importer = find_importer(obj)
+ value = str(b64encode(dill.dumps(obj)), 'utf-8') if HAS_DILL else None
return dict(__class__='Callable', __name__=obj.__name__,
- pyversion=pyvers, value=val, importer=importer)
+ pyversion=pyvers, value=value,
+ importer=find_importer(obj))
return obj
@@ -131,13 +129,19 @@ def decode4js(obj):
elif classname == 'PSeries' and read_json is not None:
out = read_json(obj['value'], typ='series')
elif classname == 'Callable':
- out = val = obj['__name__']
- pyvers = "%d.%d" % (sys.version_info.major,
- sys.version_info.minor)
- if pyvers == obj['pyversion'] and HAS_DILL:
- out = dill.loads(b64decode(obj['value']))
- elif obj['importer'] is not None:
- out = import_from(obj['importer'], val)
+ out = obj['__name__']
+ try:
+ out = import_from(obj['importer'], out)
+ unpacked = True
+ except (ImportError, AttributeError):
+ unpacked = False
+ if not unpacked and HAS_DILL:
+ try:
+ out = dill.loads(b64decode(obj['value']))
+ except RuntimeError:
+ msg = "Could not unpack dill-encoded callable `{0}`, saved with Python version {1}"
+ warnings.warn(msg.format(obj['__name__'],
+ obj['pyversion']))
elif classname in ('Dict', 'dict'):
out = {}
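
The encoder/decoder pair above round-trips callables through ``dill`` and
base64 so they can be stored in JSON; a toy sketch of that mechanism,
assuming ``dill`` is installed::

    from base64 import b64decode, b64encode

    import dill

    def half(x):
        return x / 2.0

    encoded = str(b64encode(dill.dumps(half)), 'utf-8')   # JSON-safe string
    restored = dill.loads(b64decode(encoded))
    print(restored(3.0))   # 1.5
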
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index 064a18c..7ecdc42 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -1,25 +1,24 @@
-"""Basic model line shapes and distribution functions."""
+"""Basic model lineshapes and distribution functions."""
-import warnings
-
-from numpy import (arctan, copysign, cos, exp, isnan, log, pi, real, sin, sqrt,
- where)
+from numpy import (arctan, copysign, cos, exp, isclose, isnan, log, log1p,
+ maximum, minimum, pi, real, sin, sqrt, where)
+from scipy.special import betaln as betalnfcn
from scipy.special import erf, erfc
from scipy.special import gamma as gamfcn
+from scipy.special import loggamma as loggammafcn
from scipy.special import wofz
log2 = log(2)
s2pi = sqrt(2*pi)
-spi = sqrt(pi)
s2 = sqrt(2.0)
# tiny had been numpy.finfo(numpy.float64).eps ~= 2.2e-16.
# here, we explicitly set it to 1.e-15 == numpy.finfo(numpy.float64).resolution
tiny = 1.0e-15
functions = ('gaussian', 'gaussian2d', 'lorentzian', 'voigt', 'pvoigt',
- 'moffat', 'pearson7', 'breit_wigner', 'damped_oscillator',
+ 'moffat', 'pearson4', 'pearson7', 'breit_wigner', 'damped_oscillator',
'dho', 'logistic', 'lognormal', 'students_t', 'expgaussian',
- 'doniach', 'donaich', 'skewed_gaussian', 'skewed_voigt',
+ 'doniach', 'skewed_gaussian', 'skewed_voigt',
'thermal_distribution', 'step', 'rectangle', 'exponential',
'powerlaw', 'linear', 'parabolic', 'sine', 'expsine',
'split_lorentzian')
@@ -148,6 +147,26 @@ def moffat(x, amplitude=1, center=0., sigma=1, beta=1.):
return amplitude / (((x - center)/max(tiny, sigma))**2 + 1)**beta
+def pearson4(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0, skew=0.0):
+ """Return a Pearson4 lineshape.
+
+ Using the Wikipedia definition:
+
+ pearson4(x, amplitude, center, sigma, expon, skew) =
+        amplitude*|gamma(expon + I skew/2)/gamma(expon)|**2/(sigma*beta(expon-0.5, 0.5)) * (1+arg**2)**(-expon) * exp(-skew * arctan(arg))
+
+ where ``arg = (x-center)/sigma``, `gamma` is the gamma function and `beta` is the beta function.
+
+ For more information, see: https://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_IV_distribution
+
+ """
+ expon = max(tiny, expon)
+ sigma = max(tiny, sigma)
+ arg = (x - center) / sigma
+ logprefactor = 2 * (real(loggammafcn(expon + skew * 0.5j)) - loggammafcn(expon)) - betalnfcn(expon - 0.5, 0.5)
+ return (amplitude / sigma) * exp(logprefactor - expon * log1p(arg * arg) - skew * arctan(arg))
+
+
def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
"""Return a Pearson7 lineshape.
@@ -188,7 +207,7 @@ def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1):
return amplitude/sqrt((1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2)
-def dho(x, amplitude=1., center=0., sigma=1., gamma=1.0):
+def dho(x, amplitude=1., center=1., sigma=1., gamma=1.0):
"""Return a Damped Harmonic Oscillator.
Similar to the version from PAN:
@@ -201,16 +220,19 @@ def dho(x, amplitude=1., center=0., sigma=1., gamma=1.0):
``lp(x, center, sigma) = 1.0 / ((x+center)**2 + sigma**2)``
"""
+ factor = amplitude * sigma / pi
bose = (1.0 - exp(-x/max(tiny, gamma)))
if isinstance(bose, (int, float)):
- bose = max(tiny, bose)
+ bose = not_zero(bose)
else:
bose[where(isnan(bose))] = tiny
- bose[where(bose <= tiny)] = tiny
+ bose[where(abs(bose) <= tiny)] = tiny
lm = 1.0/((x-center)**2 + sigma**2)
lp = 1.0/((x+center)**2 + sigma**2)
- return amplitude*sigma/pi*(lm - lp)/bose
+ return factor * where(isclose(x, 0.0),
+ 4*gamma*center/(center**2+sigma**2)**2,
+ (lm - lp)/bose)
def logistic(x, amplitude=1., center=0., sigma=1.):
@@ -289,20 +311,6 @@ def doniach(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
-def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
- """Return a Doniach Sunjic asymmetric lineshape.
-
- Function added here for backwards-compatibility, will emit a
- `FutureWarning` when used.
-
- """
- msg = ('Please correct the name of your lineshape function: donaich --> '
- 'doniach. The incorrect spelling will be removed in a later '
- 'release.')
- warnings.warn(FutureWarning(msg))
- return doniach(x, amplitude, center, sigma, gamma)
-
-
def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
"""Return a Gaussian lineshape, skewed with error function.
@@ -397,8 +405,8 @@ def thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0, form='bose'):
elif form.startswith('fermi'):
offset = 1
else:
- msg = "Invalid value ('%s') for argument 'form'; should be one of %s."\
- % (form, "'maxwell', 'fermi', or 'bose'")
+ msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
+ "of 'maxwell', 'fermi', or 'bose'.")
raise ValueError(msg)
return real(1/(amplitude*exp((x - center)/not_zero(kt)) + offset + tiny*1j))
@@ -410,7 +418,7 @@ def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
Starts at 0.0, ends at `amplitude`, with half-max at `center`, and
rising with `form`:
- - `'linear'` (default) = amplitude * min(1, max(0, arg))
+ - `'linear'` (default) = amplitude * min(1, max(0, arg + 0.5))
- `'atan'`, `'arctan'` = amplitude * (0.5 + atan(arg)/pi)
- `'erf'` = amplitude * (1 + erf(arg))/2.0
- `'logistic'` = amplitude * [1 - 1/(1 + exp(arg))]
@@ -423,15 +431,14 @@ def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
if form == 'erf':
out = 0.5*(1 + erf(out))
elif form == 'logistic':
- out = (1. - 1./(1. + exp(out)))
+ out = 1. - 1./(1. + exp(out))
elif form in ('atan', 'arctan'):
out = 0.5 + arctan(out)/pi
elif form == 'linear':
- out[where(out < 0)] = 0.0
- out[where(out > 1)] = 1.0
+ out = minimum(1, maximum(0, out + 0.5))
else:
- msg = "Invalid value ('%s') for argument 'form'; should be one of %s."\
- % (form, "'erf', 'logistic', 'atan', 'arctan', or 'linear'")
+ msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
+ "of 'erf', 'logistic', 'atan', 'arctan', or 'linear'.")
raise ValueError(msg)
return amplitude*out
@@ -462,18 +469,14 @@ def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
if form == 'erf':
out = 0.5*(erf(arg1) + erf(arg2))
elif form == 'logistic':
- out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
+ out = 1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2))
elif form in ('atan', 'arctan'):
out = (arctan(arg1) + arctan(arg2))/pi
elif form == 'linear':
- arg1[where(arg1 < 0)] = 0.0
- arg1[where(arg1 > 1)] = 1.0
- arg2[where(arg2 > 0)] = 0.0
- arg2[where(arg2 < -1)] = -1.0
- out = arg1 + arg2
+ out = 0.5*(minimum(1, maximum(-1, arg1)) + minimum(1, maximum(-1, arg2)))
else:
- msg = "Invalid value ('%s') for argument 'form'; should be one of %s."\
- % (form, "'erf', 'logistic', 'atan', 'arctan', or 'linear'")
+ msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
+ "of 'erf', 'logistic', 'atan', 'arctan', or 'linear'.")
raise ValueError(msg)
return amplitude*out
@@ -501,7 +504,7 @@ def powerlaw(x, amplitude=1, exponent=1.0):
def linear(x, slope=1.0, intercept=0.0):
"""Return a linear function.
- linear(x, slope, interceps) = slope * x + intercept
+ linear(x, slope, intercept) = slope * x + intercept
"""
return slope * x + intercept
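
Like the other peak lineshapes, the new ``pearson4`` integrates to
``amplitude`` (for ``expon > 0.5``); a quick numerical check with arbitrarily
chosen parameter values::

    import numpy as np

    from lmfit.lineshapes import pearson4

    x = np.linspace(-50, 50, 100001)
    y = pearson4(x, amplitude=2.0, center=0.0, sigma=1.5, expon=2.5, skew=0.5)
    print(np.trapz(y, x))   # approximately 2.0
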
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index f57b58b..641651e 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -22,6 +22,7 @@ import numbers
import warnings
import numpy as np
+from scipy import __version__ as scipy_version
from scipy.linalg import LinAlgError, inv
from scipy.optimize import basinhopping as scipy_basinhopping
from scipy.optimize import brute as scipy_brute
@@ -76,7 +77,7 @@ except ImportError:
# define the namedtuple here so pickle will work with the MinimizerResult
Candidate = namedtuple('Candidate', ['params', 'score'])
-MAXEVAL_Warning = "ignoring `%s` argument to `%s()`. Use `max_nfev` instead."
+maxeval_warning = "ignoring `{}` argument to `{}()`. Use `max_nfev` instead."
def thisfuncname():
@@ -317,20 +318,19 @@ class MinimizerResult:
@property
def flatchain(self):
"""Show flatchain view of the sampling chain from `emcee` method."""
- if hasattr(self, 'chain'):
- if HAS_PANDAS:
- if len(self.chain.shape) == 4:
- return pd.DataFrame(self.chain[0, ...].reshape((-1, self.nvarys)),
- columns=self.var_names)
- elif len(self.chain.shape) == 3:
- return pd.DataFrame(self.chain.reshape((-1, self.nvarys)),
- columns=self.var_names)
- else:
- raise NotImplementedError('Please install Pandas to see the '
- 'flattened chain')
- else:
+ if not hasattr(self, 'chain'):
return None
+ if not HAS_PANDAS:
+ raise NotImplementedError('Please install Pandas to see the '
+ 'flattened chain')
+ if len(self.chain.shape) == 4:
+ return pd.DataFrame(self.chain[0, ...].reshape((-1, self.nvarys)),
+ columns=self.var_names)
+ elif len(self.chain.shape) == 3:
+ return pd.DataFrame(self.chain.reshape((-1, self.nvarys)),
+ columns=self.var_names)
+
def show_candidates(self, candidate_nmb='all'):
"""Show pretty_print() representation of candidates.
@@ -347,16 +347,13 @@ class MinimizerResult:
if hasattr(self, 'candidates'):
if candidate_nmb == 'all':
for i, candidate in enumerate(self.candidates):
- print("\nCandidate #{}, chisqr = "
- "{:.3f}".format(i+1, candidate.score))
+ print(f"\nCandidate #{i + 1}, chisqr = {candidate.score:.3f}")
candidate.params.pretty_print()
elif (candidate_nmb < 1 or candidate_nmb > len(self.candidates)):
- raise ValueError("'candidate_nmb' should be between 1 and {}."
- .format(len(self.candidates)))
+ raise ValueError(f"'candidate_nmb' should be between 1 and {len(self.candidates)}.")
else:
candidate = self.candidates[candidate_nmb-1]
- print("\nCandidate #{}, chisqr = "
- "{:.3f}".format(candidate_nmb, candidate.score))
+ print(f"\nCandidate #{candidate_nmb}, chisqr = {candidate.score:.3f}")
candidate.params.pretty_print()
def _calculate_statistics(self):
@@ -390,7 +387,7 @@ class Minimizer:
_err_nonparam = ("params must be a minimizer.Parameters() instance or"
" list of Parameters()")
- _err_max_evals = ("Too many function calls (max set to %i)! Use:"
+    _err_max_evals = ("Too many function calls (max set to {:d})! Use:"
" minimize(func, params, ..., max_nfev=NNN)"
" to increase this maximum.")
@@ -493,7 +490,7 @@ class Minimizer:
self.userkws = {}
for maxnfev_alias in ('maxfev', 'maxiter'):
if maxnfev_alias in kws:
- warnings.warn(MAXEVAL_Warning % (maxnfev_alias, 'Minimizer'),
+ warnings.warn(maxeval_warning.format(maxnfev_alias, 'Minimizer'),
RuntimeWarning)
kws.pop(maxnfev_alias)
@@ -567,11 +564,10 @@ class Minimizer:
if fvars.shape == ():
fvars = fvars.reshape((1,))
- if apply_bounds_transformation:
- for name, val in zip(self.result.var_names, fvars):
+ for name, val in zip(self.result.var_names, fvars):
+ if apply_bounds_transformation:
params[name].value = params[name].from_internal(val)
- else:
- for name, val in zip(self.result.var_names, fvars):
+ else:
params[name].value = val
params.update_constraints()
@@ -582,10 +578,10 @@ class Minimizer:
self.result.last_internal_values = fvars
if self.result.nfev > self.max_nfev:
self.result.aborted = True
- m = "number of function evaluations > %d" % self.max_nfev
- self.result.message = "Fit aborted: %s" % m
+ m = f"number of function evaluations > {self.max_nfev}"
+ self.result.message = f"Fit aborted: {m}"
self.result.success = False
- raise AbortFitException("fit aborted: too many function evaluations (%d)." % self.max_nfev)
+ raise AbortFitException(f"fit aborted: too many function evaluations {self.max_nfev}")
out = self.userfcn(params, *self.userargs, **self.userkws)
@@ -690,11 +686,11 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
"""
- # determine which parameters are actually variables
- # and which are defined expressions.
+ self._abort = False
+
self.result = MinimizerResult()
result = self.result
if params is not None:
@@ -715,11 +711,15 @@ class Minimizer:
# and which are defined expressions.
result.var_names = [] # note that this *does* belong to self...
result.init_vals = []
+ result._init_vals_internal = []
result.params.update_constraints()
result.nfev = 0
result.call_kws = {}
result.errorbars = False
result.aborted = False
+ result.success = True
+ result.covar = None
+
for name, par in self.result.params.items():
par.stderr = None
par.correl = None
@@ -727,7 +727,8 @@ class Minimizer:
par.vary = False
if par.vary:
result.var_names.append(name)
- result.init_vals.append(par.setup_bounds())
+ result._init_vals_internal.append(par.setup_bounds())
+ result.init_vals.append(par.value)
par.init_value = par.value
if par.name is None:
@@ -790,8 +791,12 @@ class Minimizer:
Hfun = ndt.Hessian(self.penalty, step=1.e-4)
hessian_ndt = Hfun(fvars)
cov_x = inv(hessian_ndt) * 2.0
+
+ if cov_x.diagonal().min() < 0:
+ # we know the calculated covariance is incorrect, so we set the covariance to None
+ cov_x = None
except (LinAlgError, ValueError):
- return None
+ cov_x = None
finally:
self.result.nfev = nfev
@@ -871,8 +876,8 @@ class Minimizer:
for par in self.result.params.values():
eval_stderr(par, uvars, self.result.var_names, self.result.params)
# restore nominal values
- for v, nam in zip(uvars, self.result.var_names):
- self.result.params[nam].value = v.nominal_value
+ for v, name in zip(uvars, self.result.var_names):
+ self.result.params[name].value = v.nominal_value
def scalar_minimize(self, method='Nelder-Mead', params=None, max_nfev=None,
**kws):
@@ -934,7 +939,7 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
Notes
@@ -951,15 +956,16 @@ class Minimizer:
"""
result = self.prepare_fit(params=params)
result.method = method
- variables = result.init_vals
+ variables = result._init_vals_internal
params = result.params
self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
fmin_kws = dict(method=method, options={'maxiter': 2*self.max_nfev})
+ # fmin_kws = dict(method=method, options={'maxfun': 2*self.max_nfev})
fmin_kws.update(self.kws)
if 'maxiter' in kws:
- warnings.warn(MAXEVAL_Warning % ('maxiter', thisfuncname()),
+ warnings.warn(maxeval_warning.format('maxiter', thisfuncname()),
RuntimeWarning)
kws.pop('maxiter')
fmin_kws.update(kws)
@@ -1000,7 +1006,7 @@ class Minimizer:
'bound for all varying parameters')
_bounds = [(-np.pi / 2., np.pi / 2.)] * len(variables)
- kwargs = dict(args=(), strategy='best1bin', maxiter=None,
+ kwargs = dict(args=(), strategy='best1bin', maxiter=self.max_nfev,
popsize=15, tol=0.01, mutation=(0.5, 1),
recombination=0.7, seed=None, callback=None,
disp=False, polish=True, init='latinhypercube',
@@ -1045,7 +1051,7 @@ class Minimizer:
result._calculate_statistics()
- # calculate the cov_x and estimate uncertanties/correlations
+ # calculate the cov_x and estimate uncertainties/correlations
if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
len(result.residual) > len(result.var_names)):
_covar_ndt = self._calculate_covariance_matrix(result.x)
@@ -1081,7 +1087,7 @@ class Minimizer:
Extra keyword arguments required for user objective function.
float_behavior : {'posterior', 'chi2'}, optional
Specifies meaning of objective when it returns a float. Use
- `'posterior'` if objective function returnins a log-posterior
+ `'posterior'` if objective function returns a log-posterior
probability (default) or `'chi2'` if it returns a chi2 value.
is_weighted : bool, optional
If `userfcn` returns a vector of residuals then `is_weighted`
@@ -1258,8 +1264,8 @@ class Minimizer:
`lnprob` contains the log probability for each sample in
`chain`. The sample with the highest probability corresponds
to the maximum likelihood estimate. `acor` is an array
- containing the autocorrelation time for each parameter if the
- autocorrelation time can be computed from the chain. Finally,
+ containing the auto-correlation time for each parameter if the
+ auto-correlation time can be computed from the chain. Finally,
`acceptance_fraction` (an array of the fraction of steps
accepted for each walker).
@@ -1346,16 +1352,14 @@ class Minimizer:
# check if the userfcn returns a vector of residuals
out = self.userfcn(params, *self.userargs, **self.userkws)
out = np.asarray(out).ravel()
- if out.size > 1 and is_weighted is False:
- # we need to marginalise over a constant data uncertainty
- if '__lnsigma' not in params:
- # __lnsigma should already be in params if is_weighted was
- # previously set to True.
- params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf,
- vary=True)
- # have to re-prepare the fit
- result = self.prepare_fit(params)
- params = result.params
+ if out.size > 1 and is_weighted is False and '__lnsigma' not in params:
+ # __lnsigma should already be in params if is_weighted was
+ # previously set to True.
+ params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf,
+ vary=True)
+ # have to re-prepare the fit
+ result = self.prepare_fit(params)
+ params = result.params
result.method = 'emcee'
@@ -1374,7 +1378,6 @@ class Minimizer:
else:
# don't want to append bounds if they're not being varied.
continue
-
param.from_internal = lambda val: val
lb, ub = param.min, param.max
if lb is None or lb is np.nan:
@@ -1500,9 +1503,8 @@ class Minimizer:
handle_inf=False)
# If uncertainty was automatically estimated, weight the residual properly
- if (not is_weighted) and (result.residual.size > 1):
- if '__lnsigma' in params:
- result.residual = result.residual/np.exp(params['__lnsigma'].value)
+ if not is_weighted and result.residual.size > 1 and '__lnsigma' in params:
+ result.residual /= np.exp(params['__lnsigma'].value)
# Calculate statistics for the two standard cases:
if isinstance(result.residual, np.ndarray) or (float_behavior == 'chi2'):
@@ -1556,7 +1558,7 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
"""
result = self.prepare_fit(params)
@@ -1572,19 +1574,30 @@ class Minimizer:
lower_bounds.append(replace_none(par.min, -1))
upper_bounds.append(replace_none(par.max, 1))
- result.call_kws = kws
+ least_squares_kws = dict(jac='2-point', method='trf', ftol=1e-08,
+ xtol=1e-08, gtol=1e-08, x_scale=1.0,
+ loss='linear', f_scale=1.0, diff_step=None,
+ tr_solver=None, tr_options={},
+ jac_sparsity=None, max_nfev=2*self.max_nfev,
+ verbose=0, kwargs={})
+
+ least_squares_kws.update(self.kws)
+ least_squares_kws.update(kws)
+
+ least_squares_kws['kwargs'].update({'apply_bounds_transformation': False})
+ result.call_kws = least_squares_kws
+
try:
ret = least_squares(self.__residual, start_vals,
bounds=(lower_bounds, upper_bounds),
- kwargs=dict(apply_bounds_transformation=False),
- max_nfev=2*self.max_nfev, **kws)
+ **least_squares_kws)
result.residual = ret.fun
except AbortFitException:
pass
- # note: upstream least_squares is actually returning
- # "last evaluation", not "best result", do we do that
- # here for consistency
+        # Note: scipy.optimize.least_squares actually returns the "last
+        # evaluation", which is not necessarily the "best result"; we
+        # re-evaluate the residual at the returned solution for consistency
if not result.aborted:
result.nfev -= 1
result.residual = self.__residual(ret.x, False)
@@ -1602,15 +1615,7 @@ class Minimizer:
hess = (ret.jac.T * ret.jac).toarray()
elif isinstance(ret.jac, LinearOperator):
identity = np.eye(ret.jac.shape[1], dtype=ret.jac.dtype)
- # TODO: Remove try-except when SciPy < 1.4.0 support dropped
- try:
- # For SciPy >= 1.4.0 (with Linear Operator transpose)
- # https://github.com/scipy/scipy/pull/9064
- hess = (ret.jac.T * ret.jac) * identity
- except AttributeError:
- # For SciPy < 1.4.0 (without Linear Operator transpose)
- jac = ret.jac * identity
- hess = np.matmul(jac.T, jac)
+ hess = (ret.jac.T * ret.jac) * identity
else:
hess = np.matmul(ret.jac.T, ret.jac)
result.covar = np.linalg.inv(hess)
@@ -1629,22 +1634,7 @@ class Minimizer:
from the covariance matrix.
This method calls :scipydoc:`optimize.leastsq` and, by default,
- numerical derivatives are used, and the following arguments are
- set:
-
- +------------------+----------------+------------------------+
- | :meth:`leastsq` | Default Value | Description |
- | arg | | |
- +==================+================+========================+
- | `xtol` | 1.e-7 | Relative error in the |
- | | | approximate solution |
- +------------------+----------------+------------------------+
- | `ftol` | 1.e-7 | Relative error in the |
- | | | desired sum-of-squares |
- +------------------+----------------+------------------------+
- | `Dfun` | None | Function to call for |
- | | | Jacobian calculation |
- +------------------+----------------+------------------------+
+ numerical derivatives are used.
Parameters
----------
@@ -1665,28 +1655,31 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
"""
result = self.prepare_fit(params=params)
result.method = 'leastsq'
result.nfev -= 2 # correct for "pre-fit" initialization/checks
- variables = result.init_vals
+ variables = result._init_vals_internal
- # note we set the max number of function evaluations here, and send twice that
- # value to the solver so it essentially never stops on its own
+ # Note: we set max number of function evaluations here, and send twice
+ # that value to the solver so it essentially never stops on its own
self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
- lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False,
- gtol=1.e-7, maxfev=2*self.max_nfev, Dfun=None)
+ lskws = dict(Dfun=None, full_output=1, col_deriv=0, ftol=1.49012e-08,
+ xtol=1.49012e-08, gtol=0.0, maxfev=2*self.max_nfev,
+ epsfcn=None, factor=100, diag=None)
+
if 'maxfev' in kws:
- warnings.warn(MAXEVAL_Warning % ('maxfev', thisfuncname()),
+ warnings.warn(maxeval_warning.format('maxfev', thisfuncname()),
RuntimeWarning)
kws.pop('maxfev')
lskws.update(self.kws)
lskws.update(kws)
self.col_deriv = False
+
if lskws['Dfun'] is not None:
self.jacfcn = lskws['Dfun']
self.col_deriv = lskws['col_deriv']
@@ -1731,7 +1724,7 @@ class Minimizer:
elif ier == 4:
             result.message = 'One or more variables did not affect the fit.'
elif ier == 5:
- result.message = self._err_max_evals % lskws['maxfev']
+ result.message = self._err_max_evals.format(lskws['maxfev'])
else:
result.message = 'Tolerance seems to be too small.'
@@ -1743,7 +1736,7 @@ class Minimizer:
# calculate parameter uncertainties and correlations
self._calculate_uncertainties_correlations()
else:
- result.message = '%s Could not estimate error-bars.' % result.message
+ result.message = f'{result.message} Could not estimate error-bars.'
np.seterr(**orig_warn_settings)
@@ -1782,14 +1775,19 @@ class Minimizer:
result.method = 'basinhopping'
self.set_max_nfev(max_nfev, 200000*(result.nvarys+1))
basinhopping_kws = dict(niter=100, T=1.0, stepsize=0.5,
- minimizer_kwargs={}, take_step=None,
+ minimizer_kwargs=None, take_step=None,
accept_test=None, callback=None, interval=50,
disp=False, niter_success=None, seed=None)
+ # FIXME: update when SciPy requirement is >= 1.8
+ if int(scipy_version.split('.')[1]) >= 8:
+ basinhopping_kws.update({'target_accept_rate': 0.5,
+ 'stepwise_factor': 0.9})
+
basinhopping_kws.update(self.kws)
basinhopping_kws.update(kws)
- x0 = result.init_vals
+ x0 = result._init_vals_internal
result.call_kws = basinhopping_kws
try:
ret = scipy_basinhopping(self.penalty, x0, **basinhopping_kws)
@@ -1803,7 +1801,7 @@ class Minimizer:
result._calculate_statistics()
- # calculate the cov_x and estimate uncertanties/correlations
+ # calculate the cov_x and estimate uncertainties/correlations
if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
len(result.residual) > len(result.var_names)):
_covar_ndt = self._calculate_covariance_matrix(ret.x)
@@ -2082,8 +2080,8 @@ class Minimizer:
ampgo_kws.update(self.kws)
ampgo_kws.update(kws)
- values = result.init_vals
- result.method = "ampgo, with {} as local solver".format(ampgo_kws['local'])
+ values = result._init_vals_internal
+ result.method = f"ampgo, with {ampgo_kws['local']} as local solver"
result.call_kws = ampgo_kws
try:
ret = ampgo(self.penalty, values, **ampgo_kws)
@@ -2105,7 +2103,7 @@ class Minimizer:
result._calculate_statistics()
- # calculate the cov_x and estimate uncertanties/correlations
+ # calculate the cov_x and estimate uncertainties/correlations
if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
len(result.residual) > len(result.var_names)):
_covar_ndt = self._calculate_covariance_matrix(result.ampgo_x0)
@@ -2155,6 +2153,10 @@ class Minimizer:
minimizer_kwargs=None, options=None,
sampling_method='simplicial')
+ # FIXME: update when SciPy requirement is >= 1.7
+ if int(scipy_version.split('.')[1]) >= 7:
+ shgo_kws['n'] = None
+
shgo_kws.update(self.kws)
shgo_kws.update(kws)
@@ -2179,7 +2181,7 @@ class Minimizer:
result._calculate_statistics()
- # calculate the cov_x and estimate uncertanties/correlations
+ # calculate the cov_x and estimate uncertainties/correlations
if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
len(result.residual) > len(result.var_names)):
result.covar = self._calculate_covariance_matrix(result.shgo_x)
@@ -2225,12 +2227,16 @@ class Minimizer:
da_kws = dict(maxiter=1000, local_search_options={},
initial_temp=5230.0, restart_temp_ratio=2e-05,
visit=2.62, accept=-5.0, maxfun=2*self.max_nfev,
- seed=None, no_local_search=False, callback=None,
- x0=None)
+ seed=None, no_local_search=False, callback=None, x0=None)
da_kws.update(self.kws)
da_kws.update(kws)
+ # FIXME: update when SciPy requirement is >= 1.8
+ # ``local_search_options`` deprecated in favor of ``minimizer_kwargs``
+ if int(scipy_version.split('.')[1]) >= 8:
+ da_kws.update({'minimizer_kwargs': da_kws.pop('local_search_options')})
+
varying = np.asarray([par.vary for par in self.params.values()])
bounds = np.asarray([(par.min, par.max) for par in
self.params.values()])[varying]
@@ -2256,7 +2262,7 @@ class Minimizer:
result._calculate_statistics()
- # calculate the cov_x and estimate uncertanties/correlations
+ # calculate the cov_x and estimate uncertainties/correlations
if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
len(result.residual) > len(result.var_names)):
result.covar = self._calculate_covariance_matrix(result.da_x)
@@ -2324,14 +2330,14 @@ class Minimizer:
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
"""
kwargs = {'params': params}
kwargs.update(self.kws)
for maxnfev_alias in ('maxfev', 'maxiter'):
if maxnfev_alias in kws:
- warnings.warn(MAXEVAL_Warning % (maxnfev_alias, thisfuncname()),
+ warnings.warn(maxeval_warning.format(maxnfev_alias, thisfuncname()),
RuntimeWarning)
kws.pop(maxnfev_alias)
@@ -2378,8 +2384,8 @@ def _make_random_gen(seed):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
- raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
- ' instance' % seed)
+    raise ValueError(f'{seed!r} cannot be used to seed a numpy.random.RandomState'
+                     ' instance')
def _nan_policy(arr, nan_policy='raise', handle_inf=True):
@@ -2553,7 +2559,7 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None, iter_cb=None,
.. versionchanged:: 0.9.0
- Return value changed to :class:`MinimizerResult`.
+ Return value changed to :class:`MinimizerResult`.
Notes
diff --git a/lmfit/model.py b/lmfit/model.py
index b04ae70..07e335f 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -1,6 +1,5 @@
"""Implementation of the Model interface."""
-from collections import OrderedDict
from copy import deepcopy
from functools import wraps
import inspect
@@ -8,16 +7,21 @@ import json
import operator
import warnings
+from asteval import valid_symbol_name
import numpy as np
from scipy.special import erf
from scipy.stats import t
+import lmfit
+
from . import Minimizer, Parameter, Parameters, lineshapes
from .confidence import conf_interval
from .jsonutils import HAS_DILL, decode4js, encode4js
from .minimizer import MinimizerResult
from .printfuncs import ci_report, fit_report, fitreport_html_table
+tiny = 1.e-15
+
# Use pandas.isnull for aligning missing data if pandas is available.
# otherwise use numpy.isnan
try:
@@ -37,7 +41,7 @@ def _align(var, mask, data):
try:
- from matplotlib import pyplot as plt
+ import matplotlib # noqa: F401
_HAS_MATPLOTLIB = True
except Exception:
_HAS_MATPLOTLIB = False
@@ -70,7 +74,7 @@ def get_reducer(option):
"""
if option not in ['real', 'imag', 'abs', 'angle']:
- raise ValueError("Invalid option ('%s') for function 'propagate_err'." % option)
+ raise ValueError(f"Invalid option ('{option}') for function 'propagate_err'.")
def reducer(array):
"""Convert a complex array to a real array.
@@ -134,15 +138,15 @@ def propagate_err(z, dz, option):
so a value of :math:`\pi` is returned.
In the case where ``option='abs'`` and ``numpy.abs(z) == 0`` for any
- value of `z` the mangnitude uncertainty is approximated by
+ value of `z` the magnitude uncertainty is approximated by
``numpy.abs(dz)`` for that value.
"""
if option not in ['real', 'imag', 'abs', 'angle']:
- raise ValueError("Invalid option ('%s') for function 'propagate_err'." % option)
+ raise ValueError(f"Invalid option ('{option}') for function 'propagate_err'.")
- if not z.shape == dz.shape:
- raise ValueError("shape of z: %s != shape of dz: %s" % (z.shape, dz.shape))
+ if z.shape != dz.shape:
+ raise ValueError(f"shape of z: {z.shape} != shape of dz: {dz.shape}")
# Check the main vector for complex. Do nothing if real.
if any(np.iscomplex(z)):
@@ -262,7 +266,12 @@ class Model:
"""
self.func = func
+ if not isinstance(prefix, str):
+ prefix = ''
+ if len(prefix) > 0 and not valid_symbol_name(prefix):
+ raise ValueError(f"'{prefix}' is not a valid Model prefix")
self._prefix = prefix
+
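[A sketch of the new prefix validation: a prefix must be a valid symbol name, so an identifier-like string is accepted while one starting with a digit is rejected. Model names here are illustrative:

    from lmfit.models import GaussianModel

    GaussianModel(prefix='g1_')    # accepted: 'g1_' is a valid symbol name
    GaussianModel(prefix='2nd_')   # raises ValueError: not a valid Model prefix]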
self._param_root_names = param_names # will not include prefixes
self.independent_vars = independent_vars
self._func_allargs = []
@@ -270,8 +279,8 @@ class Model:
self.nan_policy = nan_policy
self.opts = kws
- self.param_hints = OrderedDict()
# the following has been changed from OrderedSet for the time being
+ self.param_hints = {}
self._param_names = []
self._parse_params()
if self.independent_vars is None:
@@ -284,13 +293,13 @@ class Model:
out = self._name
opts = []
if len(self._prefix) > 0:
- opts.append("prefix='%s'" % (self._prefix))
+ opts.append(f"prefix='{self._prefix}'")
if long:
for k, v in self.opts.items():
- opts.append("%s='%s'" % (k, v))
+ opts.append(f"{k}='{v}'")
if len(opts) > 0:
- out = "%s, %s" % (out, ', '.join(opts))
- return "Model(%s)" % out
+ out = f"{out}, {', '.join(opts)}"
+ return f"Model({out})"
def _get_state(self):
"""Save a Model for serialization.
@@ -449,7 +458,7 @@ class Model:
def __repr__(self):
"""Return representation of Model."""
- return "<lmfit.Model: %s>" % (self.name)
+ return f"<lmfit.Model: {self.name}>"
def copy(self, **kwargs):
"""DOES NOT WORK."""
@@ -459,6 +468,8 @@ class Model:
"""Build parameters from function arguments."""
if self.func is None:
return
+ kw_args = {}
+ keywords_ = None
# need to fetch the following from the function signature:
# pos_args: list of positional argument names
# kw_args: dict of keyword arguments with default values
@@ -466,15 +477,11 @@ class Model:
# 1. limited support for asteval functions as the model functions:
if hasattr(self.func, 'argnames') and hasattr(self.func, 'kwargs'):
pos_args = self.func.argnames[:]
- keywords_ = None
- kw_args = {}
for name, defval in self.func.kwargs:
kw_args[name] = defval
# 2. modern, best-practice approach: use inspect.signature
else:
pos_args = []
- kw_args = {}
- keywords_ = None
sig = inspect.signature(self.func)
for fnam, fpar in sig.parameters.items():
if fpar.kind == fpar.VAR_KEYWORD:
@@ -485,7 +492,7 @@ class Model:
else:
kw_args[fnam] = fpar.default
elif fpar.kind == fpar.VAR_POSITIONAL:
- raise ValueError("varargs '*%s' is not supported" % fnam)
+ raise ValueError(f"varargs '*{fnam}' is not supported")
# inspection done
self._func_haskeywords = keywords_ is not None
@@ -526,12 +533,9 @@ class Model:
new_opts[opt] = val
self.opts = new_opts
- names = []
if self._prefix is None:
self._prefix = ''
- for pname in self._param_root_names:
- names.append("%s%s" % (self._prefix, pname))
-
+ names = [f"{self._prefix}{pname}" for pname in self._param_root_names]
# check variables names for validity
# The implicit magic in fit() requires us to disallow some
fname = self.func.__name__
@@ -589,7 +593,7 @@ class Model:
name = name[npref:]
if name not in self.param_hints:
- self.param_hints[name] = OrderedDict()
+ self.param_hints[name] = {}
for key, val in kwargs.items():
if key in self._hint_names:
@@ -674,20 +678,20 @@ class Model:
par.value = kwargs[name]
params.add(par)
if verbose:
- print(' - Adding parameter "%s"' % name)
+ print(f' - Adding parameter "{name}"')
# next build parameters defined in param_hints
# note that composites may define their own additional
# convenience parameters here
for basename, hint in self.param_hints.items():
- name = "%s%s" % (self._prefix, basename)
+ name = f"{self._prefix}{basename}"
if name in params:
par = params[name]
else:
par = Parameter(name=name)
params.add(par)
if verbose:
- print(' - Adding parameter for hint "%s"' % name)
+ print(f' - Adding parameter for hint "{name}"')
par._delay_asteval = True
for item in self._hint_names:
if item in hint:
@@ -702,7 +706,7 @@ class Model:
p._delay_asteval = False
return params
- def guess(self, data, **kws):
+ def guess(self, data, x, **kws):
"""Guess starting values for the parameters of a Model.
This is not implemented for all models, but is available for many
@@ -711,7 +715,9 @@ class Model:
Parameters
----------
data : array_like
- Array of data to use to guess parameter values.
+ Array of data (i.e., y-values) to use to guess parameter values.
+ x : array_like
+ Array of values for the independent variable (i.e., x-values).
**kws : optional
Additional keyword arguments, passed to model function.
@@ -731,9 +737,12 @@ class Model:
`self.make_params()`, update starting values and return a
Parameters object.
+ .. versionchanged:: 1.0.3
+ Argument ``x`` is now explicitly required to estimate starting values.
+
"""
cname = self.__class__.__name__
- msg = 'guess() not implemented for %s' % cname
+ msg = f'guess() not implemented for {cname}'
raise NotImplementedError(msg)
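[Since `x` is now a required positional argument, callers that previously relied on `x=None` must pass it explicitly; a minimal sketch:

    import numpy as np
    from lmfit.models import GaussianModel

    x = np.linspace(-5, 5, 201)
    y = 3.0 * np.exp(-x**2 / 2)

    model = GaussianModel()
    params = model.guess(y, x=x)   # x can no longer be omitted]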
def _residual(self, params, data, weights, **kwargs):
@@ -793,13 +802,24 @@ class Model:
kwargs = {}
out = {}
out.update(self.opts)
+
+        # 1. fill in all parameter values
for name, par in params.items():
if strip:
name = self._strip_prefix(name)
if name in self._func_allargs or self._func_haskeywords:
out[name] = par.value
- # kwargs handled slightly differently -- may set param value too!
+ # 2. for each function argument, use 'prefix+varname' in params,
+ # avoiding possible name collisions with unprefixed params
+ if len(self._prefix) > 0:
+ for fullname in self._param_names:
+ if fullname in params:
+ name = self._strip_prefix(fullname)
+ if name in self._func_allargs or self._func_haskeywords:
+ out[name] = params[fullname].value
+
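[A sketch of what step 2 enables: with a prefix, the parameter `g1_amplitude` in `params` feeds the model function argument `amplitude` without being shadowed by any unprefixed `amplitude` parameter:

    import numpy as np
    from lmfit.models import GaussianModel

    gmod = GaussianModel(prefix='g1_')
    pars = gmod.make_params(amplitude=2.0, center=0.0, sigma=1.0)
    # parameter names are 'g1_amplitude', 'g1_center', 'g1_sigma';
    # make_funcargs maps them back onto gaussian(x, amplitude, ...) arguments
    y = gmod.eval(pars, x=np.linspace(-3, 3, 61))]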
+ # 3. kwargs handled slightly differently -- may set param value too!
for name, val in kwargs.items():
if strip:
name = self._strip_prefix(name)
@@ -813,7 +833,7 @@ class Model:
"""Generate **all** function args for all functions."""
args = {}
for key, val in self.make_funcargs(params, kwargs).items():
- args["%s%s" % (self._prefix, key)] = val
+ args[f"{self._prefix}{key}"] = val
return args
def eval(self, params=None, **kwargs):
@@ -828,7 +848,7 @@ class Model:
Returns
-------
- numpy.ndarray
+ numpy.ndarray, float, int or complex
Value of model given the parameters and other arguments.
Notes
@@ -842,6 +862,11 @@ class Model:
all the independent variables** will need to be passed in using
keyword arguments.
+ 3. The return type depends on the model function. For many of the
+       built-in models it is a `numpy.ndarray`, with the exception of
+ `ConstantModel` and `ComplexConstantModel`, which return a `float`/`int`
+ or `complex` value.
+
"""
return self.func(**self.make_funcargs(params, kwargs))
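[A sketch illustrating the return-type note above:

    import numpy as np
    from lmfit.models import ConstantModel, GaussianModel

    x = np.linspace(0, 10, 11)

    gmod = GaussianModel()
    gpars = gmod.make_params(amplitude=1, center=5, sigma=1)
    print(type(gmod.eval(gpars, x=x)))               # numpy.ndarray, same size as x

    cmod = ConstantModel()
    print(type(cmod.eval(cmod.make_params(c=3.0), x=x)))   # plain float, not an array]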
@@ -862,7 +887,7 @@ class Model:
Returns
-------
- OrderedDict
+ dict
Keys are prefixes for component model, values are value of
each component.
@@ -884,8 +909,9 @@ class Model:
params : Parameters, optional
Parameters to use in fit (default is None).
weights : array_like, optional
- Weights to use for the calculation of the fit residual
- (default is None). Must have the same size as `data`.
+ Weights to use for the calculation of the fit residual [i.e.,
+ `weights*(data-fit)`]. Default is None; must have the same size as
+ `data`.
method : str, optional
Name of fitting method to use (default is `'leastsq'`).
iter_cb : callable, optional
@@ -965,34 +991,24 @@ class Model:
# All remaining kwargs should correspond to independent variables.
for name in kwargs:
if name not in self.independent_vars:
- warnings.warn("The keyword argument %s does not " % name +
+ warnings.warn(f"The keyword argument {name} does not " +
"match any arguments of the model function. " +
"It will be ignored.", UserWarning)
# If any parameter is not initialized raise a more helpful error.
- missing_param = any([p not in params.keys()
- for p in self.param_names])
- blank_param = any([(p.value is None and p.expr is None)
- for p in params.values()])
+ missing_param = any(p not in params.keys() for p in self.param_names)
+ blank_param = any((p.value is None and p.expr is None)
+ for p in params.values())
if missing_param or blank_param:
msg = ('Assign each parameter an initial value by passing '
'Parameters or keyword arguments to fit.\n')
missing = [p for p in self.param_names if p not in params.keys()]
blank = [name for name, p in params.items()
if p.value is None and p.expr is None]
- msg += 'Missing parameters: %s\n' % str(missing)
- msg += 'Non initialized parameters: %s' % str(blank)
+ msg += f'Missing parameters: {str(missing)}\n'
+ msg += f'Non initialized parameters: {str(blank)}'
raise ValueError(msg)
- # Do not alter anything that implements the array interface (np.array, pd.Series)
- # but convert other iterables (e.g., Python lists) to NumPy arrays.
- if not hasattr(data, '__array__'):
- data = np.asfarray(data)
- for var in self.independent_vars:
- var_data = kwargs[var]
- if isinstance(var_data, (list, tuple)):
- kwargs[var] = np.asfarray(var_data)
-
# Handle null/missing values.
if nan_policy is not None:
self.nan_policy = nan_policy
@@ -1007,12 +1023,27 @@ class Model:
# If independent_vars and data are alignable (pandas), align them,
# and apply the mask from above if there is one.
-
for var in self.independent_vars:
if not np.isscalar(kwargs[var]):
- # print("Model fit align ind dep ", var, mask.sum())
kwargs[var] = _align(kwargs[var], mask, data)
+ # Make sure `dtype` for data is always `float64` or `complex128`
+ if np.isrealobj(data):
+ data = np.asfarray(data)
+ elif np.iscomplexobj(data):
+ data = np.asarray(data, dtype='complex128')
+
+ # Coerce `dtype` for independent variable(s) to `float64` or
+ # `complex128` when the variable has one of the following types: list,
+ # tuple, numpy.ndarray, or pandas.Series
+ for var in self.independent_vars:
+ var_data = kwargs[var]
+ if isinstance(var_data, (list, tuple, np.ndarray, Series)):
+ if np.isrealobj(var_data):
+ kwargs[var] = np.asfarray(var_data)
+ elif np.iscomplexobj(var_data):
+ kwargs[var] = np.asarray(var_data, dtype='complex128')
+
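[A standalone sketch of the coercion rule applied above to `data` and to list/tuple/ndarray/Series independent variables:

    import numpy as np

    def coerce_dtype(arr):
        # real input -> float64, complex input -> complex128
        if np.isrealobj(arr):
            return np.asfarray(arr)
        return np.asarray(arr, dtype='complex128')

    print(coerce_dtype([1, 2, 3]).dtype)      # float64
    print(coerce_dtype([1 + 2j, 3]).dtype)    # complex128]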
if fit_kws is None:
fit_kws = {}
@@ -1052,10 +1083,6 @@ class CompositeModel(Model):
"""
- _names_collide = ("\nTwo models have parameters named '{clash}'. "
- "Use distinct names.")
- _bad_arg = "CompositeModel: argument {arg} is not a Model"
- _bad_op = "CompositeModel: operator {op} is not callable"
_known_ops = {operator.add: '+', operator.sub: '-',
operator.mul: '*', operator.truediv: '/'}
@@ -1075,15 +1102,15 @@ class CompositeModel(Model):
Notes
-----
- The two models must use the same independent variable.
+ The two models can use different independent variables.
"""
if not isinstance(left, Model):
- raise ValueError(self._bad_arg.format(arg=left))
+ raise ValueError(f'CompositeModel: argument {left} is not a Model')
if not isinstance(right, Model):
- raise ValueError(self._bad_arg.format(arg=right))
+ raise ValueError(f'CompositeModel: argument {right} is not a Model')
if not callable(op):
- raise ValueError(self._bad_op.format(op=op))
+ raise ValueError(f'CompositeModel: operator {op} is not callable')
self.left = left
self.right = right
@@ -1093,12 +1120,15 @@ class CompositeModel(Model):
if len(name_collisions) > 0:
msg = ''
for collision in name_collisions:
- msg += self._names_collide.format(clash=collision)
+ msg += (f"\nTwo models have parameters named '{collision}'; "
+ "use distinct names.")
raise NameError(msg)
- # we assume that all the sub-models have the same independent vars
+        # the unique ``independent_vars`` of the left and right model are
+        # combined into the ``independent_vars`` of the ``CompositeModel``
if 'independent_vars' not in kws:
- kws['independent_vars'] = self.left.independent_vars
+ ivars = self.left.independent_vars + self.right.independent_vars
+ kws['independent_vars'] = list(np.unique(ivars))
if 'nan_policy' not in kws:
kws['nan_policy'] = self.left.nan_policy
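[With this change a composite can combine operands with different independent variables; a minimal sketch with illustrative function names:

    import numpy as np
    from lmfit import Model

    def decay(t, tau=1.0):
        return np.exp(-t / tau)

    def baseline(x, c=0.0):
        return c + 0.0 * x

    mod = Model(decay) + Model(baseline)
    print(mod.independent_vars)   # ['t', 'x'], the union of both operands]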
@@ -1109,7 +1139,7 @@ class CompositeModel(Model):
for side in (left, right):
prefix = side.prefix
for basename, hint in side.param_hints.items():
- self.param_hints["%s%s" % (prefix, basename)] = hint
+ self.param_hints[f"{prefix}{basename}"] = hint
def _parse_params(self):
self._func_haskeywords = (self.left._func_haskeywords or
@@ -1122,9 +1152,9 @@ class CompositeModel(Model):
self.opts.update(self.left.opts)
def _reprstring(self, long=False):
- return "(%s %s %s)" % (self.left._reprstring(long=long),
- self._known_ops.get(self.op, self.op),
- self.right._reprstring(long=long))
+ return (f"({self.left._reprstring(long=long)} "
+ f"{self._known_ops.get(self.op, self.op)} "
+ f"{self.right._reprstring(long=long)})")
def eval(self, params=None, **kwargs):
"""Evaluate model function for composite model."""
@@ -1132,8 +1162,8 @@ class CompositeModel(Model):
self.right.eval(params=params, **kwargs))
def eval_components(self, **kwargs):
- """Return OrderedDict of name, results for each component."""
- out = OrderedDict(self.left.eval_components(**kwargs))
+ """Return dictionary of name, results for each component."""
+ out = dict(self.left.eval_components(**kwargs))
out.update(self.right.eval_components(**kwargs))
return out
@@ -1387,6 +1417,11 @@ class ModelResult(Minimizer):
except AttributeError:
pass
+ if self.data is not None and len(self.data) > 1:
+ sstot = ((self.data - self.data.mean())**2).sum()
+ if isinstance(self.residual, np.ndarray) and len(self.residual) > 1:
+ self.rsquared = 1.0 - (self.residual**2).sum()/max(tiny, sstot)
+
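[The new `rsquared` attribute is the ordinary coefficient of determination; a sketch of checking it by hand after a fit (`result`, `model`, `x`, and `y` follow the usual fitting pattern and are illustrative):

    # given: result = model.fit(y, params, x=x)
    sstot = ((y - y.mean())**2).sum()
    ssres = (result.residual**2).sum()
    print(1.0 - ssres / sstot)    # should match result.rsquared
    # (for weighted fits, result.residual already includes the weights)]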
self.init_values = self.model._make_all_args(self.init_params)
self.best_values = self.model._make_all_args(_ret.params)
self.best_fit = self.model.eval(params=_ret.params, **self.userkws)
@@ -1403,8 +1438,8 @@ class ModelResult(Minimizer):
Returns
-------
- numpy.ndarray
- Array for evaluated model.
+ numpy.ndarray, float, int, or complex
+ Array or value for the evaluated model.
"""
userkws = self.userkws.copy()
@@ -1425,7 +1460,7 @@ class ModelResult(Minimizer):
Returns
-------
- OrderedDict
+ dict
Keys are prefixes of component models, and values are the
estimated model value for each component of the model.
@@ -1468,6 +1503,10 @@ class ModelResult(Minimizer):
< 1, it is interpreted as the probability itself. That is,
``sigma=1`` and ``sigma=0.6827`` will give the same results,
within precision errors.
+        3. Also sets the attributes `dely`, the uncertainty of the model
+        (the same array returned by this method), and `dely_comps`, a
+        dictionary of the uncertainty for each component of the model.
+
Examples
--------
@@ -1489,10 +1528,16 @@ class ModelResult(Minimizer):
# ensure fjac and df2 are correct size if independent var updated by kwargs
ndata = self.model.eval(params, **userkws).size
covar = self.covar
- fjac = np.zeros((nvarys, ndata))
- df2 = np.zeros(ndata)
- if any([p.stderr is None for p in params.values()]):
- return df2
+ if any(p.stderr is None for p in params.values()):
+ return np.zeros(ndata)
+
+        fjac = {'0': np.zeros((nvarys, ndata))}  # '0' signifies the full model; deliberately not a valid prefix
+ df2 = {'0': np.zeros(ndata)}
+
+ for comp in self.components:
+ label = comp.prefix if len(comp.prefix) > 1 else comp._name
+ fjac[label] = np.zeros((nvarys, ndata))
+ df2[label] = np.zeros(ndata)
# find derivative by hand!
pars = params.copy()
@@ -1500,25 +1545,35 @@ class ModelResult(Minimizer):
pname = self.var_names[i]
val0 = pars[pname].value
dval = pars[pname].stderr/3.0
-
pars[pname].value = val0 + dval
- res1 = self.model.eval(pars, **userkws)
+ res1 = {'0': self.model.eval(pars, **userkws)}
+ res1.update(self.model.eval_components(params=pars, **userkws))
pars[pname].value = val0 - dval
- res2 = self.model.eval(pars, **userkws)
+ res2 = {'0': self.model.eval(pars, **userkws)}
+ res2.update(self.model.eval_components(params=pars, **userkws))
pars[pname].value = val0
- fjac[i] = (res1 - res2) / (2*dval)
+ for key in fjac:
+ fjac[key][i] = (res1[key] - res2[key]) / (2*dval)
for i in range(nvarys):
for j in range(nvarys):
- df2 += fjac[i]*fjac[j]*covar[i, j]
+ for key in fjac:
+ df2[key] += fjac[key][i] * fjac[key][j] * covar[i, j]
if sigma < 1.0:
prob = sigma
else:
prob = erf(sigma/np.sqrt(2))
- return np.sqrt(df2) * t.ppf((prob+1)/2.0, self.ndata-nvarys)
+
+ scale = t.ppf((prob+1)/2.0, self.ndata-nvarys)
+ self.dely = scale * np.sqrt(df2.pop('0'))
+
+ self.dely_comps = {}
+ for key in df2:
+ self.dely_comps[key] = scale * np.sqrt(df2[key])
+ return self.dely
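[A usage sketch for the reworked method and the new per-component attribute, assuming `result` is a ModelResult from fitting a composite model:

    dely = result.eval_uncertainty(sigma=2)     # 2-sigma uncertainty band
    assert dely is result.dely                  # also stored on the result
    for label, band in result.dely_comps.items():
        print(label, band.mean())               # one band per component]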
def conf_interval(self, **kwargs):
"""Calculate the confidence intervals for the variable parameters.
@@ -1530,8 +1585,7 @@ class ModelResult(Minimizer):
recalculating them.
"""
- if self.ci_out is None:
- self.ci_out = conf_interval(self, self, **kwargs)
+ self.ci_out = conf_interval(self, self, **kwargs)
return self.ci_out
def ci_report(self, with_offset=True, ndigits=5, **kwargs):
@@ -1590,14 +1644,14 @@ class ModelResult(Minimizer):
show_correl=show_correl,
min_correl=min_correl, sort_pars=sort_pars)
modname = self.model._reprstring(long=True)
- return '[[Model]]\n %s\n%s' % (modname, report)
+ return f'[[Model]]\n {modname}\n{report}'
def _repr_html_(self, show_correl=True, min_correl=0.1):
"""Return a HTML representation of parameters data."""
report = fitreport_html_table(self, show_correl=show_correl,
min_correl=min_correl)
modname = self.model._reprstring(long=True)
- return "<h2> Model</h2> %s %s" % (modname, report)
+ return f"<h2> Model</h2> {modname} {report}"
def dumps(self, **kws):
"""Represent ModelResult as a JSON string.
@@ -1625,12 +1679,13 @@ class ModelResult(Minimizer):
for key in pasteval.user_defined_symbols()}
for attr in ('aborted', 'aic', 'best_values', 'bic', 'chisqr',
- 'ci_out', 'col_deriv', 'covar', 'errorbars',
- 'flatchain', 'ier', 'init_values', 'lmdif_message',
- 'message', 'method', 'nan_policy', 'ndata', 'nfev',
- 'nfree', 'nvarys', 'redchi', 'scale_covar', 'calc_covar',
- 'success', 'userargs', 'userkws', 'values', 'var_names',
- 'weights', 'user_options'):
+ 'ci_out', 'col_deriv', 'covar', 'errorbars', 'flatchain',
+ 'ier', 'init_values', 'lmdif_message', 'message',
+ 'method', 'nan_policy', 'ndata', 'nfev', 'nfree',
+ 'nvarys', 'redchi', 'rsquared', 'scale_covar',
+ 'calc_covar', 'success', 'userargs', 'userkws', 'values',
+ 'var_names', 'weights', 'user_options'):
+
try:
val = getattr(self, attr)
except AttributeError:
@@ -1723,7 +1778,7 @@ class ModelResult(Minimizer):
'errorbars', 'fjac', 'flatchain', 'ier', 'init_fit',
'init_values', 'kws', 'lmdif_message', 'message',
'method', 'nan_policy', 'ndata', 'nfev', 'nfree',
- 'nvarys', 'redchi', 'residual', 'scale_covar',
+ 'nvarys', 'redchi', 'residual', 'rsquared', 'scale_covar',
'calc_covar', 'success', 'userargs', 'userkws',
'var_names', 'weights', 'user_options'):
setattr(self, attr, decode4js(modres.get(attr, None)))
@@ -1772,7 +1827,7 @@ class ModelResult(Minimizer):
def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
xlabel=None, ylabel=None, yerr=None, numpoints=None,
data_kws=None, fit_kws=None, init_kws=None, ax_kws=None,
- show_init=False, parse_complex='abs'):
+ show_init=False, parse_complex='abs', title=None):
"""Plot the fit results using matplotlib, if available.
The plot will include the data points, the initial fit curve
@@ -1817,6 +1872,8 @@ class ModelResult(Minimizer):
How to reduce complex data for plotting. Options are one of:
`'abs'` (default), `'real'`, `'imag'`, or `'angle'`, which
correspond to the NumPy functions with the same name.
+ title : str, optional
+            String to use as the figure title.
Returns
-------
@@ -1864,7 +1921,7 @@ class ModelResult(Minimizer):
return False
if not isinstance(ax, plt.Axes):
- ax = plt.gca(**ax_kws)
+ ax = plt.axes(**ax_kws)
x_array = self.userkws[independent_var]
@@ -1875,11 +1932,15 @@ class ModelResult(Minimizer):
x_array_dense = x_array
if show_init:
+ y_eval_init = self.model.eval(self.init_params,
+ **{independent_var: x_array_dense})
+ if isinstance(self.model, (lmfit.models.ConstantModel,
+ lmfit.models.ComplexConstantModel)):
+ y_eval_init *= np.ones(x_array_dense.size)
+
ax.plot(
- x_array_dense,
- reduce_complex(self.model.eval(
- self.init_params, **{independent_var: x_array_dense})),
- initfmt, label='init', **init_kws)
+ x_array_dense, reduce_complex(y_eval_init), initfmt,
+ label='initial fit', **init_kws)
if yerr is None and self.weights is not None:
yerr = 1.0/self.weights
@@ -1892,13 +1953,18 @@ class ModelResult(Minimizer):
ax.plot(x_array, reduce_complex(self.data),
datafmt, label='data', **data_kws)
- ax.plot(
- x_array_dense,
- reduce_complex(self.model.eval(self.params,
- **{independent_var: x_array_dense})),
- fitfmt, label='best-fit', **fit_kws)
+ y_eval = self.model.eval(self.params, **{independent_var: x_array_dense})
+ if isinstance(self.model, (lmfit.models.ConstantModel,
+ lmfit.models.ComplexConstantModel)):
+ y_eval *= np.ones(x_array_dense.size)
- ax.set_title(self.model.name)
+ ax.plot(x_array_dense, reduce_complex(y_eval), fitfmt, label='best fit',
+ **fit_kws)
+
+ if title:
+ ax.set_title(title)
+ elif ax.get_title() == '':
+ ax.set_title(self.model.name)
if xlabel is None:
ax.set_xlabel(independent_var)
else:
@@ -1907,12 +1973,13 @@ class ModelResult(Minimizer):
ax.set_ylabel('y')
else:
ax.set_ylabel(ylabel)
- ax.legend(loc='best')
+ ax.legend()
return ax
@_ensureMatplotlib
def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,
- fit_kws=None, ax_kws=None, parse_complex='abs'):
+ fit_kws=None, ax_kws=None, parse_complex='abs',
+ title=None):
"""Plot the fit residuals using matplotlib, if available.
If `yerr` is supplied or if the model included weights, errorbars
@@ -1937,6 +2004,8 @@ class ModelResult(Minimizer):
How to reduce complex data for plotting. Options are one of:
`'abs'` (default), `'real'`, `'imag'`, or `'angle'`, which
correspond to the NumPy functions with the same name.
+ title : str, optional
+            String to use as the figure title.
Returns
-------
@@ -1982,32 +2051,40 @@ class ModelResult(Minimizer):
return False
if not isinstance(ax, plt.Axes):
- ax = plt.gca(**ax_kws)
+ ax = plt.axes(**ax_kws)
x_array = self.userkws[independent_var]
- ax.axhline(0, **fit_kws)
+ ax.axhline(0, **fit_kws, color='k')
+
+ y_eval = self.model.eval(self.params, **{independent_var: x_array})
+ if isinstance(self.model, (lmfit.models.ConstantModel,
+ lmfit.models.ComplexConstantModel)):
+ y_eval *= np.ones(x_array.size)
if yerr is None and self.weights is not None:
yerr = 1.0/self.weights
+
+ residuals = reduce_complex(self.eval()) - reduce_complex(self.data)
if yerr is not None:
- ax.errorbar(x_array, reduce_complex(self.eval()) - reduce_complex(self.data),
+ ax.errorbar(x_array, residuals,
yerr=propagate_err(self.data, yerr, parse_complex),
- fmt=datafmt, label='residuals', **data_kws)
+ fmt=datafmt, **data_kws)
else:
- ax.plot(x_array, reduce_complex(self.eval()) - reduce_complex(self.data), datafmt,
- label='residuals', **data_kws)
+ ax.plot(x_array, residuals, datafmt, **data_kws)
- ax.set_title(self.model.name)
+ if title:
+ ax.set_title(title)
+ elif ax.get_title() == '':
+ ax.set_title(self.model.name)
ax.set_ylabel('residuals')
- ax.legend(loc='best')
return ax
@_ensureMatplotlib
def plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None,
ylabel=None, yerr=None, numpoints=None, fig=None, data_kws=None,
fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None,
- fig_kws=None, show_init=False, parse_complex='abs'):
+ fig_kws=None, show_init=False, parse_complex='abs', title=None):
"""Plot the fit results and residuals using matplotlib.
The method will produce a matplotlib figure (if package available)
@@ -2057,11 +2134,12 @@ class ModelResult(Minimizer):
How to reduce complex data for plotting. Options are one of:
`'abs'` (default), `'real'`, `'imag'`, or `'angle'`, which
correspond to the NumPy functions with the same name.
+ title : str, optional
+            String to use as the figure title.
Returns
-------
- tuple
- A tuple with matplotlib's Figure and GridSpec objects.
+ matplotlib.figure.Figure
See Also
--------
@@ -2085,6 +2163,7 @@ class ModelResult(Minimizer):
called, otherwise `fig_kws` is ignored.
"""
+ from matplotlib import pyplot as plt
if data_kws is None:
data_kws = {}
if fit_kws is None:
@@ -2118,10 +2197,12 @@ class ModelResult(Minimizer):
initfmt=initfmt, xlabel=xlabel, ylabel=ylabel,
numpoints=numpoints, data_kws=data_kws,
fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws,
- show_init=show_init, parse_complex=parse_complex)
+ show_init=show_init, parse_complex=parse_complex,
+ title=title)
self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
data_kws=data_kws, fit_kws=fit_kws,
- ax_kws=ax_res_kws, parse_complex=parse_complex)
+ ax_kws=ax_res_kws, parse_complex=parse_complex,
+ title=title)
plt.setp(ax_res.get_xticklabels(), visible=False)
ax_fit.set_title('')
- return fig, gs
+ return fig
diff --git a/lmfit/models.py b/lmfit/models.py
index d3ea944..090c2dc 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -1,16 +1,16 @@
"""Module containing built-in fitting models."""
import time
-import warnings
from asteval import Interpreter, get_ast_names
import numpy as np
+from scipy.interpolate import splev, splrep
from . import lineshapes
from .lineshapes import (breit_wigner, damped_oscillator, dho, doniach,
expgaussian, exponential, gaussian, gaussian2d,
linear, lognormal, lorentzian, moffat, parabolic,
- pearson7, powerlaw, pvoigt, rectangle, sine,
+ pearson4, pearson7, powerlaw, pvoigt, rectangle, sine,
skewed_gaussian, skewed_voigt, split_lorentzian, step,
students_t, thermal_distribution, tiny, voigt)
from .model import Model
@@ -42,9 +42,6 @@ def height_expr(model):
def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
"""Estimate starting values from 1D peak data and create Parameters."""
- if x is None:
- return 1.0, 0.0, 1.0
-
sort_increasing = np.argsort(x)
x = x[sort_increasing]
y = y[sort_increasing]
@@ -68,18 +65,15 @@ def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
sig = sig*sigscale
pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
- pars['%ssigma' % model.prefix].set(min=0.0)
+ pars[f'{model.prefix}sigma'].set(min=0.0)
return pars
def guess_from_peak2d(model, z, x, y, negative):
"""Estimate starting values from 2D peak data and create Parameters."""
- if x is None or y is None:
- return 1.0, 0.0, 0.0, 1.0, 1.0
-
maxx, minx = max(x), min(x)
maxy, miny = max(y), min(y)
- maxz, minz = max(z), min(x)
+ maxz, minz = max(z), min(z)
centerx = x[np.argmax(z)]
centery = y[np.argmax(z)]
@@ -96,15 +90,15 @@ def guess_from_peak2d(model, z, x, y, negative):
pars = model.make_params(amplitude=amp, centerx=centerx, centery=centery,
sigmax=sigmax, sigmay=sigmay)
- pars['%ssigmax' % model.prefix].set(min=0.0)
- pars['%ssigmay' % model.prefix].set(min=0.0)
+ pars[f'{model.prefix}sigmax'].set(min=0.0)
+ pars[f'{model.prefix}sigmay'].set(min=0.0)
return pars
def update_param_vals(pars, prefix, **kwargs):
"""Update parameter values with keyword arguments."""
for key, val in kwargs.items():
- pname = "%s%s" % (prefix, key)
+ pname = f"{prefix}{key}"
if pname in pars:
pars[pname].value = val
pars.update_constraints()
@@ -141,7 +135,9 @@ COMMON_GUESS_DOC = """Guess starting values for the parameters of a model.
Parameters
----------
data : array_like
- Array of data to use to guess parameter values.
+ Array of data (i.e., y-values) to use to guess parameter values.
+ x : array_like
+ Array of values for the independent variable (i.e., x-values).
**kws : optional
Additional keyword arguments, passed to model function.
@@ -150,9 +146,10 @@ COMMON_GUESS_DOC = """Guess starting values for the parameters of a model.
params : Parameters
Initial, guessed values for the parameters of a Model.
- """
+ .. versionchanged:: 1.0.3
+ Argument ``x`` is now explicitly required to estimate starting values.
-COMMON_DOC = COMMON_INIT_DOC
+ """
class ConstantModel(Model):
@@ -174,11 +171,11 @@ class ConstantModel(Model):
return c
super().__init__(constant, **kwargs)
- def guess(self, data, **kwargs):
+ def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
pars = self.make_params()
- pars['%sc' % self.prefix].set(value=data.mean())
+ pars[f'{self.prefix}c'].set(value=data.mean())
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -204,11 +201,11 @@ class ComplexConstantModel(Model):
return re + 1j*im
super().__init__(constant, **kwargs)
- def guess(self, data, **kwargs):
+ def guess(self, data, x=None, **kwargs):
"""Estimate initial model parameter values from data."""
pars = self.make_params()
- pars['%sre' % self.prefix].set(value=data.real.mean())
- pars['%sim' % self.prefix].set(value=data.imag.mean())
+ pars[f'{self.prefix}re'].set(value=data.real.mean())
+ pars[f'{self.prefix}im'].set(value=data.imag.mean())
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -234,11 +231,9 @@ class LinearModel(Model):
'independent_vars': independent_vars})
super().__init__(linear, **kwargs)
- def guess(self, data, x=None, **kwargs):
+ def guess(self, data, x, **kwargs):
"""Estimate initial model parameter values from data."""
- sval, oval = 0., 0.
- if x is not None:
- sval, oval = np.polyfit(x, data, 1)
+ sval, oval = np.polyfit(x, data, 1)
pars = self.make_params(intercept=oval, slope=sval)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -263,11 +258,9 @@ class QuadraticModel(Model):
'independent_vars': independent_vars})
super().__init__(parabolic, **kwargs)
- def guess(self, data, x=None, **kwargs):
+ def guess(self, data, x, **kwargs):
"""Estimate initial model parameter values from data."""
- a, b, c = 0., 0., 0.
- if x is not None:
- a, b, c = np.polyfit(x, data, 2)
+ a, b, c = np.polyfit(x, data, 2)
pars = self.make_params(a=a, b=b, c=c)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -292,7 +285,7 @@ class PolynomialModel(Model):
"""
MAX_DEGREE = 7
- DEGREE_ERR = "degree must be an integer less than %d."
+ DEGREE_ERR = f"degree must be an integer equal to or smaller than {MAX_DEGREE}."
valid_forms = (0, 1, 2, 3, 4, 5, 6, 7)
@@ -303,10 +296,10 @@ class PolynomialModel(Model):
if 'form' in kwargs:
degree = int(kwargs.pop('form'))
if not isinstance(degree, int) or degree > self.MAX_DEGREE:
- raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
+ raise TypeError(self.DEGREE_ERR)
self.poly_degree = degree
- pnames = ['c%i' % (i) for i in range(degree + 1)]
+ pnames = [f'c{i}' for i in range(degree + 1)]
kwargs['param_names'] = pnames
def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
@@ -314,19 +307,134 @@ class PolynomialModel(Model):
super().__init__(polynomial, **kwargs)
- def guess(self, data, x=None, **kwargs):
+ def guess(self, data, x, **kwargs):
"""Estimate initial model parameter values from data."""
pars = self.make_params()
- if x is not None:
- out = np.polyfit(x, data, self.poly_degree)
- for i, coef in enumerate(out[::-1]):
- pars['%sc%i' % (self.prefix, i)].set(value=coef)
+ out = np.polyfit(x, data, self.poly_degree)
+ for i, coef in enumerate(out[::-1]):
+ pars[f'{self.prefix}c{i}'].set(value=coef)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
guess.__doc__ = COMMON_GUESS_DOC
+class SplineModel(Model):
+ r"""A 1-D cubic spline model with a variable number of `knots` and
+    parameters `s0`, `s1`, ..., `s(N-1)` for `N` knots.
+
+ The user must supply a list or ndarray `xknots`: the `x` values for the
+ 'knots' which control the flexibility of the spline function.
+
+    The parameters `s0`, ..., `s(N-1)` (where `N` is the size of `xknots`) will
+ correspond to the `y` values for the spline knots at the `x=xknots`
+ positions where the highest order derivative will be discontinuous.
+ The resulting curve will not necessarily pass through these knot
+ points, but for finely-spaced knots, the spline parameter values will
+ be very close to the `y` values of the resulting curve.
+
+ The maximum number of knots supported is 300.
+
+ Using the `guess()` method to initialize parameter values is highly
+ recommended.
+
+ Parameters
+ ----------
+ xknots : :obj:`list` of floats or :obj:`ndarray`, required
+ x-values of knots for spline.
+ independent_vars : :obj:`list` of :obj:`str`, optional
+ Arguments to the model function that are independent variables
+        (default is ['x']).
+ prefix : str, optional
+ String to prepend to parameter names, needed to add two Models
+ that have parameter names in common.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ How to handle NaN and missing values in data. See Notes below.
+
+ Notes
+ -----
+ 1. There must be at least 4 knot points, and not more than 300.
+
+ 2. `nan_policy` sets what to do when a NaN or missing value is seen in
+ the data. Should be one of:
+
+ - `'raise'` : raise a `ValueError` (default)
+ - `'propagate'` : do nothing
+ - `'omit'` : drop missing data
+
+ """
+
+ MAX_KNOTS = 300
+ NKNOTS_MAX_ERR = f"SplineModel supports up to {MAX_KNOTS:d} knots"
+    NKNOTS_NDARRAY_ERR = "SplineModel xknots must be 1-D array-like"
+ DIM_ERR = "SplineModel supports only 1-d spline interpolation"
+
+ def __init__(self, xknots, independent_vars=['x'], prefix='',
+ nan_policy='raise', **kwargs):
+ """ """
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+
+ if isinstance(xknots, (list, tuple)):
+ xknots = np.asarray(xknots, dtype=np.float64)
+ try:
+ xknots = xknots.flatten()
+ except Exception:
+ raise TypeError(self.NKNOTS_NDARRAY_ERR)
+
+ if len(xknots) > self.MAX_KNOTS:
+ raise TypeError(self.NKNOTS_MAX_ERR)
+
+ if len(independent_vars) > 1:
+ raise TypeError(self.DIM_ERR)
+
+ self.xknots = xknots
+ self.nknots = len(xknots)
+ self.order = 3 # cubic splines only
+
+ def spline_model(x, s0=1, s1=1, s2=1, s3=1, s4=1, s5=1):
+ "used only for the initial parsing"
+ return x
+
+ super().__init__(spline_model, **kwargs)
+
+ if 'x' not in independent_vars:
+ self.independent_vars.pop('x')
+
+ self._param_root_names = [f's{d}' for d in range(self.nknots)]
+ self._param_names = [f'{prefix}{s}' for s in self._param_root_names]
+
+ self.knots, _c, _k = splrep(self.xknots, np.ones(self.nknots),
+ k=self.order)
+
+ def eval(self, params=None, **kwargs):
+ """note that we override `eval()` here for a variadic function,
+ as we will not know the number of spline parameters until run time
+ """
+ self.make_funcargs(params, kwargs)
+
+ coefs = [params[f'{self.prefix}s{d}'].value for d in range(self.nknots)]
+ coefs.extend([coefs[-1]]*(self.order+1))
+ coefs = np.array(coefs)
+ x = kwargs[self.independent_vars[0]]
+ return splev(x, [self.knots, coefs, self.order])
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = self.make_params()
+
+ for i, xk in enumerate(self.xknots):
+ ix = np.abs(x-xk).argmin()
+ this = data[ix]
+ pone = data[ix+1] if ix < len(x)-2 else this
+ mone = data[ix-1] if ix > 0 else this
+ pars[f'{self.prefix}s{i}'].value = (4.*this + pone + mone)/6.
+
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
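[A minimal usage sketch for the new SplineModel; the synthetic data and knot placement are illustrative:

    import numpy as np
    from lmfit.models import SplineModel

    x = np.linspace(0, 10, 201)
    y = np.sin(x) + np.random.normal(scale=0.05, size=x.size)

    knots = np.linspace(0, 10, 11)       # between 4 and 300 knots
    model = SplineModel(xknots=knots)
    params = model.guess(y, x=x)         # recommended for starting values
    result = model.fit(y, params, x=x)]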
class SineModel(Model):
r"""A model based on a sinusoidal lineshape.
@@ -414,7 +522,7 @@ class GaussianModel(Model):
self.set_param_hint('fwhm', expr=fwhm_expr(self))
self.set_param_hint('height', expr=height_expr(self))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -447,7 +555,7 @@ class Gaussian2dModel(Model):
"""
fwhm_factor = 2*np.sqrt(2*np.log(2))
- height_factor = 1./2*np.pi
+ height_factor = 1./(2*np.pi)
def __init__(self, independent_vars=['x', 'y'], prefix='', nan_policy='raise',
**kwargs):
@@ -467,7 +575,7 @@ class Gaussian2dModel(Model):
expr = fmt.format(tiny=tiny, factor=self.height_factor, prefix=self.prefix)
self.set_param_hint('height', expr=expr)
- def guess(self, data, x=None, y=None, negative=False, **kwargs):
+ def guess(self, data, x, y, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak2d(self, data, x, y, negative)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -512,7 +620,7 @@ class LorentzianModel(Model):
self.set_param_hint('fwhm', expr=fwhm_expr(self))
self.set_param_hint('height', expr=height_expr(self))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -568,11 +676,11 @@ class SplitLorentzianModel(Model):
self.set_param_hint('fwhm', expr=fwhm_expr.format(pre=self.prefix))
self.set_param_hint('height', expr=height_expr.format(np.pi, tiny, pre=self.prefix))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
- sigma = pars['%ssigma' % self.prefix]
- pars['%ssigma_r' % self.prefix].set(value=sigma.value, min=sigma.min, max=sigma.max)
+ sigma = pars[f'{self.prefix}sigma']
+ pars[f'{self.prefix}sigma_r'].set(value=sigma.value, min=sigma.min, max=sigma.max)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -623,7 +731,7 @@ class VoigtModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
- self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
+ self.set_param_hint('gamma', expr=f'{self.prefix}sigma')
fexpr = ("1.0692*{pre:s}gamma+" +
"sqrt(0.8664*{pre:s}gamma**2+5.545083*{pre:s}sigma**2)")
@@ -633,7 +741,7 @@ class VoigtModel(Model):
self.set_param_hint('fwhm', expr=fexpr.format(pre=self.prefix))
self.set_param_hint('height', expr=hexpr.format(tiny, pre=self.prefix))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative,
ampscale=1.5, sigscale=0.65)
@@ -689,10 +797,10 @@ class PseudoVoigtModel(Model):
"max({0}, (pi*{prefix:s}sigma)))")
self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
- pars['%sfraction' % self.prefix].set(value=0.5, min=0.0, max=1.0)
+ pars[f'{self.prefix}fraction'].set(value=0.5, min=0.0, max=1.0)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -732,10 +840,10 @@ class MoffatModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
self.set_param_hint('beta')
- self.set_param_hint('fwhm', expr="2*%ssigma*sqrt(2**(1.0/max(1e-3, %sbeta))-1)" % (self.prefix, self.prefix))
- self.set_param_hint('height', expr="%samplitude" % self.prefix)
+ self.set_param_hint('fwhm', expr=f"2*{self.prefix}sigma*sqrt(2**(1.0/max(1e-3, {self.prefix}beta))-1)")
+ self.set_param_hint('height', expr=f"{self.prefix}amplitude")
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative, ampscale=0.5, sigscale=1.)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -744,6 +852,59 @@ class MoffatModel(Model):
guess.__doc__ = COMMON_GUESS_DOC
+class Pearson4Model(Model):
+ r"""A model based on a Pearson IV distribution.
+
+ The model has five parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), `sigma` (:math:`\sigma`), `expon` (:math:`m`) and `skew` (:math:`\nu`).
+    In addition, parameters `fwhm`, `height`, and `position` are included as
+    constraints to report estimates for the approximate full width at half
+    maximum, the peak height, and the peak position (where the function value
+    is maximal), respectively. The `fwhm` value has an error of about 20% in
+    the parameter range expon: (0.5, 1000], skew: [-1000, 1000].
+
+ .. math::
+
+ f(x;A,\mu,\sigma,m,\nu)=A \frac{\left|\frac{\Gamma(m+i\tfrac{\nu}{2})}{\Gamma(m)}\right|^2}{\sigma\beta(m-\tfrac{1}{2},\tfrac{1}{2})}\left[1+\frac{(x-\mu)^2}{\sigma^2}\right]^{-m}\exp\left(-\nu \arctan\left(\frac{x-\mu}{\sigma}\right)\right)
+
+ where :math:`\beta` is the beta function (see :scipydoc:`special.beta`).
+ The :meth:`guess` function always gives a starting value of 1.5 for `expon`,
+ and 0 for `skew`.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_IV_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(pearson4, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('expon', value=1.5, min=0.5 + tiny, max=1000)
+ self.set_param_hint('skew', value=0.0, min=-1000, max=1000)
+ fmt = ("{prefix:s}sigma*sqrt(2**(1/{prefix:s}expon)-1)*pi/arctan2(exp(1)*{prefix:s}expon, {prefix:s}skew)")
+ self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
+ fmt = ("({prefix:s}amplitude / {prefix:s}sigma) * exp(2 * (real(loggammafcn({prefix:s}expon + {prefix:s}skew * 0.5j)) - loggammafcn({prefix:s}expon)) - betalnfnc({prefix:s}expon-0.5, 0.5) - "
+ "{prefix:s}expon * log1p(square({prefix:s}skew/(2*{prefix:s}expon))) - {prefix:s}skew * arctan(-{prefix:s}skew/(2*{prefix:s}expon)))")
+        self.set_param_hint('height', expr=fmt.format(prefix=self.prefix))
+ fmt = ("{prefix:s}center-{prefix:s}sigma*{prefix:s}skew/(2*{prefix:s}expon)")
+ self.set_param_hint('position', expr=fmt.format(prefix=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ pars[f'{self.prefix}expon'].set(value=1.5)
+ pars[f'{self.prefix}skew'].set(value=0.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
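[A usage sketch for the new Pearson4Model, assuming `lineshapes.pearson4` takes the same parameters as the model:

    import numpy as np
    from lmfit.lineshapes import pearson4
    from lmfit.models import Pearson4Model

    x = np.linspace(-10, 10, 501)
    y = pearson4(x, amplitude=5, center=1, sigma=1.5, expon=1.2, skew=0.5)

    model = Pearson4Model()
    params = model.guess(y, x=x)              # expon starts at 1.5, skew at 0
    result = model.fit(y, params, x=x)
    print(result.params['position'].value)    # from the constraint above]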
class Pearson7Model(Model):
r"""A model based on a Pearson VII distribution.
@@ -783,10 +944,10 @@ class Pearson7Model(Model):
"max({0}, (gamfcn(0.5)*gamfcn({prefix:s}expon-0.5)*{prefix:s}sigma))")
self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative)
- pars['%sexpon' % self.prefix].set(value=1.5)
+ pars[f'{self.prefix}expon'].set(value=1.5)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -828,7 +989,7 @@ class StudentsTModel(Model):
"{prefix:s}sigma-{prefix:s}sigma)")
self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -861,10 +1022,10 @@ class BreitWignerModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0.0)
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative)
- pars['%sq' % self.prefix].set(value=1.0)
+ pars[f'{self.prefix}q'].set(value=1.0)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -906,10 +1067,10 @@ class LognormalModel(Model):
"2*log(2)))")
self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
- pars['%ssigma' % self.prefix].set(min=0.0)
+ pars[f'{self.prefix}sigma'].set(min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -946,7 +1107,7 @@ class DampedOscillatorModel(Model):
self.set_param_hint('sigma', min=0)
self.set_param_hint('height', expr=height_expr(self))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative,
ampscale=0.1, sigscale=0.1)
@@ -989,6 +1150,7 @@ class DampedHarmonicOscillatorModel(Model):
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
+ self.set_param_hint('center', min=0)
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', min=1.e-19)
fmt = ("({prefix:s}amplitude*{prefix:s}sigma)/"
@@ -998,11 +1160,11 @@ class DampedHarmonicOscillatorModel(Model):
self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
self.set_param_hint('fwhm', expr=fwhm_expr(self))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative,
ampscale=0.1, sigscale=0.1)
- pars['%sgamma' % self.prefix].set(value=1.0, min=0.0)
+ pars[f'{self.prefix}gamma'].set(value=1.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -1040,7 +1202,7 @@ class ExponentialGaussianModel(Model):
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', min=0, max=20)
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -1080,7 +1242,7 @@ class SkewedGaussianModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -1119,9 +1281,9 @@ class SkewedVoigtModel(Model):
def _set_paramhints_prefix(self):
self.set_param_hint('sigma', min=0)
- self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
+ self.set_param_hint('gamma', expr=f'{self.prefix}sigma')
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -1173,14 +1335,10 @@ class ThermalDistributionModel(Model):
super().__init__(thermal_distribution, **kwargs)
self._set_paramhints_prefix()
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
- if x is None:
- center = 0
- kt = 1
- else:
- center = np.mean(x)
- kt = (max(x) - min(x))/10
+ center = np.mean(x)
+ kt = (max(x) - min(x))/10
pars = self.make_params()
return update_param_vals(pars, self.prefix, center=center, kt=kt)
@@ -1220,7 +1378,7 @@ class DoniachModel(Model):
"*cos(pi*{prefix:s}gamma/2)")
self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
- def guess(self, data, x=None, negative=False, **kwargs):
+ def guess(self, data, x, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
@@ -1229,24 +1387,6 @@ class DoniachModel(Model):
guess.__doc__ = COMMON_GUESS_DOC
-class DonaichModel(DoniachModel):
- """A model of an Doniach Sunjic asymmetric lineshape.
-
- Model added here for backwards-compatibility, will emit a
- `FutureWarning` when used.
-
- """
-
- def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
- **kwargs):
-
- msg = ('Please correct the name of your built-in model: DonaichModel '
- '--> DoniachModel. The incorrect spelling will be removed in '
- 'a later release.')
- warnings.warn(FutureWarning(msg))
- super().__init__(**kwargs)
-
-
class PowerLawModel(Model):
r"""A model based on a Power Law.
@@ -1267,7 +1407,7 @@ class PowerLawModel(Model):
'independent_vars': independent_vars})
super().__init__(powerlaw, **kwargs)
- def guess(self, data, x=None, **kwargs):
+ def guess(self, data, x, **kwargs):
"""Estimate initial model parameter values from data."""
try:
expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
@@ -1302,7 +1442,7 @@ class ExponentialModel(Model):
'independent_vars': independent_vars})
super().__init__(exponential, **kwargs)
- def guess(self, data, x=None, **kwargs):
+ def guess(self, data, x, **kwargs):
"""Estimate initial model parameter values from data."""
try:
sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
@@ -1337,10 +1477,10 @@ class StepModel(Model):
:nowrap:
\begin{eqnarray*}
- & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) & = A \min{[1, \max{(0, \alpha)}]} \\
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) & = A \min{[1, \max{(0, \alpha + 1/2)}]} \\
& f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
& f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
- & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 + e^{\alpha}} ]
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A \left[1 - \frac{1}{1 + e^{\alpha}} \right]
\end{eqnarray*}
where :math:`\alpha = (x - \mu)/{\sigma}`.
@@ -1355,15 +1495,13 @@ class StepModel(Model):
'form': form, 'independent_vars': independent_vars})
super().__init__(step, **kwargs)
- def guess(self, data, x=None, **kwargs):
+ def guess(self, data, x, **kwargs):
"""Estimate initial model parameter values from data."""
- if x is None:
- return
ymin, ymax = min(data), max(data)
xmin, xmax = min(x), max(x)
pars = self.make_params(amplitude=(ymax-ymin),
center=(xmax+xmin)/2.0)
- pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+ pars[f'{self.prefix}sigma'].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -1397,10 +1535,10 @@ class RectangleModel(Model):
:nowrap:
\begin{eqnarray*}
- &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \min{[-1, \max{(0, \alpha_2)}]} \} \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(-1, \alpha_1)}]} + \min{[1, \max{(-1, \alpha_2)}]} \}/2 \\
&f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
- &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\
- &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} ]
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A \left[{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)\right]/2 \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A \left[1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} \right]
\end{eqnarray*}
@@ -1423,20 +1561,17 @@ class RectangleModel(Model):
self.set_param_hint('center1')
self.set_param_hint('center2')
self.set_param_hint('midpoint',
- expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
- self.prefix))
+ expr=f'({self.prefix}center1+{self.prefix}center2)/2.0')
- def guess(self, data, x=None, **kwargs):
+ def guess(self, data, x, **kwargs):
"""Estimate initial model parameter values from data."""
- if x is None:
- return
ymin, ymax = min(data), max(data)
xmin, xmax = min(x), max(x)
pars = self.make_params(amplitude=(ymax-ymin),
center1=(xmax+xmin)/4.0,
center2=3*(xmax+xmin)/4.0)
- pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
- pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+ pars[f'{self.prefix}sigma1'].set(value=(xmax-xmin)/7.0, min=0.0)
+ pars[f'{self.prefix}sigma2'].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
__init__.__doc__ = COMMON_INIT_DOC
@@ -1446,8 +1581,8 @@ class RectangleModel(Model):
class ExpressionModel(Model):
"""ExpressionModel class."""
- idvar_missing = "No independent variable found in\n %s"
- idvar_notfound = "Cannot find independent variables '%s' in\n %s"
+ idvar_missing = "No independent variable found in\n {}"
+ idvar_notfound = "Cannot find independent variables '{}' in\n {}"
no_prefix = "ExpressionModel does not support `prefix` argument"
def __init__(self, expr, independent_vars=None, init_script=None,
@@ -1475,7 +1610,7 @@ class ExpressionModel(Model):
2. `prefix` is **not supported** for ExpressionModel.
3. `nan_policy` sets what to do when a NaN or missing value is
- seen in the data. Should be one of:
+ seen in the data. Should be one of:
- `'raise'` : raise a `ValueError` (default)
- `'propagate'` : do nothing
@@ -1499,7 +1634,7 @@ class ExpressionModel(Model):
if independent_vars is None and 'x' in sym_names:
independent_vars = ['x']
if independent_vars is None:
- raise ValueError(self.idvar_missing % (self.expr))
+ raise ValueError(self.idvar_missing.format(self.expr))
# determine which named symbols are parameter names,
# try to find all independent variables
@@ -1518,7 +1653,7 @@ class ExpressionModel(Model):
if not found:
lost.append(independent_vars[ix])
lost = ', '.join(lost)
- raise ValueError(self.idvar_notfound % (lost, self.expr))
+ raise ValueError(self.idvar_notfound.format(lost, self.expr))
kws['independent_vars'] = independent_vars
if 'prefix' in kws:
@@ -1544,7 +1679,7 @@ class ExpressionModel(Model):
def __repr__(self):
"""Return printable representation of ExpressionModel."""
- return "<lmfit.ExpressionModel('%s')>" % (self.expr)
+ return f"<lmfit.ExpressionModel('{self.expr}')>"
def _parse_params(self):
"""Over-write ExpressionModel._parse_params with `pass`.
@@ -1559,6 +1694,7 @@ lmfit_models = {'Constant': ConstantModel,
'Linear': LinearModel,
'Quadratic': QuadraticModel,
'Polynomial': PolynomialModel,
+ 'Spline': SplineModel,
'Gaussian': GaussianModel,
'Gaussian-2D': Gaussian2dModel,
'Lorentzian': LorentzianModel,
@@ -1566,6 +1702,7 @@ lmfit_models = {'Constant': ConstantModel,
'Voigt': VoigtModel,
'PseudoVoigt': PseudoVoigtModel,
'Moffat': MoffatModel,
+ 'Pearson4': Pearson4Model,
'Pearson7': Pearson7Model,
'StudentsT': StudentsTModel,
'Breit-Wigner': BreitWignerModel,
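Both additions to the registry are usable directly; a sketch fitting a peak plus a smooth background with the newly registered models (synthetic data, illustrative only):

    import numpy as np
    from lmfit.models import Pearson4Model, SplineModel

    x = np.linspace(0, 10, 201)
    y = 4.0 * np.exp(-(x - 5.0)**2 / 0.8) + 0.2 * np.sin(x / 2.0) + 1.0

    peak = Pearson4Model(prefix='p_')
    bkg = SplineModel(xknots=np.linspace(0, 10, 6), prefix='b_')

    params = peak.guess(y, x=x)
    params.update(bkg.guess(y, x=x))
    result = (peak + bkg).fit(y, params, x=x)
    print(result.fit_report())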
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index c5620e1..9856497 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -1,9 +1,7 @@
"""Parameter class."""
-from collections import OrderedDict
from copy import deepcopy
import json
-import warnings
from asteval import Interpreter, get_ast_names, valid_symbol_name
from numpy import arcsin, array, cos, inf, isclose, sin, sqrt
@@ -13,7 +11,7 @@ from .jsonutils import decode4js, encode4js
from .lineshapes import tiny
from .printfuncs import params_html_table
-SCIPY_FUNCTIONS = {'gamfcn': scipy.special.gamma}
+SCIPY_FUNCTIONS = {'gamfcn': scipy.special.gamma,
+                   'loggammafcn': scipy.special.loggamma,
+                   'betalnfnc': scipy.special.betaln}
for fnc_name in ('erf', 'erfc', 'wofz'):
SCIPY_FUNCTIONS[fnc_name] = getattr(scipy.special, fnc_name)
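The two new scipy.special wrappers become available inside constraint expressions; a sketch (illustrative only):

    from lmfit import Parameters

    pars = Parameters()
    pars.add('a', value=3.0, min=0)
    pars.add('b', value=2.0, min=0)
    # use the newly registered symbols in algebraic constraints
    pars.add('lnbeta', expr='betalnfnc(a, b)')    # log Beta(a, b)
    pars.add('lngam', expr='loggammafcn(a)')      # log Gamma(a)
    print(pars['lnbeta'].value)                   # ~ -2.485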
@@ -24,8 +22,8 @@ def check_ast_errors(expr_eval):
expr_eval.raise_exception(None)
-class Parameters(OrderedDict):
- """An ordered dictionary of Parameter objects.
+class Parameters(dict):
+ """A dictionary of Parameter objects.
It should contain all Parameter objects that are required to specify
a fit model. All minimization and Model fitting routines in lmfit will
@@ -46,14 +44,10 @@ class Parameters(OrderedDict):
"""
- def __init__(self, asteval=None, usersyms=None):
+ def __init__(self, usersyms=None):
"""
Arguments
---------
- asteval : :class:`asteval.Interpreter`, optional
- Instance of the `asteval.Interpreter` to use for constraint
- expressions. If None (default), a new interpreter will be
- created. **Warning: deprecated**, use `usersyms` if possible!
usersyms : dict, optional
Dictionary of symbols to add to the
:class:`asteval.Interpreter` (default is None).
@@ -61,15 +55,7 @@ class Parameters(OrderedDict):
"""
super().__init__(self)
- self._asteval = asteval
- if asteval is None:
- self._asteval = Interpreter()
- else:
- msg = ("The use of the 'asteval' argument for the Parameters class"
- " was deprecated in lmfit v0.9.12 and will be removed in a "
- "later release. Please use the 'usersyms' argument instead!")
- warnings.warn(FutureWarning(msg))
- self._asteval = asteval
+ self._asteval = Interpreter()
_syms = {}
_syms.update(SCIPY_FUNCTIONS)
@@ -85,7 +71,7 @@ class Parameters(OrderedDict):
def update(self, other):
"""Update values and symbols with another Parameters object."""
if not isinstance(other, Parameters):
- raise ValueError("'%s' is not a Parameters object" % other)
+ raise ValueError(f"'{other}' is not a Parameters object")
self.add_many(*other.values())
for sym in other._asteval.user_defined_symbols():
self._asteval.symtable[sym] = other._asteval.symtable[sym]
@@ -102,7 +88,7 @@ class Parameters(OrderedDict):
all individual Parameter objects are copied.
"""
- _pars = Parameters(asteval=None)
+ _pars = self.__class__()
# find the symbols that were added by users, not during construction
unique_symbols = {key: self._asteval.symtable[key]
@@ -132,12 +118,11 @@ class Parameters(OrderedDict):
def __setitem__(self, key, par):
"""Set items of Parameters object."""
- if key not in self:
- if not valid_symbol_name(key):
- raise KeyError("'%s' is not a valid Parameters name" % key)
+ if key not in self and not valid_symbol_name(key):
+ raise KeyError(f"'{key}' is not a valid Parameters name")
if par is not None and not isinstance(par, Parameter):
- raise ValueError("'%s' is not a Parameter" % par)
- OrderedDict.__setitem__(self, key, par)
+ raise ValueError(f"'{par}' is not a Parameter")
+ dict.__setitem__(self, key, par)
par.name = key
par._expr_eval = self._asteval
self._asteval.symtable[key] = par.value
@@ -145,7 +130,7 @@ class Parameters(OrderedDict):
def __add__(self, other):
"""Add Parameters objects."""
if not isinstance(other, Parameters):
- raise ValueError("'%s' is not a Parameters object" % other)
+ raise ValueError(f"'{other}' is not a Parameters object")
out = deepcopy(self)
out.add_many(*other.values())
for sym in other._asteval.user_defined_symbols():
@@ -203,6 +188,12 @@ class Parameters(OrderedDict):
# then add all the parameters
self.add_many(*state['params'])
+ def __repr__(self):
+ """__repr__ from OrderedDict."""
+ if not self:
+ return f'{self.__class__.__name__}()'
+ return f'{self.__class__.__name__}({list(self.items())!r})'
+
def eval(self, expr):
"""Evaluate a statement using the `asteval` Interpreter.
@@ -266,10 +257,10 @@ class Parameters(OrderedDict):
"""
if oneline:
- return super().__repr__()
+ return self.__repr__()
s = "Parameters({\n"
for key in self.keys():
- s += " '%s': %s, \n" % (key, self[key])
+ s += f" '{key}': {self[key]}, \n"
s += " })\n"
return s
@@ -310,7 +301,7 @@ class Parameters(OrderedDict):
otherstyles = dict(name='{name:<{name_len}} ', stderr='{stderr!s:>{n}}',
vary='{vary!s:>{n}}', expr='{expr!s:>{n}}',
brute_step='{brute_step!s:>{n}}')
- line = ' '.join([otherstyles.get(k, numstyle % k) for k in allcols])
+ line = ' '.join(otherstyles.get(k, numstyle % k) for k in allcols)
for name, values in sorted(self.items()):
pvalues = {k: getattr(values, k) for k in columns}
pvalues['name'] = name
@@ -334,9 +325,11 @@ class Parameters(OrderedDict):
Parameters
----------
- name : str
- Name of parameter. Must match ``[a-z_][a-z0-9_]*`` and cannot
- be a Python reserved word.
+ name : str or Parameter
+ If ``name`` is a Parameter object it will be added directly to
+ the Parameters instance; otherwise a new Parameter object with
+ that name is created before it is added. In both cases, the name
+ must match ``[a-z_][a-z0-9_]*`` and cannot be a Python reserved word.
value : float, optional
Numerical Parameter value, typically the *initial value*.
vary : bool, optional
@@ -415,12 +408,12 @@ class Parameters(OrderedDict):
Returns
-------
- OrderedDict
- An ordered dictionary of :attr:`name`::attr:`value` pairs for
- each Parameter.
+ dict
+ A dictionary of :attr:`name`::attr:`value` pairs for each
+ Parameter.
"""
- return OrderedDict((p.name, p.value) for p in self.values())
+ return {p.name: p.value for p in self.values()}
def dumps(self, **kws):
"""Represent Parameters as a JSON string.
@@ -695,7 +688,7 @@ class Parameter:
if self.min > self.max:
self.min, self.max = self.max, self.min
if isclose(self.min, self.max, atol=1e-13, rtol=1e-13):
- raise ValueError("Parameter '%s' has min == max" % self.name)
+ raise ValueError(f"Parameter '{self.name}' has min == max")
if self._val > self.max:
self._val = self.max
if self._val < self.min:
@@ -717,24 +710,25 @@ class Parameter:
self._expr_eval = None
self._expr_deps = []
self._delay_asteval = False
- self.value = _value
+ self._val = _value
self._init_bounds()
+ self.value = _value
def __repr__(self):
"""Return printable representation of a Parameter object."""
s = []
- sval = "value=%s" % repr(self._getval())
+ sval = f"value={repr(self._getval())}"
if not self.vary and self._expr is None:
sval += " (fixed)"
elif self.stderr is not None:
- sval += " +/- %.3g" % self.stderr
+ sval += f" +/- {self.stderr:.3g}"
s.append(sval)
- s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
+ s.append(f"bounds=[{repr(self.min)}:{repr(self.max)}]")
if self._expr is not None:
- s.append("expr='%s'" % self.expr)
+ s.append(f"expr='{self.expr}'")
if self.brute_step is not None:
- s.append("brute_step=%s" % (self.brute_step))
- return "<Parameter '%s', %s>" % (self.name, ', '.join(s))
+ s.append(f"brute_step={self.brute_step}")
+ return f"<Parameter '{self.name}', {', '.join(s)}>"
def setup_bounds(self):
"""Set up Minuit-style internal/external parameter transformation
@@ -810,10 +804,9 @@ class Parameter:
if self._expr is not None:
if self._expr_ast is None:
self.__set_expression(self._expr)
- if self._expr_eval is not None:
- if not self._delay_asteval:
- self.value = self._expr_eval(self._expr_ast)
- check_ast_errors(self._expr_eval)
+ if self._expr_eval is not None and not self._delay_asteval:
+ self.value = self._expr_eval(self._expr_ast)
+ check_ast_errors(self._expr_eval)
return self._val
@property
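Since Python 3.7 a plain dict preserves insertion order, so dropping OrderedDict changes nothing observable; the removed `asteval` keyword is replaced by `usersyms`. A sketch of the surviving API:

    import numpy as np
    from lmfit import Parameters

    # custom symbols for constraint expressions go through `usersyms`
    pars = Parameters(usersyms={'halfpi': np.pi / 2})
    pars.add('theta', value=0.3, min=0, max=1.5)
    pars.add('phase', expr='theta + halfpi')
    print(pars.valuesdict())   # now a plain dict of name: value pairs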
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index d0547c9..520536a 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -2,7 +2,6 @@
from math import log10
import re
-import warnings
import numpy as np
@@ -25,7 +24,7 @@ def getfloat_attr(obj, attr, length=11):
if val is None:
return 'unknown'
if isinstance(val, int):
- return '%d' % val
+ return f'{val}'
if isinstance(val, float):
return gformat(val, length=length).strip()
return repr(val)
@@ -74,11 +73,7 @@ def gformat(val, length=11):
prec += 4
if expon > 0:
prec -= expon
- fmt = '{0: %i.%i%s}' % (length, prec, form)
- return fmt.format(val)[:length]
-
-
-CORREL_HEAD = '[[Correlations]] (unreported correlations are < %.3f)'
+ return f'{val:{length}.{prec}{form}}'
def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
@@ -131,17 +126,19 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
buff = []
add = buff.append
- namelen = max([len(n) for n in parnames])
+ namelen = max(len(n) for n in parnames)
if result is not None:
add("[[Fit Statistics]]")
- add(" # fitting method = %s" % (result.method))
- add(" # function evals = %s" % getfloat_attr(result, 'nfev'))
- add(" # data points = %s" % getfloat_attr(result, 'ndata'))
- add(" # variables = %s" % getfloat_attr(result, 'nvarys'))
- add(" chi-square = %s" % getfloat_attr(result, 'chisqr'))
- add(" reduced chi-square = %s" % getfloat_attr(result, 'redchi'))
- add(" Akaike info crit = %s" % getfloat_attr(result, 'aic'))
- add(" Bayesian info crit = %s" % getfloat_attr(result, 'bic'))
+ add(f" # fitting method = {result.method}")
+ add(f" # function evals = {getfloat_attr(result, 'nfev')}")
+ add(f" # data points = {getfloat_attr(result, 'ndata')}")
+ add(f" # variables = {getfloat_attr(result, 'nvarys')}")
+ add(f" chi-square = {getfloat_attr(result, 'chisqr')}")
+ add(f" reduced chi-square = {getfloat_attr(result, 'redchi')}")
+ add(f" Akaike info crit = {getfloat_attr(result, 'aic')}")
+ add(f" Bayesian info crit = {getfloat_attr(result, 'bic')}")
+ if hasattr(result, 'rsquared'):
+ add(f" R-squared = {getfloat_attr(result, 'rsquared')}")
if not result.errorbars:
add("## Warning: uncertainties could not be estimated:")
if result.method in ('leastsq', 'least_squares') or HAS_NUMDIFFTOOLS:
@@ -151,9 +148,9 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
par = params[name]
space = ' '*(namelen-len(name))
if par.init_value and np.allclose(par.value, par.init_value):
- add(' %s:%s at initial value' % (name, space))
+ add(f' {name}:{space} at initial value')
if (np.allclose(par.value, par.min) or np.allclose(par.value, par.max)):
- add(' %s:%s at boundary' % (name, space))
+ add(f' {name}:{space} at boundary')
else:
add(" this fitting method does not natively calculate uncertainties")
add(" and numdifftools is not installed for lmfit to do this. Use")
@@ -164,12 +161,12 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
for name in parnames:
par = params[name]
space = ' '*(namelen-len(name))
- nout = "%s:%s" % (name, space)
+ nout = f"{name}:{space}"
inval = '(init = ?)'
if par.init_value is not None:
- inval = '(init = %.7g)' % par.init_value
+ inval = f'(init = {par.init_value:.7g})'
if modelpars is not None and name in modelpars:
- inval = '%s, model_value = %.7g' % (inval, modelpars[name].value)
+ inval = f'{inval}, model_value = {modelpars[name].value:.7g}'
try:
sval = gformat(par.value)
except (TypeError, ValueError):
@@ -177,17 +174,17 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
if par.stderr is not None:
serr = gformat(par.stderr)
try:
- spercent = '({:.2%})'.format(abs(par.stderr/par.value))
+ spercent = f'({abs(par.stderr/par.value):.2%})'
except ZeroDivisionError:
spercent = ''
- sval = '%s +/-%s %s' % (sval, serr, spercent)
+ sval = f'{sval} +/-{serr} {spercent}'
if par.vary:
- add(" %s %s %s" % (nout, sval, inval))
+ add(f" {nout} {sval} {inval}")
elif par.expr is not None:
- add(" %s %s == '%s'" % (nout, sval, par.expr))
+ add(f" {nout} {sval} == '{par.expr}'")
else:
- add(" %s % .7g (fixed)" % (nout, par.value))
+ add(f" {nout} {par.value: .7g} (fixed)")
if show_correl:
correls = {}
@@ -199,16 +196,17 @@ def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
for name2 in parnames[i+1:]:
if (name != name2 and name2 in par.correl and
abs(par.correl[name2]) > min_correl):
- correls["%s, %s" % (name, name2)] = par.correl[name2]
+ correls[f"{name}, {name2}"] = par.correl[name2]
sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
sort_correl.reverse()
if len(sort_correl) > 0:
- add(CORREL_HEAD % min_correl)
- maxlen = max([len(k) for k in list(correls.keys())])
+ add('[[Correlations]] (unreported correlations are < '
+ f'{min_correl:.3f})')
+ maxlen = max(len(k) for k in list(correls.keys()))
for name, val in sort_correl:
lspace = max(0, maxlen - len(name))
- add(' C(%s)%s = % .3f' % (name, (' '*30)[:lspace], val))
+ add(f" C({name}){(' '*30)[:lspace]} = {val:.3f}")
return '\n'.join(buff)
@@ -235,7 +233,7 @@ def fitreport_html_table(result, show_correl=True, min_correl=0.1):
add = html.append
def stat_row(label, val, val2=''):
- add('<tr><td>%s</td><td>%s</td><td>%s</td></tr>' % (label, val, val2))
+ add(f'<tr><td>{label}</td><td>{val}</td><td>{val2}</td></tr>')
add('<h2>Fit Statistics</h2>')
add('<table>')
@@ -247,6 +245,8 @@ def fitreport_html_table(result, show_correl=True, min_correl=0.1):
stat_row('reduced chi-square', gformat(result.redchi))
stat_row('Akaike info crit.', gformat(result.aic))
stat_row('Bayesian info crit.', gformat(result.bic))
+ if hasattr(result, 'rsquared'):
+ stat_row('R-squared', gformat(result.rsquared))
add('</table>')
add('<h2>Variables</h2>')
add(params_html_table(result.params))
@@ -265,11 +265,11 @@ def fitreport_html_table(result, show_correl=True, min_correl=0.1):
if len(correls) > 0:
sort_correls = sorted(correls, key=lambda val: abs(val[2]))
sort_correls.reverse()
- extra = '(unreported correlations are < %.3f)' % (min_correl)
- add('<h2>Correlations %s</h2>' % extra)
+ extra = f'(unreported correlations are < {min_correl:.3f})'
+ add(f'<h2>Correlations {extra}</h2>')
add('<table>')
for name1, name2, val in sort_correls:
- stat_row(name1, name2, "%.4f" % val)
+ stat_row(name1, name2, f"{val:.4f}")
add('</table>')
return ''.join(html)
@@ -288,15 +288,15 @@ def params_html_table(params):
Multi-line HTML code of fitting parameters.
"""
- has_err = any([p.stderr is not None for p in params.values()])
- has_expr = any([p.expr is not None for p in params.values()])
- has_brute = any([p.brute_step is not None for p in params.values()])
+ has_err = any(p.stderr is not None for p in params.values())
+ has_expr = any(p.expr is not None for p in params.values())
+ has_brute = any(p.brute_step is not None for p in params.values())
html = []
add = html.append
def cell(x, cat='td'):
- return add('<%s> %s </%s>' % (cat, x, cat))
+ return add(f'<{cat}> {x} </{cat}>')
add('<table><tr>')
headers = ['name', 'value']
@@ -315,21 +315,21 @@ def params_html_table(params):
rows = [par.name, gformat(par.value)]
if has_err:
serr = ''
+ spercent = ''
if par.stderr is not None:
serr = gformat(par.stderr)
try:
- spercent = '({:.2%})'.format(abs(par.stderr/par.value))
+ spercent = f'({abs(par.stderr/par.value):.2%})'
except ZeroDivisionError:
- spercent = ''
+ pass
rows.extend([serr, spercent])
rows.extend((par.init_value, gformat(par.min),
- gformat(par.max), '%s' % par.vary))
+ gformat(par.max), f'{par.vary}'))
if has_expr:
expr = ''
if par.expr is not None:
expr = par.expr
rows.append(expr)
-
if has_brute:
brute_step = 'None'
if par.brute_step is not None:
@@ -344,14 +344,6 @@ def params_html_table(params):
return ''.join(html)
-def report_errors(params, **kws):
- """Print a report for fitted params: see error_report()."""
- warnings.warn("The function 'report_errors' is deprecated as of lmfit "
- "0.9.14 and will be removed in the next release. Please "
- "use 'report_fit' instead.", FutureWarning)
- print(fit_report(params, **kws))
-
-
def report_fit(params, **kws):
"""Print a report of the fitting results."""
print(fit_report(params, **kws))
@@ -377,7 +369,7 @@ def ci_report(ci, with_offset=True, ndigits=5):
Text of formatted report on confidence intervals.
"""
- maxlen = max([len(i) for i in ci])
+ maxlen = max(len(i) for i in ci)
buff = []
add = buff.append
@@ -385,7 +377,7 @@ def ci_report(ci, with_offset=True, ndigits=5):
"""Convert probabilities into header for CI report."""
if abs(x[0]) < 1.e-2:
return "_BEST_"
- return "%.2f%%" % (x[0]*100)
+ return f"{x[0] * 100:.2f}%"
title_shown = False
fmt_best = fmt_diff = "{0:.%if}" % ndigits
@@ -396,7 +388,7 @@ def ci_report(ci, with_offset=True, ndigits=5):
add("".join([''.rjust(maxlen+1)] + [i.rjust(ndigits+5)
for i in map(convp, row)]))
title_shown = True
- thisrow = [" %s:" % name.ljust(maxlen)]
+ thisrow = [f" {name.ljust(maxlen)}:"]
offset = 0.0
if with_offset:
for cval, val in row:
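With the `hasattr` guard above, any result carrying an `rsquared` attribute (as Model-based fits do) gains an extra statistics line; a sketch with synthetic data (illustrative only):

    import numpy as np
    from lmfit.models import GaussianModel

    rng = np.random.default_rng(7)
    x = np.linspace(-4, 4, 101)
    y = 2.5 * np.exp(-x**2 / 0.5) + rng.normal(scale=0.05, size=x.size)

    mod = GaussianModel()
    result = mod.fit(y, mod.guess(y, x=x), x=x)
    print(result.fit_report())   # [[Fit Statistics]] now includes R-squared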
diff --git a/lmfit/version.py b/lmfit/version.py
new file mode 100644
index 0000000..bef9f66
--- /dev/null
+++ b/lmfit/version.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = '1.1.0'
+__version_tuple__ = version_tuple = (1, 1, 0)
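The generated module is importable directly, so downstream code can check the installed version without pkg_resources:

    from lmfit.version import __version__, version_tuple

    print(__version__)     # '1.1.0'
    print(version_tuple)   # (1, 1, 0)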
diff --git a/publish_docs.sh b/publish_docs.sh
index 1c7f4e7..5ed4247 100755
--- a/publish_docs.sh
+++ b/publish_docs.sh
@@ -31,7 +31,7 @@ rm -f .buildinfo
echo '# Commit changes to gh-pages branch'
export version=`git tag | sort | tail -1`
git add *
-git commit -am "DOC: update documentation for ${version}"
+PRE_COMMIT_ALLOW_NO_CONFIG=1 git commit -am "DOC: update documentation for ${version}" --no-verify
if [ $? -ne 0 ] ; then
echo ' failed.'
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..5a184b9
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,7 @@
+[build-system]
+requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools_scm]
+write_to = "lmfit/version.py"
+version_scheme = "post-release"
diff --git a/requirements-dev.txt b/requirements-dev.txt
deleted file mode 100644
index cbb2e36..0000000
--- a/requirements-dev.txt
+++ /dev/null
@@ -1,18 +0,0 @@
--r requirements.txt
-cairosvg
-codecov
-corner
-coverage
-dill
-emcee>=3.0.0
-jupyter_sphinx>=0.2.4
-matplotlib
-numdifftools
-pandas
-Pillow
-pre-commit
-pytest
-Sphinx
-sphinx-gallery>=0.8
-sphinxcontrib-svg2pdfconverter
-sympy
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index f3e48a5..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-asteval>=0.9.21
-numpy>=1.18
-scipy>=1.3
-uncertainties>=3.0.1
diff --git a/setup.cfg b/setup.cfg
index 1c166a8..f7ce000 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,13 +1,83 @@
-[versioneer]
-vcs = git
-style = pep440
-versionfile_source = lmfit/_version.py
-versionfile_build = lmfit/_version.py
-tag_prefix =
-parentdir_prefix = lmfit-
+[metadata]
+name = lmfit
+description = Least-Squares Minimization with Bounds and Constraints
+long_description = file: README.rst
+long_description_content_type = text/x-rst
+author = LMFit Development Team
+author_email = matt.newville@gmail.com
+url = https://lmfit.github.io//lmfit-py/
+license = BSD 3-Clause
+platforms = any
+classifiers =
+ Development Status :: 5 - Production/Stable
+ Intended Audience :: Science/Research
+ Topic :: Scientific/Engineering
+ License :: OSI Approved :: BSD License
+ Operating System :: OS Independent
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3 :: Only
+ Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
+ Programming Language :: Python :: 3.11
+ Programming Language :: Python :: Implementation :: CPython
+ Programming Language :: Python :: Implementation :: PyPy
+keywords = curve-fitting, least-squares minimization
+project_urls =
+ Source = https://github.com/lmfit/lmfit-py
+ Changelog = https://lmfit.github.io/lmfit-py/whatsnew.html
+ Documentation = https://lmfit.github.io/lmfit-py/
+ Tracker = https://github.com/lmfit/lmfit-py/issues
+
+[options]
+packages = find:
+python_requires = >=3.7
+setup_requires = setuptools_scm
+install_requires =
+ asteval>=0.9.28
+ numpy>=1.19
+ scipy>=1.6
+ uncertainties>=3.1.4
+
+[options.packages.find]
+include =
+ lmfit
+
+[options.extras_require]
+dev =
+ build
+ check-wheel-contents
+ pre-commit
+ twine
+doc =
+ cairosvg
+ corner
+ dill
+ emcee>=3.0.0
+ jupyter_sphinx>=0.2.4
+ matplotlib
+ numdifftools
+ pandas
+ Pillow
+ pycairo;platform_system=="Windows"
+ Sphinx
+ sphinx-gallery>=0.10
+ sphinxcontrib-svg2pdfconverter
+ sympy
+test =
+ codecov
+ coverage
+ flaky
+ pytest
+ pytest-cov
+all =
+ %(dev)s
+ %(test)s
+ %(doc)s
[isort]
-skip = versioneer.py,lmfit/_version.py,lmfit/__init__.py,doc/conf.py
+skip = lmfit/__init__.py,doc/conf.py
known_third_party = asteval,dill,emcee,IPython,matplotlib,numdifftools,numpy,NISTModels,pandas,pytest,scipy,uncertainties
known_first_party = lmfit,lmfit_testutils
force_sort_within_sections = True
@@ -20,10 +90,13 @@ ignore_directives = autoclass,autodoc,autofunction,automethod,jupyter-execute,ma
[flake8]
ignore = E121,E123,E126,E226,W503,W504,E501,E731
-exclude = doc/conf.py, versioneer.py, lmfit/__init__.py
+exclude = doc/conf.py,lmfit/__init__.py
[coverage:run]
-omit = lmfit/_version.py
+omit = tests/*
+
+[tool:pytest]
+addopts = --cov=lmfit --cov-report html
[egg_info]
tag_build =
diff --git a/setup.py b/setup.py
index 6365853..bac24a4 100644
--- a/setup.py
+++ b/setup.py
@@ -1,55 +1,6 @@
#!/usr/bin/env python
-from setuptools import setup
+import setuptools
-import versioneer
-
-long_desc = """A library for least-squares minimization and data fitting in
-Python. Built on top of scipy.optimize, lmfit provides a Parameter object
-which can be set as fixed or free, can have upper and/or lower bounds, or
-can be written in terms of algebraic constraints of other Parameters. The
-user writes a function to be minimized as a function of these Parameters,
-and the scipy.optimize methods are used to find the optimal values for the
-Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
-algorithm, and provides estimated standard errors and correlations between
-varied Parameters. Other minimization methods, including Nelder-Mead's
-downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
-others are also supported. Bounds and constraints can be placed on
-Parameters for all of these methods.
-
-In addition, methods for explicitly calculating confidence intervals are
-provided for exploring minmization problems where the approximation of
-estimating Parameter uncertainties from the covariance matrix is
-questionable. """
-
-setup(name='lmfit',
- version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(),
- author='LMFit Development Team',
- author_email='matt.newville@gmail.com',
- url='https://lmfit.github.io/lmfit-py/',
- download_url='https://lmfit.github.io//lmfit-py/',
- install_requires=['asteval>=0.9.16',
- 'numpy>=1.16',
- 'scipy>=1.2',
- 'uncertainties>=3.0.1'],
- python_requires='>=3.6',
- license='BSD-3',
- description="Least-Squares Minimization with Bounds and Constraints",
- long_description=long_desc,
- platforms=['Windows', 'Linux', 'Mac OS X'],
- classifiers=['Development Status :: 5 - Production/Stable',
- 'Intended Audience :: Science/Research',
- 'License :: OSI Approved :: BSD License',
- 'Operating System :: OS Independent',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- 'Programming Language :: Python :: 3.9',
- 'Topic :: Scientific/Engineering',
- ],
- keywords='curve-fitting, least-squares minimization',
- tests_require=['pytest'],
- package_dir={'lmfit': 'lmfit'},
- packages=['lmfit'],
- )
+if __name__ == "__main__":
+ setuptools.setup()
diff --git a/tests/NISTModels.py b/tests/NISTModels.py
index 87931df..792da57 100644
--- a/tests/NISTModels.py
+++ b/tests/NISTModels.py
@@ -188,7 +188,7 @@ def ReadNistData(dataset):
"""NIST STRD data is in a simple, fixed format with
line numbers being significant!
"""
- finp = open(os.path.join(NIST_DIR, "%s.dat" % dataset))
+ finp = open(os.path.join(NIST_DIR, f"{dataset}.dat"))
lines = [line[:-1] for line in finp.readlines()]
finp.close()
ModelLines = lines[30:39]
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/o.py b/tests/o.py
deleted file mode 100644
index 603e9d9..0000000
--- a/tests/o.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import lmfit
-import numpy as np
-mod = lmfit.models.SineModel()
-x = np.linspace(-10, 10, 201)
-pars = dict(amplitude=1.5, frequency=0.5, shift=0.4)
-
-y = pars['amplitude']*np.sin(x*pars['frequency'] + pars['shift'])
-
-params = mod.guess(y, x=x)
-
-print(params)
diff --git a/tests/t_enso.py b/tests/t_enso.py
deleted file mode 100644
index 18ad7e2..0000000
--- a/tests/t_enso.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from numpy import arctan, array, cos, exp, log, sin
-from lmfit import Parameters, minimize, report_fit
-from NISTModels import Models, ReadNistData
-
-def read_params(params):
- if isinstance(params, Parameters):
- return [par.value for par in params.values()]
- else:
- return params
-
-
-def ENSO(b, x, y=0):
- b = read_params(b)
- print("ENSO ", b)
- pi = 3.141592653589793238462643383279
-
- return y - b[0] + (b[1]*cos(2*pi*x/12) + b[2]*sin(2*pi*x/12) +
- b[4]*cos(2*pi*x/b[3]) + b[5]*sin(2*pi*x/b[3]) +
- b[7]*cos(2*pi*x/b[6]) + b[8]*sin(2*pi*x/b[6]))
-
-NISTdata = ReadNistData('ENSO')
-resid, npar, dimx = Models['ENSO']
-y = NISTdata['y']
-x = NISTdata['x']
-cert_values = NISTdata['cert_values']
-
-resid_cert = ENSO(cert_values, x=x, y=y)
-print("ENSO -> " , (resid_cert**2).sum())
-
-print(NISTdata.keys())
-params = Parameters()
-for i in range(npar):
- pname = 'b%i' % (i+1)
- pval1 = NISTdata['start2'][i]
- params.add(pname, value=pval1)
-
-myfit = minimize(resid, params, method='leastsq', args=(x,), kws={'y': y})
-print(report_fit(myfit))
-# digs, buff = Compare_NIST_Results(DataSet, myfit, myfit.params, NISTdata)
-# print(buff)
diff --git a/tests/test_1variable.py b/tests/test_1variable.py
index 12be3ff..b17d19e 100644
--- a/tests/test_1variable.py
+++ b/tests/test_1variable.py
@@ -26,7 +26,7 @@ def linear_chisq(params, x, data, errs=None):
if errs is not None:
residuals = residuals/errs
- return(residuals)
+ return residuals
def test_1var():
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
index 4c13413..2b4b56b 100644
--- a/tests/test_NIST_Strd.py
+++ b/tests/test_NIST_Strd.py
@@ -1,11 +1,12 @@
import math
from optparse import OptionParser
-from NISTModels import Models, ReadNistData
from numpy.testing import assert_allclose
from lmfit import Parameters, minimize
+from .NISTModels import Models, ReadNistData
+
def ndig(a, b):
"""Precision for NIST values."""
@@ -17,7 +18,7 @@ ABAR = ' |----------------+----------------+------------------+-----------------
def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
buff = [' ======================================',
- ' %s: ' % DataSet,
+ f' {DataSet}: ',
' | Parameter Name | Value Found | Certified Value | # Matching Digits |']
buff.append(ABAR)
@@ -25,7 +26,7 @@ def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
err_dig_min = 200
fmt = ' | %s | % -.7e | % -.7e | %2i |'
for i in range(NISTdata['nparams']):
- parname = 'b%i' % (i+1)
+ parname = f'b{i+1}'
par = params[parname]
thisval = par.value
certval = NISTdata['cert_values'][i]
@@ -55,10 +56,10 @@ def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
buff.append(' | * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * * |')
err_dig_min = 0
if err_dig_min < 199:
- buff.append(' Worst agreement: %i digits for value, %i digits for error '
- % (val_dig_min, err_dig_min))
+ buff.append(f' Worst agreement: {val_dig_min} digits for value, '
+ f'{err_dig_min} digits for error ')
else:
- buff.append(' Worst agreement: %i digits' % (val_dig_min))
+ buff.append(f' Worst agreement: {val_dig_min} digits')
return val_dig_min, '\n'.join(buff)
@@ -72,7 +73,7 @@ def NIST_Dataset(DataSet, method='leastsq', start='start2',
params = Parameters()
for i in range(npar):
- pname = 'b%i' % (i+1)
+ pname = f'b{i+1}'
pval1 = NISTdata[start][i]
params.add(pname, value=pval1)
try:
@@ -92,14 +93,14 @@ def build_usage():
modelnames = []
ms = ''
for d in sorted(Models.keys()):
- ms = ms + ' %s ' % d
+ ms = ms + f' {d} '
if len(ms) > 55:
modelnames.append(ms)
ms = ' '
modelnames.append(ms)
modelnames = '\n'.join(modelnames)
- usage = """
+ usage = f"""
=== Test Fit to NIST StRD Models ===
usage:
@@ -109,7 +110,7 @@ usage:
where Start is one of 'start1','start2' or 'cert', for different
starting values, and Model is one of
- %s
+ {modelnames}
if Model = 'all', all models and starting values will be run.
@@ -117,9 +118,9 @@ options:
--------
-m name of fitting method. One of:
leastsq, nelder, powell, lbfgsb, bfgs,
- tnc, cobyla, slsqp, cg, newto-cg
+ tnc, cobyla, slsqp, cg, newton-cg
leastsq (Levenberg-Marquardt) is the default
-""" % modelnames
+"""
return usage
############################
@@ -153,10 +154,10 @@ def run_interactive():
tpass += 1
else:
tfail += 1
- failures.append(" %s (starting at '%s')" % (dset, start))
+ failures.append(f" {dset} (starting at '{start}')")
print('--------------------------------------')
- print(' Fit Method: %s ' % opts.method)
- print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
+ print(f' Fit Method: {opts.method} ')
+ print(f' Final Results: {tpass} pass, {tfail} fail.')
print(' Tests Failed for:\n %s' % '\n '.join(failures))
print('--------------------------------------')
elif dset not in Models:
@@ -181,116 +182,115 @@ def RunNIST_Model(model):
out1 = NIST_Dataset(model, start='start1', plot=False, verbose=False)
out2 = NIST_Dataset(model, start='start2', plot=False, verbose=False)
- assert(out1 or out2)
- return out1 or out2
+ assert (out1 or out2)
def test_Bennett5():
- return RunNIST_Model('Bennett5')
+ RunNIST_Model('Bennett5')
def test_BoxBOD():
- return RunNIST_Model('BoxBOD')
+ RunNIST_Model('BoxBOD')
def test_Chwirut1():
- return RunNIST_Model('Chwirut1')
+ RunNIST_Model('Chwirut1')
def test_Chwirut2():
- return RunNIST_Model('Chwirut2')
+ RunNIST_Model('Chwirut2')
def test_DanWood():
- return RunNIST_Model('DanWood')
+ RunNIST_Model('DanWood')
def test_ENSO():
- return RunNIST_Model('ENSO')
+ RunNIST_Model('ENSO')
def test_Eckerle4():
- return RunNIST_Model('Eckerle4')
+ RunNIST_Model('Eckerle4')
def test_Gauss1():
- return RunNIST_Model('Gauss1')
+ RunNIST_Model('Gauss1')
def test_Gauss2():
- return RunNIST_Model('Gauss2')
+ RunNIST_Model('Gauss2')
def test_Gauss3():
- return RunNIST_Model('Gauss3')
+ RunNIST_Model('Gauss3')
def test_Hahn1():
- return RunNIST_Model('Hahn1')
+ RunNIST_Model('Hahn1')
def test_Kirby2():
- return RunNIST_Model('Kirby2')
+ RunNIST_Model('Kirby2')
def test_Lanczos1():
- return RunNIST_Model('Lanczos1')
+ RunNIST_Model('Lanczos1')
def test_Lanczos2():
- return RunNIST_Model('Lanczos2')
+ RunNIST_Model('Lanczos2')
def test_Lanczos3():
- return RunNIST_Model('Lanczos3')
+ RunNIST_Model('Lanczos3')
def test_MGH09():
- return RunNIST_Model('MGH09')
+ RunNIST_Model('MGH09')
def test_MGH10():
- return RunNIST_Model('MGH10')
+ RunNIST_Model('MGH10')
def test_MGH17():
- return RunNIST_Model('MGH17')
+ RunNIST_Model('MGH17')
def test_Misra1a():
- return RunNIST_Model('Misra1a')
+ RunNIST_Model('Misra1a')
def test_Misra1b():
- return RunNIST_Model('Misra1b')
+ RunNIST_Model('Misra1b')
def test_Misra1c():
- return RunNIST_Model('Misra1c')
+ RunNIST_Model('Misra1c')
def test_Misra1d():
- return RunNIST_Model('Misra1d')
+ RunNIST_Model('Misra1d')
def test_Nelson():
- return RunNIST_Model('Nelson')
+ RunNIST_Model('Nelson')
def test_Rat42():
- return RunNIST_Model('Rat42')
+ RunNIST_Model('Rat42')
def test_Rat43():
- return RunNIST_Model('Rat43')
+ RunNIST_Model('Rat43')
def test_Roszman1():
- return RunNIST_Model('Roszman1')
+ RunNIST_Model('Roszman1')
def test_Thurber():
- return RunNIST_Model('Thurber')
+ RunNIST_Model('Thurber')
if __name__ == '__main__':
diff --git a/tests/test_ampgo.py b/tests/test_ampgo.py
index 36e1746..86e3646 100644
--- a/tests/test_ampgo.py
+++ b/tests/test_ampgo.py
@@ -4,6 +4,7 @@ import sys
import numpy as np
from numpy.testing import assert_allclose
import pytest
+from scipy import __version__ as scipy_version
import lmfit
from lmfit._ampgo import ampgo, tunnel
@@ -60,8 +61,13 @@ def test_ampgo_local_solver(minimizer_Alpine02):
"""Test AMPGO algorithm with local solver."""
kws = {'local': 'Nelder-Mead'}
- msg = r'Method Nelder-Mead cannot handle constraints nor bounds'
- with pytest.warns(RuntimeWarning, match=msg):
+ # bounds in Nelder-Mead are supported since SciPy v1.7.0
+ # FIXME: clean this up after we require SciPy >= 1.7.0
+ if int(scipy_version.split('.')[1]) < 7:
+ msg = r'Method Nelder-Mead cannot handle constraints nor bounds'
+ with pytest.warns(RuntimeWarning, match=msg):
+ out = minimizer_Alpine02.minimize(method='ampgo', **kws)
+ else:
out = minimizer_Alpine02.minimize(method='ampgo', **kws)
out_x = np.array([out.params['x0'].value, out.params['x1'].value])
diff --git a/tests/test_basinhopping.py b/tests/test_basinhopping.py
index 8e0776c..89b7bad 100644
--- a/tests/test_basinhopping.py
+++ b/tests/test_basinhopping.py
@@ -2,6 +2,7 @@
import numpy as np
from numpy.testing import assert_allclose
import pytest
+from scipy import __version__ as scipy_version
from scipy.optimize import basinhopping
import lmfit
@@ -61,6 +62,11 @@ def test_basinhopping_2d_lmfit_vs_scipy():
assert_allclose(out.params['x0'].value, ret.x[0], rtol=1e-5)
assert_allclose(out.params['x1'].value, ret.x[1], rtol=1e-5)
+ # FIXME: update when SciPy requirement is >= 1.8
+ if int(scipy_version.split('.')[1]) >= 8:
+ assert 'target_accept_rate' in out.call_kws
+ assert 'stepwise_factor' in out.call_kws
+
def test_basinhopping_Alpine02(minimizer_Alpine02):
"""Test basinhopping on Alpine02 function."""
diff --git a/tests/test_builtin_models.py b/tests/test_builtin_models.py
index 904684c..aedde97 100644
--- a/tests/test_builtin_models.py
+++ b/tests/test_builtin_models.py
@@ -8,6 +8,7 @@ import pytest
from scipy.optimize import fsolve
from lmfit import lineshapes, models
+from lmfit.models import GaussianModel
def check_height_fwhm(x, y, lineshape, model):
@@ -19,6 +20,8 @@ def check_height_fwhm(x, y, lineshape, model):
mu = out.params['center'].value
if lineshape is lineshapes.lognormal:
cen = np.exp(mu - out.params['sigma']**2)
+ elif lineshape is lineshapes.pearson4:
+ cen = out.params['position']
else:
cen = mu
@@ -65,6 +68,7 @@ def test_height_fwhm_calculation(peakdata):
y = peakdata[1]
check_height_fwhm(x, y, lineshapes.voigt, models.VoigtModel())
check_height_fwhm(x, y, lineshapes.pvoigt, models.PseudoVoigtModel())
+ check_height_fwhm(x, y, lineshapes.pearson4, models.Pearson4Model())
check_height_fwhm(x, y, lineshapes.pearson7, models.Pearson7Model())
check_height_fwhm(x, y, lineshapes.moffat, models.MoffatModel())
check_height_fwhm(x, y, lineshapes.students_t, models.StudentsTModel())
@@ -111,6 +115,10 @@ def test_height_and_fwhm_expression_evalution_in_builtin_models():
params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, beta=0.0)
params.update_constraints()
+ mod = models.Pearson4Model()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, expon=1.0, skew=5.0)
+ params.update_constraints()
+
mod = models.Pearson7Model()
params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, expon=1.0)
params.update_constraints()
@@ -214,6 +222,12 @@ def test_guess_modelparams():
assert_allclose(pars['l_center'].value, 0.25, rtol=1)
assert_allclose(pars['l_sigma'].value, 1.3, rtol=1)
+ mod = models.Pearson4Model(prefix='g_')
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['g_amplitude'].value, 3, rtol=2)
+ assert_allclose(pars['g_center'].value, 0.25, rtol=1)
+ assert_allclose(pars['g_sigma'].value, 1.3, rtol=1)
+
mod = models.SplitLorentzianModel(prefix='s_')
pars = mod.guess(y, x=x)
assert_allclose(pars['s_amplitude'].value, 3, rtol=2)
@@ -287,10 +301,12 @@ def test_guess_from_peak2d():
assert np.abs((guess_increasing_x[param].value - value)/value) < 0.5
-def test_DonaichModel_emits_futurewarning():
- """Assert that using the wrong spelling emits a FutureWarning."""
- msg = ('Please correct the name of your built-in model: DonaichModel --> '
- 'DoniachModel. The incorrect spelling will be removed in a later '
- 'release.')
- with pytest.warns(FutureWarning, match=msg):
- models.DonaichModel()
+def test_guess_requires_x():
+ """Regression test for GH #747."""
+ x = np.arange(100)
+ y = np.exp(-(x-50)**2/(2*10**2))
+
+ mod = GaussianModel()
+ msg = r"guess\(\) missing 1 required positional argument: 'x'"
+ with pytest.raises(TypeError, match=msg):
+ mod.guess(y)
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
index d133ede..78e22c9 100644
--- a/tests/test_confidence.py
+++ b/tests/test_confidence.py
@@ -191,7 +191,6 @@ def test_confidence_exceptions(data, pars):
lmfit.conf_interval(minimizer, out_lsq)
-@pytest.mark.xfail("np.__version__ == '1.20.0'") # FIXME
def test_confidence_warnings(data, pars):
"""Make sure the proper warnings are emitted when needed."""
minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
@@ -204,7 +203,7 @@ def test_confidence_warnings(data, pars):
def test_confidence_with_trace(data, pars):
"""Test calculation of confidence intervals with trace."""
- minimizer = lmfit.Minimizer(residual, pars, fcn_args=(data))
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
out = minimizer.leastsq()
ci, tr = lmfit.conf_interval(minimizer, out, sigmas=[0.6827], trace=True)
@@ -254,7 +253,7 @@ def test_confidence_prob_func(data, pars):
nonlocal called
called += 1
nfree = best_fit.nfree
- nfix = best_fit.nfree - new_fit.nfree
+ nfix = best_fit.nvarys - new_fit.nvarys
dchi = new_fit.chisqr / best_fit.chisqr - 1.0
return f.cdf(dchi * nfree / nfix, nfix, nfree)
diff --git a/tests/test_covariance_matrix.py b/tests/test_covariance_matrix.py
index 55d1b85..8281029 100644
--- a/tests/test_covariance_matrix.py
+++ b/tests/test_covariance_matrix.py
@@ -53,7 +53,7 @@ def test_bounded_parameters():
[9.45395985e-06, 1.84110424e-07, -2.90588963e-07, 7.19107184e-08],
[-4.33997922e-05, -2.90588963e-07, 9.53427031e-05, -2.37750362e-05],
[1.07362106e-05, 7.19107184e-08, -2.37750362e-05, 9.60952336e-06]])
- assert_allclose(result.covar, cov_x, rtol=1e-6)
+ assert_allclose(result.covar, cov_x, rtol=1.5e-6)
# assert that stderr and correlations are correct [cf. lmfit v0.9.10]
assert_almost_equal(result.params['amp'].stderr, 0.03773967, decimal=6)
@@ -131,7 +131,7 @@ def test_numdifftools_no_bounds(fit_method):
assert_allclose(vals_ndt, vals, rtol=0.1)
assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5)
- # assert that parameter uncertaintes from leastsq and calculated from
+ # assert that parameter uncertainties from leastsq and calculated from
# the covariance matrix using numdifftools are very similar
stderr = [result.params[p].stderr for p in result.params.valuesdict()]
stderr_ndt = [result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()]
@@ -154,8 +154,6 @@ def test_numdifftools_no_bounds(fit_method):
'shgo', 'dual_annealing'])
def test_numdifftools_with_bounds(fit_method):
pytest.importorskip("numdifftools")
- if fit_method in ['shgo', 'dual_annealing']:
- pytest.importorskip("scipy", minversion="1.2")
# load data to be fitted
data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples',
@@ -181,7 +179,7 @@ def test_numdifftools_with_bounds(fit_method):
assert_allclose(vals_ndt, vals, rtol=0.1)
assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5)
- # assert that parameter uncertaintes from leastsq and calculated from
+ # assert that parameter uncertainties from leastsq and calculated from
# the covariance matrix using numdifftools are very similar
stderr = [result.params[p].stderr for p in result.params.valuesdict()]
stderr_ndt = [result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()]
diff --git a/tests/test_dual_annealing.py b/tests/test_dual_annealing.py
index 8279d18..c3e5ca0 100644
--- a/tests/test_dual_annealing.py
+++ b/tests/test_dual_annealing.py
@@ -2,14 +2,11 @@
import numpy as np
from numpy.testing import assert_allclose
-import pytest
import scipy
+from scipy import __version__ as scipy_version
import lmfit
-# dual_annealing algorithm is present in SciPy >= 1.2
-pytest.importorskip("scipy", minversion="1.2")
-
def eggholder(x):
return (-(x[1] + 47.0) * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
@@ -57,6 +54,13 @@ def test_da_Alpine02(minimizer_Alpine02):
assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
assert out.method == 'dual_annealing'
+ # FIXME: update when SciPy requirement is >= 1.8
+ # ``local_search_options`` deprecated in favor of ``minimizer_kwargs``
+ if int(scipy_version.split('.')[1]) >= 8:
+ assert 'minimizer_kwargs' in out.call_kws
+ else:
+ assert 'local_search_options' in out.call_kws
+
def test_da_bounds(minimizer_Alpine02):
"""Test dual_annealing algorithm with bounds."""
diff --git a/tests/test_itercb.py b/tests/test_itercb.py
index 884c6cf..16901a3 100644
--- a/tests/test_itercb.py
+++ b/tests/test_itercb.py
@@ -4,6 +4,7 @@ import numpy as np
import pytest
from lmfit.lineshapes import gaussian
+from lmfit.minimizer import Minimizer
from lmfit.models import GaussianModel, LinearModel
try:
@@ -19,6 +20,15 @@ y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
y -= 0.20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))
mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+def residual(pars, x, data):
+ parvals = pars.valuesdict()
+ gauss = gaussian(x, parvals['peak_amplitude'], parvals['peak_center'],
+ parvals['peak_sigma'])
+ linear = parvals['bkg_slope']*x + parvals['bkg_intercept']
+ return data - gauss - linear
+
+
pars = mod.make_params(peak_amplitude=21.0, peak_center=7.0,
peak_sigma=2.0, bkg_intercept=2, bkg_slope=0.0)
@@ -32,19 +42,89 @@ pars['peak_sigma'].set(min=0.5, max=2)
def per_iteration(pars, iteration, resid, *args, **kws):
"""Iteration callback, will abort at iteration 23."""
- return iteration == 23
+ return iteration == 17
+
+
+fitmethods = ['ampgo', 'brute', 'basinhopping', 'differential_evolution',
+ 'leastsq', 'least_squares', 'nelder', 'shgo', 'dual_annealing']
@pytest.mark.parametrize("calc_covar", calc_covar_options)
-@pytest.mark.parametrize("method", ['ampgo', 'brute', 'basinhopping',
- 'differential_evolution', 'leastsq',
- 'least_squares', 'nelder'])
-def test_itercb(method, calc_covar):
+@pytest.mark.parametrize("method", fitmethods)
+def test_itercb_model_class(method, calc_covar):
"""Test the iteration callback for all solvers."""
out = mod.fit(y, pars, x=x, method=method, iter_cb=per_iteration,
calc_covar=calc_covar)
- assert out.nfev == 23
+ assert out.nfev == 17
+ assert out.aborted
+ assert not out.errorbars
+ assert not out.success
+
+
+@pytest.mark.parametrize("calc_covar", calc_covar_options)
+@pytest.mark.parametrize("method", fitmethods)
+def test_itercb_minimizer_class(method, calc_covar):
+ """Test the iteration callback for all solvers."""
+ if method in ('nelder', 'differential_evolution'):
+ pytest.xfail("scalar_minimizers behave differently, but shouldn't!!")
+
+ mini = Minimizer(residual, pars, fcn_args=(x, y), iter_cb=per_iteration,
+ calc_covar=calc_covar)
+ out = mini.minimize(method=method)
+
+ assert out.nfev == 17
assert out.aborted
assert not out.errorbars
assert not out.success
+ assert mini._abort
+
+
+fitmethods = ['leastsq', 'least_squares']
+
+
+@pytest.mark.parametrize("method", fitmethods)
+def test_itercb_reset_abort(method):
+ """Regression test for GH Issue #756.
+
+ Make sure that ``self._abort`` is reset to ``False`` at the start of each
+ fit.
+
+ """
+ if method in ('nelder', 'differential_evolution'):
+ pytest.xfail("scalar_minimizers behave differently, but shouldn't!!")
+
+ must_stop = True
+
+ def callback(*args, **kwargs):
+ return must_stop
+
+ # perform minimization with ``iter_cb``
+ out_model = mod.fit(y, pars, x=x, method=method, iter_cb=callback)
+
+ mini = Minimizer(residual, pars, fcn_args=(x, y), iter_cb=callback)
+ out_minimizer = mini.minimize(method=method)
+
+ assert out_model.aborted is must_stop
+ assert out_model.errorbars is not must_stop
+ assert out_model.success is not must_stop
+ assert out_minimizer.aborted is must_stop
+ assert out_minimizer.errorbars is not must_stop
+ assert out_minimizer.success is not must_stop
+ assert mini._abort is must_stop
+
+ # perform another minimization now without ``iter_cb``
+ must_stop = False
+ out_minimizer_no_callback = mini.minimize(method=method)
+ assert out_minimizer_no_callback.aborted is must_stop
+ assert out_minimizer_no_callback.errorbars is not must_stop
+ assert out_minimizer_no_callback.success is not must_stop
+ assert mini._abort is must_stop
+
+ # reset mini._abort to False and call the optimization method directly
+ func = getattr(mini, method)
+ out_no_callback = func()
+ assert out_no_callback.aborted is must_stop
+ assert out_no_callback.errorbars is not must_stop
+ assert out_no_callback.success is not must_stop
+ assert mini._abort is must_stop
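The regression being tested: returning True from `iter_cb` aborts the fit, and the internal `_abort` flag must be cleared whenever a new fit starts. A condensed sketch of the callback pattern (illustrative only):

    import numpy as np
    from lmfit import Minimizer, Parameters

    def residual(pars, x, data):
        v = pars.valuesdict()
        return data - v['amp'] * np.exp(-x / v['tau'])

    def stop_after_ten(params, iteration, resid, *args, **kws):
        return iteration >= 10     # returning True aborts the fit

    x = np.linspace(0, 5, 50)
    data = 3.0 * np.exp(-x / 1.7)
    pars = Parameters()
    pars.add('amp', value=1.0)
    pars.add('tau', value=1.0, min=0.01)

    mini = Minimizer(residual, pars, fcn_args=(x, data),
                     iter_cb=stop_after_ten)
    out = mini.minimize(method='leastsq')
    print(out.aborted, out.success)   # True False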
diff --git a/tests/test_jsonutils.py b/tests/test_jsonutils.py
index 14e6f91..802f045 100644
--- a/tests/test_jsonutils.py
+++ b/tests/test_jsonutils.py
@@ -66,3 +66,22 @@ def test_encode_decode_pandas():
decoded_ser = decode4js(encoded_ser)
assert np.all(pd.Series.eq(obj_ser, decoded_ser))
assert isinstance(decoded_ser, pd.Series)
+
+
+def test_altered_params_json():
+ """Regression test for loading altered JSON Parameters (see GH #739)."""
+ pars = lmfit.Parameters()
+ pars.add('a', 3.0, min=0)
+ pars.add('b', 10.0, max=1000)
+ pars.add('c', 20.0)
+ pars.add('d', expr='c - b/a')
+
+ # mangle JSON as JavaScript or others might:
+ json_rep = pars.dumps().replace('-Infinity', 'null').replace('Infinity', 'null')
+
+ new = lmfit.Parameters()
+ new.loads(json_rep)
+ for vname in ('a', 'b', 'c', 'd'):
+ assert new[vname].value == pars[vname].value
+ assert new[vname].min == pars[vname].min
+ assert new[vname].max == pars[vname].max
diff --git a/tests/test_least_squares.py b/tests/test_least_squares.py
index b9aa64e..4761c7b 100644
--- a/tests/test_least_squares.py
+++ b/tests/test_least_squares.py
@@ -87,7 +87,7 @@ def test_least_squares_cov_x(peakdata, bounds):
assert_allclose(vals, vals_lsq, rtol=1e-5)
assert_allclose(result.chisqr, result_lsq.chisqr)
- # assert that parameter uncertaintes obtained from the leastsq method and
+ # assert that parameter uncertainties obtained from the leastsq method and
# those from the covariance matrix estimated from the Jacobian matrix in
# least_squares are similar
stderr = [result.params[p].stderr for p in result.params.valuesdict()]
diff --git a/tests/test_lineshapes.py b/tests/test_lineshapes.py
index 96cb70f..1d28aad 100644
--- a/tests/test_lineshapes.py
+++ b/tests/test_lineshapes.py
@@ -73,13 +73,8 @@ def test_x_float_value(lineshape):
if par_name != 'x']:
fnc_args.append(sig.parameters[par].default)
- if lineshape in ('step', 'rectangle'):
- msg = r"'float' object does not support item assignment"
- with pytest.raises(TypeError, match=msg):
- fnc_output = func(*fnc_args)
- else:
- fnc_output = func(*fnc_args)
- assert isinstance(fnc_output, float)
+ fnc_output = func(*fnc_args)
+ assert isinstance(fnc_output, float)
rising_form = ['erf', 'logistic', 'atan', 'arctan', 'linear', 'unknown']
@@ -111,6 +106,18 @@ def test_form_argument_step_rectangle(form, lineshape):
assert len(fnc_output) == len(xvals)
+@pytest.mark.parametrize('form', rising_form)
+@pytest.mark.parametrize('lineshape', ['step', 'rectangle'])
+def test_value_step_rectangle(form, lineshape):
+ """Test values at mu1/mu2 for step- and rectangle-functions."""
+ func = getattr(lmfit.lineshapes, lineshape)
+ # at position mu1 we should be at A/2
+ assert_almost_equal(func(0), 0.5)
+ # for a rectangular shape we have the same at mu2
+ if lineshape == 'rectangle':
+ assert_almost_equal(func(1), 0.5)
+
+
thermal_form = ['bose', 'maxwell', 'fermi', 'Bose-Einstein', 'unknown']
@@ -137,14 +144,3 @@ def test_form_argument_thermal_distribution(form):
else:
fnc_output = func(*fnc_args)
assert len(fnc_output) == len(xvals)
-
-
-def test_donaich_emits_futurewarning():
- """Assert that using the wrong spelling emits a FutureWarning."""
- xvals = np.linspace(0, 10, 100)
-
- msg = ('Please correct the name of your lineshape function: donaich --> '
- 'doniach. The incorrect spelling will be removed in a later '
- 'release.')
- with pytest.warns(FutureWarning, match=msg):
- lmfit.lineshapes.donaich(xvals)
diff --git a/tests/test_manypeaks_speed.py b/tests/test_manypeaks_speed.py
index 9734d35..5c51bd5 100644
--- a/tests/test_manypeaks_speed.py
+++ b/tests/test_manypeaks_speed.py
@@ -1,11 +1,10 @@
-#
-# test speed of building complex model
-#
+"""Test speed of building complex model."""
from copy import deepcopy
import sys
import time
import numpy as np
+import pytest
from lmfit import Model
from lmfit.lineshapes import gaussian
@@ -13,11 +12,12 @@ from lmfit.lineshapes import gaussian
sys.setrecursionlimit(2000)
+@pytest.mark.flaky(max_runs=5)
def test_manypeaks_speed():
model = None
t0 = time.time()
for i in np.arange(500):
- g = Model(gaussian, prefix='g%i_' % i)
+ g = Model(gaussian, prefix=f'g{i}_')
if model is None:
model = g
else:
diff --git a/tests/test_model.py b/tests/test_model.py
index a4c2aa3..1c35ed5 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,6 +1,5 @@
"""Tests for the Model, CompositeModel, and ModelResult classes."""
-from collections import OrderedDict
import functools
import sys
import unittest
@@ -12,7 +11,7 @@ import pytest
import lmfit
from lmfit import Model, models
-from lmfit.lineshapes import gaussian
+from lmfit.lineshapes import gaussian, lorentzian
from lmfit.model import get_reducer, propagate_err
from lmfit.models import PseudoVoigtModel
@@ -20,7 +19,7 @@ from lmfit.models import PseudoVoigtModel
@pytest.fixture()
def gmodel():
"""Return a Gaussian model."""
- return Model(lmfit.lineshapes.gaussian)
+ return Model(gaussian)
def test_get_reducer_invalid_option():
@@ -123,11 +122,12 @@ def test_initialize_Model_class_default_arguments(gmodel):
assert gmodel.nan_policy == 'raise'
assert gmodel.name == 'Model(gaussian)'
assert gmodel.opts == {}
+ assert gmodel.def_vals == {'amplitude': 1.0, 'center': 0.0, 'sigma': 1.0}
def test_initialize_Model_class_independent_vars():
"""Test for Model class initialized with independent_vars."""
- model = Model(lmfit.lineshapes.gaussian, independent_vars=['amplitude'])
+ model = Model(gaussian, independent_vars=['amplitude'])
assert model._param_root_names == ['x', 'center', 'sigma']
assert model.param_names == ['x', 'center', 'sigma']
assert model.independent_vars == ['amplitude']
@@ -135,7 +135,7 @@ def test_initialize_Model_class_independent_vars():
def test_initialize_Model_class_param_names():
"""Test for Model class initialized with param_names."""
- model = Model(lmfit.lineshapes.gaussian, param_names=['amplitude'])
+ model = Model(gaussian, param_names=['amplitude'])
assert model._param_root_names == ['amplitude']
assert model.param_names == ['amplitude']
@@ -144,24 +144,28 @@ def test_initialize_Model_class_param_names():
@pytest.mark.parametrize("policy", ['raise', 'omit', 'propagate'])
def test_initialize_Model_class_nan_policy(policy):
"""Test for Model class initialized with nan_policy."""
- model = Model(lmfit.lineshapes.gaussian, nan_policy=policy)
+ model = Model(gaussian, nan_policy=policy)
assert model.nan_policy == policy
def test_initialize_Model_class_prefix():
"""Test for Model class initialized with prefix."""
- model = Model(lmfit.lineshapes.gaussian, prefix='test_')
+ model = Model(gaussian, prefix='test_')
assert model.prefix == 'test_'
assert model._param_root_names == ['amplitude', 'center', 'sigma']
assert model.param_names == ['test_amplitude', 'test_center', 'test_sigma']
assert model.name == "Model(gaussian, prefix='test_')"
+ model = Model(gaussian, prefix=None)
+
+ assert model.prefix == ''
+
def test_initialize_Model_name():
"""Test for Model class initialized with name."""
- model = Model(lmfit.lineshapes.gaussian, name='test_function')
+ model = Model(gaussian, name='test_function')
assert model.name == 'Model(test_function)'
@@ -169,7 +173,7 @@ def test_initialize_Model_name():
def test_initialize_Model_kws():
"""Test for Model class initialized with **kws."""
kws = {'amplitude': 10.0}
- model = Model(lmfit.lineshapes.gaussian,
+ model = Model(gaussian,
independent_vars=['x', 'amplitude'], **kws)
assert model._param_root_names == ['center', 'sigma']
@@ -186,7 +190,7 @@ test_reprstring_data = [(False, 'Model(gaussian)'),
def test_Model_reprstring(option, expected):
"""Test for Model class function _reprstring."""
kws = {'amplitude': 10.0}
- model = Model(lmfit.lineshapes.gaussian,
+ model = Model(gaussian,
independent_vars=['x', 'amplitude'], **kws)
assert model._reprstring(option) == expected
@@ -202,8 +206,7 @@ def test_Model_get_state(gmodel):
assert out[0][0] == 'gaussian'
assert out[0][2:] == ('gaussian', '', ['x'],
- ['amplitude', 'center', 'sigma'], OrderedDict(),
- 'raise', {})
+ ['amplitude', 'center', 'sigma'], {}, 'raise', {})
def test_Model_set_state(gmodel):
@@ -215,7 +218,7 @@ def test_Model_set_state(gmodel):
"""
out = gmodel._get_state()
- new_model = Model(lmfit.lineshapes.lorentzian)
+ new_model = Model(lorentzian)
new_model = new_model._set_state(out)
assert new_model.prefix == gmodel.prefix
@@ -231,7 +234,7 @@ def test_Model_dumps_loads(gmodel):
"""Test for Model class functions dumps and loads.
These function are used when saving/loading the Model class and will be
- tested more thoroughly later.
+ tested more thoroughly in test_model_saveload.py.
"""
model_json = gmodel.dumps()
@@ -277,6 +280,227 @@ def test_Model_copy(gmodel):
gmodel.copy()
+def test__parse_params_func_None():
+ """Test for _parse_params function with func=None."""
+ mod = Model(None)
+
+ assert mod._prefix == ''
+ assert mod.func is None
+ assert mod._func_allargs == []
+ assert mod._func_haskeywords is False
+ assert mod.independent_vars == []
+
+
+def test__parse_params_asteval_functions():
+ """Test for _parse_params function with asteval functions."""
+ # TODO: cannot find a use-case for this....
+ pass
+
+
+def test__parse_params_inspect_signature():
+ """Test for _parse_params function using inspect.signature."""
+ # 1. function with a positional argument
+ def func_var_positional(a, *b):
+ pass
+
+ with pytest.raises(ValueError, match=r"varargs '\*b' is not supported"):
+ Model(func_var_positional)
+
+ # 2. function with a keyword argument
+ def func_keyword(a, b, **c):
+ pass
+
+ mod = Model(func_keyword)
+ assert mod._func_allargs == ['a', 'b']
+ assert mod._func_haskeywords is True
+ assert mod.independent_vars == ['a']
+ assert mod.def_vals == {}
+
+ # 3. function with keyword argument only
+ def func_keyword_only(**b):
+ pass
+
+ mod = Model(func_keyword_only)
+ assert mod._func_allargs == []
+ assert mod._func_haskeywords is True
+ assert mod.independent_vars == []
+ assert mod._param_root_names is None
+
+ # 4. function with default value
+ def func_default_value(a, b, c=10):
+ pass
+
+ mod = Model(func_default_value)
+ assert mod._func_allargs == ['a', 'b', 'c']
+ assert mod._func_haskeywords is False
+ assert mod.independent_vars == ['a']
+
+ assert isinstance(mod.def_vals, dict)
+ assert_allclose(mod.def_vals['c'], 10)
+
+
+def test__parse_params_forbidden_variable_names():
+ """Tests for _parse_params function using invalid variable names."""
+
+ def func_invalid_var(data, a):
+ pass
+
+ def func_invalid_par(a, weights):
+ pass
+
+ msg = r"Invalid independent variable name \('data'\) for function func_invalid_var"
+ with pytest.raises(ValueError, match=msg):
+ Model(func_invalid_var)
+
+ msg = r"Invalid parameter name \('weights'\) for function func_invalid_par"
+ with pytest.raises(ValueError, match=msg):
+ Model(func_invalid_par)
+
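# Editorial sketch (not part of this patch): the ValueError above is avoided
# by renaming the offending argument, since only names that Model.fit() itself
# consumes (e.g. 'data' and 'weights') are reserved:
#
#     def func_ok(xdat, a):
#         return a * xdat
#
#     Model(func_ok)  # 'xdat' is accepted as the independent variable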
+
+input_dtypes = [(np.int32, np.float64), (np.float32, np.float64),
+ (np.complex64, np.complex128), ('list', np.float64),
+ ('tuple', np.float64), ('pandas-real', np.float64),
+ ('pandas-complex', np.complex128)]
+
+
+@pytest.mark.parametrize('input_dtype, expected_dtype', input_dtypes)
+def test_coercion_of_input_data(peakdata, input_dtype, expected_dtype):
+ """Test for coercion of 'data' and 'independent_vars'.
+
+ - 'data' should become 'float64' or 'complex128'
+ - dtype for 'independent_vars' is only changed when the input is a list,
+ tuple, numpy.ndarray, or pandas.Series
+
+ """
+ x, y = peakdata
+ model = lmfit.Model(gaussian)
+ pars = model.make_params()
+
+ if (not lmfit.minimizer.HAS_PANDAS and input_dtype in ['pandas-real',
+ 'pandas-complex']):
+ return
+
+ elif input_dtype == 'pandas-real':
+ result = model.fit(lmfit.model.Series(y, dtype=np.float32), pars,
+ x=lmfit.model.Series(x, dtype=np.float32))
+ elif input_dtype == 'pandas-complex':
+ result = model.fit(lmfit.model.Series(y, dtype=np.complex64), pars,
+ x=lmfit.model.Series(x, dtype=np.complex64))
+ elif input_dtype == 'list':
+ result = model.fit(y.tolist(), pars, x=x.tolist())
+ elif input_dtype == 'tuple':
+ result = model.fit(tuple(y), pars, x=tuple(x))
+ else:
+ result = model.fit(np.asarray(y, dtype=input_dtype), pars,
+ x=np.asarray(x, dtype=input_dtype))
+
+ assert result.__dict__['userkws']['x'].dtype == expected_dtype
+ assert result.__dict__['userargs'][0].dtype == expected_dtype
+
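# Editorial sketch (not part of this patch): the coercion rule exercised above,
# written out for the ndarray case; result.userargs/result.userkws are the
# attributes the final asserts inspect:
#
#     result = model.fit(np.asarray(y, np.float32), pars,
#                        x=np.asarray(x, np.float32))
#     result.userargs[0].dtype   # data promoted to dtype('float64')
#     result.userkws['x'].dtype  # ndarray independent var promoted likewise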
+
+def test_figure_default_title(peakdata):
+ """Test default figure title."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit()
+ assert ax.axes.get_title() == 'Model(pvoigt)'
+
+ ax = result.plot_residuals()
+ assert ax.axes.get_title() == 'Model(pvoigt)'
+
+ fig = result.plot()
+ assert fig.axes[0].get_title() == 'Model(pvoigt)' # default model.name
+ assert fig.axes[1].get_title() == '' # no title for fit subplot
+
+
+def test_figure_title_using_title_keyword_argument(peakdata):
+ """Test setting figure title using title keyword argument."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit(title='test')
+ assert ax.axes.get_title() == 'test'
+
+ ax = result.plot_residuals(title='test')
+ assert ax.axes.get_title() == 'test'
+
+ fig = result.plot(title='test')
+ assert fig.axes[0].get_title() == 'test'
+ assert fig.axes[1].get_title() == '' # no title for fit subplot
+
+
+def test_figure_title_using_title_to_ax_kws(peakdata):
+ """Test setting figure title by supplying ax_{fit,res}_kws."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit(ax_kws={'title': 'ax_kws'})
+ assert ax.axes.get_title() == 'ax_kws'
+
+ ax = result.plot_residuals(ax_kws={'title': 'ax_kws'})
+ assert ax.axes.get_title() == 'ax_kws'
+
+ fig = result.plot(ax_res_kws={'title': 'ax_res_kws'})
+ assert fig.axes[0].get_title() == 'ax_res_kws'
+ assert fig.axes[1].get_title() == ''
+
+ fig = result.plot(ax_fit_kws={'title': 'ax_fit_kws'})
+ assert fig.axes[0].get_title() == 'Model(pvoigt)' # default model.name
+ assert fig.axes[1].get_title() == '' # no title for fit subplot
+
+
+def test_priority_setting_figure_title(peakdata):
+ """Test for setting figure title: title keyword argument has priority."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit(ax_kws={'title': 'ax_kws'}, title='test')
+ assert ax.axes.get_title() == 'test'
+
+ ax = result.plot_residuals(ax_kws={'title': 'ax_kws'}, title='test')
+ assert ax.axes.get_title() == 'test'
+
+ fig = result.plot(ax_res_kws={'title': 'ax_res_kws'}, title='test')
+ assert fig.axes[0].get_title() == 'test'
+ assert fig.axes[1].get_title() == ''
+
+ fig = result.plot(ax_fit_kws={'title': 'ax_fit_kws'}, title='test')
+ assert fig.axes[0].get_title() == 'test'
+ assert fig.axes[1].get_title() == ''
+
+
+def test_guess_requires_x():
+ """Test to make sure that ``guess()`` method requires the argument ``x``.
+
+ The ``guess`` method needs ``x`` values (i.e., the independent variable)
+ to estimate initial parameters, but this was not a required argument.
+ See GH #747.
+
+ """
+ mod = lmfit.model.Model(gaussian)
+
+ msg = r"guess\(\) missing 2 required positional arguments: 'data' and 'x'"
+ with pytest.raises(TypeError, match=msg):
+ mod.guess()
+
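# Editorial sketch (not part of this patch): built-in models implement guess();
# with the now-required 'x' it returns a Parameters object seeded from the data:
#
#     gmod = lmfit.models.GaussianModel()
#     params = gmod.guess(ydata, x=xdata)   # ydata/xdata: hypothetical 1-D arrays
#     result = gmod.fit(ydata, params, x=xdata)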
+
# Below is the content of the original test_model.py file. These tests still
# need to be checked and possibly updated to the pytest-style. They work fine
# though so leave them in for now.
@@ -288,13 +512,6 @@ def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03, err_msg='',
verbose)
-def _skip_if_no_pandas():
- try:
- import pandas # noqa: F401
- except ImportError:
- raise pytest.skip("Skipping tests that require pandas.")
-
-
def firstarg_ndarray(func):
"""a simple wrapper used for testing that wrapped
functions can be model functions"""
@@ -347,7 +564,7 @@ class CommonTests:
result = model.fit(self.data, params, x=self.x)
assert_results_close(result.values, self.true_values())
- # Pass inidividual Parameter objects as kwargs.
+ # Pass individual Parameter objects as kwargs.
kwargs = dict(params.items())
result = self.model.fit(self.data, x=self.x, **kwargs)
assert_results_close(result.values, self.true_values())
@@ -437,7 +654,8 @@ class CommonTests:
assert " chi-square " in report
def test_data_alignment(self):
- _skip_if_no_pandas()
+ pytest.importorskip('pandas')
+
from pandas import Series
# Align data and indep var of different lengths using pandas index.
@@ -537,7 +755,6 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
params[param_name].value = value
self.model.fit(self.data, params, x=self.x)
- @pytest.mark.xfail("np.__version__ == '1.20.0'") # FIXME
def test_extra_param_issues_warning(self):
# The function accepts extra params, Model will warn but not raise.
def flexible_func(x, amplitude, center, sigma, **kwargs):
@@ -764,9 +981,10 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
for _, par in pars.items():
assert len(repr(par)) > 5
+ @pytest.mark.skipif(not lmfit.model._HAS_MATPLOTLIB,
+ reason="requires matplotlib.pyplot")
def test_composite_plotting(self):
# test that a composite model has non-empty best_values
- pytest.importorskip("matplotlib")
import matplotlib
matplotlib.use('Agg')
@@ -789,10 +1007,9 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
pars['g2_amplitude'].set(1)
result = mod.fit(data, params=pars, x=self.x)
- fig, ax = result.plot(show_init=True)
+ fig = result.plot(show_init=True)
assert isinstance(fig, matplotlib.figure.Figure)
- assert isinstance(ax, matplotlib.axes.GridSpec)
comps = result.eval_components(x=self.x)
assert len(comps) == 2
@@ -844,7 +1061,7 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
m.set_param_hint('amp', value=1)
m.set_param_hint('amp', value=25)
- models[i] = Model(func, prefix='mod%i_' % i)
+ models[i] = Model(func, prefix=f'mod{i}_')
models[i].param_hints['amp'] = m.param_hints['amp']
self.assertEqual(models[0].param_hints['amp'],
@@ -879,6 +1096,36 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
self.assertTrue(result.params['sigma'].stderr > 0.02)
self.assertTrue(result.params['sigma'].stderr < 0.50)
+ def test_unprefixed_name_collisions(self):
+ # tests Github Issue 710
+ np.random.seed(0)
+ x = np.linspace(0, 20, 201)
+ y = 6 + x * 0.55 + gaussian(x, 4.5, 8.5, 2.1) + np.random.normal(size=len(x), scale=0.03)
+
+ def myline(x, a, b):
+ return a + b * x
+
+ def mygauss(x, a, b, c):
+ return gaussian(x, a, b, c)
+
+ mod = Model(myline, prefix='line_') + Model(mygauss, prefix='peak_')
+ pars = mod.make_params(line_a=5, line_b=1, peak_a=10, peak_b=10, peak_c=5)
+ pars.add('a', expr='line_a + peak_a')
+
+ result = mod.fit(y, pars, x=x)
+ self.assertTrue(result.params['peak_a'].value > 4)
+ self.assertTrue(result.params['peak_a'].value < 5)
+ self.assertTrue(result.params['peak_b'].value > 8)
+ self.assertTrue(result.params['peak_b'].value < 9)
+ self.assertTrue(result.params['peak_c'].value > 1.5)
+ self.assertTrue(result.params['peak_c'].value < 2.5)
+ self.assertTrue(result.params['line_a'].value > 5.5)
+ self.assertTrue(result.params['line_a'].value < 6.5)
+ self.assertTrue(result.params['line_b'].value > 0.25)
+ self.assertTrue(result.params['line_b'].value < 0.75)
+ self.assertTrue(result.params['a'].value > 10)
+ self.assertTrue(result.params['a'].value < 11)
+
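# Editorial note (not part of this patch): the prefixes keep the component
# parameters distinct ('line_a' vs 'peak_a'), leaving the bare name 'a' free
# for the derived constraint parameter:
#
#     pars.add('a', expr='line_a + peak_a')   # ~6.0 + ~4.5 falls in (10, 11)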
def test_composite_model_with_expr_constrains(self):
"""Smoke test for composite model fitting with expr constraints."""
y = [0, 0, 4, 2, 1, 8, 21, 21, 23, 35, 50, 54, 46, 70, 77, 87, 98,
@@ -1000,6 +1247,21 @@ class TestUserDefiniedModel(CommonTests, unittest.TestCase):
self.assertTrue(abs(result.params['a'].value - 2.0) < 0.05)
self.assertTrue(abs(result.params['b'].value - 3.0) < 0.41)
+ def test_different_independent_vars_composite_model(self):
+ """Regression test for different independent variables in CompositeModel.
+
+ See: https://github.com/lmfit/lmfit-py/discussions/787
+
+ """
+ def two_independent_vars(y, z, a):
+ return a * y + z
+
+ BackgroundModel = Model(two_independent_vars,
+ independent_vars=["y", "z"], prefix="yz_")
+ PeakModel = Model(gaussian, independent_vars=["x"], prefix="x_")
+ CompModel = BackgroundModel + PeakModel
+ assert CompModel.independent_vars == ['x', 'y', 'z']
+
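# Editorial sketch (not part of this patch): evaluating such a composite means
# passing every independent variable by keyword:
#
#     pars = CompModel.make_params(yz_a=1, x_amplitude=1, x_center=0, x_sigma=1)
#     out = CompModel.eval(pars, x=xarr, y=yarr, z=zarr)  # xarr/yarr/zarr: hypothetical arrays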
class TestLinear(CommonTests, unittest.TestCase):
diff --git a/tests/test_saveload.py b/tests/test_model_saveload.py
index 6accf9f..cd9a31a 100644
--- a/tests/test_saveload.py
+++ b/tests/test_model_saveload.py
@@ -1,4 +1,5 @@
"""Tests for saving/loading Models and ModelResults."""
+
import os
import time
@@ -143,7 +144,7 @@ def test_save_load_modelresult(dill):
text = ''
with open(SAVE_MODELRESULT) as fh:
text = fh.read()
- assert 12000 < len(text) < 15000 # depending on whether dill is present
+ assert 12000 < len(text) < 60000 # depending on whether dill is present
# load the saved ModelResult from file and compare results
result_saved = load_modelresult(SAVE_MODELRESULT)
@@ -186,11 +187,11 @@ def test_saveload_modelresult_exception():
@pytest.mark.parametrize("method", ['leastsq', 'nelder', 'powell', 'cobyla',
- 'bfgs', 'lbfgsb', 'differential_evolution', 'brute',
- 'basinhopping', 'ampgo', 'shgo',
+ 'bfgs', 'lbfgsb', 'differential_evolution',
+ 'brute', 'basinhopping', 'ampgo', 'shgo',
'dual_annealing'])
def test_saveload_modelresult_roundtrip(method):
- """Test for modelresult.loads()/dumps() and repeating that"""
+ """Test for modelresult.loads()/dumps() and repeating that."""
def mfunc(x, a, b):
return a * (x-b)
@@ -201,7 +202,7 @@ def test_saveload_modelresult_roundtrip(method):
np.random.seed(2020)
xx = np.linspace(-5, 5, 201)
- yy = 0.5 * (xx - 0.22) + np.random.normal(scale=0.01, size=len(xx))
+ yy = 0.5 * (xx - 0.22) + np.random.normal(scale=0.01, size=xx.size)
result1 = model.fit(yy, params=params, x=xx, method=method)
@@ -246,9 +247,12 @@ def test_saveload_modelresult_expression_model():
def test_saveload_usersyms():
- """Test save/load of modelresult with non-trivial user symbols,
- this example uses a VoigtModel, wheree `wofz()` is used in a
- constraint expression"""
+ """Test save/load of ModelResult with non-trivial user symbols.
+
+ This example uses a VoigtModel, where `wofz()` is used in a constraint
+ expression.
+
+ """
x = np.linspace(0, 20, 501)
y = gaussian(x, 1.1, 8.5, 2) + lorentzian(x, 1.7, 8.5, 1.5)
np.random.seed(20)
diff --git a/tests/test_model_uncertainties.py b/tests/test_model_uncertainties.py
index c6f2ecd..4771e4e 100644
--- a/tests/test_model_uncertainties.py
+++ b/tests/test_model_uncertainties.py
@@ -1,10 +1,11 @@
"""Tests of ModelResult.eval_uncertainty()"""
+import os
import numpy as np
from numpy.testing import assert_allclose
from lmfit.lineshapes import gaussian
-from lmfit.models import GaussianModel, LinearModel
+from lmfit.models import ExponentialModel, GaussianModel, LinearModel
def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5):
@@ -93,3 +94,36 @@ def test_gauss_noiselevel():
dely_hinoise = ret2.eval_uncertainty(sigma=1)
assert_allclose(dely_hinoise.mean(), 10*dely_lonoise.mean(), rtol=1.e-2)
+
+
+def test_component_uncertainties():
+ "test dely_comps"
+ y, x = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'NIST_Gauss2.dat')).T
+ model = (GaussianModel(prefix='g1_') +
+ GaussianModel(prefix='g2_') +
+ ExponentialModel(prefix='bkg_'))
+
+ params = model.make_params(bkg_amplitude=100, bkg_decay=80,
+ g1_amplitude=3000,
+ g1_center=100,
+ g1_sigma=10,
+ g2_amplitude=3000,
+ g2_center=150,
+ g2_sigma=10)
+
+ result = model.fit(y, params, x=x)
+ comps = result.eval_components(x=x)
+ dely = result.eval_uncertainty(sigma=3)
+
+ assert 'g1_' in comps
+ assert 'g2_' in comps
+ assert 'bkg_' in comps
+ assert dely.mean() > 0.8
+ assert dely.mean() < 2.0
+ assert result.dely_comps['g1_'].mean() > 0.5
+ assert result.dely_comps['g1_'].mean() < 1.5
+ assert result.dely_comps['g2_'].mean() > 0.5
+ assert result.dely_comps['g2_'].mean() < 1.5
+ assert result.dely_comps['bkg_'].mean() > 0.5
+ assert result.dely_comps['bkg_'].mean() < 1.5
diff --git a/tests/test_models.py b/tests/test_models.py
index 71ab93e..fe2003a 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,6 +1,8 @@
import numpy as np
import lmfit
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel, SplineModel
def _isclose(name, expected_value, fit_value, atol, rtol):
@@ -54,7 +56,7 @@ def testLinear():
check_fit(mod, params, x, y, dict(intercept=2, slope=10))
-def testQuadraric():
+def testQuadratic():
mod = lmfit.models.QuadraticModel()
x = np.linspace(-1, 1, 201)
y = 0.3*x*x + 10*x + 2
@@ -117,3 +119,21 @@ def testSineModel_guess():
assert params['frequency'] < 1.5
assert params['shift'] > 0.0
assert params['shift'] < 1.0
+
+
+def testSplineModel():
+ x = np.linspace(0, 25, 501)
+ y = gaussian(x, amplitude=10, center=16.2, sigma=0.8) + 3 + 0.03*x + np.sin(x/4)
+
+ model = GaussianModel(prefix='peak_')
+ params = model.make_params(amplitude=8, center=16, sigma=1)
+
+ knot_xvals = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25])
+
+ bkg = SplineModel(prefix='bkg_', xknots=knot_xvals)
+ params.update(bkg.guess(y, x))
+
+ model = model + bkg
+
+ pars = dict(peak_amplitude=10, peak_center=16.2, peak_sigma=0.8)
+ check_fit(model, params, x, y, pars, noise_scale=0.05)
diff --git a/tests/test_multidatasets.py b/tests/test_multidatasets.py
index fe69e8a..3486c1a 100644
--- a/tests/test_multidatasets.py
+++ b/tests/test_multidatasets.py
@@ -9,9 +9,9 @@ from lmfit.lineshapes import gaussian
def gauss_dataset(params, i, x):
"""calc gaussian from params for data set i
using simple, hardwired naming convention"""
- amp = params['amp_%i' % (i+1)]
- cen = params['cen_%i' % (i+1)]
- sig = params['sig_%i' % (i+1)]
+ amp = params[f'amp_{i+1}']
+ cen = params[f'cen_{i+1}']
+ sig = params[f'sig_{i+1}']
return gaussian(x, amp, cen, sig)
@@ -46,14 +46,14 @@ def test_multidatasets():
# create 5 sets of parameters, one per data set
pars = Parameters()
for iy, _ in enumerate(data):
- pars.add('amp_%i' % (iy+1), value=0.5, min=0.0, max=200)
- pars.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0)
- pars.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)
+ pars.add(f'amp_{iy+1}', value=0.5, min=0.0, max=200)
+ pars.add(f'cen_{iy+1}', value=0.4, min=-2.0, max=2.0)
+ pars.add(f'sig_{iy+1}', value=0.3, min=0.01, max=3.0)
# but now constrain all values of sigma to have the same value
# by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1
for iy in (2, 3, 4, 5):
- pars['sig_%i' % iy].expr = 'sig_1'
+ pars[f'sig_{iy}'].expr = 'sig_1'
# run the global fit to all the data sets
out = minimize(objective, pars, args=(x, data))
diff --git a/tests/test_nose.py b/tests/test_nose.py
index 0157b24..7980d4a 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -1,9 +1,9 @@
+import sys
import unittest
import numpy as np
from numpy import pi
-from numpy.testing import (assert_allclose, assert_almost_equal, assert_equal,
- dec)
+from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
import pytest
from scipy.optimize import rosen_der
from uncertainties import ufloat
@@ -398,7 +398,9 @@ class CommonMinimizerTest(unittest.TestCase):
self.data[0] = np.nan
for method in SCALAR_METHODS:
- if method == 'differential_evolution':
+ if method == 'cobyla' and sys.platform == 'darwin':
+ pytest.xfail("this aborts Python on macOS...")
+ elif method == 'differential_evolution':
pytest.raises(RuntimeError, self.mini.scalar_minimize,
SCALAR_METHODS[method])
else:
@@ -427,7 +429,6 @@ class CommonMinimizerTest(unittest.TestCase):
assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
assert_equal(_nan_policy(a, handle_inf=False), a)
- @dec.slow
def test_emcee(self):
# test emcee
if not HAS_EMCEE:
@@ -438,7 +439,6 @@ class CommonMinimizerTest(unittest.TestCase):
check_paras(out.params, self.p_true, sig=3)
- @dec.slow
def test_emcee_method_kwarg(self):
# test with emcee as method keyword argument
if not HAS_EMCEE:
@@ -458,7 +458,6 @@ class CommonMinimizerTest(unittest.TestCase):
is_weighted=False)
assert out_unweighted.method == 'emcee'
- @dec.slow
def test_emcee_multiprocessing(self):
# test multiprocessing runs
raise pytest.skip("Pytest fails with multiprocessing")
@@ -480,7 +479,6 @@ class CommonMinimizerTest(unittest.TestCase):
self.mini.emcee(steps=10)
- @dec.slow
def test_emcee_partial_bounds(self):
# mcmc with partial bounds
if not HAS_EMCEE:
@@ -585,7 +583,6 @@ class CommonMinimizerTest(unittest.TestCase):
assert out.chain.shape == (7, 10, 4)
assert out.flatchain.shape == (70, 4)
- @dec.slow
def test_emcee_float(self):
# test that it works if the residuals returns a float, not a vector
if not HAS_EMCEE:
@@ -609,7 +606,6 @@ class CommonMinimizerTest(unittest.TestCase):
burn=50, thin=10, float_behavior='chi2')
check_paras(out.params, self.p_true, sig=3)
- @dec.slow
def test_emcee_seed(self):
# test emcee seeding can reproduce a sampling run
if not HAS_EMCEE:
diff --git a/tests/test_pandas.py b/tests/test_pandas.py
index b89db6a..71af67a 100644
--- a/tests/test_pandas.py
+++ b/tests/test_pandas.py
@@ -22,3 +22,14 @@ def test_pandas_guess_from_peak():
guess = model.guess(ydat, x=xdat)
assert guess_pd == guess
+
+
+def test_pandas_Voigt_model():
+ """Regression test for Series.real reported in GH Issues 727."""
+ data = pandas.read_csv(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'peak.csv'))
+ model = lmfit.models.VoigtModel()
+ params = model.make_params()
+ fit = model.fit(data['y'], params, x=data['x'])
+
+ assert fit.success
diff --git a/tests/test_parameter.py b/tests/test_parameter.py
index 243d7a2..a8bf0e3 100644
--- a/tests/test_parameter.py
+++ b/tests/test_parameter.py
@@ -360,7 +360,7 @@ def test_value_setter(parameter):
par.value = -200.0 # below minimum
assert_allclose(par.value, -100.0)
- del(par._expr_eval)
+ del par._expr_eval
par.value = 10.0
assert_allclose(par.value, 10.0)
assert hasattr(par, '_expr_eval')
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index 287c785..221bb6d 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -4,7 +4,6 @@
from copy import copy, deepcopy
import pickle
-import asteval
import numpy as np
from numpy.testing import assert_allclose
import pytest
@@ -45,21 +44,9 @@ def test_check_ast_errors():
pars.add('par1', expr='2.0*par2')
-def test_parameters_init_with_asteval():
- """Test for initialization of the Parameters class with asteval."""
- ast_int = asteval.Interpreter()
-
- msg = ("The use of the 'asteval' argument for the Parameters class was "
- "deprecated in lmfit v0.9.12 and will be removed in a later "
- "release. Please use the 'usersyms' argument instead!")
- with pytest.warns(FutureWarning, match=msg):
- pars = lmfit.Parameters(asteval=ast_int)
- assert pars._asteval == ast_int
-
-
def test_parameters_init_with_usersyms():
"""Test for initialization of the Parameters class with usersyms."""
- pars = lmfit.Parameters(asteval=None, usersyms={'test': np.sin})
+ pars = lmfit.Parameters(usersyms={'test': np.sin})
assert 'test' in pars._asteval.symtable
@@ -111,6 +98,18 @@ def test_parameters_deepcopy(parameters):
deepcopy_pars._asteval.symtable[unique_symbol])
+def test_parameters_deepcopy_subclass():
+ """Test that a subclass of parameters is preserved when performing a deepcopy"""
+ class ParametersSubclass(lmfit.Parameters):
+ pass
+
+ parameters = ParametersSubclass()
+ assert isinstance(parameters, ParametersSubclass)
+
+ parameterscopy = deepcopy(parameters)
+ assert isinstance(parameterscopy, ParametersSubclass)
+
+
def test_parameters_update(parameters):
"""Tests for updating a Parameters class."""
pars, exp_attr_values_A, exp_attr_values_B = parameters
@@ -274,7 +273,7 @@ def test_pickle_parameters():
# check that unpickling of Parameters is not affected by expr that
# refer to Parameter that are added later on. In the following
# example var_0.expr refers to var_1, which is a Parameter later
- # on in the Parameters OrderedDict.
+ # on in the Parameters dictionary.
p = lmfit.Parameters()
p.add('var_0', value=1)
p.add('var_1', value=2)
diff --git a/tests/test_printfuncs.py b/tests/test_printfuncs.py
index 69e7fcb..b41bf7d 100644
--- a/tests/test_printfuncs.py
+++ b/tests/test_printfuncs.py
@@ -4,10 +4,11 @@ import pytest
import lmfit
from lmfit import (Minimizer, Parameters, ci_report, conf_interval, fit_report,
- report_ci, report_errors, report_fit)
+ report_ci, report_fit)
from lmfit.lineshapes import gaussian
from lmfit.models import GaussianModel
-from lmfit.printfuncs import alphanumeric_sort, getfloat_attr, gformat
+from lmfit.printfuncs import (alphanumeric_sort, fitreport_html_table,
+ getfloat_attr, gformat)
np.random.seed(0)
@@ -222,12 +223,6 @@ def test_report_fit(fitresult, capsys):
assert header in captured.out
-def test_report_errors_deprecated(fitresult):
- """Verify that a FutureWarning is shown when calling report_errors."""
- with pytest.warns(FutureWarning):
- report_errors(params=fitresult.params)
-
-
def test_report_leastsq_no_errorbars(fitresult):
"""Verify correct message when uncertainties could not be estimated."""
# general warning is shown
@@ -333,6 +328,25 @@ def test_report_zero_value_spercent(fitresult):
assert '%' in html_params_split[indx+1]
+@pytest.mark.skipif(not lmfit.minimizer.HAS_EMCEE, reason="requires emcee v3")
+def test_spercent_html_table():
+ """Regression test for GitHub Issue #768."""
+ np.random.seed(2021)
+ x = np.random.uniform(size=100)
+ y = x + 0.1 * np.random.uniform(size=x.size)
+
+ def res(par, x, y):
+ return y - par['k'] * x + par['b']
+
+ params = lmfit.Parameters()
+ params.add('b', 0, vary=False)
+ params.add('k', 1)
+
+ fitter = lmfit.Minimizer(res, params, fcn_args=(x, y))
+ fit_res = fitter.minimize(method='emcee', steps=5)
+ fitreport_html_table(fit_res)
+
+
def test_ci_report(confidence_interval):
"""Verify that the CI report is created when using ci_report."""
report = ci_report(confidence_interval)
diff --git a/tests/test_shgo.py b/tests/test_shgo.py
index 31c5fb2..1504d38 100644
--- a/tests/test_shgo.py
+++ b/tests/test_shgo.py
@@ -4,12 +4,10 @@ import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy
+from scipy import __version__ as scipy_version
import lmfit
-# SHGO algorithm is present in SciPy >= 1.2
-pytest.importorskip("scipy", minversion="1.2")
-
def eggholder(x):
return (-(x[1] + 47.0) * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
@@ -29,7 +27,13 @@ def test_shgo_scipy_vs_lmfit():
bounds = [(-512, 512), (-512, 512)]
result_scipy = scipy.optimize.shgo(eggholder, bounds, n=30,
sampling_method='sobol')
- assert len(result_scipy.xl) == 13
+
+ # in SciPy v1.7.0: "sobol was fixed and is now using scipy.stats.qmc.Sobol"
+ # FIXME: clean this up after we require SciPy >= 1.7.0
+ if int(scipy_version.split('.')[1]) < 7:
+ assert len(result_scipy.xl) == 13
+ else:
+ assert len(result_scipy.xl) == 6
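# Editorial sketch (not part of this patch): int(scipy_version.split('.')[1])
# misorders versions once a SciPy 2.x exists; packaging (assumed available)
# compares releases robustly:
#
#     from packaging.version import parse as parse_version
#     if parse_version(scipy_version) < parse_version("1.7"):
#         assert len(result_scipy.xl) == 13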
pars = lmfit.Parameters()
pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512))
@@ -48,7 +52,13 @@ def test_shgo_scipy_vs_lmfit_2():
bounds = [(-512, 512), (-512, 512)]
result_scipy = scipy.optimize.shgo(eggholder, bounds, n=60, iters=5,
sampling_method='sobol')
- assert len(result_scipy.xl) == 39
+
+ # in SciPy v1.7.0: "sobol was fixed and is now using scipy.stats.qmc.Sobol"
+ # FIXME: clean this up after we require SciPy >= 1.7.0
+ if int(scipy_version.split('.')[1]) < 7:
+ assert len(result_scipy.xl) == 39
+ else:
+ assert len(result_scipy.xl) == 74
pars = lmfit.Parameters()
pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512))
@@ -86,6 +96,12 @@ def test_shgo_sobol_Alpine02(minimizer_Alpine02):
assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
+ # FIXME: update when SciPy requirement is >= 1.7
+ if int(scipy_version.split('.')[1]) >= 7:
+ assert out.call_kws['n'] is None
+ else:
+ assert out.call_kws['n'] == 100
+
def test_shgo_bounds(minimizer_Alpine02):
"""Test SHGO algorithm with bounds."""
diff --git a/tests/test_stepmodel.py b/tests/test_stepmodel.py
index 0ab8a3b..47f6dd8 100644
--- a/tests/test_stepmodel.py
+++ b/tests/test_stepmodel.py
@@ -4,6 +4,7 @@ from lmfit.models import ConstantModel, StepModel
def get_data():
+ np.random.seed(2021)
x = np.linspace(0, 10, 201)
dat = np.ones_like(x)
dat[:48] = 0.0
diff --git a/versioneer.py b/versioneer.py
deleted file mode 100644
index 1040c21..0000000
--- a/versioneer.py
+++ /dev/null
@@ -1,1855 +0,0 @@
-
-# Version: 0.19
-
-"""The Versioneer - like a rocketeer, but for versions.
-
-The Versioneer
-==============
-
-* like a rocketeer, but for versions!
-* https://github.com/python-versioneer/python-versioneer
-* Brian Warner
-* License: Public Domain
-* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
-* [![Latest Version][pypi-image]][pypi-url]
-* [![Build Status][travis-image]][travis-url]
-
-This is a tool for managing a recorded version number in distutils-based
-python projects. The goal is to remove the tedious and error-prone "update
-the embedded version string" step from your release process. Making a new
-release should be as easy as recording a new tag in your version-control
-system, and maybe making new tarballs.
-
-
-## Quick Install
-
-* `pip install versioneer` to somewhere in your $PATH
-* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
-* run `versioneer install` in your source tree, commit the results
-* Verify version information with `python setup.py version`
-
-## Version Identifiers
-
-Source trees come from a variety of places:
-
-* a version-control system checkout (mostly used by developers)
-* a nightly tarball, produced by build automation
-* a snapshot tarball, produced by a web-based VCS browser, like github's
- "tarball from tag" feature
-* a release tarball, produced by "setup.py sdist", distributed through PyPI
-
-Within each source tree, the version identifier (either a string or a number,
-this tool is format-agnostic) can come from a variety of places:
-
-* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
- about recent "tags" and an absolute revision-id
-* the name of the directory into which the tarball was unpacked
-* an expanded VCS keyword ($Id$, etc)
-* a `_version.py` created by some earlier build step
-
-For released software, the version identifier is closely related to a VCS
-tag. Some projects use tag names that include more than just the version
-string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
-needs to strip the tag prefix to extract the version identifier. For
-unreleased software (between tags), the version identifier should provide
-enough information to help developers recreate the same tree, while also
-giving them an idea of roughly how old the tree is (after version 1.2, before
-version 1.3). Many VCS systems can report a description that captures this,
-for example `git describe --tags --dirty --always` reports things like
-"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
-0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
-uncommitted changes).
-
-The version identifier is used for multiple purposes:
-
-* to allow the module to self-identify its version: `myproject.__version__`
-* to choose a name and prefix for a 'setup.py sdist' tarball
-
-## Theory of Operation
-
-Versioneer works by adding a special `_version.py` file into your source
-tree, where your `__init__.py` can import it. This `_version.py` knows how to
-dynamically ask the VCS tool for version information at import time.
-
-`_version.py` also contains `$Revision$` markers, and the installation
-process marks `_version.py` to have this marker rewritten with a tag name
-during the `git archive` command. As a result, generated tarballs will
-contain enough information to get the proper version.
-
-To allow `setup.py` to compute a version too, a `versioneer.py` is added to
-the top level of your source tree, next to `setup.py` and the `setup.cfg`
-that configures it. This overrides several distutils/setuptools commands to
-compute the version when invoked, and changes `setup.py build` and `setup.py
-sdist` to replace `_version.py` with a small static file that contains just
-the generated version data.
-
-## Installation
-
-See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
-
-## Version-String Flavors
-
-Code which uses Versioneer can learn about its version string at runtime by
-importing `_version` from your main `__init__.py` file and running the
-`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
-import the top-level `versioneer.py` and run `get_versions()`.
-
-Both functions return a dictionary with different flavors of version
-information:
-
-* `['version']`: A condensed version string, rendered using the selected
- style. This is the most commonly used value for the project's version
- string. The default "pep440" style yields strings like `0.11`,
- `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
- below for alternative styles.
-
-* `['full-revisionid']`: detailed revision identifier. For Git, this is the
- full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
-
-* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
- commit date in ISO 8601 format. This will be None if the date is not
- available.
-
-* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
- this is only accurate if run in a VCS checkout, otherwise it is likely to
- be False or None
-
-* `['error']`: if the version string could not be computed, this will be set
- to a string describing the problem, otherwise it will be None. It may be
- useful to throw an exception in setup.py if this is set, to avoid e.g.
- creating tarballs with a version string of "unknown".
-
-Some variants are more useful than others. Including `full-revisionid` in a
-bug report should allow developers to reconstruct the exact code being tested
-(or indicate the presence of local changes that should be shared with the
-developers). `version` is suitable for display in an "about" box or a CLI
-`--version` output: it can be easily compared against release notes and lists
-of bugs fixed in various releases.
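# Editorial sketch (not part of the deleted file): reading the flavors described
# above from a versioneer-enabled project:
#
#     import versioneer
#     info = versioneer.get_versions()
#     info['version']          # e.g. '0.11+2.g1076c97.dirty'
#     info['full-revisionid']  # full SHA1 commit id, or None
#     info['dirty']            # True / False / None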
-
-The installer adds the following text to your `__init__.py` to place a basic
-version in `YOURPROJECT.__version__`:
-
- from ._version import get_versions
- __version__ = get_versions()['version']
- del get_versions
-
-## Styles
-
-The setup.cfg `style=` configuration controls how the VCS information is
-rendered into a version string.
-
-The default style, "pep440", produces a PEP440-compliant string, equal to the
-un-prefixed tag name for actual releases, and containing an additional "local
-version" section with more detail for in-between builds. For Git, this is
-TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
-tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
-that this commit is two revisions ("+2") beyond the "0.11" tag. For released
-software (exactly equal to a known tag), the identifier will only contain the
-stripped tag, e.g. "0.11".
-
-Other styles are available. See [details.md](details.md) in the Versioneer
-source tree for descriptions.
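# Editorial sketch (not part of the deleted file): the default "pep440" style
# applied to a hand-built pieces dict (render_pep440 is defined further down):
#
#     pieces = {"closest-tag": "0.11", "distance": 2, "short": "1076c97",
#               "dirty": True, "error": None, "long": "1076c978a8d3"}
#     render_pep440(pieces)    # -> '0.11+2.g1076c97.dirty'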
-
-## Debugging
-
-Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
-to return a version of "0+unknown". To investigate the problem, run `setup.py
-version`, which will run the version-lookup code in a verbose mode, and will
-display the full contents of `get_versions()` (including the `error` string,
-which may help identify what went wrong).
-
-## Known Limitations
-
-Some situations are known to cause problems for Versioneer. This details the
-most significant ones. More can be found on Github
-[issues page](https://github.com/python-versioneer/python-versioneer/issues).
-
-### Subprojects
-
-Versioneer has limited support for source trees in which `setup.py` is not in
-the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
-two common reasons why `setup.py` might not be in the root:
-
-* Source trees which contain multiple subprojects, such as
- [Buildbot](https://github.com/buildbot/buildbot), which contains both
- "master" and "slave" subprojects, each with their own `setup.py`,
- `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
- distributions (and upload multiple independently-installable tarballs).
-* Source trees whose main purpose is to contain a C library, but which also
- provide bindings to Python (and perhaps other languages) in subdirectories.
-
-Versioneer will look for `.git` in parent directories, and most operations
-should get the right version string. However `pip` and `setuptools` have bugs
-and implementation details which frequently cause `pip install .` from a
-subproject directory to fail to find a correct version string (so it usually
-defaults to `0+unknown`).
-
-`pip install --editable .` should work correctly. `setup.py install` might
-work too.
-
-Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
-some later version.
-
-[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
-this issue. The discussion in
-[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
-issue from the Versioneer side in more detail.
-[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
-[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
-pip to let Versioneer work correctly.
-
-Versioneer-0.16 and earlier only looked for a `.git` directory next to the
-`setup.cfg`, so subprojects were completely unsupported with those releases.
-
-### Editable installs with setuptools <= 18.5
-
-`setup.py develop` and `pip install --editable .` allow you to install a
-project into a virtualenv once, then continue editing the source code (and
-test) without re-installing after every change.
-
-"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
-convenient way to specify executable scripts that should be installed along
-with the python package.
-
-These both work as expected when using modern setuptools. When using
-setuptools-18.5 or earlier, however, certain operations will cause
-`pkg_resources.DistributionNotFound` errors when running the entrypoint
-script, which must be resolved by re-installing the package. This happens
-when the install happens with one version, then the egg_info data is
-regenerated while a different version is checked out. Many setup.py commands
-cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
-a different virtualenv), so this can be surprising.
-
-[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
-this one, but upgrading to a newer version of setuptools should probably
-resolve it.
-
-
-## Updating Versioneer
-
-To upgrade your project to a new release of Versioneer, do the following:
-
-* install the new Versioneer (`pip install -U versioneer` or equivalent)
-* edit `setup.cfg`, if necessary, to include any new configuration settings
- indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
-* re-run `versioneer install` in your source tree, to replace
- `SRC/_version.py`
-* commit any changed files
-
-## Future Directions
-
-This tool is designed to make it easily extended to other version-control
-systems: all VCS-specific components are in separate directories like
-src/git/ . The top-level `versioneer.py` script is assembled from these
-components by running make-versioneer.py . In the future, make-versioneer.py
-will take a VCS name as an argument, and will construct a version of
-`versioneer.py` that is specific to the given VCS. It might also take the
-configuration arguments that are currently provided manually during
-installation by editing setup.py . Alternatively, it might go the other
-direction and include code from all supported VCS systems, reducing the
-number of intermediate scripts.
-
-## Similar projects
-
-* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
- dependency
-* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
- versioneer
-
-## License
-
-To make Versioneer easier to embed, all its code is dedicated to the public
-domain. The `_version.py` that it creates is also in the public domain.
-Specifically, both are released under the Creative Commons "Public Domain
-Dedication" license (CC0-1.0), as described in
-https://creativecommons.org/publicdomain/zero/1.0/ .
-
-[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
-[pypi-url]: https://pypi.python.org/pypi/versioneer/
-[travis-image]:
-https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
-[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
-
-"""
-
-import configparser
-import errno
-import json
-import os
-import re
-import subprocess
-import sys
-
-
-class VersioneerConfig:
- """Container for Versioneer configuration parameters."""
-
-
-def get_root():
- """Get the project root directory.
-
- We require that all commands are run from the project root, i.e. the
- directory that contains setup.py, setup.cfg, and versioneer.py .
- """
- root = os.path.realpath(os.path.abspath(os.getcwd()))
- setup_py = os.path.join(root, "setup.py")
- versioneer_py = os.path.join(root, "versioneer.py")
- if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
- # allow 'python path/to/setup.py COMMAND'
- root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
- setup_py = os.path.join(root, "setup.py")
- versioneer_py = os.path.join(root, "versioneer.py")
- if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
- err = ("Versioneer was unable to run the project root directory. "
- "Versioneer requires setup.py to be executed from "
- "its immediate directory (like 'python setup.py COMMAND'), "
- "or in a way that lets it use sys.argv[0] to find the root "
- "(like 'python path/to/setup.py COMMAND').")
- raise VersioneerBadRootError(err)
- try:
- # Certain runtime workflows (setup.py install/develop in a setuptools
- # tree) execute all dependencies in a single python process, so
- # "versioneer" may be imported multiple times, and python's shared
- # module-import table will cache the first one. So we can't use
- # os.path.dirname(__file__), as that will find whichever
- # versioneer.py was first imported, even in later projects.
- me = os.path.realpath(os.path.abspath(__file__))
- me_dir = os.path.normcase(os.path.splitext(me)[0])
- vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
- if me_dir != vsr_dir:
- print("Warning: build in %s is using versioneer.py from %s"
- % (os.path.dirname(me), versioneer_py))
- except NameError:
- pass
- return root
-
-
-def get_config_from_root(root):
- """Read the project setup.cfg file to determine Versioneer config."""
- # This might raise EnvironmentError (if setup.cfg is missing), or
- # configparser.NoSectionError (if it lacks a [versioneer] section), or
- # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
- # the top of versioneer.py for instructions on writing your setup.cfg .
- setup_cfg = os.path.join(root, "setup.cfg")
- parser = configparser.ConfigParser()
- with open(setup_cfg, "r") as f:
- parser.read_file(f)
- VCS = parser.get("versioneer", "VCS") # mandatory
-
- def get(parser, name):
- if parser.has_option("versioneer", name):
- return parser.get("versioneer", name)
- return None
- cfg = VersioneerConfig()
- cfg.VCS = VCS
- cfg.style = get(parser, "style") or ""
- cfg.versionfile_source = get(parser, "versionfile_source")
- cfg.versionfile_build = get(parser, "versionfile_build")
- cfg.tag_prefix = get(parser, "tag_prefix")
- if cfg.tag_prefix in ("''", '""'):
- cfg.tag_prefix = ""
- cfg.parentdir_prefix = get(parser, "parentdir_prefix")
- cfg.verbose = get(parser, "verbose")
- return cfg
-
-
-class NotThisMethod(Exception):
- """Exception raised if a method is not valid for the current scenario."""
-
-
-# these dictionaries contain VCS-specific tools
-LONG_VERSION_PY = {}
-HANDLERS = {}
-
-
-def register_vcs_handler(vcs, method): # decorator
- """Create decorator to mark a method as the handler of a VCS."""
- def decorate(f):
- """Store f in HANDLERS[vcs][method]."""
- if vcs not in HANDLERS:
- HANDLERS[vcs] = {}
- HANDLERS[vcs][method] = f
- return f
- return decorate
-
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
- env=None):
- """Call the given command(s)."""
- assert isinstance(commands, list)
- p = None
- for c in commands:
- try:
- dispcmd = str([c] + args)
- # remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, env=env,
- stdout=subprocess.PIPE,
- stderr=(subprocess.PIPE if hide_stderr
- else None))
- break
- except EnvironmentError:
- e = sys.exc_info()[1]
- if e.errno == errno.ENOENT:
- continue
- if verbose:
- print("unable to run %s" % dispcmd)
- print(e)
- return None, None
- else:
- if verbose:
- print("unable to find command, tried %s" % (commands,))
- return None, None
- stdout = p.communicate()[0].strip().decode()
- if p.returncode != 0:
- if verbose:
- print("unable to run %s (error)" % dispcmd)
- print("stdout was %s" % stdout)
- return None, p.returncode
- return stdout, p.returncode
-
-
-LONG_VERSION_PY['git'] = r'''
-# This file helps to compute a version number in source trees obtained from
-# git-archive tarball (such as those provided by githubs download-from-tag
-# feature). Distribution tarballs (built by setup.py sdist) and build
-# directories (produced by setup.py build) will contain a much shorter file
-# that just contains the computed version number.
-
-# This file is released into the public domain. Generated by
-# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
-
-"""Git implementation of _version.py."""
-
-import errno
-import os
-import re
-import subprocess
-import sys
-
-
-def get_keywords():
- """Get the keywords needed to look up the version information."""
- # these strings will be replaced by git during git-archive.
- # setup.py/versioneer.py will grep for the variable names, so they must
- # each be defined on a line of their own. _version.py will just call
- # get_keywords().
- git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
- git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
- git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
- keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
- return keywords
-
-
-class VersioneerConfig:
- """Container for Versioneer configuration parameters."""
-
-
-def get_config():
- """Create, populate and return the VersioneerConfig() object."""
- # these strings are filled in when 'setup.py versioneer' creates
- # _version.py
- cfg = VersioneerConfig()
- cfg.VCS = "git"
- cfg.style = "%(STYLE)s"
- cfg.tag_prefix = "%(TAG_PREFIX)s"
- cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
- cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
- cfg.verbose = False
- return cfg
-
-
-class NotThisMethod(Exception):
- """Exception raised if a method is not valid for the current scenario."""
-
-
-LONG_VERSION_PY = {}
-HANDLERS = {}
-
-
-def register_vcs_handler(vcs, method): # decorator
- """Create decorator to mark a method as the handler of a VCS."""
- def decorate(f):
- """Store f in HANDLERS[vcs][method]."""
- if vcs not in HANDLERS:
- HANDLERS[vcs] = {}
- HANDLERS[vcs][method] = f
- return f
- return decorate
-
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
- env=None):
- """Call the given command(s)."""
- assert isinstance(commands, list)
- p = None
- for c in commands:
- try:
- dispcmd = str([c] + args)
- # remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, env=env,
- stdout=subprocess.PIPE,
- stderr=(subprocess.PIPE if hide_stderr
- else None))
- break
- except EnvironmentError:
- e = sys.exc_info()[1]
- if e.errno == errno.ENOENT:
- continue
- if verbose:
- print("unable to run %%s" %% dispcmd)
- print(e)
- return None, None
- else:
- if verbose:
- print("unable to find command, tried %%s" %% (commands,))
- return None, None
- stdout = p.communicate()[0].strip().decode()
- if p.returncode != 0:
- if verbose:
- print("unable to run %%s (error)" %% dispcmd)
- print("stdout was %%s" %% stdout)
- return None, p.returncode
- return stdout, p.returncode
-
-
-def versions_from_parentdir(parentdir_prefix, root, verbose):
- """Try to determine the version from the parent directory name.
-
- Source tarballs conventionally unpack into a directory that includes both
- the project name and a version string. We will also support searching up
- two directory levels for an appropriately named parent directory
- """
- rootdirs = []
-
- for i in range(3):
- dirname = os.path.basename(root)
- if dirname.startswith(parentdir_prefix):
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None, "date": None}
- else:
- rootdirs.append(root)
- root = os.path.dirname(root) # up a level
-
- if verbose:
- print("Tried directories %%s but none started with prefix %%s" %%
- (str(rootdirs), parentdir_prefix))
- raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-
-
-@register_vcs_handler("git", "get_keywords")
-def git_get_keywords(versionfile_abs):
- """Extract version information from the given file."""
- # the code embedded in _version.py can just fetch the value of these
- # keywords. When used from setup.py, we don't want to import _version.py,
- # so we do it with a regexp instead. This function is not used from
- # _version.py.
- keywords = {}
- try:
- f = open(versionfile_abs, "r")
- for line in f.readlines():
- if line.strip().startswith("git_refnames ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["refnames"] = mo.group(1)
- if line.strip().startswith("git_full ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["full"] = mo.group(1)
- if line.strip().startswith("git_date ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["date"] = mo.group(1)
- f.close()
- except EnvironmentError:
- pass
- return keywords
-
-
-@register_vcs_handler("git", "keywords")
-def git_versions_from_keywords(keywords, tag_prefix, verbose):
- """Get version information from git keywords."""
- if not keywords:
- raise NotThisMethod("no keywords at all, weird")
- date = keywords.get("date")
- if date is not None:
- # Use only the last line. Previous lines may contain GPG signature
- # information.
- date = date.splitlines()[-1]
-
- # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
- # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
- # -like" string, which we must then edit to make compliant), because
- # it's been around since git-1.5.3, and it's too difficult to
- # discover which version we're using, or to work around using an
- # older one.
- date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
- refnames = keywords["refnames"].strip()
- if refnames.startswith("$Format"):
- if verbose:
- print("keywords are unexpanded, not using")
- raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
- refs = set([r.strip() for r in refnames.strip("()").split(",")])
- # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
- # just "foo-1.0". If we see a "tag: " prefix, prefer those.
- TAG = "tag: "
- tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
- if not tags:
- # Either we're using git < 1.8.3, or there really are no tags. We use
- # a heuristic: assume all version tags have a digit. The old git %%d
- # expansion behaves like git log --decorate=short and strips out the
- # refs/heads/ and refs/tags/ prefixes that would let us distinguish
- # between branches and tags. By ignoring refnames without digits, we
- # filter out many common branch names like "release" and
- # "stabilization", as well as "HEAD" and "master".
- tags = set([r for r in refs if re.search(r'\d', r)])
- if verbose:
- print("discarding '%%s', no digits" %% ",".join(refs - tags))
- if verbose:
- print("likely tags: %%s" %% ",".join(sorted(tags)))
- for ref in sorted(tags):
- # sorting will prefer e.g. "2.0" over "2.0rc1"
- if ref.startswith(tag_prefix):
- r = ref[len(tag_prefix):]
- if verbose:
- print("picking %%s" %% r)
- return {"version": r,
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None,
- "date": date}
- # no suitable tags, so version is "0+unknown", but full hex is still there
- if verbose:
- print("no suitable tags, using unknown + full revision id")
- return {"version": "0+unknown",
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags", "date": None}
-
-
-@register_vcs_handler("git", "pieces_from_vcs")
-def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
- """Get version from 'git describe' in the root of the source tree.
-
- This only gets called if the git-archive 'subst' keywords were *not*
- expanded, and _version.py hasn't already been rewritten with a short
- version string, meaning we're inside a checked out source tree.
- """
- GITS = ["git"]
- if sys.platform == "win32":
- GITS = ["git.cmd", "git.exe"]
-
- out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
- hide_stderr=True)
- if rc != 0:
- if verbose:
- print("Directory %%s not under git control" %% root)
- raise NotThisMethod("'git rev-parse --git-dir' returned error")
-
- # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
- # if there isn't one, this yields HEX[-dirty] (no NUM)
- describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
- "--always", "--long",
- "--match", "%%s*" %% tag_prefix],
- cwd=root)
- # --long was added in git-1.5.5
- if describe_out is None:
- raise NotThisMethod("'git describe' failed")
- describe_out = describe_out.strip()
- full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
- if full_out is None:
- raise NotThisMethod("'git rev-parse' failed")
- full_out = full_out.strip()
-
- pieces = {}
- pieces["long"] = full_out
- pieces["short"] = full_out[:7] # maybe improved later
- pieces["error"] = None
-
- # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
- # TAG might have hyphens.
- git_describe = describe_out
-
- # look for -dirty suffix
- dirty = git_describe.endswith("-dirty")
- pieces["dirty"] = dirty
- if dirty:
- git_describe = git_describe[:git_describe.rindex("-dirty")]
-
- # now we have TAG-NUM-gHEX or HEX
-
- if "-" in git_describe:
- # TAG-NUM-gHEX
- mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
- if not mo:
- # unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%%s'"
- %% describe_out)
- return pieces
-
- # tag
- full_tag = mo.group(1)
- if not full_tag.startswith(tag_prefix):
- if verbose:
- fmt = "tag '%%s' doesn't start with prefix '%%s'"
- print(fmt %% (full_tag, tag_prefix))
- pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
- %% (full_tag, tag_prefix))
- return pieces
- pieces["closest-tag"] = full_tag[len(tag_prefix):]
-
- # distance: number of commits since tag
- pieces["distance"] = int(mo.group(2))
-
- # commit: short hex revision ID
- pieces["short"] = mo.group(3)
-
- else:
- # HEX: no tags
- pieces["closest-tag"] = None
- count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
- cwd=root)
- pieces["distance"] = int(count_out) # total number of commits
-
- # commit date: see ISO-8601 comment in git_versions_from_keywords()
- date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
- cwd=root)[0].strip()
- # Use only the last line. Previous lines may contain GPG signature
- # information.
- date = date.splitlines()[-1]
- pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
-
- return pieces
-
-
-def plus_or_dot(pieces):
- """Return a + if we don't already have one, else return a ."""
- if "+" in pieces.get("closest-tag", ""):
- return "."
- return "+"
-
-
-def render_pep440(pieces):
- """Build up version string, with post-release "local version identifier".
-
- Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
- get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
-
- Exceptions:
- 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += plus_or_dot(pieces)
- rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- else:
- # exception #1
- rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
- pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- return rendered
-
-
-def render_pep440_pre(pieces):
- """TAG[.post0.devDISTANCE] -- No -dirty.
-
- Exceptions:
- 1: no tags. 0.post0.devDISTANCE
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += ".post0.dev%%d" %% pieces["distance"]
- else:
- # exception #1
- rendered = "0.post0.dev%%d" %% pieces["distance"]
- return rendered
-
-
-def render_pep440_post(pieces):
- """TAG[.postDISTANCE[.dev0]+gHEX] .
-
- The ".dev0" means dirty. Note that .dev0 sorts backwards
- (a dirty tree will appear "older" than the corresponding clean one),
- but you shouldn't be releasing software with -dirty anyways.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += plus_or_dot(pieces)
- rendered += "g%%s" %% pieces["short"]
- else:
- # exception #1
- rendered = "0.post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += "+g%%s" %% pieces["short"]
- return rendered
-
-
-def render_pep440_old(pieces):
- """TAG[.postDISTANCE[.dev0]] .
-
- The ".dev0" means dirty.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- else:
- # exception #1
- rendered = "0.post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- return rendered
-
-
-def render_git_describe(pieces):
- """TAG[-DISTANCE-gHEX][-dirty].
-
- Like 'git describe --tags --dirty --always'.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
-
-
-def render_git_describe_long(pieces):
- """TAG-DISTANCE-gHEX[-dirty].
-
- Like 'git describe --tags --dirty --always --long'.
- The distance/hash is unconditional.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
-
-
-def render(pieces, style):
- """Render the given version pieces into the requested style."""
- if pieces["error"]:
- return {"version": "unknown",
- "full-revisionid": pieces.get("long"),
- "dirty": None,
- "error": pieces["error"],
- "date": None}
-
- if not style or style == "default":
- style = "pep440" # the default
-
- if style == "pep440":
- rendered = render_pep440(pieces)
- elif style == "pep440-pre":
- rendered = render_pep440_pre(pieces)
- elif style == "pep440-post":
- rendered = render_pep440_post(pieces)
- elif style == "pep440-old":
- rendered = render_pep440_old(pieces)
- elif style == "git-describe":
- rendered = render_git_describe(pieces)
- elif style == "git-describe-long":
- rendered = render_git_describe_long(pieces)
- else:
- raise ValueError("unknown style '%%s'" %% style)
-
- return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None,
- "date": pieces.get("date")}
-
-
-def get_versions():
- """Get version information or return default if unable to do so."""
- # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
- # __file__, we can work backwards from there to the root. Some
- # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
- # case we can only use expanded keywords.
-
- cfg = get_config()
- verbose = cfg.verbose
-
- try:
- return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
- verbose)
- except NotThisMethod:
- pass
-
- try:
- root = os.path.realpath(__file__)
- # versionfile_source is the relative path from the top of the source
- # tree (where the .git directory might live) to this file. Invert
- # this to find the root from __file__.
- for i in cfg.versionfile_source.split('/'):
- root = os.path.dirname(root)
- except NameError:
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None,
- "error": "unable to find root of source tree",
- "date": None}
-
- try:
- pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
- return render(pieces, cfg.style)
- except NotThisMethod:
- pass
-
- try:
- if cfg.parentdir_prefix:
- return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
- except NotThisMethod:
- pass
-
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None,
- "error": "unable to compute version", "date": None}
-'''
-
-
-@register_vcs_handler("git", "get_keywords")
-def git_get_keywords(versionfile_abs):
- """Extract version information from the given file."""
- # the code embedded in _version.py can just fetch the value of these
- # keywords. When used from setup.py, we don't want to import _version.py,
- # so we do it with a regexp instead. This function is not used from
- # _version.py.
- keywords = {}
- try:
- f = open(versionfile_abs, "r")
- for line in f.readlines():
- if line.strip().startswith("git_refnames ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["refnames"] = mo.group(1)
- if line.strip().startswith("git_full ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["full"] = mo.group(1)
- if line.strip().startswith("git_date ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["date"] = mo.group(1)
- f.close()
- except EnvironmentError:
- pass
- return keywords
-
-
-@register_vcs_handler("git", "keywords")
-def git_versions_from_keywords(keywords, tag_prefix, verbose):
- """Get version information from git keywords."""
- if not keywords:
- raise NotThisMethod("no keywords at all, weird")
- date = keywords.get("date")
- if date is not None:
- # Use only the last line. Previous lines may contain GPG signature
- # information.
- date = date.splitlines()[-1]
-
- # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
- # datestamp. However, we prefer "%ci" (which expands to an "ISO-8601
- # -like" string, which we must then edit to make compliant), because
- # it's been around since git-1.5.3, and it's too difficult to
- # discover which version we're using, or to work around using an
- # older one.
- date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
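- # A hypothetical illustration of that edit (values invented, not upstream
- # output): "2022-12-30 14:06:45 +0100" -> "2022-12-30T14:06:45+0100"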
- refnames = keywords["refnames"].strip()
- if refnames.startswith("$Format"):
- if verbose:
- print("keywords are unexpanded, not using")
- raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
- refs = set([r.strip() for r in refnames.strip("()").split(",")])
- # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
- # just "foo-1.0". If we see a "tag: " prefix, prefer those.
- TAG = "tag: "
- tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
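- # For example, a hypothetical refnames of "(HEAD -> master, tag: 1.0, master)"
- # yields refs {"HEAD -> master", "tag: 1.0", "master"} and tags {"1.0"}.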
- if not tags:
- # Either we're using git < 1.8.3, or there really are no tags. We use
- # a heuristic: assume all version tags have a digit. The old git %d
- # expansion behaves like git log --decorate=short and strips out the
- # refs/heads/ and refs/tags/ prefixes that would let us distinguish
- # between branches and tags. By ignoring refnames without digits, we
- # filter out many common branch names like "release" and
- # "stabilization", as well as "HEAD" and "master".
- tags = set([r for r in refs if re.search(r'\d', r)])
- if verbose:
- print("discarding '%s', no digits" % ",".join(refs - tags))
- if verbose:
- print("likely tags: %s" % ",".join(sorted(tags)))
- for ref in sorted(tags):
- # sorting will prefer e.g. "2.0" over "2.0rc1"
- if ref.startswith(tag_prefix):
- r = ref[len(tag_prefix):]
- if verbose:
- print("picking %s" % r)
- return {"version": r,
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None,
- "date": date}
- # no suitable tags, so version is "0+unknown", but full hex is still there
- if verbose:
- print("no suitable tags, using unknown + full revision id")
- return {"version": "0+unknown",
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags", "date": None}
-
-
-@register_vcs_handler("git", "pieces_from_vcs")
-def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
- """Get version from 'git describe' in the root of the source tree.
-
- This only gets called if the git-archive 'subst' keywords were *not*
- expanded, and _version.py hasn't already been rewritten with a short
- version string, meaning we're inside a checked out source tree.
- """
- GITS = ["git"]
- if sys.platform == "win32":
- GITS = ["git.cmd", "git.exe"]
-
- out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
- hide_stderr=True)
- if rc != 0:
- if verbose:
- print("Directory %s not under git control" % root)
- raise NotThisMethod("'git rev-parse --git-dir' returned error")
-
- # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
- # if there isn't one, this yields HEX[-dirty] (no NUM)
- describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
- "--always", "--long",
- "--match", "%s*" % tag_prefix],
- cwd=root)
- # --long was added in git-1.5.5
- if describe_out is None:
- raise NotThisMethod("'git describe' failed")
- describe_out = describe_out.strip()
- full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
- if full_out is None:
- raise NotThisMethod("'git rev-parse' failed")
- full_out = full_out.strip()
-
- pieces = {}
- pieces["long"] = full_out
- pieces["short"] = full_out[:7] # maybe improved later
- pieces["error"] = None
-
- # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
- # TAG might have hyphens.
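- # For example (hypothetical values): with an empty tag_prefix, a describe
- # output of "1.0.3-8-gabc1234-dirty" parses into closest-tag="1.0.3",
- # distance=8, short="abc1234" and dirty=True.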
- git_describe = describe_out
-
- # look for -dirty suffix
- dirty = git_describe.endswith("-dirty")
- pieces["dirty"] = dirty
- if dirty:
- git_describe = git_describe[:git_describe.rindex("-dirty")]
-
- # now we have TAG-NUM-gHEX or HEX
-
- if "-" in git_describe:
- # TAG-NUM-gHEX
- mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
- if not mo:
- # unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%s'"
- % describe_out)
- return pieces
-
- # tag
- full_tag = mo.group(1)
- if not full_tag.startswith(tag_prefix):
- if verbose:
- fmt = "tag '%s' doesn't start with prefix '%s'"
- print(fmt % (full_tag, tag_prefix))
- pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
- % (full_tag, tag_prefix))
- return pieces
- pieces["closest-tag"] = full_tag[len(tag_prefix):]
-
- # distance: number of commits since tag
- pieces["distance"] = int(mo.group(2))
-
- # commit: short hex revision ID
- pieces["short"] = mo.group(3)
-
- else:
- # HEX: no tags
- pieces["closest-tag"] = None
- count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
- cwd=root)
- pieces["distance"] = int(count_out) # total number of commits
-
- # commit date: see ISO-8601 comment in git_versions_from_keywords()
- date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
- cwd=root)[0].strip()
- # Use only the last line. Previous lines may contain GPG signature
- # information.
- date = date.splitlines()[-1]
- pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
-
- return pieces
-
-
-def do_vcs_install(manifest_in, versionfile_source, ipy):
- """Git-specific installation logic for Versioneer.
-
- For Git, this means creating/changing .gitattributes to mark _version.py
- for export-subst keyword substitution.
- """
- GITS = ["git"]
- if sys.platform == "win32":
- GITS = ["git.cmd", "git.exe"]
- files = [manifest_in, versionfile_source]
- if ipy:
- files.append(ipy)
- try:
- me = __file__
- if me.endswith(".pyc") or me.endswith(".pyo"):
- me = os.path.splitext(me)[0] + ".py"
- versioneer_file = os.path.relpath(me)
- except NameError:
- versioneer_file = "versioneer.py"
- files.append(versioneer_file)
- present = False
- try:
- f = open(".gitattributes", "r")
- for line in f.readlines():
- if line.strip().startswith(versionfile_source):
- if "export-subst" in line.strip().split()[1:]:
- present = True
- f.close()
- except EnvironmentError:
- pass
- if not present:
- f = open(".gitattributes", "a+")
- f.write("%s export-subst\n" % versionfile_source)
- f.close()
- files.append(".gitattributes")
- run_command(GITS, ["add", "--"] + files)
-
-
-def versions_from_parentdir(parentdir_prefix, root, verbose):
- """Try to determine the version from the parent directory name.
-
- Source tarballs conventionally unpack into a directory that includes both
- the project name and a version string. We will also support searching up
- two directory levels for an appropriately named parent directory.
- """
- rootdirs = []
-
- for i in range(3):
- dirname = os.path.basename(root)
- if dirname.startswith(parentdir_prefix):
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None, "date": None}
- else:
- rootdirs.append(root)
- root = os.path.dirname(root) # up a level
-
- if verbose:
- print("Tried directories %s but none started with prefix %s" %
- (str(rootdirs), parentdir_prefix))
- raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-
-
-SHORT_VERSION_PY = """
-# This file was generated by 'versioneer.py' (0.19) from
-# revision-control system data, or from the parent directory name of an
-# unpacked source archive. Distribution tarballs contain a pre-generated copy
-# of this file.
-
-import json
-
-version_json = '''
-%s
-''' # END VERSION_JSON
-
-
-def get_versions():
- return json.loads(version_json)
-"""
-
-
-def versions_from_file(filename):
- """Try to determine the version from _version.py if present."""
- try:
- with open(filename) as f:
- contents = f.read()
- except EnvironmentError:
- raise NotThisMethod("unable to read _version.py")
- mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
- contents, re.M | re.S)
- if not mo:
- mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
- contents, re.M | re.S)
- if not mo:
- raise NotThisMethod("no version_json in _version.py")
- return json.loads(mo.group(1))
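-
-# The JSON extracted above parses into the usual versions dict; a hypothetical
-# example: {"version": "1.1.0", "full-revisionid": "abc1234...", "dirty": False,
-# "error": None, "date": "2022-12-30T14:06:45+0100"}.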
-
-
-def write_to_version_file(filename, versions):
- """Write the given version number to the given _version.py file."""
- os.unlink(filename)
- contents = json.dumps(versions, sort_keys=True,
- indent=1, separators=(",", ": "))
- with open(filename, "w") as f:
- f.write(SHORT_VERSION_PY % contents)
-
- print("set %s to '%s'" % (filename, versions["version"]))
-
-
-def plus_or_dot(pieces):
- """Return a + if we don't already have one, else return a ."""
- if "+" in pieces.get("closest-tag", ""):
- return "."
- return "+"
-
-
-def render_pep440(pieces):
- """Build up version string, with post-release "local version identifier".
-
- Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
- get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
-
- Exceptions:
- 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += plus_or_dot(pieces)
- rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- else:
- # exception #1
- rendered = "0+untagged.%d.g%s" % (pieces["distance"],
- pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- return rendered
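-
-# A hypothetical illustration (values invented): pieces = {"closest-tag":
-# "1.1.0", "distance": 3, "short": "abc1234", "dirty": True} renders as
-# "1.1.0+3.gabc1234.dirty".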
-
-
-def render_pep440_pre(pieces):
- """TAG[.post0.devDISTANCE] -- No -dirty.
-
- Exceptions:
- 1: no tags. 0.post0.devDISTANCE
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += ".post0.dev%d" % pieces["distance"]
- else:
- # exception #1
- rendered = "0.post0.dev%d" % pieces["distance"]
- return rendered
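-
-# A hypothetical illustration: closest-tag "1.1.0" at distance 3 renders as
-# "1.1.0.post0.dev3"; dirtiness is deliberately ignored by this style.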
-
-
-def render_pep440_post(pieces):
- """TAG[.postDISTANCE[.dev0]+gHEX] .
-
- The ".dev0" means dirty. Note that .dev0 sorts backwards
- (a dirty tree will appear "older" than the corresponding clean one),
- but you shouldn't be releasing software with -dirty anyway.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += plus_or_dot(pieces)
- rendered += "g%s" % pieces["short"]
- else:
- # exception #1
- rendered = "0.post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += "+g%s" % pieces["short"]
- return rendered
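-
-# A hypothetical illustration: closest-tag "1.1.0", distance 3, short
-# "abc1234" and a dirty tree render as "1.1.0.post3.dev0+gabc1234".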
-
-
-def render_pep440_old(pieces):
- """TAG[.postDISTANCE[.dev0]] .
-
- The ".dev0" means dirty.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- else:
- # exception #1
- rendered = "0.post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- return rendered
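-
-# A hypothetical illustration: closest-tag "1.1.0", distance 3 and a dirty
-# tree render as "1.1.0.post3.dev0" (no hash in this older style).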
-
-
-def render_git_describe(pieces):
- """TAG[-DISTANCE-gHEX][-dirty].
-
- Like 'git describe --tags --dirty --always'.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
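-
-# A hypothetical illustration: closest-tag "1.1.0", distance 3, short
-# "abc1234" and a dirty tree render as "1.1.0-3-gabc1234-dirty".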
-
-
-def render_git_describe_long(pieces):
- """TAG-DISTANCE-gHEX[-dirty].
-
- Like 'git describe --tags --dirty --always --long'.
- The distance/hash is unconditional.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
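-
-# A hypothetical illustration: even at distance 0 this style keeps the hash,
-# e.g. closest-tag "1.1.0" with short "abc1234" on a clean tree renders as
-# "1.1.0-0-gabc1234".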
-
-
-def render(pieces, style):
- """Render the given version pieces into the requested style."""
- if pieces["error"]:
- return {"version": "unknown",
- "full-revisionid": pieces.get("long"),
- "dirty": None,
- "error": pieces["error"],
- "date": None}
-
- if not style or style == "default":
- style = "pep440" # the default
-
- if style == "pep440":
- rendered = render_pep440(pieces)
- elif style == "pep440-pre":
- rendered = render_pep440_pre(pieces)
- elif style == "pep440-post":
- rendered = render_pep440_post(pieces)
- elif style == "pep440-old":
- rendered = render_pep440_old(pieces)
- elif style == "git-describe":
- rendered = render_git_describe(pieces)
- elif style == "git-describe-long":
- rendered = render_git_describe_long(pieces)
- else:
- raise ValueError("unknown style '%s'" % style)
-
- return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None,
- "date": pieces.get("date")}
-
-
-class VersioneerBadRootError(Exception):
- """The project root directory is unknown or missing key files."""
-
-
-def get_versions(verbose=False):
- """Get the project version from whatever source is available.
-
- Returns dict with two keys: 'version' and 'full'.
- """
- if "versioneer" in sys.modules:
- # see the discussion in cmdclass.py:get_cmdclass()
- del sys.modules["versioneer"]
-
- root = get_root()
- cfg = get_config_from_root(root)
-
- assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
- handlers = HANDLERS.get(cfg.VCS)
- assert handlers, "unrecognized VCS '%s'" % cfg.VCS
- verbose = verbose or cfg.verbose
- assert cfg.versionfile_source is not None, \
- "please set versioneer.versionfile_source"
- assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
-
- versionfile_abs = os.path.join(root, cfg.versionfile_source)
-
- # extract version from first of: _version.py, VCS command (e.g. 'git
- # describe'), parentdir. This is meant to work for developers using a
- # source checkout, for users of a tarball created by 'setup.py sdist',
- # and for users of a tarball/zipball created by 'git archive' or github's
- # download-from-tag feature or the equivalent in other VCSes.
-
- get_keywords_f = handlers.get("get_keywords")
- from_keywords_f = handlers.get("keywords")
- if get_keywords_f and from_keywords_f:
- try:
- keywords = get_keywords_f(versionfile_abs)
- ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
- if verbose:
- print("got version from expanded keyword %s" % ver)
- return ver
- except NotThisMethod:
- pass
-
- try:
- ver = versions_from_file(versionfile_abs)
- if verbose:
- print("got version from file %s %s" % (versionfile_abs, ver))
- return ver
- except NotThisMethod:
- pass
-
- from_vcs_f = handlers.get("pieces_from_vcs")
- if from_vcs_f:
- try:
- pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
- ver = render(pieces, cfg.style)
- if verbose:
- print("got version from VCS %s" % ver)
- return ver
- except NotThisMethod:
- pass
-
- try:
- if cfg.parentdir_prefix:
- ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
- if verbose:
- print("got version from parentdir %s" % ver)
- return ver
- except NotThisMethod:
- pass
-
- if verbose:
- print("unable to compute version")
-
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None, "error": "unable to compute version",
- "date": None}
-
-
-def get_version():
- """Get the short version string for this project."""
- return get_versions()["version"]
-
-
-def get_cmdclass(cmdclass=None):
- """Get the custom setuptools/distutils subclasses used by Versioneer.
-
- If the package uses a different cmdclass (e.g. one from numpy), it
- should be provided as an argument.
- """
- if "versioneer" in sys.modules:
- del sys.modules["versioneer"]
- # this fixes the "python setup.py develop" case (also 'install' and
- # 'easy_install .'), in which subdependencies of the main project are
- # built (using setup.py bdist_egg) in the same python process. Assume
- # a main project A and a dependency B, which use different versions
- # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
- # sys.modules by the time B's setup.py is executed, causing B to run
- # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
- # sandbox that restores sys.modules to its pre-build state, so the
- # parent is protected against the child's "import versioneer". By
- # removing ourselves from sys.modules here, before the child build
- # happens, we protect the child from the parent's versioneer too.
- # Also see https://github.com/python-versioneer/python-versioneer/issues/52
-
- cmds = {} if cmdclass is None else cmdclass.copy()
-
- # we add "version" to both distutils and setuptools
- from distutils.core import Command
-
- class cmd_version(Command):
- description = "report generated version string"
- user_options = []
- boolean_options = []
-
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
- def run(self):
- vers = get_versions(verbose=True)
- print("Version: %s" % vers["version"])
- print(" full-revisionid: %s" % vers.get("full-revisionid"))
- print(" dirty: %s" % vers.get("dirty"))
- print(" date: %s" % vers.get("date"))
- if vers["error"]:
- print(" error: %s" % vers["error"])
- cmds["version"] = cmd_version
-
- # we override "build_py" in both distutils and setuptools
- #
- # most invocation pathways end up running build_py:
- # distutils/build -> build_py
- # distutils/install -> distutils/build ->..
- # setuptools/bdist_wheel -> distutils/install ->..
- # setuptools/bdist_egg -> distutils/install_lib -> build_py
- # setuptools/install -> bdist_egg ->..
- # setuptools/develop -> ?
- # pip install:
- # copies source tree to a tempdir before running egg_info/etc
- # if .git isn't copied too, 'git describe' will fail
- # then does setup.py bdist_wheel, or sometimes setup.py install
- # setup.py egg_info -> ?
-
- # we override different "build_py" commands for both environments
- if 'build_py' in cmds:
- _build_py = cmds['build_py']
- elif "setuptools" in sys.modules:
- from setuptools.command.build_py import build_py as _build_py
- else:
- from distutils.command.build_py import build_py as _build_py
-
- class cmd_build_py(_build_py):
- def run(self):
- root = get_root()
- cfg = get_config_from_root(root)
- versions = get_versions()
- _build_py.run(self)
- # now locate _version.py in the new build/ directory and replace
- # it with an updated value
- if cfg.versionfile_build:
- target_versionfile = os.path.join(self.build_lib,
- cfg.versionfile_build)
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile, versions)
- cmds["build_py"] = cmd_build_py
-
- if "setuptools" in sys.modules:
- from setuptools.command.build_ext import build_ext as _build_ext
- else:
- from distutils.command.build_ext import build_ext as _build_ext
-
- class cmd_build_ext(_build_ext):
- def run(self):
- root = get_root()
- cfg = get_config_from_root(root)
- versions = get_versions()
- _build_ext.run(self)
- if self.inplace:
- # build_ext --inplace will only build extensions in
- # build/lib<..> dir with no _version.py to write to.
- # As in place builds will already have a _version.py
- # in the module dir, we do not need to write one.
- return
- # now locate _version.py in the new build/ directory and replace
- # it with an updated value
- target_versionfile = os.path.join(self.build_lib,
- cfg.versionfile_source)
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile, versions)
- cmds["build_ext"] = cmd_build_ext
-
- if "cx_Freeze" in sys.modules: # cx_freeze enabled?
- from cx_Freeze.dist import build_exe as _build_exe
- # nczeczulin reports that py2exe won't like the pep440-style string
- # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
- # setup(console=[{
- # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
- # "product_version": versioneer.get_version(),
- # ...
-
- class cmd_build_exe(_build_exe):
- def run(self):
- root = get_root()
- cfg = get_config_from_root(root)
- versions = get_versions()
- target_versionfile = cfg.versionfile_source
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile, versions)
-
- _build_exe.run(self)
- os.unlink(target_versionfile)
- with open(cfg.versionfile_source, "w") as f:
- LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG %
- {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
- cmds["build_exe"] = cmd_build_exe
- del cmds["build_py"]
-
- if 'py2exe' in sys.modules: # py2exe enabled?
- from py2exe.distutils_buildexe import py2exe as _py2exe
-
- class cmd_py2exe(_py2exe):
- def run(self):
- root = get_root()
- cfg = get_config_from_root(root)
- versions = get_versions()
- target_versionfile = cfg.versionfile_source
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile, versions)
-
- _py2exe.run(self)
- os.unlink(target_versionfile)
- with open(cfg.versionfile_source, "w") as f:
- LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG %
- {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
- cmds["py2exe"] = cmd_py2exe
-
- # we override different "sdist" commands for both environments
- if 'sdist' in cmds:
- _sdist = cmds['sdist']
- elif "setuptools" in sys.modules:
- from setuptools.command.sdist import sdist as _sdist
- else:
- from distutils.command.sdist import sdist as _sdist
-
- class cmd_sdist(_sdist):
- def run(self):
- versions = get_versions()
- self._versioneer_generated_versions = versions
- # unless we update this, the command will keep using the old
- # version
- self.distribution.metadata.version = versions["version"]
- return _sdist.run(self)
-
- def make_release_tree(self, base_dir, files):
- root = get_root()
- cfg = get_config_from_root(root)
- _sdist.make_release_tree(self, base_dir, files)
- # now locate _version.py in the new base_dir directory
- # (remembering that it may be a hardlink) and replace it with an
- # updated value
- target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile,
- self._versioneer_generated_versions)
- cmds["sdist"] = cmd_sdist
-
- return cmds
-
-
-CONFIG_ERROR = """
-setup.cfg is missing the necessary Versioneer configuration. You need
-a section like:
-
- [versioneer]
- VCS = git
- style = pep440
- versionfile_source = src/myproject/_version.py
- versionfile_build = myproject/_version.py
- tag_prefix =
- parentdir_prefix = myproject-
-
-You will also need to edit your setup.py to use the results:
-
- import versioneer
- setup(version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(), ...)
-
-Please read the docstring in ./versioneer.py for configuration instructions,
-edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
-"""
-
-SAMPLE_CONFIG = """
-# See the docstring in versioneer.py for instructions. Note that you must
-# re-run 'versioneer.py setup' after changing this section, and commit the
-# resulting files.
-
-[versioneer]
-#VCS = git
-#style = pep440
-#versionfile_source =
-#versionfile_build =
-#tag_prefix =
-#parentdir_prefix =
-
-"""
-
-INIT_PY_SNIPPET = """
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
-"""
-
-
-def do_setup():
- """Do main VCS-independent setup function for installing Versioneer."""
- root = get_root()
- try:
- cfg = get_config_from_root(root)
- except (EnvironmentError, configparser.NoSectionError,
- configparser.NoOptionError) as e:
- if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
- print("Adding sample versioneer config to setup.cfg",
- file=sys.stderr)
- with open(os.path.join(root, "setup.cfg"), "a") as f:
- f.write(SAMPLE_CONFIG)
- print(CONFIG_ERROR, file=sys.stderr)
- return 1
-
- print(" creating %s" % cfg.versionfile_source)
- with open(cfg.versionfile_source, "w") as f:
- LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG % {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
-
- ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
- "__init__.py")
- if os.path.exists(ipy):
- try:
- with open(ipy, "r") as f:
- old = f.read()
- except EnvironmentError:
- old = ""
- if INIT_PY_SNIPPET not in old:
- print(" appending to %s" % ipy)
- with open(ipy, "a") as f:
- f.write(INIT_PY_SNIPPET)
- else:
- print(" %s unmodified" % ipy)
- else:
- print(" %s doesn't exist, ok" % ipy)
- ipy = None
-
- # Make sure both the top-level "versioneer.py" and versionfile_source
- # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
- # they'll be copied into source distributions. Pip won't be able to
- # install the package without this.
- manifest_in = os.path.join(root, "MANIFEST.in")
- simple_includes = set()
- try:
- with open(manifest_in, "r") as f:
- for line in f:
- if line.startswith("include "):
- for include in line.split()[1:]:
- simple_includes.add(include)
- except EnvironmentError:
- pass
- # That doesn't cover everything MANIFEST.in can do
- # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
- # it might give some false negatives. Appending redundant 'include'
- # lines is safe, though.
- if "versioneer.py" not in simple_includes:
- print(" appending 'versioneer.py' to MANIFEST.in")
- with open(manifest_in, "a") as f:
- f.write("include versioneer.py\n")
- else:
- print(" 'versioneer.py' already in MANIFEST.in")
- if cfg.versionfile_source not in simple_includes:
- print(" appending versionfile_source ('%s') to MANIFEST.in" %
- cfg.versionfile_source)
- with open(manifest_in, "a") as f:
- f.write("include %s\n" % cfg.versionfile_source)
- else:
- print(" versionfile_source already in MANIFEST.in")
-
- # Make VCS-specific changes. For git, this means creating/changing
- # .gitattributes to mark _version.py for export-subst keyword
- # substitution.
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
- return 0
-
-
-def scan_setup_py():
- """Validate the contents of setup.py against Versioneer's expectations."""
- found = set()
- setters = False
- errors = 0
- with open("setup.py", "r") as f:
- for line in f.readlines():
- if "import versioneer" in line:
- found.add("import")
- if "versioneer.get_cmdclass()" in line:
- found.add("cmdclass")
- if "versioneer.get_version()" in line:
- found.add("get_version")
- if "versioneer.VCS" in line:
- setters = True
- if "versioneer.versionfile_source" in line:
- setters = True
- if len(found) != 3:
- print("")
- print("Your setup.py appears to be missing some important items")
- print("(but I might be wrong). Please make sure it has something")
- print("roughly like the following:")
- print("")
- print(" import versioneer")
- print(" setup( version=versioneer.get_version(),")
- print(" cmdclass=versioneer.get_cmdclass(), ...)")
- print("")
- errors += 1
- if setters:
- print("You should remove lines like 'versioneer.VCS = ' and")
- print("'versioneer.versionfile_source = ' . This configuration")
- print("now lives in setup.cfg, and should be removed from setup.py")
- print("")
- errors += 1
- return errors
-
-
-if __name__ == "__main__":
- cmd = sys.argv[1]
- if cmd == "setup":
- errors = do_setup()
- errors += scan_setup_py()
- if errors:
- sys.exit(1)