-rw-r--r-- .codecov.yml | 17
-rw-r--r-- .gitattributes | 19
-rw-r--r-- .github/CONTRIBUTING.md | 101
-rw-r--r-- .github/ISSUE_TEMPLATE.md | 53
-rw-r--r-- .github/PULL_REQUEST_TEMPLATE.md | 46
-rw-r--r-- .github/dependabot.yml | 11
-rw-r--r-- .gitignore | 29
-rw-r--r-- .pre-commit-config.yaml | 66
-rw-r--r-- AUTHORS.txt | 62
-rw-r--r-- LICENSE | 72
-rw-r--r-- NIST_STRD/Bennett5.dat | 214
-rw-r--r-- NIST_STRD/BoxBOD.dat | 66
-rw-r--r-- NIST_STRD/Chwirut1.dat | 274
-rw-r--r-- NIST_STRD/Chwirut2.dat | 114
-rw-r--r-- NIST_STRD/DanWood.dat | 66
-rw-r--r-- NIST_STRD/ENSO.dat | 228
-rw-r--r-- NIST_STRD/Eckerle4.dat | 95
-rw-r--r-- NIST_STRD/Gauss1.dat | 310
-rw-r--r-- NIST_STRD/Gauss2.dat | 310
-rw-r--r-- NIST_STRD/Gauss3.dat | 310
-rw-r--r-- NIST_STRD/Hahn1.dat | 296
-rw-r--r-- NIST_STRD/Kirby2.dat | 211
-rw-r--r-- NIST_STRD/Lanczos1.dat | 84
-rw-r--r-- NIST_STRD/Lanczos2.dat | 84
-rw-r--r-- NIST_STRD/Lanczos3.dat | 84
-rw-r--r-- NIST_STRD/MGH09.dat | 71
-rw-r--r-- NIST_STRD/MGH10.dat | 76
-rw-r--r-- NIST_STRD/MGH17.dat | 93
-rw-r--r-- NIST_STRD/Misra1a.dat | 74
-rw-r--r-- NIST_STRD/Misra1b.dat | 74
-rw-r--r-- NIST_STRD/Misra1c.dat | 74
-rw-r--r-- NIST_STRD/Misra1d.dat | 74
-rw-r--r-- NIST_STRD/Models | 215
-rw-r--r-- NIST_STRD/Nelson.dat | 188
-rw-r--r-- NIST_STRD/Rat42.dat | 69
-rw-r--r-- NIST_STRD/Rat43.dat | 75
-rw-r--r-- NIST_STRD/Roszman1.dat | 85
-rw-r--r-- NIST_STRD/Thurber.dat | 97
-rw-r--r-- PKG-INFO | 180
-rw-r--r-- README.rst | 143
-rw-r--r-- asv_benchmarking/README.md | 1
-rw-r--r-- asv_benchmarking/asv.conf.json | 75
-rw-r--r-- asv_benchmarking/benchmarks/__init__.py | 0
-rw-r--r-- asv_benchmarking/benchmarks/benchmarks.py | 136
-rw-r--r-- asv_benchmarking/run_benchmark_code.py | 13
-rw-r--r-- azure-pipelines.yml | 300
-rw-r--r-- debian/changelog (renamed from changelog) | 0
-rw-r--r-- debian/control (renamed from control) | 0
-rw-r--r-- debian/copyright (renamed from copyright) | 0
-rw-r--r-- debian/patches/0004-jupyter_sphinx-is-not-yet-working-on-Debian.patch (renamed from patches/0004-jupyter_sphinx-is-not-yet-working-on-Debian.patch) | 0
-rw-r--r-- debian/patches/deactivate_test.patch (renamed from patches/deactivate_test.patch) | 0
-rw-r--r-- debian/patches/series (renamed from patches/series) | 0
-rw-r--r-- debian/python-lmfit-doc.doc-base (renamed from python-lmfit-doc.doc-base) | 0
-rw-r--r-- debian/python3-lmfit.remove (renamed from python3-lmfit.remove) | 0
-rwxr-xr-x debian/rules (renamed from rules) | 0
-rw-r--r-- debian/source/format (renamed from source/format) | 0
-rw-r--r-- debian/tests/control (renamed from tests/control) | 0
-rw-r--r-- debian/upstream/metadata (renamed from upstream/metadata) | 0
-rw-r--r-- debian/watch (renamed from watch) | 0
-rw-r--r-- doc/Makefile | 127
-rw-r--r-- doc/_static/empty | 0
-rw-r--r-- doc/_templates/indexsidebar.html | 19
-rw-r--r-- doc/bounds.rst | 78
-rw-r--r-- doc/builtin_models.rst | 912
-rw-r--r-- doc/conf.py | 180
-rw-r--r-- doc/confidence.rst | 406
-rw-r--r-- doc/constraints.rst | 216
-rw-r--r-- doc/contents.rst | 21
-rwxr-xr-x doc/doc_examples_to_gallery.py | 71
-rw-r--r-- doc/faq.rst | 325
-rw-r--r-- doc/fitting.rst | 742
-rw-r--r-- doc/index.rst | 66
-rw-r--r-- doc/installation.rst | 136
-rw-r--r-- doc/intro.rst | 220
-rw-r--r-- doc/make.bat | 43
-rw-r--r-- doc/model.rst | 1225
-rw-r--r-- doc/parameters.rst | 130
-rw-r--r-- doc/sphinx/ext_imgmath.py | 12
-rw-r--r-- doc/sphinx/ext_mathjax.py | 11
-rw-r--r-- doc/sphinx/theme/sphinx13/basic_layout.html | 212
-rw-r--r-- doc/sphinx/theme/sphinx13/layout.html | 83
-rw-r--r-- doc/sphinx/theme/sphinx13/static/bodybg.png | bin 0 -> 429 bytes
-rw-r--r-- doc/sphinx/theme/sphinx13/static/footerbg.png | bin 0 -> 180 bytes
-rw-r--r-- doc/sphinx/theme/sphinx13/static/headerbg.png | bin 0 -> 189 bytes
-rw-r--r-- doc/sphinx/theme/sphinx13/static/listitem.png | bin 0 -> 149 bytes
-rw-r--r-- doc/sphinx/theme/sphinx13/static/lmfitheader.png | bin 0 -> 9907 bytes
-rw-r--r-- doc/sphinx/theme/sphinx13/static/relbg.png | bin 0 -> 183 bytes
-rw-r--r-- doc/sphinx/theme/sphinx13/static/sphinx13.css | 443
-rw-r--r-- doc/sphinx/theme/sphinx13/theme.conf | 4
-rw-r--r-- doc/support.rst | 30
-rw-r--r-- doc/whatsnew.rst | 558
-rw-r--r-- examples/NIST_Gauss2.dat | 310
-rw-r--r-- examples/README.txt | 10
-rw-r--r-- examples/doc_builtinmodels_nistgauss.py | 45
-rw-r--r-- examples/doc_builtinmodels_nistgauss2.py | 43
-rw-r--r-- examples/doc_builtinmodels_peakmodels.py | 62
-rw-r--r-- examples/doc_builtinmodels_splinemodel.py | 61
-rw-r--r-- examples/doc_builtinmodels_stepmodel.py | 30
-rw-r--r-- examples/doc_confidence_advanced.py | 67
-rw-r--r-- examples/doc_confidence_basic.py | 24
-rw-r--r-- examples/doc_confidence_chi2_maps.py | 117
-rw-r--r-- examples/doc_fitting_emcee.py | 107
-rw-r--r-- examples/doc_fitting_withreport.py | 35
-rw-r--r-- examples/doc_model_composite.py | 66
-rw-r--r-- examples/doc_model_gaussian.py | 27
-rw-r--r-- examples/doc_model_loadmodel.py | 33
-rw-r--r-- examples/doc_model_loadmodelresult.py | 23
-rw-r--r-- examples/doc_model_loadmodelresult2.py | 23
-rw-r--r-- examples/doc_model_savemodel.py | 15
-rw-r--r-- examples/doc_model_savemodelresult.py | 17
-rw-r--r-- examples/doc_model_savemodelresult2.py | 33
-rw-r--r-- examples/doc_model_two_components.py | 34
-rw-r--r-- examples/doc_model_uncertainty.py | 31
-rw-r--r-- examples/doc_model_uncertainty2.py | 79
-rw-r--r-- examples/doc_model_with_iter_callback.py | 36
-rw-r--r-- examples/doc_model_with_nan_policy.py | 33
-rw-r--r-- examples/doc_parameters_basic.py | 55
-rw-r--r-- examples/doc_parameters_valuesdict.py | 46
-rw-r--r-- examples/example_Model_interface.py | 174
-rw-r--r-- examples/example_brute.py | 392
-rw-r--r-- examples/example_complex_resonator_model.py | 125
-rw-r--r-- examples/example_confidence_interval.py | 142
-rw-r--r-- examples/example_detect_outliers.py | 96
-rw-r--r-- examples/example_diffev.py | 58
-rw-r--r-- examples/example_emcee_Model_interface.py | 100
-rw-r--r-- examples/example_expression_model.py | 36
-rw-r--r-- examples/example_fit_multi_datasets.py | 82
-rw-r--r-- examples/example_fit_with_algebraic_constraint.py | 44
-rw-r--r-- examples/example_fit_with_bounds.py | 60
-rw-r--r-- examples/example_fit_with_derivfunc.py | 76
-rw-r--r-- examples/example_fit_with_inequality.py | 57
-rw-r--r-- examples/example_reduce_fcn.py | 68
-rw-r--r-- examples/example_sympy.py | 73
-rw-r--r-- examples/example_two_dimensional_peak.py | 165
-rw-r--r-- examples/example_use_pandas.py | 28
-rw-r--r-- examples/lmfit_emcee_model_selection.py | 197
-rw-r--r-- examples/model1d_gauss.dat | 103
-rw-r--r-- examples/peak.csv | 102
-rw-r--r-- examples/sinedata.dat | 103
-rw-r--r-- examples/test_peak.dat | 404
-rw-r--r-- examples/test_splinepeak.dat | 504
-rw-r--r-- lmfit.egg-info/PKG-INFO | 180
-rw-r--r-- lmfit.egg-info/SOURCES.txt | 186
-rw-r--r-- lmfit.egg-info/dependency_links.txt | 1
-rw-r--r-- lmfit.egg-info/requires.txt | 62
-rw-r--r-- lmfit.egg-info/top_level.txt | 1
-rw-r--r-- lmfit/__init__.py | 45
-rw-r--r-- lmfit/_ampgo.py | 298
-rw-r--r-- lmfit/conf_emcee.py | 500
-rw-r--r-- lmfit/confidence.py | 458
-rw-r--r-- lmfit/jsonutils.py | 150
-rw-r--r-- lmfit/lineshapes.py | 519
-rw-r--r-- lmfit/minimizer.py | 2610
-rw-r--r-- lmfit/model.py | 2297
-rw-r--r-- lmfit/models.py | 1721
-rw-r--r-- lmfit/parameter.py | 1084
-rw-r--r-- lmfit/printfuncs.py | 460
-rw-r--r-- lmfit/version.py | 4
-rwxr-xr-x publish_docs.sh | 42
-rw-r--r-- pyproject.toml | 7
-rw-r--r-- setup.cfg | 104
-rw-r--r-- setup.py | 6
-rw-r--r-- tests/NISTModels.py | 241
-rw-r--r-- tests/__init__.py | 0
-rw-r--r-- tests/conftest.py | 32
-rw-r--r-- tests/gauss_modelresult_lmfit100.sav | 1
-rw-r--r-- tests/test_1variable.py | 49
-rw-r--r-- tests/test_NIST_Strd.py | 297
-rw-r--r-- tests/test_algebraic_constraint.py | 113
-rw-r--r-- tests/test_ampgo.py | 132
-rw-r--r-- tests/test_basicfit.py | 39
-rw-r--r-- tests/test_basinhopping.py | 111
-rw-r--r-- tests/test_bounded_jacobian.py | 70
-rw-r--r-- tests/test_bounds.py | 49
-rw-r--r-- tests/test_brute.py | 290
-rw-r--r-- tests/test_builtin_models.py | 312
-rw-r--r-- tests/test_confidence.py | 261
-rw-r--r-- tests/test_covariance_matrix.py | 243
-rw-r--r-- tests/test_custom_independentvar.py | 45
-rw-r--r-- tests/test_default_kws.py | 24
-rw-r--r-- tests/test_dual_annealing.py | 74
-rw-r--r-- tests/test_itercb.py | 130
-rw-r--r-- tests/test_jsonutils.py | 87
-rw-r--r-- tests/test_least_squares.py | 171
-rw-r--r-- tests/test_lineshapes.py | 146
-rw-r--r-- tests/test_manypeaks_speed.py | 35
-rw-r--r-- tests/test_max_nfev.py | 98
-rw-r--r-- tests/test_minimizer.py | 20
-rw-r--r-- tests/test_model.py | 1435
-rw-r--r-- tests/test_model_saveload.py | 318
-rw-r--r-- tests/test_model_uncertainties.py | 129
-rw-r--r-- tests/test_models.py | 154
-rw-r--r-- tests/test_multidatasets.py | 67
-rw-r--r-- tests/test_nose.py | 698
-rw-r--r-- tests/test_pandas.py | 35
-rw-r--r-- tests/test_parameter.py | 580
-rw-r--r-- tests/test_parameters.py | 636
-rw-r--r-- tests/test_printfuncs.py | 411
-rw-r--r-- tests/test_shgo.py | 129
-rw-r--r-- tests/test_stepmodel.py | 56
200 files changed, 34709 insertions(+), 0 deletions(-)
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000..51c4885
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,17 @@
+codecov:
+  token: 11c28e95-6e64-4829-9887-04e4cd661bf6
+  comment:
+    after_n_builds: 9
+
+coverage:
+  status:
+    project:
+      default:
+        target: auto
+        threshold: 0.5%
+        informational: true
+    patch:
+      default:
+        target: auto
+        threshold: 10%
+        informational: true
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..47dc601
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,19 @@
+lmfit/_version.py export-subst
+
+# Auto detect text files and perform LF normalization
+* text=auto eol=lf
+
+*.jpg -text
+*.png -text
+
+# Standard to msysgit
+*.doc diff=astextplain
+*.DOC diff=astextplain
+*.docx diff=astextplain
+*.DOCX diff=astextplain
+*.dot diff=astextplain
+*.DOT diff=astextplain
+*.pdf diff=astextplain
+*.PDF diff=astextplain
+*.rtf diff=astextplain
+*.RTF diff=astextplain
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
new file mode 100644
index 0000000..9d4065f
--- /dev/null
+++ b/.github/CONTRIBUTING.md
@@ -0,0 +1,101 @@
+## Contributing Code
+
+We would love your help, whether as ideas, documentation, or code. If you have a
+new algorithm or want to add or fix existing code, please do! We try to follow
+the Python coding style conventions (i.e., [PEP 8](https://www.python.org/dev/peps/pep-0008/))
+closely. Additionally, we really want comprehensive docstrings that follow
+[PEP 257](https://www.python.org/dev/peps/pep-0257/) using the
+[numpydoc style](https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard),
+usable offline documentation, and good unit tests for the ``pytest`` framework. A
+good contribution includes all of these. To ensure compliance with our coding
+style, we make use of the [pre-commit](https://pre-commit.com/) framework to run
+several *hooks* when committing code. Please follow the instructions below if
+you intend to contribute to the lmfit repository:
+
+- fork the GitHub repository
+- clone your forked GitHub repository:
+ ``git clone https://github.com/<your-name>/lmfit-py.git``
+- install all (optional) dependencies either using ``pip`` or ``conda``:
+ ``pip install lmfit[all]`` or
+ ``conda install <all packages listed in setup.cfg>``
+- initialize ``pre-commit`` by running ``pre-commit install`` in the lmfit directory
+- create a new branch: ``git checkout -b <awesome_new_feature>``
+- start coding
+- install the latest version of your code using the PEP517/PEP518 way (``python -m build && pip install .``)
+- make sure the test-suite passes locally: run ``pytest`` in the lmfit directory
+- make sure the documentation builds locally: run ``make`` in the doc directory
+- push to your fork: ``git push origin``
+- open a Pull Request on [the lmfit GitHub repository](https://github.com/lmfit/lmfit-py/pulls)
+
+If you need any additional help, please send a message to the
+[mailing list](https://groups.google.com/group/lmfit-py) or use the
+[GitHub discussions page](https://github.com/lmfit/lmfit-py/discussions).
+
+## Using the Mailing List versus GitHub Issues
+
+If you have ***questions, comments, or suggestions*** for lmfit, please use
+the [mailing list](https://groups.google.com/group/lmfit-py) or
+[GitHub discussions page](https://github.com/lmfit/lmfit-py/discussions).
+These provide online conversations that are archived and can be searched
+easily.
+
+If you find a ***bug with the code or documentation***, please use
+[GitHub Issues](https://github.com/lmfit/lmfit-py/issues) to submit a bug report.
+If you have an idea for how to solve the problem and are familiar with Python
+and GitHub, submitting a [Pull Request](https://github.com/lmfit/lmfit-py/pulls)
+would be greatly appreciated (see above).
+
+**If you are at all unsure whether to open an Issue, please start a
+conversation on the discussions page or mailing list.**
+
+Starting the conversation with "How do I do this?" or "Why didn't this work
+the way I expected?" instead of "This doesn't work" is preferred, and will
+better help others with similar questions. Posting about fitting data is never
+inappropriate for the mailing list, but many such questions are not Issues. We
+will try our best to engage in all discussions, but we may simply close
+GitHub Issues that are actually questions.
+
+## Providing an Example with GitHub Issues
+
+If you are reporting a bug with GitHub Issues, we do expect a small, complete,
+working example that illustrates the problem. Yes, this forces you to invest
+some time in writing a careful example. That is intentional. If you need to
+read certain data or have code longer than a few pages, use a
+[GitHub Gist](https://gist.github.com/) and provide a link in the Issue.
+
+Please understand that the point of the example script is to be *read*.
+We may not even run your example. Please do not expect that we know much
+about your problem domain, or that we will read any example in enough detail
+to fully understand what you are trying to do without adequate explanation.
+State the problem, including what result you think you should have
+gotten, and include what you got. If you get a traceback, include the
+entire thing.
+
+In addition, please include information on your operating system, Python
+version and installed dependencies. You can paste the code below in your
+Python shell to get this information:
+
+```python
+import sys, lmfit, numpy, scipy, asteval, uncertainties
+print(f"Python: {sys.version}\n\nlmfit: {lmfit.__version__}, scipy: {scipy.__version__}, numpy: {numpy.__version__},"
+ f"asteval: {asteval.__version__}, uncertainties: {uncertainties.__version__}")
+```
+
+## Using IPython Notebooks to Show Examples
+
+IPython Notebooks are very useful for showing code snippets and outcomes,
+and are a good way to demonstrate a question or raise an issue. Please
+see the above about providing examples. The notebook you provide will be
+*read*, but will probably not be run.
+
+## Secret Code for First Time Issues
+
+If you have not done so in the past, and are going to submit a GitHub Issue,
+you will need to include the phrase
+
+```
+Yes, I read the instructions and I am sure this is a GitHub Issue.
+```
+
+as the First Time Issue Code. If you do not copy and paste this in verbatim,
+we will know that you did not read the instructions.
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000..e73a264
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,53 @@
+### DO NOT IGNORE ###
+
+READ THESE INSTRUCTIONS FULLY. IF YOU DO NOT, YOUR ISSUE WILL BE CLOSED.
+
+If you have not submitted a GitHub Issue to lmfit before, read
+[this](https://github.com/lmfit/lmfit-py/blob/master/.github/CONTRIBUTING.md) first.
+
+***DO NOT USE GitHub Issues for questions; it is only for bugs!***
+
+If you **think** something is an Issue, it probably is not an Issue.
+Getting a "bad fit" definitely does NOT qualify as an Issue! Issues here
+are concerned with errors or problems in the lmfit code.
+
+Use the [mailing list](https://groups.google.com/group/lmfit-py) or
+[GitHub discussions page](https://github.com/lmfit/lmfit-py/discussions) for
+questions about lmfit or things you think might be problems. We don't feel
+obligated to spend our free time helping people who do not respect our
+chosen work processes, so if you ignore this advice and post a question as
+a GitHub Issue anyway, it is quite likely that your Issue will be closed
+and not answered. If you have any doubt, start with a discussion either on
+the mailing list or discussions page.
+
+To submit an Issue, you MUST provide ALL of the following information. If
+you delete any of these sections, your Issue may be closed. If you think one
+of the sections does not apply to your Issue, state that explicitly.
+
+#### First Time Issue Code
+<!-- If this is your first Issue, you will write down the Secret Code for First Time Issues from the CONTRIBUTING.md file linked to above -->
+
+#### Description
+<!-- Provide a short description of the issue, describe the expected outcome, and give the actual result -->
+
+###### A Minimal, Complete, and Verifiable example
+<!-- see, for example, https://stackoverflow.com/help/mcve on how to do this -->
+
+###### Error message:
+<!-- If any, paste the *full* error message inside a code block (starting from line Traceback) -->
+
+```
+Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ...
+```
+
+###### Version information
+<!-- Generate version information with this command in the Python shell and copy the output here:
+import sys, lmfit, numpy, scipy, asteval, uncertainties
+print(f"Python: {sys.version}\n\nlmfit: {lmfit.__version__}, scipy: {scipy.__version__}, numpy: {numpy.__version__},"
+ f"asteval: {asteval.__version__}, uncertainties: {uncertainties.__version__}")
+-->
+
+###### Link(s)
+<!-- If you started a discussion on the lmfit mailing list, discussion page, or Stack Overflow, please provide the relevant link(s) -->
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..763b35f
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,46 @@
+<!--
+Thank you for submitting a PR to lmfit!
+
+To ease the process of reviewing your PR, please make sure to complete the sections below.
+-->
+
+#### Description
+<!--- Describe your changes in detail: why is it required, what problem does it solve? -->
+<!--- If it fixes an open issue, please link to the issue here. -->
+<!--- If applicable, please provide the URL to the discussion on the mailing list. -->
+
+
+###### Type of Changes
+<!--- What type of changes does your code introduce? Put an `x` in all the boxes that apply: -->
+- [ ] Bug fix
+- [ ] New feature
+- [ ] Refactoring / maintenance
+- [ ] Documentation / examples
+
+
+###### Tested on
+<!-- Generate version information with this command in the Python shell and copy the output here:
+import sys, lmfit, numpy, scipy, asteval, uncertainties
+print(f"Python: {sys.version}\n\nlmfit: {lmfit.__version__}, scipy: {scipy.__version__}, "
+      f"numpy: {numpy.__version__}, asteval: {asteval.__version__}, "
+      f"uncertainties: {uncertainties.__version__}")
+-->
+
+
+###### Verification <!-- (delete not applicable items) -->
+Have you
+<!--- Put an `x` in all the boxes that apply OR describe why you think this is unnecessary. -->
+- [ ] included docstrings that follow PEP 257?
+<!-- Please use your favorite linter (e.g., pydocstyle) to check your docstrings. -->
+- [ ] referenced existing Issue and/or provided relevant link to mailing list?
+<!-- Please don't open a new Issue if you are submitting a pull request. -->
+- [ ] verified that existing tests pass locally?
+<!-- Please run the test-suite locally with pytest and make sure it passes. -->
+- [ ] verified that the documentation builds locally?
+<!-- Please build the documentation (i.e., type make in the "doc" directory) and make sure it finishes. -->
+- [ ] squashed/minimized your commits and written descriptive commit messages?
+<!-- We value a clean history with useful commit messages. Ideally, you will take care of this
+ before submitting a PR; otherwise you'll be asked to do so before merging. -->
+- [ ] added or updated existing tests to cover the changes?
+- [ ] updated the documentation and/or added an entry to the release notes (doc/whatsnew.rst)?
+- [ ] added an example?
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..bc48256
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "pip"  # See documentation for possible values
+    directory: "/"  # Location of package manifests
+    schedule:
+      interval: "weekly"
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..88ffc3a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,29 @@
+*.pyc
+*~
+*#
+doc/_build
+doc/examples
+examples/documentation
+doc/*.pdf
+doc/*.dat
+doc/*.csv
+doc/extensions.py
+build
+dist
+lmfit.egg-info
+sandbox/
+*.swp
+.idea/
+.DS_Store
+.ipynb_checkpoints/
+.vscode/
+doc/html
+doc/*.sav
+parameters.sav
+tmpvoigt_modelresult.sav
+examples/*.sav
+.pytest*
+*.coverage*
+*htmlcov/*
+.eggs
+lmfit/version.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..b37176b
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,66 @@
+exclude: 'doc/conf.py'
+
+repos:
+- repo: https://github.com/asottile/pyupgrade
+  rev: v3.3.2
+  hooks:
+  - id: pyupgrade
+    args: [--py37-plus]
+
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.4.0
+  hooks:
+  - id: check-ast
+  - id: check-builtin-literals
+  - id: check-case-conflict
+  - id: check-merge-conflict
+  - id: check-toml
+  - id: debug-statements
+  - id: end-of-file-fixer
+  - id: mixed-line-ending
+  - id: trailing-whitespace
+  - id: fix-encoding-pragma
+    args: [--remove]
+
+- repo: https://github.com/PyCQA/flake8
+  rev: 6.0.0
+  hooks:
+  - id: flake8
+    additional_dependencies: [flake8-deprecated, flake8-mutable]
+
+- repo: https://github.com/PyCQA/isort/
+  rev: 5.12.0
+  hooks:
+  - id: isort
+
+- repo: local
+  hooks:
+  - id: rstcheck
+    name: rstcheck
+    entry: rstcheck --report-level WARNING
+    files: '\.rst$'
+    language: python
+    additional_dependencies: [rstcheck, sphinx]
+
+- repo: https://github.com/pre-commit/pygrep-hooks
+  rev: v1.10.0
+  hooks:
+  - id: rst-backticks
+  - id: rst-directive-colons
+  - id: rst-inline-touching-normal
+  - id: python-check-blanket-noqa
+
+- repo: https://github.com/codespell-project/codespell
+  rev: v2.2.4
+  hooks:
+  - id: codespell
+    files: '\.py$|\.rst$'
+    exclude: 'doc/doc_examples_to_gallery.py'
+    # escaped characters currently do not work correctly,
+    # so \nnumber is considered a spelling error
+    args: ["-L", "nnumber,mone"]
+
+- repo: https://github.com/asottile/yesqa
+  rev: v1.4.0
+  hooks:
+  - id: yesqa
diff --git a/AUTHORS.txt b/AUTHORS.txt
new file mode 100644
index 0000000..7ad1d47
--- /dev/null
+++ b/AUTHORS.txt
@@ -0,0 +1,62 @@
+Many people have contributed to lmfit. The attribution of credit in a
+project such as this is difficult to get perfect, and there are no doubt
+important contributions that are missing or under-represented here. Please
+consider this file as part of the code and documentation that may have bugs
+that need fixing.
+
+Some of the largest and most important contributions (in approximate order
+of size of the contribution to the existing code) are from:
+
+ Matthew Newville wrote the original version and maintains the project.
+
+ Renee Otten wrote the brute force method, implemented the basin-hopping
+ and AMPGO global solvers, implemented uncertainty calculations for scalar
+ minimizers and has greatly improved the code, testing, and documentation
+ and overall project.
+
+ Till Stensitzki wrote the improved estimates of confidence intervals, and
+ contributed many tests, bug fixes, and documentation.
+
+ A. R. J. Nelson added differential_evolution, emcee, and greatly improved
+ the code, docstrings, and overall project.
+
+ Antonino Ingargiola wrote much of the high level Model code and has
+ provided many bug fixes and improvements.
+
+ Daniel B. Allan wrote much of the original version of the high level Model
+ code, and many improvements to the testing and documentation.
+
+ Austen Fox fixed many of the built-in model functions and improved the
+ testing and documentation of these.
+
+ Michal Rawlik added plotting capabilities for Models.
+
+
+ The method used for placing bounds on parameters was derived from the
+ clear description in the MINUIT documentation, and adapted from
+ J. J. Helmus's Python implementation in leastsqbounds.py.
+
+ E. O. Le Bigot wrote the uncertainties package, a version of which was
+ used by lmfit for many years, and is now an external dependency.
+
+ The original AMPGO code came from Andrea Gavana and was adapted for
+ lmfit.
+
+ The propagation of parameter uncertainties to uncertainties in a Model
+ was adapted from the excellent description at
+ https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#confidence-and-prediction-intervals,
+ which references the original work of: J. Wolberg, Data Analysis Using the
+ Method of Least Squares, 2006, Springer.
+
+Additional patches, bug fixes, and suggestions have come from Faustin
+Carter, Christoph Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau,
+nmearl, Gustavo Pasquevich, Clemens Prescher, LiCode, Ben Gamari, Yoav
+Roam, Alexander Stark, Alexandre Beelen, Andrey Aristov, Nicholas Zobrist,
+Ethan Welty, Julius Zimmermann, Mark Dean, Arun Persaud, Ray Osborn, @lneuhaus,
+Marcel Stimberg, Yoshiera Huang, Leon Foks, Sebastian Weigand, Florian LB,
+Michael Hudson-Doyle, Ruben Verweij, @jedzill4, @spalato, Jens Hedegaard Nielsen,
+Martin Majli, Kristian Meyer, @azelcer, Ivan Usov, and many others.
+
+The lmfit code obviously depends on, and owes a very large debt to, the code
+in scipy.optimize. Several discussions on the SciPy-user and lmfit mailing
+lists have also led to improvements in this code.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..13eb336
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,72 @@
+BSD-3
+
+Copyright 2022 Matthew Newville, The University of Chicago
+ Renee Otten, Brandeis University
+ Till Stensitzki, Freie Universitat Berlin
+ A. R. J. Nelson, Australian Nuclear Science and Technology Organisation
+ Antonino Ingargiola, University of California, Los Angeles
+ Daniel B. Allan, Johns Hopkins University
+ Michal Rawlik, Eidgenossische Technische Hochschule, Zurich
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+Some code has been taken from the scipy library, whose license is below.
+
+Copyright (c) 2001, 2002 Enthought, Inc.
+All rights reserved.
+
+Copyright (c) 2003-2019 SciPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ a. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ b. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ c. Neither the name of Enthought nor the names of the SciPy Developers
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+Some code has been taken from the AMPGO library of Andrea Gavana, which was
+released under an MIT license.
diff --git a/NIST_STRD/Bennett5.dat b/NIST_STRD/Bennett5.dat
new file mode 100644
index 0000000..b2004d5
--- /dev/null
+++ b/NIST_STRD/Bennett5.dat
@@ -0,0 +1,214 @@
+NIST/ITL StRD
+Dataset Name: Bennett5 (Bennett5.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 214)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ superconductivity magnetization modeling. The
+ response variable is magnetism, and the predictor
+ variable is the log of time in minutes.
+
+Reference: Bennett, L., L. Swartzendruber, and H. Brown,
+ NIST (1994).
+ Superconductivity Magnetization Modeling.
+
+
+
+
+
+
+Data: 1 Response Variable (y = magnetism)
+ 1 Predictor Variable (x = log[time])
+ 154 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 3 Parameters (b1 to b3)
+
+ y = b1 * (b2+x)**(-1/b3) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = -2000 -1500 -2.5235058043E+03 2.9715175411E+02
+ b2 = 50 45 4.6736564644E+01 1.2448871856E+00
+ b3 = 0.8 0.85 9.3218483193E-01 2.0272299378E-02
+
+Residual Sum of Squares: 5.2404744073E-04
+Residual Standard Deviation: 1.8629312528E-03
+Degrees of Freedom: 151
+Number of Observations: 154
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ -34.834702E0 7.447168E0
+ -34.393200E0 8.102586E0
+ -34.152901E0 8.452547E0
+ -33.979099E0 8.711278E0
+ -33.845901E0 8.916774E0
+ -33.732899E0 9.087155E0
+ -33.640301E0 9.232590E0
+ -33.559200E0 9.359535E0
+ -33.486801E0 9.472166E0
+ -33.423100E0 9.573384E0
+ -33.365101E0 9.665293E0
+ -33.313000E0 9.749461E0
+ -33.260899E0 9.827092E0
+ -33.217400E0 9.899128E0
+ -33.176899E0 9.966321E0
+ -33.139198E0 10.029280E0
+ -33.101601E0 10.088510E0
+ -33.066799E0 10.144430E0
+ -33.035000E0 10.197380E0
+ -33.003101E0 10.247670E0
+ -32.971298E0 10.295560E0
+ -32.942299E0 10.341250E0
+ -32.916302E0 10.384950E0
+ -32.890202E0 10.426820E0
+ -32.864101E0 10.467000E0
+ -32.841000E0 10.505640E0
+ -32.817799E0 10.542830E0
+ -32.797501E0 10.578690E0
+ -32.774300E0 10.613310E0
+ -32.757000E0 10.646780E0
+ -32.733799E0 10.679150E0
+ -32.716400E0 10.710520E0
+ -32.699100E0 10.740920E0
+ -32.678799E0 10.770440E0
+ -32.661400E0 10.799100E0
+ -32.644001E0 10.826970E0
+ -32.626701E0 10.854080E0
+ -32.612202E0 10.880470E0
+ -32.597698E0 10.906190E0
+ -32.583199E0 10.931260E0
+ -32.568699E0 10.955720E0
+ -32.554298E0 10.979590E0
+ -32.539799E0 11.002910E0
+ -32.525299E0 11.025700E0
+ -32.510799E0 11.047980E0
+ -32.499199E0 11.069770E0
+ -32.487598E0 11.091100E0
+ -32.473202E0 11.111980E0
+ -32.461601E0 11.132440E0
+ -32.435501E0 11.152480E0
+ -32.435501E0 11.172130E0
+ -32.426800E0 11.191410E0
+ -32.412300E0 11.210310E0
+ -32.400799E0 11.228870E0
+ -32.392101E0 11.247090E0
+ -32.380501E0 11.264980E0
+ -32.366001E0 11.282560E0
+ -32.357300E0 11.299840E0
+ -32.348598E0 11.316820E0
+ -32.339901E0 11.333520E0
+ -32.328400E0 11.349940E0
+ -32.319698E0 11.366100E0
+ -32.311001E0 11.382000E0
+ -32.299400E0 11.397660E0
+ -32.290699E0 11.413070E0
+ -32.282001E0 11.428240E0
+ -32.273300E0 11.443200E0
+ -32.264599E0 11.457930E0
+ -32.256001E0 11.472440E0
+ -32.247299E0 11.486750E0
+ -32.238602E0 11.500860E0
+ -32.229900E0 11.514770E0
+ -32.224098E0 11.528490E0
+ -32.215401E0 11.542020E0
+ -32.203800E0 11.555380E0
+ -32.198002E0 11.568550E0
+ -32.189400E0 11.581560E0
+ -32.183601E0 11.594420E0
+ -32.174900E0 11.607121E0
+ -32.169102E0 11.619640E0
+ -32.163300E0 11.632000E0
+ -32.154598E0 11.644210E0
+ -32.145901E0 11.656280E0
+ -32.140099E0 11.668200E0
+ -32.131401E0 11.679980E0
+ -32.125599E0 11.691620E0
+ -32.119801E0 11.703130E0
+ -32.111198E0 11.714510E0
+ -32.105400E0 11.725760E0
+ -32.096699E0 11.736880E0
+ -32.090900E0 11.747890E0
+ -32.088001E0 11.758780E0
+ -32.079300E0 11.769550E0
+ -32.073502E0 11.780200E0
+ -32.067699E0 11.790730E0
+ -32.061901E0 11.801160E0
+ -32.056099E0 11.811480E0
+ -32.050301E0 11.821700E0
+ -32.044498E0 11.831810E0
+ -32.038799E0 11.841820E0
+ -32.033001E0 11.851730E0
+ -32.027199E0 11.861550E0
+ -32.024300E0 11.871270E0
+ -32.018501E0 11.880890E0
+ -32.012699E0 11.890420E0
+ -32.004002E0 11.899870E0
+ -32.001099E0 11.909220E0
+ -31.995300E0 11.918490E0
+ -31.989500E0 11.927680E0
+ -31.983700E0 11.936780E0
+ -31.977900E0 11.945790E0
+ -31.972099E0 11.954730E0
+ -31.969299E0 11.963590E0
+ -31.963501E0 11.972370E0
+ -31.957701E0 11.981070E0
+ -31.951900E0 11.989700E0
+ -31.946100E0 11.998260E0
+ -31.940300E0 12.006740E0
+ -31.937401E0 12.015150E0
+ -31.931601E0 12.023490E0
+ -31.925800E0 12.031760E0
+ -31.922899E0 12.039970E0
+ -31.917101E0 12.048100E0
+ -31.911301E0 12.056170E0
+ -31.908400E0 12.064180E0
+ -31.902599E0 12.072120E0
+ -31.896900E0 12.080010E0
+ -31.893999E0 12.087820E0
+ -31.888201E0 12.095580E0
+ -31.885300E0 12.103280E0
+ -31.882401E0 12.110920E0
+ -31.876600E0 12.118500E0
+ -31.873699E0 12.126030E0
+ -31.867901E0 12.133500E0
+ -31.862101E0 12.140910E0
+ -31.859200E0 12.148270E0
+ -31.856300E0 12.155570E0
+ -31.850500E0 12.162830E0
+ -31.844700E0 12.170030E0
+ -31.841801E0 12.177170E0
+ -31.838900E0 12.184270E0
+ -31.833099E0 12.191320E0
+ -31.830200E0 12.198320E0
+ -31.827299E0 12.205270E0
+ -31.821600E0 12.212170E0
+ -31.818701E0 12.219030E0
+ -31.812901E0 12.225840E0
+ -31.809999E0 12.232600E0
+ -31.807100E0 12.239320E0
+ -31.801300E0 12.245990E0
+ -31.798401E0 12.252620E0
+ -31.795500E0 12.259200E0
+ -31.789700E0 12.265750E0
+ -31.786800E0 12.272240E0
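
The Bennett5 header above declares its own layout ("Starting Values (lines 41 to 43)", "Data (lines 61 to 214)"), and every NIST_STRD file in this commit follows the same convention, so the data block can be pulled out by line number alone. A minimal reading sketch, not part of the diff (`read_strd_data` is a hypothetical helper, not an lmfit API):

```python
# Read the "y x" data block of a NIST StRD file, using the 1-indexed
# start line that the file's own header declares (61 for Bennett5).
import numpy as np

def read_strd_data(path, first_line=61):
    with open(path) as fh:
        rows = fh.readlines()[first_line - 1:]
    vals = np.array([[float(v) for v in row.split()] for row in rows])
    return vals[:, 0], vals[:, 1]   # column order is "y x" in these files

y, x = read_strd_data("NIST_STRD/Bennett5.dat")
assert len(y) == 154   # "Number of Observations" from the header
```

Python's `float()` accepts the Fortran-style `E0` exponents used in these files, so no extra conversion is needed.
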
diff --git a/NIST_STRD/BoxBOD.dat b/NIST_STRD/BoxBOD.dat
new file mode 100644
index 0000000..2906603
--- /dev/null
+++ b/NIST_STRD/BoxBOD.dat
@@ -0,0 +1,66 @@
+NIST/ITL StRD
+Dataset Name: BoxBOD (BoxBOD.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 66)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are described in detail in Box, Hunter and
+ Hunter (1978). The response variable is biochemical
+ oxygen demand (BOD) in mg/l, and the predictor
+ variable is incubation time in days.
+
+
+Reference: Box, G. E. P., W. G. Hunter, and J. S. Hunter (1978).
+ Statistics for Experimenters.
+ New York, NY: Wiley, pp. 483-487.
+
+
+
+
+
+Data: 1 Response (y = biochemical oxygen demand)
+ 1 Predictor (x = incubation time)
+ 6 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*(1-exp[-b2*x]) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1 100 2.1380940889E+02 1.2354515176E+01
+ b2 = 1 0.75 5.4723748542E-01 1.0455993237E-01
+
+Residual Sum of Squares: 1.1680088766E+03
+Residual Standard Deviation: 1.7088072423E+01
+Degrees of Freedom: 4
+Number of Observations: 6
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 109 1
+ 149 2
+ 149 3
+ 191 5
+ 213 7
+ 224 10
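
BoxBOD is small enough to fit inline. The sketch below (not part of the diff) fits the stated model y = b1*(1-exp[-b2*x]) with lmfit's `minimize`, starting from the "Start 2" values quoted in the header; the repository's actual driver for these files lives in tests/NISTModels.py and tests/test_NIST_Strd.py.

```python
# Fit the BoxBOD model y = b1*(1 - exp(-b2*x)) to the six points above
# and compare the result with the certified values from the header.
import numpy as np
from lmfit import Parameters, minimize

x = np.array([1.0, 2.0, 3.0, 5.0, 7.0, 10.0])
y = np.array([109.0, 149.0, 149.0, 191.0, 213.0, 224.0])

def residual(pars, x, y):
    v = pars.valuesdict()
    return y - v["b1"] * (1.0 - np.exp(-v["b2"] * x))

params = Parameters()
params.add("b1", value=100)    # "Start 2" starting values
params.add("b2", value=0.75)

out = minimize(residual, params, args=(x, y))
print(out.params["b1"].value)  # certified: 2.1380940889E+02
print(out.params["b2"].value)  # certified: 5.4723748542E-01
```
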
diff --git a/NIST_STRD/Chwirut1.dat b/NIST_STRD/Chwirut1.dat
new file mode 100644
index 0000000..0d0e928
--- /dev/null
+++ b/NIST_STRD/Chwirut1.dat
@@ -0,0 +1,274 @@
+NIST/ITL StRD
+Dataset Name: Chwirut1 (Chwirut1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 274)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ ultrasonic calibration. The response variable is
+ ultrasonic response, and the predictor variable is
+ metal distance.
+
+Reference: Chwirut, D., NIST (197?).
+ Ultrasonic Reference Block Study.
+
+
+
+
+
+
+
+Data: 1 Response Variable (y = ultrasonic response)
+ 1 Predictor Variable (x = metal distance)
+ 214 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = exp[-b1*x]/(b2+b3*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 0.1 0.15 1.9027818370E-01 2.1938557035E-02
+ b2 = 0.01 0.008 6.1314004477E-03 3.4500025051E-04
+ b3 = 0.02 0.010 1.0530908399E-02 7.9281847748E-04
+
+Residual Sum of Squares: 2.3844771393E+03
+Residual Standard Deviation: 3.3616721320E+00
+Degrees of Freedom: 211
+Number of Observations: 214
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 92.9000E0 0.5000E0
+ 78.7000E0 0.6250E0
+ 64.2000E0 0.7500E0
+ 64.9000E0 0.8750E0
+ 57.1000E0 1.0000E0
+ 43.3000E0 1.2500E0
+ 31.1000E0 1.7500E0
+ 23.6000E0 2.2500E0
+ 31.0500E0 1.7500E0
+ 23.7750E0 2.2500E0
+ 17.7375E0 2.7500E0
+ 13.8000E0 3.2500E0
+ 11.5875E0 3.7500E0
+ 9.4125E0 4.2500E0
+ 7.7250E0 4.7500E0
+ 7.3500E0 5.2500E0
+ 8.0250E0 5.7500E0
+ 90.6000E0 0.5000E0
+ 76.9000E0 0.6250E0
+ 71.6000E0 0.7500E0
+ 63.6000E0 0.8750E0
+ 54.0000E0 1.0000E0
+ 39.2000E0 1.2500E0
+ 29.3000E0 1.7500E0
+ 21.4000E0 2.2500E0
+ 29.1750E0 1.7500E0
+ 22.1250E0 2.2500E0
+ 17.5125E0 2.7500E0
+ 14.2500E0 3.2500E0
+ 9.4500E0 3.7500E0
+ 9.1500E0 4.2500E0
+ 7.9125E0 4.7500E0
+ 8.4750E0 5.2500E0
+ 6.1125E0 5.7500E0
+ 80.0000E0 0.5000E0
+ 79.0000E0 0.6250E0
+ 63.8000E0 0.7500E0
+ 57.2000E0 0.8750E0
+ 53.2000E0 1.0000E0
+ 42.5000E0 1.2500E0
+ 26.8000E0 1.7500E0
+ 20.4000E0 2.2500E0
+ 26.8500E0 1.7500E0
+ 21.0000E0 2.2500E0
+ 16.4625E0 2.7500E0
+ 12.5250E0 3.2500E0
+ 10.5375E0 3.7500E0
+ 8.5875E0 4.2500E0
+ 7.1250E0 4.7500E0
+ 6.1125E0 5.2500E0
+ 5.9625E0 5.7500E0
+ 74.1000E0 0.5000E0
+ 67.3000E0 0.6250E0
+ 60.8000E0 0.7500E0
+ 55.5000E0 0.8750E0
+ 50.3000E0 1.0000E0
+ 41.0000E0 1.2500E0
+ 29.4000E0 1.7500E0
+ 20.4000E0 2.2500E0
+ 29.3625E0 1.7500E0
+ 21.1500E0 2.2500E0
+ 16.7625E0 2.7500E0
+ 13.2000E0 3.2500E0
+ 10.8750E0 3.7500E0
+ 8.1750E0 4.2500E0
+ 7.3500E0 4.7500E0
+ 5.9625E0 5.2500E0
+ 5.6250E0 5.7500E0
+ 81.5000E0 .5000E0
+ 62.4000E0 .7500E0
+ 32.5000E0 1.5000E0
+ 12.4100E0 3.0000E0
+ 13.1200E0 3.0000E0
+ 15.5600E0 3.0000E0
+ 5.6300E0 6.0000E0
+ 78.0000E0 .5000E0
+ 59.9000E0 .7500E0
+ 33.2000E0 1.5000E0
+ 13.8400E0 3.0000E0
+ 12.7500E0 3.0000E0
+ 14.6200E0 3.0000E0
+ 3.9400E0 6.0000E0
+ 76.8000E0 .5000E0
+ 61.0000E0 .7500E0
+ 32.9000E0 1.5000E0
+ 13.8700E0 3.0000E0
+ 11.8100E0 3.0000E0
+ 13.3100E0 3.0000E0
+ 5.4400E0 6.0000E0
+ 78.0000E0 .5000E0
+ 63.5000E0 .7500E0
+ 33.8000E0 1.5000E0
+ 12.5600E0 3.0000E0
+ 5.6300E0 6.0000E0
+ 12.7500E0 3.0000E0
+ 13.1200E0 3.0000E0
+ 5.4400E0 6.0000E0
+ 76.8000E0 .5000E0
+ 60.0000E0 .7500E0
+ 47.8000E0 1.0000E0
+ 32.0000E0 1.5000E0
+ 22.2000E0 2.0000E0
+ 22.5700E0 2.0000E0
+ 18.8200E0 2.5000E0
+ 13.9500E0 3.0000E0
+ 11.2500E0 4.0000E0
+ 9.0000E0 5.0000E0
+ 6.6700E0 6.0000E0
+ 75.8000E0 .5000E0
+ 62.0000E0 .7500E0
+ 48.8000E0 1.0000E0
+ 35.2000E0 1.5000E0
+ 20.0000E0 2.0000E0
+ 20.3200E0 2.0000E0
+ 19.3100E0 2.5000E0
+ 12.7500E0 3.0000E0
+ 10.4200E0 4.0000E0
+ 7.3100E0 5.0000E0
+ 7.4200E0 6.0000E0
+ 70.5000E0 .5000E0
+ 59.5000E0 .7500E0
+ 48.5000E0 1.0000E0
+ 35.8000E0 1.5000E0
+ 21.0000E0 2.0000E0
+ 21.6700E0 2.0000E0
+ 21.0000E0 2.5000E0
+ 15.6400E0 3.0000E0
+ 8.1700E0 4.0000E0
+ 8.5500E0 5.0000E0
+ 10.1200E0 6.0000E0
+ 78.0000E0 .5000E0
+ 66.0000E0 .6250E0
+ 62.0000E0 .7500E0
+ 58.0000E0 .8750E0
+ 47.7000E0 1.0000E0
+ 37.8000E0 1.2500E0
+ 20.2000E0 2.2500E0
+ 21.0700E0 2.2500E0
+ 13.8700E0 2.7500E0
+ 9.6700E0 3.2500E0
+ 7.7600E0 3.7500E0
+ 5.4400E0 4.2500E0
+ 4.8700E0 4.7500E0
+ 4.0100E0 5.2500E0
+ 3.7500E0 5.7500E0
+ 24.1900E0 3.0000E0
+ 25.7600E0 3.0000E0
+ 18.0700E0 3.0000E0
+ 11.8100E0 3.0000E0
+ 12.0700E0 3.0000E0
+ 16.1200E0 3.0000E0
+ 70.8000E0 .5000E0
+ 54.7000E0 .7500E0
+ 48.0000E0 1.0000E0
+ 39.8000E0 1.5000E0
+ 29.8000E0 2.0000E0
+ 23.7000E0 2.5000E0
+ 29.6200E0 2.0000E0
+ 23.8100E0 2.5000E0
+ 17.7000E0 3.0000E0
+ 11.5500E0 4.0000E0
+ 12.0700E0 5.0000E0
+ 8.7400E0 6.0000E0
+ 80.7000E0 .5000E0
+ 61.3000E0 .7500E0
+ 47.5000E0 1.0000E0
+ 29.0000E0 1.5000E0
+ 24.0000E0 2.0000E0
+ 17.7000E0 2.5000E0
+ 24.5600E0 2.0000E0
+ 18.6700E0 2.5000E0
+ 16.2400E0 3.0000E0
+ 8.7400E0 4.0000E0
+ 7.8700E0 5.0000E0
+ 8.5100E0 6.0000E0
+ 66.7000E0 .5000E0
+ 59.2000E0 .7500E0
+ 40.8000E0 1.0000E0
+ 30.7000E0 1.5000E0
+ 25.7000E0 2.0000E0
+ 16.3000E0 2.5000E0
+ 25.9900E0 2.0000E0
+ 16.9500E0 2.5000E0
+ 13.3500E0 3.0000E0
+ 8.6200E0 4.0000E0
+ 7.2000E0 5.0000E0
+ 6.6400E0 6.0000E0
+ 13.6900E0 3.0000E0
+ 81.0000E0 .5000E0
+ 64.5000E0 .7500E0
+ 35.5000E0 1.5000E0
+ 13.3100E0 3.0000E0
+ 4.8700E0 6.0000E0
+ 12.9400E0 3.0000E0
+ 5.0600E0 6.0000E0
+ 15.1900E0 3.0000E0
+ 14.6200E0 3.0000E0
+ 15.6400E0 3.0000E0
+ 25.5000E0 1.7500E0
+ 25.9500E0 1.7500E0
+ 81.7000E0 .5000E0
+ 61.6000E0 .7500E0
+ 29.8000E0 1.7500E0
+ 29.8100E0 1.7500E0
+ 17.1700E0 2.7500E0
+ 10.3900E0 3.7500E0
+ 28.4000E0 1.7500E0
+ 28.6900E0 1.7500E0
+ 81.3000E0 .5000E0
+ 60.9000E0 .7500E0
+ 16.6500E0 2.7500E0
+ 10.0500E0 3.7500E0
+ 28.9000E0 1.7500E0
+ 28.9500E0 1.7500E0
diff --git a/NIST_STRD/Chwirut2.dat b/NIST_STRD/Chwirut2.dat
new file mode 100644
index 0000000..b973bb8
--- /dev/null
+++ b/NIST_STRD/Chwirut2.dat
@@ -0,0 +1,114 @@
+NIST/ITL StRD
+Dataset Name: Chwirut2 (Chwirut2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 114)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ ultrasonic calibration. The response variable is
+ ultrasonic response, and the predictor variable is
+ metal distance.
+
+
+
+Reference: Chwirut, D., NIST (197?).
+ Ultrasonic Reference Block Study.
+
+
+
+
+
+Data: 1 Response (y = ultrasonic response)
+ 1 Predictor (x = metal distance)
+ 54 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = exp(-b1*x)/(b2+b3*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 0.1 0.15 1.6657666537E-01 3.8303286810E-02
+ b2 = 0.01 0.008 5.1653291286E-03 6.6621605126E-04
+ b3 = 0.02 0.010 1.2150007096E-02 1.5304234767E-03
+
+Residual Sum of Squares: 5.1304802941E+02
+Residual Standard Deviation: 3.1717133040E+00
+Degrees of Freedom: 51
+Number of Observations: 54
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 92.9000E0 0.500E0
+ 57.1000E0 1.000E0
+ 31.0500E0 1.750E0
+ 11.5875E0 3.750E0
+ 8.0250E0 5.750E0
+ 63.6000E0 0.875E0
+ 21.4000E0 2.250E0
+ 14.2500E0 3.250E0
+ 8.4750E0 5.250E0
+ 63.8000E0 0.750E0
+ 26.8000E0 1.750E0
+ 16.4625E0 2.750E0
+ 7.1250E0 4.750E0
+ 67.3000E0 0.625E0
+ 41.0000E0 1.250E0
+ 21.1500E0 2.250E0
+ 8.1750E0 4.250E0
+ 81.5000E0 .500E0
+ 13.1200E0 3.000E0
+ 59.9000E0 .750E0
+ 14.6200E0 3.000E0
+ 32.9000E0 1.500E0
+ 5.4400E0 6.000E0
+ 12.5600E0 3.000E0
+ 5.4400E0 6.000E0
+ 32.0000E0 1.500E0
+ 13.9500E0 3.000E0
+ 75.8000E0 .500E0
+ 20.0000E0 2.000E0
+ 10.4200E0 4.000E0
+ 59.5000E0 .750E0
+ 21.6700E0 2.000E0
+ 8.5500E0 5.000E0
+ 62.0000E0 .750E0
+ 20.2000E0 2.250E0
+ 7.7600E0 3.750E0
+ 3.7500E0 5.750E0
+ 11.8100E0 3.000E0
+ 54.7000E0 .750E0
+ 23.7000E0 2.500E0
+ 11.5500E0 4.000E0
+ 61.3000E0 .750E0
+ 17.7000E0 2.500E0
+ 8.7400E0 4.000E0
+ 59.2000E0 .750E0
+ 16.3000E0 2.500E0
+ 8.6200E0 4.000E0
+ 81.0000E0 .500E0
+ 4.8700E0 6.000E0
+ 14.6200E0 3.000E0
+ 81.7000E0 .500E0
+ 17.1700E0 2.750E0
+ 81.3000E0 .500E0
+ 28.9000E0 1.750E0
diff --git a/NIST_STRD/DanWood.dat b/NIST_STRD/DanWood.dat
new file mode 100644
index 0000000..a834830
--- /dev/null
+++ b/NIST_STRD/DanWood.dat
@@ -0,0 +1,66 @@
+NIST/ITL StRD
+Dataset Name: DanWood (DanWood.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 66)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data and model are described in Daniel and Wood
+ (1980), and originally published in E. S. Keeping,
+ "Introduction to Statistical Inference," Van Nostrand
+ Company, Princeton, NJ, 1962, p. 354. The response
+ variable is energy radiated from a carbon filament
+ lamp per cm**2 per second, and the predictor variable
+ is the absolute temperature of the filament in 1000
+ degrees Kelvin.
+
+Reference: Daniel, C. and F. S. Wood (1980).
+ Fitting Equations to Data, Second Edition.
+ New York, NY: John Wiley and Sons, pp. 428-431.
+
+
+Data: 1 Response Variable (y = energy)
+ 1 Predictor Variable (x = temperature)
+ 6 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*x**b2 + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1 0.7 7.6886226176E-01 1.8281973860E-02
+ b2 = 5 4 3.8604055871E+00 5.1726610913E-02
+
+Residual Sum of Squares: 4.3173084083E-03
+Residual Standard Deviation: 3.2853114039E-02
+Degrees of Freedom: 4
+Number of Observations: 6
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 2.138E0 1.309E0
+ 3.421E0 1.471E0
+ 3.597E0 1.490E0
+ 4.340E0 1.565E0
+ 4.882E0 1.611E0
+ 5.660E0 1.680E0
diff --git a/NIST_STRD/ENSO.dat b/NIST_STRD/ENSO.dat
new file mode 100644
index 0000000..309ac0f
--- /dev/null
+++ b/NIST_STRD/ENSO.dat
@@ -0,0 +1,228 @@
+NIST/ITL StRD
+Dataset Name: ENSO (ENSO.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 49)
+ Certified Values (lines 41 to 54)
+ Data (lines 61 to 228)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are monthly averaged atmospheric pressure
+ differences between Easter Island and Darwin,
+ Australia. This difference drives the trade winds in
+ the southern hemisphere. Fourier analysis of the data
+ reveals 3 significant cycles. The annual cycle is the
+ strongest, but cycles with periods of approximately 44
+ and 26 months are also present. These cycles
+ correspond to the El Nino and the Southern Oscillation.
+ Arguments to the SIN and COS functions are in radians.
+
+Reference: Kahaner, D., C. Moler, and S. Nash, (1989).
+ Numerical Methods and Software.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 441-445.
+
+Data: 1 Response (y = atmospheric pressure)
+ 1 Predictor (x = time)
+ 168 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 9 Parameters (b1 to b9)
+
+ y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 )
+ + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 )
+ + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 ) + e
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 11.0 10.0 1.0510749193E+01 1.7488832467E-01
+ b2 = 3.0 3.0 3.0762128085E+00 2.4310052139E-01
+ b3 = 0.5 0.5 5.3280138227E-01 2.4354686618E-01
+ b4 = 40.0 44.0 4.4311088700E+01 9.4408025976E-01
+ b5 = -0.7 -1.5 -1.6231428586E+00 2.8078369611E-01
+ b6 = -1.3 0.5 5.2554493756E-01 4.8073701119E-01
+ b7 = 25.0 26.0 2.6887614440E+01 4.1612939130E-01
+ b8 = -0.3 -0.1 2.1232288488E-01 5.1460022911E-01
+ b9 = 1.4 1.5 1.4966870418E+00 2.5434468893E-01
+
+Residual Sum of Squares: 7.8853978668E+02
+Residual Standard Deviation: 2.2269642403E+00
+Degrees of Freedom: 159
+Number of Observations: 168
+
+
+
+
+
+Data: y x
+ 12.90000 1.000000
+ 11.30000 2.000000
+ 10.60000 3.000000
+ 11.20000 4.000000
+ 10.90000 5.000000
+ 7.500000 6.000000
+ 7.700000 7.000000
+ 11.70000 8.000000
+ 12.90000 9.000000
+ 14.30000 10.000000
+ 10.90000 11.00000
+ 13.70000 12.00000
+ 17.10000 13.00000
+ 14.00000 14.00000
+ 15.30000 15.00000
+ 8.500000 16.00000
+ 5.700000 17.00000
+ 5.500000 18.00000
+ 7.600000 19.00000
+ 8.600000 20.00000
+ 7.300000 21.00000
+ 7.600000 22.00000
+ 12.70000 23.00000
+ 11.00000 24.00000
+ 12.70000 25.00000
+ 12.90000 26.00000
+ 13.00000 27.00000
+ 10.90000 28.00000
+ 10.400000 29.00000
+ 10.200000 30.00000
+ 8.000000 31.00000
+ 10.90000 32.00000
+ 13.60000 33.00000
+ 10.500000 34.00000
+ 9.200000 35.00000
+ 12.40000 36.00000
+ 12.70000 37.00000
+ 13.30000 38.00000
+ 10.100000 39.00000
+ 7.800000 40.00000
+ 4.800000 41.00000
+ 3.000000 42.00000
+ 2.500000 43.00000
+ 6.300000 44.00000
+ 9.700000 45.00000
+ 11.60000 46.00000
+ 8.600000 47.00000
+ 12.40000 48.00000
+ 10.500000 49.00000
+ 13.30000 50.00000
+ 10.400000 51.00000
+ 8.100000 52.00000
+ 3.700000 53.00000
+ 10.70000 54.00000
+ 5.100000 55.00000
+ 10.400000 56.00000
+ 10.90000 57.00000
+ 11.70000 58.00000
+ 11.40000 59.00000
+ 13.70000 60.00000
+ 14.10000 61.00000
+ 14.00000 62.00000
+ 12.50000 63.00000
+ 6.300000 64.00000
+ 9.600000 65.00000
+ 11.70000 66.00000
+ 5.000000 67.00000
+ 10.80000 68.00000
+ 12.70000 69.00000
+ 10.80000 70.00000
+ 11.80000 71.00000
+ 12.60000 72.00000
+ 15.70000 73.00000
+ 12.60000 74.00000
+ 14.80000 75.00000
+ 7.800000 76.00000
+ 7.100000 77.00000
+ 11.20000 78.00000
+ 8.100000 79.00000
+ 6.400000 80.00000
+ 5.200000 81.00000
+ 12.00000 82.00000
+ 10.200000 83.00000
+ 12.70000 84.00000
+ 10.200000 85.00000
+ 14.70000 86.00000
+ 12.20000 87.00000
+ 7.100000 88.00000
+ 5.700000 89.00000
+ 6.700000 90.00000
+ 3.900000 91.00000
+ 8.500000 92.00000
+ 8.300000 93.00000
+ 10.80000 94.00000
+ 16.70000 95.00000
+ 12.60000 96.00000
+ 12.50000 97.00000
+ 12.50000 98.00000
+ 9.800000 99.00000
+ 7.200000 100.00000
+ 4.100000 101.00000
+ 10.60000 102.00000
+ 10.100000 103.00000
+ 10.100000 104.00000
+ 11.90000 105.00000
+ 13.60000 106.0000
+ 16.30000 107.0000
+ 17.60000 108.0000
+ 15.50000 109.0000
+ 16.00000 110.0000
+ 15.20000 111.0000
+ 11.20000 112.0000
+ 14.30000 113.0000
+ 14.50000 114.0000
+ 8.500000 115.0000
+ 12.00000 116.0000
+ 12.70000 117.0000
+ 11.30000 118.0000
+ 14.50000 119.0000
+ 15.10000 120.0000
+ 10.400000 121.0000
+ 11.50000 122.0000
+ 13.40000 123.0000
+ 7.500000 124.0000
+ 0.6000000 125.0000
+ 0.3000000 126.0000
+ 5.500000 127.0000
+ 5.000000 128.0000
+ 4.600000 129.0000
+ 8.200000 130.0000
+ 9.900000 131.0000
+ 9.200000 132.0000
+ 12.50000 133.0000
+ 10.90000 134.0000
+ 9.900000 135.0000
+ 8.900000 136.0000
+ 7.600000 137.0000
+ 9.500000 138.0000
+ 8.400000 139.0000
+ 10.70000 140.0000
+ 13.60000 141.0000
+ 13.70000 142.0000
+ 13.70000 143.0000
+ 16.50000 144.0000
+ 16.80000 145.0000
+ 17.10000 146.0000
+ 15.40000 147.0000
+ 9.500000 148.0000
+ 6.100000 149.0000
+ 10.100000 150.0000
+ 9.300000 151.0000
+ 5.300000 152.0000
+ 11.20000 153.0000
+ 16.60000 154.0000
+ 15.60000 155.0000
+ 12.00000 156.0000
+ 11.50000 157.0000
+ 8.600000 158.0000
+ 13.80000 159.0000
+ 8.700000 160.0000
+ 8.600000 161.0000
+ 8.600000 162.0000
+ 8.700000 163.0000
+ 12.80000 164.0000
+ 13.20000 165.0000
+ 14.00000 166.0000
+ 13.40000 167.0000
+ 14.80000 168.0000
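
The ENSO model quoted above is the most elaborate in this set: a constant plus three sinusoids, one fixed at a 12-month period and two with fitted periods b4 and b7. A sketch (not part of the diff) of that formula as a plain NumPy function; since the independent variable comes first in the signature, `lmfit.Model(enso)` would turn it directly into a fittable model:

```python
# The 9-parameter ENSO model from the header; sin/cos arguments are in
# radians and x is time in months.
import numpy as np

def enso(x, b1, b2, b3, b4, b5, b6, b7, b8, b9):
    w = 2.0 * np.pi * x
    return (b1
            + b2 * np.cos(w / 12) + b3 * np.sin(w / 12)    # annual cycle
            + b5 * np.cos(w / b4) + b6 * np.sin(w / b4)    # ~44-month cycle
            + b8 * np.cos(w / b7) + b9 * np.sin(w / b7))   # ~26-month cycle
```
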
diff --git a/NIST_STRD/Eckerle4.dat b/NIST_STRD/Eckerle4.dat
new file mode 100644
index 0000000..53d0942
--- /dev/null
+++ b/NIST_STRD/Eckerle4.dat
@@ -0,0 +1,95 @@
+NIST/ITL StRD
+Dataset Name: Eckerle4 (Eckerle4.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 95)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ circular interference transmittance. The response
+ variable is transmittance, and the predictor variable
+ is wavelength.
+
+
+Reference: Eckerle, K., NIST (197?).
+ Circular Interference Transmittance Study.
+
+
+
+
+
+
+Data: 1 Response Variable (y = transmittance)
+ 1 Predictor Variable (x = wavelength)
+ 35 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1 1.5 1.5543827178E+00 1.5408051163E-02
+ b2 = 10 5 4.0888321754E+00 4.6803020753E-02
+ b3 = 500 450 4.5154121844E+02 4.6800518816E-02
+
+Residual Sum of Squares: 1.4635887487E-03
+Residual Standard Deviation: 6.7629245447E-03
+Degrees of Freedom: 32
+Number of Observations: 35
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 0.0001575E0 400.000000E0
+ 0.0001699E0 405.000000E0
+ 0.0002350E0 410.000000E0
+ 0.0003102E0 415.000000E0
+ 0.0004917E0 420.000000E0
+ 0.0008710E0 425.000000E0
+ 0.0017418E0 430.000000E0
+ 0.0046400E0 435.000000E0
+ 0.0065895E0 436.500000E0
+ 0.0097302E0 438.000000E0
+ 0.0149002E0 439.500000E0
+ 0.0237310E0 441.000000E0
+ 0.0401683E0 442.500000E0
+ 0.0712559E0 444.000000E0
+ 0.1264458E0 445.500000E0
+ 0.2073413E0 447.000000E0
+ 0.2902366E0 448.500000E0
+ 0.3445623E0 450.000000E0
+ 0.3698049E0 451.500000E0
+ 0.3668534E0 453.000000E0
+ 0.3106727E0 454.500000E0
+ 0.2078154E0 456.000000E0
+ 0.1164354E0 457.500000E0
+ 0.0616764E0 459.000000E0
+ 0.0337200E0 460.500000E0
+ 0.0194023E0 462.000000E0
+ 0.0117831E0 463.500000E0
+ 0.0074357E0 465.000000E0
+ 0.0022732E0 470.000000E0
+ 0.0008800E0 475.000000E0
+ 0.0004579E0 480.000000E0
+ 0.0002345E0 485.000000E0
+ 0.0001586E0 490.000000E0
+ 0.0001143E0 495.000000E0
+ 0.0000710E0 500.000000E0
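
Eckerle4 is flagged "Higher Level of Difficulty": the peak is narrow, so fits only converge from reasonable starting points such as the "Start 2" column. A sketch (not part of the diff) using lmfit's `Model` wrapper instead of a hand-written residual; here `x` and `y` stand for the wavelength and transmittance columns of the data block above:

```python
# Wrap the Eckerle4 line shape in an lmfit Model and seed it with the
# "Start 2" values from the header.
import numpy as np
from lmfit import Model

def eckerle4(x, b1, b2, b3):
    return (b1 / b2) * np.exp(-0.5 * ((x - b3) / b2) ** 2)

model = Model(eckerle4)
params = model.make_params(b1=1.5, b2=5, b3=450)

# With x, y loaded from the data block above:
# result = model.fit(y, params, x=x)
# certified: b1=1.5543827178, b2=4.0888321754, b3=4.5154121844E+02
```
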
diff --git a/NIST_STRD/Gauss1.dat b/NIST_STRD/Gauss1.dat
new file mode 100644
index 0000000..d6bc176
--- /dev/null
+++ b/NIST_STRD/Gauss1.dat
@@ -0,0 +1,310 @@
+NIST/ITL StRD
+Dataset Name: Gauss1 (Gauss1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 48)
+ Certified Values (lines 41 to 53)
+ Data (lines 61 to 310)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are two well-separated Gaussians on a
+ decaying exponential baseline plus normally
+ distributed zero-mean noise with variance = 6.25.
+
+Reference: Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 250 Observations
+ Lower Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 8 Parameters (b1 to b8)
+
+ y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ + b6*exp( -(x-b7)**2 / b8**2 ) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 97.0 94.0 9.8778210871E+01 5.7527312730E-01
+ b2 = 0.009 0.0105 1.0497276517E-02 1.1406289017E-04
+ b3 = 100.0 99.0 1.0048990633E+02 5.8831775752E-01
+ b4 = 65.0 63.0 6.7481111276E+01 1.0460593412E-01
+ b5 = 20.0 25.0 2.3129773360E+01 1.7439951146E-01
+ b6 = 70.0 71.0 7.1994503004E+01 6.2622793913E-01
+ b7 = 178.0 180.0 1.7899805021E+02 1.2436988217E-01
+ b8 = 16.5 20.0 1.8389389025E+01 2.0134312832E-01
+
+Residual Sum of Squares: 1.3158222432E+03
+Residual Standard Deviation: 2.3317980180E+00
+Degrees of Freedom: 242
+Number of Observations: 250
+
+
+
+
+
+
+Data: y x
+ 97.62227 1.000000
+ 97.80724 2.000000
+ 96.62247 3.000000
+ 92.59022 4.000000
+ 91.23869 5.000000
+ 95.32704 6.000000
+ 90.35040 7.000000
+ 89.46235 8.000000
+ 91.72520 9.000000
+ 89.86916 10.000000
+ 86.88076 11.00000
+ 85.94360 12.00000
+ 87.60686 13.00000
+ 86.25839 14.00000
+ 80.74976 15.00000
+ 83.03551 16.00000
+ 88.25837 17.00000
+ 82.01316 18.00000
+ 82.74098 19.00000
+ 83.30034 20.00000
+ 81.27850 21.00000
+ 81.85506 22.00000
+ 80.75195 23.00000
+ 80.09573 24.00000
+ 81.07633 25.00000
+ 78.81542 26.00000
+ 78.38596 27.00000
+ 79.93386 28.00000
+ 79.48474 29.00000
+ 79.95942 30.00000
+ 76.10691 31.00000
+ 78.39830 32.00000
+ 81.43060 33.00000
+ 82.48867 34.00000
+ 81.65462 35.00000
+ 80.84323 36.00000
+ 88.68663 37.00000
+ 84.74438 38.00000
+ 86.83934 39.00000
+ 85.97739 40.00000
+ 91.28509 41.00000
+ 97.22411 42.00000
+ 93.51733 43.00000
+ 94.10159 44.00000
+ 101.91760 45.00000
+ 98.43134 46.00000
+ 110.4214 47.00000
+ 107.6628 48.00000
+ 111.7288 49.00000
+ 116.5115 50.00000
+ 120.7609 51.00000
+ 123.9553 52.00000
+ 124.2437 53.00000
+ 130.7996 54.00000
+ 133.2960 55.00000
+ 130.7788 56.00000
+ 132.0565 57.00000
+ 138.6584 58.00000
+ 142.9252 59.00000
+ 142.7215 60.00000
+ 144.1249 61.00000
+ 147.4377 62.00000
+ 148.2647 63.00000
+ 152.0519 64.00000
+ 147.3863 65.00000
+ 149.2074 66.00000
+ 148.9537 67.00000
+ 144.5876 68.00000
+ 148.1226 69.00000
+ 148.0144 70.00000
+ 143.8893 71.00000
+ 140.9088 72.00000
+ 143.4434 73.00000
+ 139.3938 74.00000
+ 135.9878 75.00000
+ 136.3927 76.00000
+ 126.7262 77.00000
+ 124.4487 78.00000
+ 122.8647 79.00000
+ 113.8557 80.00000
+ 113.7037 81.00000
+ 106.8407 82.00000
+ 107.0034 83.00000
+ 102.46290 84.00000
+ 96.09296 85.00000
+ 94.57555 86.00000
+ 86.98824 87.00000
+ 84.90154 88.00000
+ 81.18023 89.00000
+ 76.40117 90.00000
+ 67.09200 91.00000
+ 72.67155 92.00000
+ 68.10848 93.00000
+ 67.99088 94.00000
+ 63.34094 95.00000
+ 60.55253 96.00000
+ 56.18687 97.00000
+ 53.64482 98.00000
+ 53.70307 99.00000
+ 48.07893 100.00000
+ 42.21258 101.00000
+ 45.65181 102.00000
+ 41.69728 103.00000
+ 41.24946 104.00000
+ 39.21349 105.00000
+ 37.71696 106.0000
+ 36.68395 107.0000
+ 37.30393 108.0000
+ 37.43277 109.0000
+ 37.45012 110.0000
+ 32.64648 111.0000
+ 31.84347 112.0000
+ 31.39951 113.0000
+ 26.68912 114.0000
+ 32.25323 115.0000
+ 27.61008 116.0000
+ 33.58649 117.0000
+ 28.10714 118.0000
+ 30.26428 119.0000
+ 28.01648 120.0000
+ 29.11021 121.0000
+ 23.02099 122.0000
+ 25.65091 123.0000
+ 28.50295 124.0000
+ 25.23701 125.0000
+ 26.13828 126.0000
+ 33.53260 127.0000
+ 29.25195 128.0000
+ 27.09847 129.0000
+ 26.52999 130.0000
+ 25.52401 131.0000
+ 26.69218 132.0000
+ 24.55269 133.0000
+ 27.71763 134.0000
+ 25.20297 135.0000
+ 25.61483 136.0000
+ 25.06893 137.0000
+ 27.63930 138.0000
+ 24.94851 139.0000
+ 25.86806 140.0000
+ 22.48183 141.0000
+ 26.90045 142.0000
+ 25.39919 143.0000
+ 17.90614 144.0000
+ 23.76039 145.0000
+ 25.89689 146.0000
+ 27.64231 147.0000
+ 22.86101 148.0000
+ 26.47003 149.0000
+ 23.72888 150.0000
+ 27.54334 151.0000
+ 30.52683 152.0000
+ 28.07261 153.0000
+ 34.92815 154.0000
+ 28.29194 155.0000
+ 34.19161 156.0000
+ 35.41207 157.0000
+ 37.09336 158.0000
+ 40.98330 159.0000
+ 39.53923 160.0000
+ 47.80123 161.0000
+ 47.46305 162.0000
+ 51.04166 163.0000
+ 54.58065 164.0000
+ 57.53001 165.0000
+ 61.42089 166.0000
+ 62.79032 167.0000
+ 68.51455 168.0000
+ 70.23053 169.0000
+ 74.42776 170.0000
+ 76.59911 171.0000
+ 81.62053 172.0000
+ 83.42208 173.0000
+ 79.17451 174.0000
+ 88.56985 175.0000
+ 85.66525 176.0000
+ 86.55502 177.0000
+ 90.65907 178.0000
+ 84.27290 179.0000
+ 85.72220 180.0000
+ 83.10702 181.0000
+ 82.16884 182.0000
+ 80.42568 183.0000
+ 78.15692 184.0000
+ 79.79691 185.0000
+ 77.84378 186.0000
+ 74.50327 187.0000
+ 71.57289 188.0000
+ 65.88031 189.0000
+ 65.01385 190.0000
+ 60.19582 191.0000
+ 59.66726 192.0000
+ 52.95478 193.0000
+ 53.87792 194.0000
+ 44.91274 195.0000
+ 41.09909 196.0000
+ 41.68018 197.0000
+ 34.53379 198.0000
+ 34.86419 199.0000
+ 33.14787 200.0000
+ 29.58864 201.0000
+ 27.29462 202.0000
+ 21.91439 203.0000
+ 19.08159 204.0000
+ 24.90290 205.0000
+ 19.82341 206.0000
+ 16.75551 207.0000
+ 18.24558 208.0000
+ 17.23549 209.0000
+ 16.34934 210.0000
+ 13.71285 211.0000
+ 14.75676 212.0000
+ 13.97169 213.0000
+ 12.42867 214.0000
+ 14.35519 215.0000
+ 7.703309 216.0000
+ 10.234410 217.0000
+ 11.78315 218.0000
+ 13.87768 219.0000
+ 4.535700 220.0000
+ 10.059280 221.0000
+ 8.424824 222.0000
+ 10.533120 223.0000
+ 9.602255 224.0000
+ 7.877514 225.0000
+ 6.258121 226.0000
+ 8.899865 227.0000
+ 7.877754 228.0000
+ 12.51191 229.0000
+ 10.66205 230.0000
+ 6.035400 231.0000
+ 6.790655 232.0000
+ 8.783535 233.0000
+ 4.600288 234.0000
+ 8.400915 235.0000
+ 7.216561 236.0000
+ 10.017410 237.0000
+ 7.331278 238.0000
+ 6.527863 239.0000
+ 2.842001 240.0000
+ 10.325070 241.0000
+ 4.790995 242.0000
+ 8.377101 243.0000
+ 6.264445 244.0000
+ 2.706213 245.0000
+ 8.362329 246.0000
+ 8.983658 247.0000
+ 3.362571 248.0000
+ 1.182746 249.0000
+ 4.875359 250.0000
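
The Gauss1 model above (two well-separated Gaussians on a decaying exponential
baseline) is typical of what these files exercise. Below is a minimal sketch of
fitting it with lmfit, assuming only that the file sits at the path this commit
adds; the skiprows=60 offset relies on the StRD convention, stated in each
header, that the data begin at line 61.

    import numpy as np
    from lmfit import Parameters, minimize

    def gauss1_residual(pars, x, y):
        # y = b1*exp(-b2*x) + b3*exp(-(x-b4)**2/b5**2)
        #     + b6*exp(-(x-b7)**2/b8**2) + e
        v = pars.valuesdict()
        model = (v['b1'] * np.exp(-v['b2'] * x)
                 + v['b3'] * np.exp(-(x - v['b4'])**2 / v['b5']**2)
                 + v['b6'] * np.exp(-(x - v['b7'])**2 / v['b8']**2))
        return model - y

    data = np.loadtxt('NIST_STRD/Gauss1.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]

    params = Parameters()
    for name, value in zip(('b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'),
                           (97.0, 0.009, 100.0, 65.0, 20.0, 70.0, 178.0, 16.5)):
        params.add(name, value=value)   # "Start 1" values from the header

    out = minimize(gauss1_residual, params, args=(x, y))
    print(out.params['b1'].value)       # certified value: 9.8778210871E+01

Either certified start vector should converge to the certified values listed in
the header; Gauss2 and Gauss3 share the same model with progressively more
blended peaks.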
diff --git a/NIST_STRD/Gauss2.dat b/NIST_STRD/Gauss2.dat
new file mode 100644
index 0000000..898e6d2
--- /dev/null
+++ b/NIST_STRD/Gauss2.dat
@@ -0,0 +1,310 @@
+NIST/ITL StRD
+Dataset Name: Gauss2 (Gauss2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 48)
+ Certified Values (lines 41 to 53)
+ Data (lines 61 to 310)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are two slightly-blended Gaussians on a
+ decaying exponential baseline plus normally
+ distributed zero-mean noise with variance = 6.25.
+
+Reference: Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 250 Observations
+ Lower Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 8 Parameters (b1 to b8)
+
+ y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ + b6*exp( -(x-b7)**2 / b8**2 ) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 96.0 98.0 9.9018328406E+01 5.3748766879E-01
+ b2 = 0.009 0.0105 1.0994945399E-02 1.3335306766E-04
+ b3 = 103.0 103.0 1.0188022528E+02 5.9217315772E-01
+ b4 = 106.0 105.0 1.0703095519E+02 1.5006798316E-01
+ b5 = 18.0 20.0 2.3578584029E+01 2.2695595067E-01
+ b6 = 72.0 73.0 7.2045589471E+01 6.1721965884E-01
+ b7 = 151.0 150.0 1.5327010194E+02 1.9466674341E-01
+ b8 = 18.0 20.0 1.9525972636E+01 2.6416549393E-01
+
+Residual Sum of Squares: 1.2475282092E+03
+Residual Standard Deviation: 2.2704790782E+00
+Degrees of Freedom: 242
+Number of Observations: 250
+
+
+
+
+
+
+Data: y x
+ 97.58776 1.000000
+ 97.76344 2.000000
+ 96.56705 3.000000
+ 92.52037 4.000000
+ 91.15097 5.000000
+ 95.21728 6.000000
+ 90.21355 7.000000
+ 89.29235 8.000000
+ 91.51479 9.000000
+ 89.60966 10.000000
+ 86.56187 11.00000
+ 85.55316 12.00000
+ 87.13054 13.00000
+ 85.67940 14.00000
+ 80.04851 15.00000
+ 82.18925 16.00000
+ 87.24081 17.00000
+ 80.79407 18.00000
+ 81.28570 19.00000
+ 81.56940 20.00000
+ 79.22715 21.00000
+ 79.43275 22.00000
+ 77.90195 23.00000
+ 76.75468 24.00000
+ 77.17377 25.00000
+ 74.27348 26.00000
+ 73.11900 27.00000
+ 73.84826 28.00000
+ 72.47870 29.00000
+ 71.92292 30.00000
+ 66.92176 31.00000
+ 67.93835 32.00000
+ 69.56207 33.00000
+ 69.07066 34.00000
+ 66.53983 35.00000
+ 63.87883 36.00000
+ 69.71537 37.00000
+ 63.60588 38.00000
+ 63.37154 39.00000
+ 60.01835 40.00000
+ 62.67481 41.00000
+ 65.80666 42.00000
+ 59.14304 43.00000
+ 56.62951 44.00000
+ 61.21785 45.00000
+ 54.38790 46.00000
+ 62.93443 47.00000
+ 56.65144 48.00000
+ 57.13362 49.00000
+ 58.29689 50.00000
+ 58.91744 51.00000
+ 58.50172 52.00000
+ 55.22885 53.00000
+ 58.30375 54.00000
+ 57.43237 55.00000
+ 51.69407 56.00000
+ 49.93132 57.00000
+ 53.70760 58.00000
+ 55.39712 59.00000
+ 52.89709 60.00000
+ 52.31649 61.00000
+ 53.98720 62.00000
+ 53.54158 63.00000
+ 56.45046 64.00000
+ 51.32276 65.00000
+ 53.11676 66.00000
+ 53.28631 67.00000
+ 49.80555 68.00000
+ 54.69564 69.00000
+ 56.41627 70.00000
+ 54.59362 71.00000
+ 54.38520 72.00000
+ 60.15354 73.00000
+ 59.78773 74.00000
+ 60.49995 75.00000
+ 65.43885 76.00000
+ 60.70001 77.00000
+ 63.71865 78.00000
+ 67.77139 79.00000
+ 64.70934 80.00000
+ 70.78193 81.00000
+ 70.38651 82.00000
+ 77.22359 83.00000
+ 79.52665 84.00000
+ 80.13077 85.00000
+ 85.67823 86.00000
+ 85.20647 87.00000
+ 90.24548 88.00000
+ 93.61953 89.00000
+ 95.86509 90.00000
+ 93.46992 91.00000
+ 105.8137 92.00000
+ 107.8269 93.00000
+ 114.0607 94.00000
+ 115.5019 95.00000
+ 118.5110 96.00000
+ 119.6177 97.00000
+ 122.1940 98.00000
+ 126.9903 99.00000
+ 125.7005 100.00000
+ 123.7447 101.00000
+ 130.6543 102.00000
+ 129.7168 103.00000
+ 131.8240 104.00000
+ 131.8759 105.00000
+ 131.9994 106.0000
+ 132.1221 107.0000
+ 133.4414 108.0000
+ 133.8252 109.0000
+ 133.6695 110.0000
+ 128.2851 111.0000
+ 126.5182 112.0000
+ 124.7550 113.0000
+ 118.4016 114.0000
+ 122.0334 115.0000
+ 115.2059 116.0000
+ 118.7856 117.0000
+ 110.7387 118.0000
+ 110.2003 119.0000
+ 105.17290 120.0000
+ 103.44720 121.0000
+ 94.54280 122.0000
+ 94.40526 123.0000
+ 94.57964 124.0000
+ 88.76605 125.0000
+ 87.28747 126.0000
+ 92.50443 127.0000
+ 86.27997 128.0000
+ 82.44307 129.0000
+ 80.47367 130.0000
+ 78.36608 131.0000
+ 78.74307 132.0000
+ 76.12786 133.0000
+ 79.13108 134.0000
+ 76.76062 135.0000
+ 77.60769 136.0000
+ 77.76633 137.0000
+ 81.28220 138.0000
+ 79.74307 139.0000
+ 81.97964 140.0000
+ 80.02952 141.0000
+ 85.95232 142.0000
+ 85.96838 143.0000
+ 79.94789 144.0000
+ 87.17023 145.0000
+ 90.50992 146.0000
+ 93.23373 147.0000
+ 89.14803 148.0000
+ 93.11492 149.0000
+ 90.34337 150.0000
+ 93.69421 151.0000
+ 95.74256 152.0000
+ 91.85105 153.0000
+ 96.74503 154.0000
+ 87.60996 155.0000
+ 90.47012 156.0000
+ 88.11690 157.0000
+ 85.70673 158.0000
+ 85.01361 159.0000
+ 78.53040 160.0000
+ 81.34148 161.0000
+ 75.19295 162.0000
+ 72.66115 163.0000
+ 69.85504 164.0000
+ 66.29476 165.0000
+ 63.58502 166.0000
+ 58.33847 167.0000
+ 57.50766 168.0000
+ 52.80498 169.0000
+ 50.79319 170.0000
+ 47.03490 171.0000
+ 46.47090 172.0000
+ 43.09016 173.0000
+ 34.11531 174.0000
+ 39.28235 175.0000
+ 32.68386 176.0000
+ 30.44056 177.0000
+ 31.98932 178.0000
+ 23.63330 179.0000
+ 23.69643 180.0000
+ 20.26812 181.0000
+ 19.07074 182.0000
+ 17.59544 183.0000
+ 16.08785 184.0000
+ 18.94267 185.0000
+ 18.61354 186.0000
+ 17.25800 187.0000
+ 16.62285 188.0000
+ 13.48367 189.0000
+ 15.37647 190.0000
+ 13.47208 191.0000
+ 15.96188 192.0000
+ 12.32547 193.0000
+ 16.33880 194.0000
+ 10.438330 195.0000
+ 9.628715 196.0000
+ 13.12268 197.0000
+ 8.772417 198.0000
+ 11.76143 199.0000
+ 12.55020 200.0000
+ 11.33108 201.0000
+ 11.20493 202.0000
+ 7.816916 203.0000
+ 6.800675 204.0000
+ 14.26581 205.0000
+ 10.66285 206.0000
+ 8.911574 207.0000
+ 11.56733 208.0000
+ 11.58207 209.0000
+ 11.59071 210.0000
+ 9.730134 211.0000
+ 11.44237 212.0000
+ 11.22912 213.0000
+ 10.172130 214.0000
+ 12.50905 215.0000
+ 6.201493 216.0000
+ 9.019605 217.0000
+ 10.80607 218.0000
+ 13.09625 219.0000
+ 3.914271 220.0000
+ 9.567886 221.0000
+ 8.038448 222.0000
+ 10.231040 223.0000
+ 9.367410 224.0000
+ 7.695971 225.0000
+ 6.118575 226.0000
+ 8.793207 227.0000
+ 7.796692 228.0000
+ 12.45065 229.0000
+ 10.61601 230.0000
+ 6.001003 231.0000
+ 6.765098 232.0000
+ 8.764653 233.0000
+ 4.586418 234.0000
+ 8.390783 235.0000
+ 7.209202 236.0000
+ 10.012090 237.0000
+ 7.327461 238.0000
+ 6.525136 239.0000
+ 2.840065 240.0000
+ 10.323710 241.0000
+ 4.790035 242.0000
+ 8.376431 243.0000
+ 6.263980 244.0000
+ 2.705892 245.0000
+ 8.362109 246.0000
+ 8.983507 247.0000
+ 3.362469 248.0000
+ 1.182678 249.0000
+ 4.875312 250.0000
diff --git a/NIST_STRD/Gauss3.dat b/NIST_STRD/Gauss3.dat
new file mode 100644
index 0000000..b047bea
--- /dev/null
+++ b/NIST_STRD/Gauss3.dat
@@ -0,0 +1,310 @@
+NIST/ITL StRD
+Dataset Name: Gauss3 (Gauss3.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 48)
+ Certified Values (lines 41 to 53)
+ Data (lines 61 to 310)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are two strongly-blended Gaussians on a
+ decaying exponential baseline plus normally
+ distributed zero-mean noise with variance = 6.25.
+
+Reference: Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 250 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 8 Parameters (b1 to b8)
+
+ y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ + b6*exp( -(x-b7)**2 / b8**2 ) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 94.9 96.0 9.8940368970E+01 5.3005192833E-01
+ b2 = 0.009 0.0096 1.0945879335E-02 1.2554058911E-04
+ b3 = 90.1 80.0 1.0069553078E+02 8.1256587317E-01
+ b4 = 113.0 110.0 1.1163619459E+02 3.5317859757E-01
+ b5 = 20.0 25.0 2.3300500029E+01 3.6584783023E-01
+ b6 = 73.8 74.0 7.3705031418E+01 1.2091239082E+00
+ b7 = 140.0 139.0 1.4776164251E+02 4.0488183351E-01
+ b8 = 20.0 25.0 1.9668221230E+01 3.7806634336E-01
+
+Residual Sum of Squares: 1.2444846360E+03
+Residual Standard Deviation: 2.2677077625E+00
+Degrees of Freedom: 242
+Number of Observations: 250
+
+
+
+
+
+
+Data: y x
+ 97.58776 1.000000
+ 97.76344 2.000000
+ 96.56705 3.000000
+ 92.52037 4.000000
+ 91.15097 5.000000
+ 95.21728 6.000000
+ 90.21355 7.000000
+ 89.29235 8.000000
+ 91.51479 9.000000
+ 89.60965 10.000000
+ 86.56187 11.00000
+ 85.55315 12.00000
+ 87.13053 13.00000
+ 85.67938 14.00000
+ 80.04849 15.00000
+ 82.18922 16.00000
+ 87.24078 17.00000
+ 80.79401 18.00000
+ 81.28564 19.00000
+ 81.56932 20.00000
+ 79.22703 21.00000
+ 79.43259 22.00000
+ 77.90174 23.00000
+ 76.75438 24.00000
+ 77.17338 25.00000
+ 74.27296 26.00000
+ 73.11830 27.00000
+ 73.84732 28.00000
+ 72.47746 29.00000
+ 71.92128 30.00000
+ 66.91962 31.00000
+ 67.93554 32.00000
+ 69.55841 33.00000
+ 69.06592 34.00000
+ 66.53371 35.00000
+ 63.87094 36.00000
+ 69.70526 37.00000
+ 63.59295 38.00000
+ 63.35509 39.00000
+ 59.99747 40.00000
+ 62.64843 41.00000
+ 65.77345 42.00000
+ 59.10141 43.00000
+ 56.57750 44.00000
+ 61.15313 45.00000
+ 54.30767 46.00000
+ 62.83535 47.00000
+ 56.52957 48.00000
+ 56.98427 49.00000
+ 58.11459 50.00000
+ 58.69576 51.00000
+ 58.23322 52.00000
+ 54.90490 53.00000
+ 57.91442 54.00000
+ 56.96629 55.00000
+ 51.13831 56.00000
+ 49.27123 57.00000
+ 52.92668 58.00000
+ 54.47693 59.00000
+ 51.81710 60.00000
+ 51.05401 61.00000
+ 52.51731 62.00000
+ 51.83710 63.00000
+ 54.48196 64.00000
+ 49.05859 65.00000
+ 50.52315 66.00000
+ 50.32755 67.00000
+ 46.44419 68.00000
+ 50.89281 69.00000
+ 52.13203 70.00000
+ 49.78741 71.00000
+ 49.01637 72.00000
+ 54.18198 73.00000
+ 53.17456 74.00000
+ 53.20827 75.00000
+ 57.43459 76.00000
+ 51.95282 77.00000
+ 54.20282 78.00000
+ 57.46687 79.00000
+ 53.60268 80.00000
+ 58.86728 81.00000
+ 57.66652 82.00000
+ 63.71034 83.00000
+ 65.24244 84.00000
+ 65.10878 85.00000
+ 69.96313 86.00000
+ 68.85475 87.00000
+ 73.32574 88.00000
+ 76.21241 89.00000
+ 78.06311 90.00000
+ 75.37701 91.00000
+ 87.54449 92.00000
+ 89.50588 93.00000
+ 95.82098 94.00000
+ 97.48390 95.00000
+ 100.86070 96.00000
+ 102.48510 97.00000
+ 105.7311 98.00000
+ 111.3489 99.00000
+ 111.0305 100.00000
+ 110.1920 101.00000
+ 118.3581 102.00000
+ 118.8086 103.00000
+ 122.4249 104.00000
+ 124.0953 105.00000
+ 125.9337 106.0000
+ 127.8533 107.0000
+ 131.0361 108.0000
+ 133.3343 109.0000
+ 135.1278 110.0000
+ 131.7113 111.0000
+ 131.9151 112.0000
+ 132.1107 113.0000
+ 127.6898 114.0000
+ 133.2148 115.0000
+ 128.2296 116.0000
+ 133.5902 117.0000
+ 127.2539 118.0000
+ 128.3482 119.0000
+ 124.8694 120.0000
+ 124.6031 121.0000
+ 117.0648 122.0000
+ 118.1966 123.0000
+ 119.5408 124.0000
+ 114.7946 125.0000
+ 114.2780 126.0000
+ 120.3484 127.0000
+ 114.8647 128.0000
+ 111.6514 129.0000
+ 110.1826 130.0000
+ 108.4461 131.0000
+ 109.0571 132.0000
+ 106.5308 133.0000
+ 109.4691 134.0000
+ 106.8709 135.0000
+ 107.3192 136.0000
+ 106.9000 137.0000
+ 109.6526 138.0000
+ 107.1602 139.0000
+ 108.2509 140.0000
+ 104.96310 141.0000
+ 109.3601 142.0000
+ 107.6696 143.0000
+ 99.77286 144.0000
+ 104.96440 145.0000
+ 106.1376 146.0000
+ 106.5816 147.0000
+ 100.12860 148.0000
+ 101.66910 149.0000
+ 96.44254 150.0000
+ 97.34169 151.0000
+ 96.97412 152.0000
+ 90.73460 153.0000
+ 93.37949 154.0000
+ 82.12331 155.0000
+ 83.01657 156.0000
+ 78.87360 157.0000
+ 74.86971 158.0000
+ 72.79341 159.0000
+ 65.14744 160.0000
+ 67.02127 161.0000
+ 60.16136 162.0000
+ 57.13996 163.0000
+ 54.05769 164.0000
+ 50.42265 165.0000
+ 47.82430 166.0000
+ 42.85748 167.0000
+ 42.45495 168.0000
+ 38.30808 169.0000
+ 36.95794 170.0000
+ 33.94543 171.0000
+ 34.19017 172.0000
+ 31.66097 173.0000
+ 23.56172 174.0000
+ 29.61143 175.0000
+ 23.88765 176.0000
+ 22.49812 177.0000
+ 24.86901 178.0000
+ 17.29481 179.0000
+ 18.09291 180.0000
+ 15.34813 181.0000
+ 14.77997 182.0000
+ 13.87832 183.0000
+ 12.88891 184.0000
+ 16.20763 185.0000
+ 16.29024 186.0000
+ 15.29712 187.0000
+ 14.97839 188.0000
+ 12.11330 189.0000
+ 14.24168 190.0000
+ 12.53824 191.0000
+ 15.19818 192.0000
+ 11.70478 193.0000
+ 15.83745 194.0000
+ 10.035850 195.0000
+ 9.307574 196.0000
+ 12.86800 197.0000
+ 8.571671 198.0000
+ 11.60415 199.0000
+ 12.42772 200.0000
+ 11.23627 201.0000
+ 11.13198 202.0000
+ 7.761117 203.0000
+ 6.758250 204.0000
+ 14.23375 205.0000
+ 10.63876 206.0000
+ 8.893581 207.0000
+ 11.55398 208.0000
+ 11.57221 209.0000
+ 11.58347 210.0000
+ 9.724857 211.0000
+ 11.43854 212.0000
+ 11.22636 213.0000
+ 10.170150 214.0000
+ 12.50765 215.0000
+ 6.200494 216.0000
+ 9.018902 217.0000
+ 10.80557 218.0000
+ 13.09591 219.0000
+ 3.914033 220.0000
+ 9.567723 221.0000
+ 8.038338 222.0000
+ 10.230960 223.0000
+ 9.367358 224.0000
+ 7.695937 225.0000
+ 6.118552 226.0000
+ 8.793192 227.0000
+ 7.796682 228.0000
+ 12.45064 229.0000
+ 10.61601 230.0000
+ 6.001000 231.0000
+ 6.765096 232.0000
+ 8.764652 233.0000
+ 4.586417 234.0000
+ 8.390782 235.0000
+ 7.209201 236.0000
+ 10.012090 237.0000
+ 7.327461 238.0000
+ 6.525136 239.0000
+ 2.840065 240.0000
+ 10.323710 241.0000
+ 4.790035 242.0000
+ 8.376431 243.0000
+ 6.263980 244.0000
+ 2.705892 245.0000
+ 8.362109 246.0000
+ 8.983507 247.0000
+ 3.362469 248.0000
+ 1.182678 249.0000
+ 4.875312 250.0000
diff --git a/NIST_STRD/Hahn1.dat b/NIST_STRD/Hahn1.dat
new file mode 100644
index 0000000..7857591
--- /dev/null
+++ b/NIST_STRD/Hahn1.dat
@@ -0,0 +1,296 @@
+NIST/ITL StRD
+Dataset Name: Hahn1 (Hahn1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 47)
+ Certified Values (lines 41 to 52)
+ Data (lines 61 to 296)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ the thermal expansion of copper. The response
+ variable is the coefficient of thermal expansion, and
+              the predictor variable is temperature in
+              kelvin.
+
+
+Reference: Hahn, T., NIST (197?).
+ Copper Thermal Expansion Study.
+
+
+
+
+
+Data: 1 Response (y = coefficient of thermal expansion)
+            1 Predictor (x = temperature, kelvin)
+ 236 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Rational Class (cubic/cubic)
+ 7 Parameters (b1 to b7)
+
+ y = (b1+b2*x+b3*x**2+b4*x**3) /
+ (1+b5*x+b6*x**2+b7*x**3) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 10 1 1.0776351733E+00 1.7070154742E-01
+ b2 = -1 -0.1 -1.2269296921E-01 1.2000289189E-02
+ b3 = 0.05 0.005 4.0863750610E-03 2.2508314937E-04
+ b4 = -0.00001 -0.000001 -1.4262662514E-06 2.7578037666E-07
+ b5 = -0.05 -0.005 -5.7609940901E-03 2.4712888219E-04
+ b6 = 0.001 0.0001 2.4053735503E-04 1.0449373768E-05
+ b7 = -0.000001 -0.0000001 -1.2314450199E-07 1.3027335327E-08
+
+Residual Sum of Squares: 1.5324382854E+00
+Residual Standard Deviation: 8.1803852243E-02
+Degrees of Freedom: 229
+Number of Observations: 236
+
+
+
+
+
+
+
+Data: y x
+ .591E0 24.41E0
+ 1.547E0 34.82E0
+ 2.902E0 44.09E0
+ 2.894E0 45.07E0
+ 4.703E0 54.98E0
+ 6.307E0 65.51E0
+ 7.03E0 70.53E0
+ 7.898E0 75.70E0
+ 9.470E0 89.57E0
+ 9.484E0 91.14E0
+ 10.072E0 96.40E0
+ 10.163E0 97.19E0
+ 11.615E0 114.26E0
+ 12.005E0 120.25E0
+ 12.478E0 127.08E0
+ 12.982E0 133.55E0
+ 12.970E0 133.61E0
+ 13.926E0 158.67E0
+ 14.452E0 172.74E0
+ 14.404E0 171.31E0
+ 15.190E0 202.14E0
+ 15.550E0 220.55E0
+ 15.528E0 221.05E0
+ 15.499E0 221.39E0
+ 16.131E0 250.99E0
+ 16.438E0 268.99E0
+ 16.387E0 271.80E0
+ 16.549E0 271.97E0
+ 16.872E0 321.31E0
+ 16.830E0 321.69E0
+ 16.926E0 330.14E0
+ 16.907E0 333.03E0
+ 16.966E0 333.47E0
+ 17.060E0 340.77E0
+ 17.122E0 345.65E0
+ 17.311E0 373.11E0
+ 17.355E0 373.79E0
+ 17.668E0 411.82E0
+ 17.767E0 419.51E0
+ 17.803E0 421.59E0
+ 17.765E0 422.02E0
+ 17.768E0 422.47E0
+ 17.736E0 422.61E0
+ 17.858E0 441.75E0
+ 17.877E0 447.41E0
+ 17.912E0 448.7E0
+ 18.046E0 472.89E0
+ 18.085E0 476.69E0
+ 18.291E0 522.47E0
+ 18.357E0 522.62E0
+ 18.426E0 524.43E0
+ 18.584E0 546.75E0
+ 18.610E0 549.53E0
+ 18.870E0 575.29E0
+ 18.795E0 576.00E0
+ 19.111E0 625.55E0
+ .367E0 20.15E0
+ .796E0 28.78E0
+ 0.892E0 29.57E0
+ 1.903E0 37.41E0
+ 2.150E0 39.12E0
+ 3.697E0 50.24E0
+ 5.870E0 61.38E0
+ 6.421E0 66.25E0
+ 7.422E0 73.42E0
+ 9.944E0 95.52E0
+ 11.023E0 107.32E0
+ 11.87E0 122.04E0
+ 12.786E0 134.03E0
+ 14.067E0 163.19E0
+ 13.974E0 163.48E0
+ 14.462E0 175.70E0
+ 14.464E0 179.86E0
+ 15.381E0 211.27E0
+ 15.483E0 217.78E0
+ 15.59E0 219.14E0
+ 16.075E0 262.52E0
+ 16.347E0 268.01E0
+ 16.181E0 268.62E0
+ 16.915E0 336.25E0
+ 17.003E0 337.23E0
+ 16.978E0 339.33E0
+ 17.756E0 427.38E0
+ 17.808E0 428.58E0
+ 17.868E0 432.68E0
+ 18.481E0 528.99E0
+ 18.486E0 531.08E0
+ 19.090E0 628.34E0
+ 16.062E0 253.24E0
+ 16.337E0 273.13E0
+ 16.345E0 273.66E0
+ 16.388E0 282.10E0
+ 17.159E0 346.62E0
+ 17.116E0 347.19E0
+ 17.164E0 348.78E0
+ 17.123E0 351.18E0
+ 17.979E0 450.10E0
+ 17.974E0 450.35E0
+ 18.007E0 451.92E0
+ 17.993E0 455.56E0
+ 18.523E0 552.22E0
+ 18.669E0 553.56E0
+ 18.617E0 555.74E0
+ 19.371E0 652.59E0
+ 19.330E0 656.20E0
+ 0.080E0 14.13E0
+ 0.248E0 20.41E0
+ 1.089E0 31.30E0
+ 1.418E0 33.84E0
+ 2.278E0 39.70E0
+ 3.624E0 48.83E0
+ 4.574E0 54.50E0
+ 5.556E0 60.41E0
+ 7.267E0 72.77E0
+ 7.695E0 75.25E0
+ 9.136E0 86.84E0
+ 9.959E0 94.88E0
+ 9.957E0 96.40E0
+ 11.600E0 117.37E0
+ 13.138E0 139.08E0
+ 13.564E0 147.73E0
+ 13.871E0 158.63E0
+ 13.994E0 161.84E0
+ 14.947E0 192.11E0
+ 15.473E0 206.76E0
+ 15.379E0 209.07E0
+ 15.455E0 213.32E0
+ 15.908E0 226.44E0
+ 16.114E0 237.12E0
+ 17.071E0 330.90E0
+ 17.135E0 358.72E0
+ 17.282E0 370.77E0
+ 17.368E0 372.72E0
+ 17.483E0 396.24E0
+ 17.764E0 416.59E0
+ 18.185E0 484.02E0
+ 18.271E0 495.47E0
+ 18.236E0 514.78E0
+ 18.237E0 515.65E0
+ 18.523E0 519.47E0
+ 18.627E0 544.47E0
+ 18.665E0 560.11E0
+ 19.086E0 620.77E0
+ 0.214E0 18.97E0
+ 0.943E0 28.93E0
+ 1.429E0 33.91E0
+ 2.241E0 40.03E0
+ 2.951E0 44.66E0
+ 3.782E0 49.87E0
+ 4.757E0 55.16E0
+ 5.602E0 60.90E0
+ 7.169E0 72.08E0
+ 8.920E0 85.15E0
+ 10.055E0 97.06E0
+ 12.035E0 119.63E0
+ 12.861E0 133.27E0
+ 13.436E0 143.84E0
+ 14.167E0 161.91E0
+ 14.755E0 180.67E0
+ 15.168E0 198.44E0
+ 15.651E0 226.86E0
+ 15.746E0 229.65E0
+ 16.216E0 258.27E0
+ 16.445E0 273.77E0
+ 16.965E0 339.15E0
+ 17.121E0 350.13E0
+ 17.206E0 362.75E0
+ 17.250E0 371.03E0
+ 17.339E0 393.32E0
+ 17.793E0 448.53E0
+ 18.123E0 473.78E0
+ 18.49E0 511.12E0
+ 18.566E0 524.70E0
+ 18.645E0 548.75E0
+ 18.706E0 551.64E0
+ 18.924E0 574.02E0
+ 19.1E0 623.86E0
+ 0.375E0 21.46E0
+ 0.471E0 24.33E0
+ 1.504E0 33.43E0
+ 2.204E0 39.22E0
+ 2.813E0 44.18E0
+ 4.765E0 55.02E0
+ 9.835E0 94.33E0
+ 10.040E0 96.44E0
+ 11.946E0 118.82E0
+ 12.596E0 128.48E0
+ 13.303E0 141.94E0
+ 13.922E0 156.92E0
+ 14.440E0 171.65E0
+ 14.951E0 190.00E0
+ 15.627E0 223.26E0
+ 15.639E0 223.88E0
+ 15.814E0 231.50E0
+ 16.315E0 265.05E0
+ 16.334E0 269.44E0
+ 16.430E0 271.78E0
+ 16.423E0 273.46E0
+ 17.024E0 334.61E0
+ 17.009E0 339.79E0
+ 17.165E0 349.52E0
+ 17.134E0 358.18E0
+ 17.349E0 377.98E0
+ 17.576E0 394.77E0
+ 17.848E0 429.66E0
+ 18.090E0 468.22E0
+ 18.276E0 487.27E0
+ 18.404E0 519.54E0
+ 18.519E0 523.03E0
+ 19.133E0 612.99E0
+ 19.074E0 638.59E0
+ 19.239E0 641.36E0
+ 19.280E0 622.05E0
+ 19.101E0 631.50E0
+ 19.398E0 663.97E0
+ 19.252E0 646.9E0
+ 19.89E0 748.29E0
+ 20.007E0 749.21E0
+ 19.929E0 750.14E0
+ 19.268E0 647.04E0
+ 19.324E0 646.89E0
+ 20.049E0 746.9E0
+ 20.107E0 748.43E0
+ 20.062E0 747.35E0
+ 20.065E0 749.27E0
+ 19.286E0 647.61E0
+ 19.972E0 747.78E0
+ 20.088E0 750.51E0
+ 20.743E0 851.37E0
+ 20.83E0 845.97E0
+ 20.935E0 847.54E0
+ 21.035E0 849.93E0
+ 20.93E0 851.61E0
+ 21.074E0 849.75E0
+ 21.085E0 850.98E0
+ 20.935E0 848.23E0
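
For rational models such as Hahn1, lmfit's Model class offers a higher-level
alternative to a hand-written residual function. A sketch under the same
assumptions about file location and the fixed data offset:

    import numpy as np
    from lmfit import Model

    def hahn1(x, b1, b2, b3, b4, b5, b6, b7):
        # cubic/cubic rational model from the header above
        num = b1 + b2*x + b3*x**2 + b4*x**3
        den = 1 + b5*x + b6*x**2 + b7*x**3
        return num / den

    data = np.loadtxt('NIST_STRD/Hahn1.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]

    # "Start 2" values; Start 1 (10, -1, 0.05, ...) is the harder test
    result = Model(hahn1).fit(y, x=x, b1=1, b2=-0.1, b3=0.005, b4=-1e-6,
                              b5=-0.005, b6=1e-4, b7=-1e-7)
    print(result.fit_report())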
diff --git a/NIST_STRD/Kirby2.dat b/NIST_STRD/Kirby2.dat
new file mode 100644
index 0000000..8f6c6db
--- /dev/null
+++ b/NIST_STRD/Kirby2.dat
@@ -0,0 +1,211 @@
+NIST/ITL StRD
+Dataset Name: Kirby2 (Kirby2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 45)
+ Certified Values (lines 41 to 50)
+ Data (lines 61 to 211)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+              scanning electron microscope line width standards.
+
+
+Reference: Kirby, R., NIST (197?).
+ Scanning electron microscope line width standards.
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 151 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Rational Class (quadratic/quadratic)
+ 5 Parameters (b1 to b5)
+
+ y = (b1 + b2*x + b3*x**2) /
+ (1 + b4*x + b5*x**2) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 2 1.5 1.6745063063E+00 8.7989634338E-02
+ b2 = -0.1 -0.15 -1.3927397867E-01 4.1182041386E-03
+ b3 = 0.003 0.0025 2.5961181191E-03 4.1856520458E-05
+ b4 = -0.001 -0.0015 -1.7241811870E-03 5.8931897355E-05
+ b5 = 0.00001 0.00002 2.1664802578E-05 2.0129761919E-07
+
+Residual Sum of Squares: 3.9050739624E+00
+Residual Standard Deviation: 1.6354535131E-01
+Degrees of Freedom: 146
+Number of Observations: 151
+
+
+
+
+
+
+
+
+
+Data: y x
+ 0.0082E0 9.65E0
+ 0.0112E0 10.74E0
+ 0.0149E0 11.81E0
+ 0.0198E0 12.88E0
+ 0.0248E0 14.06E0
+ 0.0324E0 15.28E0
+ 0.0420E0 16.63E0
+ 0.0549E0 18.19E0
+ 0.0719E0 19.88E0
+ 0.0963E0 21.84E0
+ 0.1291E0 24.00E0
+ 0.1710E0 26.25E0
+ 0.2314E0 28.86E0
+ 0.3227E0 31.85E0
+ 0.4809E0 35.79E0
+ 0.7084E0 40.18E0
+ 1.0220E0 44.74E0
+ 1.4580E0 49.53E0
+ 1.9520E0 53.94E0
+ 2.5410E0 58.29E0
+ 3.2230E0 62.63E0
+ 3.9990E0 67.03E0
+ 4.8520E0 71.25E0
+ 5.7320E0 75.22E0
+ 6.7270E0 79.33E0
+ 7.8350E0 83.56E0
+ 9.0250E0 87.75E0
+ 10.2670E0 91.93E0
+ 11.5780E0 96.10E0
+ 12.9440E0 100.28E0
+ 14.3770E0 104.46E0
+ 15.8560E0 108.66E0
+ 17.3310E0 112.71E0
+ 18.8850E0 116.88E0
+ 20.5750E0 121.33E0
+ 22.3200E0 125.79E0
+ 22.3030E0 125.79E0
+ 23.4600E0 128.74E0
+ 24.0600E0 130.27E0
+ 25.2720E0 133.33E0
+ 25.8530E0 134.79E0
+ 27.1100E0 137.93E0
+ 27.6580E0 139.33E0
+ 28.9240E0 142.46E0
+ 29.5110E0 143.90E0
+ 30.7100E0 146.91E0
+ 31.3500E0 148.51E0
+ 32.5200E0 151.41E0
+ 33.2300E0 153.17E0
+ 34.3300E0 155.97E0
+ 35.0600E0 157.76E0
+ 36.1700E0 160.56E0
+ 36.8400E0 162.30E0
+ 38.0100E0 165.21E0
+ 38.6700E0 166.90E0
+ 39.8700E0 169.92E0
+ 40.0300E0 170.32E0
+ 40.5000E0 171.54E0
+ 41.3700E0 173.79E0
+ 41.6700E0 174.57E0
+ 42.3100E0 176.25E0
+ 42.7300E0 177.34E0
+ 43.4600E0 179.19E0
+ 44.1400E0 181.02E0
+ 44.5500E0 182.08E0
+ 45.2200E0 183.88E0
+ 45.9200E0 185.75E0
+ 46.3000E0 186.80E0
+ 47.0000E0 188.63E0
+ 47.6800E0 190.45E0
+ 48.0600E0 191.48E0
+ 48.7400E0 193.35E0
+ 49.4100E0 195.22E0
+ 49.7600E0 196.23E0
+ 50.4300E0 198.05E0
+ 51.1100E0 199.97E0
+ 51.5000E0 201.06E0
+ 52.1200E0 202.83E0
+ 52.7600E0 204.69E0
+ 53.1800E0 205.86E0
+ 53.7800E0 207.58E0
+ 54.4600E0 209.50E0
+ 54.8300E0 210.65E0
+ 55.4000E0 212.33E0
+ 56.4300E0 215.43E0
+ 57.0300E0 217.16E0
+ 58.0000E0 220.21E0
+ 58.6100E0 221.98E0
+ 59.5800E0 225.06E0
+ 60.1100E0 226.79E0
+ 61.1000E0 229.92E0
+ 61.6500E0 231.69E0
+ 62.5900E0 234.77E0
+ 63.1200E0 236.60E0
+ 64.0300E0 239.63E0
+ 64.6200E0 241.50E0
+ 65.4900E0 244.48E0
+ 66.0300E0 246.40E0
+ 66.8900E0 249.35E0
+ 67.4200E0 251.32E0
+ 68.2300E0 254.22E0
+ 68.7700E0 256.24E0
+ 69.5900E0 259.11E0
+ 70.1100E0 261.18E0
+ 70.8600E0 264.02E0
+ 71.4300E0 266.13E0
+ 72.1600E0 268.94E0
+ 72.7000E0 271.09E0
+ 73.4000E0 273.87E0
+ 73.9300E0 276.08E0
+ 74.6000E0 278.83E0
+ 75.1600E0 281.08E0
+ 75.8200E0 283.81E0
+ 76.3400E0 286.11E0
+ 76.9800E0 288.81E0
+ 77.4800E0 291.08E0
+ 78.0800E0 293.75E0
+ 78.6000E0 295.99E0
+ 79.1700E0 298.64E0
+ 79.6200E0 300.84E0
+ 79.8800E0 302.02E0
+ 80.1900E0 303.48E0
+ 80.6600E0 305.65E0
+ 81.2200E0 308.27E0
+ 81.6600E0 310.41E0
+ 82.1600E0 313.01E0
+ 82.5900E0 315.12E0
+ 83.1400E0 317.71E0
+ 83.5000E0 319.79E0
+ 84.0000E0 322.36E0
+ 84.4000E0 324.42E0
+ 84.8900E0 326.98E0
+ 85.2600E0 329.01E0
+ 85.7400E0 331.56E0
+ 86.0700E0 333.56E0
+ 86.5400E0 336.10E0
+ 86.8900E0 338.08E0
+ 87.3200E0 340.60E0
+ 87.6500E0 342.57E0
+ 88.1000E0 345.08E0
+ 88.4300E0 347.02E0
+ 88.8300E0 349.52E0
+ 89.1200E0 351.44E0
+ 89.5400E0 353.93E0
+ 89.8500E0 355.83E0
+ 90.2500E0 358.32E0
+ 90.5500E0 360.20E0
+ 90.9300E0 362.67E0
+ 91.2000E0 364.53E0
+ 91.5500E0 367.00E0
+ 92.2000E0 371.30E0
diff --git a/NIST_STRD/Lanczos1.dat b/NIST_STRD/Lanczos1.dat
new file mode 100644
index 0000000..3db894b
--- /dev/null
+++ b/NIST_STRD/Lanczos1.dat
@@ -0,0 +1,84 @@
+NIST/ITL StRD
+Dataset Name: Lanczos1 (Lanczos1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 46)
+ Certified Values (lines 41 to 51)
+ Data (lines 61 to 84)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are taken from an example discussed in
+ Lanczos (1956). The data were generated to 14-digits
+ of accuracy using
+ f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x)
+ + 1.5576*exp(-5*x).
+
+
+Reference: Lanczos, C. (1956).
+ Applied Analysis.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 272-280.
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 24 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 6 Parameters (b1 to b6)
+
+ y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1.2 0.5 9.5100000027E-02 5.3347304234E-11
+ b2 = 0.3 0.7 1.0000000001E+00 2.7473038179E-10
+ b3 = 5.6 3.6 8.6070000013E-01 1.3576062225E-10
+ b4 = 5.5 4.2 3.0000000002E+00 3.3308253069E-10
+ b5 = 6.5 4 1.5575999998E+00 1.8815731448E-10
+ b6 = 7.6 6.3 5.0000000001E+00 1.1057500538E-10
+
+Residual Sum of Squares: 1.4307867721E-25
+Residual Standard Deviation: 8.9156129349E-14
+Degrees of Freedom: 18
+Number of Observations: 24
+
+
+
+
+
+
+
+
+Data: y x
+ 2.513400000000E+00 0.000000000000E+00
+ 2.044333373291E+00 5.000000000000E-02
+ 1.668404436564E+00 1.000000000000E-01
+ 1.366418021208E+00 1.500000000000E-01
+ 1.123232487372E+00 2.000000000000E-01
+ 9.268897180037E-01 2.500000000000E-01
+ 7.679338563728E-01 3.000000000000E-01
+ 6.388775523106E-01 3.500000000000E-01
+ 5.337835317402E-01 4.000000000000E-01
+ 4.479363617347E-01 4.500000000000E-01
+ 3.775847884350E-01 5.000000000000E-01
+ 3.197393199326E-01 5.500000000000E-01
+ 2.720130773746E-01 6.000000000000E-01
+ 2.324965529032E-01 6.500000000000E-01
+ 1.996589546065E-01 7.000000000000E-01
+ 1.722704126914E-01 7.500000000000E-01
+ 1.493405660168E-01 8.000000000000E-01
+ 1.300700206922E-01 8.500000000000E-01
+ 1.138119324644E-01 9.000000000000E-01
+ 1.000415587559E-01 9.500000000000E-01
+ 8.833209084540E-02 1.000000000000E+00
+ 7.833544019350E-02 1.050000000000E+00
+ 6.976693743449E-02 1.100000000000E+00
+ 6.239312536719E-02 1.150000000000E+00
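
Because Lanczos1 was generated to 14 digits, its certified residual sum of
squares is essentially zero (1.4307867721E-25), which makes it a sensitive
check on solver precision. A sketch that runs both certified start vectors and
compares the fitted parameters against the generating coefficients quoted in
the description, under the usual file-layout assumptions:

    import numpy as np
    from lmfit import Parameters, minimize

    def resid(pars, x, y):
        v = pars.valuesdict()
        return (v['b1'] * np.exp(-v['b2'] * x)
                + v['b3'] * np.exp(-v['b4'] * x)
                + v['b5'] * np.exp(-v['b6'] * x)) - y

    data = np.loadtxt('NIST_STRD/Lanczos1.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]
    certified = np.array([0.0951, 1.0, 0.8607, 3.0, 1.5576, 5.0])

    for p0 in ([1.2, 0.3, 5.6, 5.5, 6.5, 7.6],    # Start 1
               [0.5, 0.7, 3.6, 4.2, 4.0, 6.3]):   # Start 2
        pars = Parameters()
        for i, value in enumerate(p0, 1):
            pars.add(f'b{i}', value=value)
        out = minimize(resid, pars, args=(x, y))
        fitted = np.array([out.params[f'b{i}'].value for i in range(1, 7)])
        print(np.abs(fitted - certified).max())   # worst-parameter error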
diff --git a/NIST_STRD/Lanczos2.dat b/NIST_STRD/Lanczos2.dat
new file mode 100644
index 0000000..7f90fa0
--- /dev/null
+++ b/NIST_STRD/Lanczos2.dat
@@ -0,0 +1,84 @@
+NIST/ITL StRD
+Dataset Name: Lanczos2 (Lanczos2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 46)
+ Certified Values (lines 41 to 51)
+ Data (lines 61 to 84)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are taken from an example discussed in
+ Lanczos (1956). The data were generated to 6-digits
+ of accuracy using
+ f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x)
+ + 1.5576*exp(-5*x).
+
+
+Reference: Lanczos, C. (1956).
+ Applied Analysis.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 272-280.
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 24 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 6 Parameters (b1 to b6)
+
+ y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1.2 0.5 9.6251029939E-02 6.6770575477E-04
+ b2 = 0.3 0.7 1.0057332849E+00 3.3989646176E-03
+ b3 = 5.6 3.6 8.6424689056E-01 1.7185846685E-03
+ b4 = 5.5 4.2 3.0078283915E+00 4.1707005856E-03
+ b5 = 6.5 4 1.5529016879E+00 2.3744381417E-03
+ b6 = 7.6 6.3 5.0028798100E+00 1.3958787284E-03
+
+Residual Sum of Squares: 2.2299428125E-11
+Residual Standard Deviation: 1.1130395851E-06
+Degrees of Freedom: 18
+Number of Observations: 24
+
+
+
+
+
+
+
+
+Data: y x
+ 2.51340E+00 0.00000E+00
+ 2.04433E+00 5.00000E-02
+ 1.66840E+00 1.00000E-01
+ 1.36642E+00 1.50000E-01
+ 1.12323E+00 2.00000E-01
+ 9.26890E-01 2.50000E-01
+ 7.67934E-01 3.00000E-01
+ 6.38878E-01 3.50000E-01
+ 5.33784E-01 4.00000E-01
+ 4.47936E-01 4.50000E-01
+ 3.77585E-01 5.00000E-01
+ 3.19739E-01 5.50000E-01
+ 2.72013E-01 6.00000E-01
+ 2.32497E-01 6.50000E-01
+ 1.99659E-01 7.00000E-01
+ 1.72270E-01 7.50000E-01
+ 1.49341E-01 8.00000E-01
+ 1.30070E-01 8.50000E-01
+ 1.13812E-01 9.00000E-01
+ 1.00042E-01 9.50000E-01
+ 8.83321E-02 1.00000E+00
+ 7.83354E-02 1.05000E+00
+ 6.97669E-02 1.10000E+00
+ 6.23931E-02 1.15000E+00
diff --git a/NIST_STRD/Lanczos3.dat b/NIST_STRD/Lanczos3.dat
new file mode 100644
index 0000000..879af6a
--- /dev/null
+++ b/NIST_STRD/Lanczos3.dat
@@ -0,0 +1,84 @@
+NIST/ITL StRD
+Dataset Name: Lanczos3 (Lanczos3.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 46)
+ Certified Values (lines 41 to 51)
+ Data (lines 61 to 84)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are taken from an example discussed in
+ Lanczos (1956). The data were generated to 5-digits
+ of accuracy using
+ f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x)
+ + 1.5576*exp(-5*x).
+
+
+Reference: Lanczos, C. (1956).
+ Applied Analysis.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 272-280.
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 24 Observations
+ Lower Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 6 Parameters (b1 to b6)
+
+ y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1.2 0.5 8.6816414977E-02 1.7197908859E-02
+ b2 = 0.3 0.7 9.5498101505E-01 9.7041624475E-02
+ b3 = 5.6 3.6 8.4400777463E-01 4.1488663282E-02
+ b4 = 5.5 4.2 2.9515951832E+00 1.0766312506E-01
+ b5 = 6.5 4 1.5825685901E+00 5.8371576281E-02
+ b6 = 7.6 6.3 4.9863565084E+00 3.4436403035E-02
+
+Residual Sum of Squares: 1.6117193594E-08
+Residual Standard Deviation: 2.9923229172E-05
+Degrees of Freedom: 18
+Number of Observations: 24
+
+
+
+
+
+
+
+
+Data: y x
+ 2.5134E+00 0.00000E+00
+ 2.0443E+00 5.00000E-02
+ 1.6684E+00 1.00000E-01
+ 1.3664E+00 1.50000E-01
+ 1.1232E+00 2.00000E-01
+ 0.9269E+00 2.50000E-01
+ 0.7679E+00 3.00000E-01
+ 0.6389E+00 3.50000E-01
+ 0.5338E+00 4.00000E-01
+ 0.4479E+00 4.50000E-01
+ 0.3776E+00 5.00000E-01
+ 0.3197E+00 5.50000E-01
+ 0.2720E+00 6.00000E-01
+ 0.2325E+00 6.50000E-01
+ 0.1997E+00 7.00000E-01
+ 0.1723E+00 7.50000E-01
+ 0.1493E+00 8.00000E-01
+ 0.1301E+00 8.50000E-01
+ 0.1138E+00 9.00000E-01
+ 0.1000E+00 9.50000E-01
+ 0.0883E+00 1.00000E+00
+ 0.0783E+00 1.05000E+00
+ 0.0698E+00 1.10000E+00
+ 0.0624E+00 1.15000E+00
diff --git a/NIST_STRD/MGH09.dat b/NIST_STRD/MGH09.dat
new file mode 100644
index 0000000..42ee348
--- /dev/null
+++ b/NIST_STRD/MGH09.dat
@@ -0,0 +1,71 @@
+NIST/ITL StRD
+Dataset Name: MGH09 (MGH09.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 44)
+ Certified Values (lines 41 to 49)
+ Data (lines 61 to 71)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This problem was found to be difficult for some very
+ good algorithms. There is a local minimum at (+inf,
+ -14.07..., -inf, -inf) with final sum of squares
+ 0.00102734....
+
+ See More, J. J., Garbow, B. S., and Hillstrom, K. E.
+ (1981). Testing unconstrained optimization software.
+ ACM Transactions on Mathematical Software. 7(1):
+ pp. 17-41.
+
+Reference: Kowalik, J.S., and M. R. Osborne, (1978).
+ Methods for Unconstrained Optimization Problems.
+ New York, NY: Elsevier North-Holland.
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 11 Observations
+ Higher Level of Difficulty
+ Generated Data
+
+Model: Rational Class (linear/quadratic)
+ 4 Parameters (b1 to b4)
+
+ y = b1*(x**2+x*b2) / (x**2+x*b3+b4) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 25 0.25 1.9280693458E-01 1.1435312227E-02
+ b2 = 39 0.39 1.9128232873E-01 1.9633220911E-01
+ b3 = 41.5 0.415 1.2305650693E-01 8.0842031232E-02
+ b4 = 39 0.39 1.3606233068E-01 9.0025542308E-02
+
+Residual Sum of Squares: 3.0750560385E-04
+Residual Standard Deviation: 6.6279236551E-03
+Degrees of Freedom: 7
+Number of Observations: 11
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 1.957000E-01 4.000000E+00
+ 1.947000E-01 2.000000E+00
+ 1.735000E-01 1.000000E+00
+ 1.600000E-01 5.000000E-01
+ 8.440000E-02 2.500000E-01
+ 6.270000E-02 1.670000E-01
+ 4.560000E-02 1.250000E-01
+ 3.420000E-02 1.000000E-01
+ 3.230000E-02 8.330000E-02
+ 2.350000E-02 7.140000E-02
+ 2.460000E-02 6.250000E-02
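
The local minimum at infinite parameter values noted in the MGH09 description
is one reason the problem defeats some solvers. One hedge, sketched below, is
to place loose bounds on the parameters so the fit cannot run off to infinity;
whether a given start actually diverges depends on the algorithm, so treat this
as illustrative rather than prescriptive:

    import numpy as np
    from lmfit import Parameters, minimize

    def resid(pars, x, y):
        v = pars.valuesdict()
        return v['b1'] * (x**2 + x * v['b2']) / (x**2 + x * v['b3'] + v['b4']) - y

    data = np.loadtxt('NIST_STRD/MGH09.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]

    pars = Parameters()
    for name, value in zip(('b1', 'b2', 'b3', 'b4'), (25, 39, 41.5, 39)):
        pars.add(name, value=value, min=-100, max=100)  # keep parameters finite

    out = minimize(resid, pars, args=(x, y))
    print([out.params[n].value for n in ('b1', 'b2', 'b3', 'b4')])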
diff --git a/NIST_STRD/MGH10.dat b/NIST_STRD/MGH10.dat
new file mode 100644
index 0000000..5e3d946
--- /dev/null
+++ b/NIST_STRD/MGH10.dat
@@ -0,0 +1,76 @@
+NIST/ITL StRD
+Dataset Name: MGH10 (MGH10.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 76)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This problem was found to be difficult for some very
+ good algorithms.
+
+ See More, J. J., Garbow, B. S., and Hillstrom, K. E.
+ (1981). Testing unconstrained optimization software.
+ ACM Transactions on Mathematical Software. 7(1):
+ pp. 17-41.
+
+Reference: Meyer, R. R. (1970).
+ Theoretical and computational aspects of nonlinear
+ regression. In Nonlinear Programming, Rosen,
+ Mangasarian and Ritter (Eds).
+ New York, NY: Academic Press, pp. 465-486.
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 16 Observations
+ Higher Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = b1 * exp[b2/(x+b3)] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 2 0.02 5.6096364710E-03 1.5687892471E-04
+ b2 = 400000 4000 6.1813463463E+03 2.3309021107E+01
+ b3 = 25000 250 3.4522363462E+02 7.8486103508E-01
+
+Residual Sum of Squares: 8.7945855171E+01
+Residual Standard Deviation: 2.6009740065E+00
+Degrees of Freedom: 13
+Number of Observations: 16
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 3.478000E+04 5.000000E+01
+ 2.861000E+04 5.500000E+01
+ 2.365000E+04 6.000000E+01
+ 1.963000E+04 6.500000E+01
+ 1.637000E+04 7.000000E+01
+ 1.372000E+04 7.500000E+01
+ 1.154000E+04 8.000000E+01
+ 9.744000E+03 8.500000E+01
+ 8.261000E+03 9.000000E+01
+ 7.030000E+03 9.500000E+01
+ 6.005000E+03 1.000000E+02
+ 5.147000E+03 1.050000E+02
+ 4.427000E+03 1.100000E+02
+ 3.820000E+03 1.150000E+02
+ 3.307000E+03 1.200000E+02
+ 2.872000E+03 1.250000E+02
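
MGH10 is hard largely because of parameter scale: the certified b2 is roughly
six orders of magnitude larger than b1. A sketch starting from the better-scaled
Start 2 and using lmfit's least_squares method (the scipy trust-region backend);
the choice of method here is illustrative, not the only one that works:

    import numpy as np
    from lmfit import Parameters, minimize

    def resid(pars, x, y):
        v = pars.valuesdict()
        return v['b1'] * np.exp(v['b2'] / (x + v['b3'])) - y

    data = np.loadtxt('NIST_STRD/MGH10.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]

    pars = Parameters()
    pars.add('b1', value=0.02)    # Start 2; Start 1 (2, 400000, 25000)
    pars.add('b2', value=4000)    # is much farther from the solution
    pars.add('b3', value=250)

    out = minimize(resid, pars, args=(x, y), method='least_squares')
    print(out.params['b1'].value)  # certified: 5.6096364710E-03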
diff --git a/NIST_STRD/MGH17.dat b/NIST_STRD/MGH17.dat
new file mode 100644
index 0000000..b7463cd
--- /dev/null
+++ b/NIST_STRD/MGH17.dat
@@ -0,0 +1,93 @@
+NIST/ITL StRD
+Dataset Name: MGH17 (MGH17.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 45)
+ Certified Values (lines 41 to 50)
+ Data (lines 61 to 93)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This problem was found to be difficult for some very
+ good algorithms.
+
+ See More, J. J., Garbow, B. S., and Hillstrom, K. E.
+ (1981). Testing unconstrained optimization software.
+ ACM Transactions on Mathematical Software. 7(1):
+ pp. 17-41.
+
+Reference: Osborne, M. R. (1972).
+ Some aspects of nonlinear least squares
+ calculations. In Numerical Methods for Nonlinear
+ Optimization, Lootsma (Ed).
+ New York, NY: Academic Press, pp. 171-189.
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 33 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 5 Parameters (b1 to b5)
+
+ y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 50 0.5 3.7541005211E-01 2.0723153551E-03
+ b2 = 150 1.5 1.9358469127E+00 2.2031669222E-01
+ b3 = -100 -1 -1.4646871366E+00 2.2175707739E-01
+ b4 = 1 0.01 1.2867534640E-02 4.4861358114E-04
+ b5 = 2 0.02 2.2122699662E-02 8.9471996575E-04
+
+Residual Sum of Squares: 5.4648946975E-05
+Residual Standard Deviation: 1.3970497866E-03
+Degrees of Freedom: 28
+Number of Observations: 33
+
+
+
+
+
+
+
+
+
+Data: y x
+ 8.440000E-01 0.000000E+00
+ 9.080000E-01 1.000000E+01
+ 9.320000E-01 2.000000E+01
+ 9.360000E-01 3.000000E+01
+ 9.250000E-01 4.000000E+01
+ 9.080000E-01 5.000000E+01
+ 8.810000E-01 6.000000E+01
+ 8.500000E-01 7.000000E+01
+ 8.180000E-01 8.000000E+01
+ 7.840000E-01 9.000000E+01
+ 7.510000E-01 1.000000E+02
+ 7.180000E-01 1.100000E+02
+ 6.850000E-01 1.200000E+02
+ 6.580000E-01 1.300000E+02
+ 6.280000E-01 1.400000E+02
+ 6.030000E-01 1.500000E+02
+ 5.800000E-01 1.600000E+02
+ 5.580000E-01 1.700000E+02
+ 5.380000E-01 1.800000E+02
+ 5.220000E-01 1.900000E+02
+ 5.060000E-01 2.000000E+02
+ 4.900000E-01 2.100000E+02
+ 4.780000E-01 2.200000E+02
+ 4.670000E-01 2.300000E+02
+ 4.570000E-01 2.400000E+02
+ 4.480000E-01 2.500000E+02
+ 4.380000E-01 2.600000E+02
+ 4.310000E-01 2.700000E+02
+ 4.240000E-01 2.800000E+02
+ 4.200000E-01 2.900000E+02
+ 4.140000E-01 3.000000E+02
+ 4.110000E-01 3.100000E+02
+ 4.060000E-01 3.200000E+02
diff --git a/NIST_STRD/Misra1a.dat b/NIST_STRD/Misra1a.dat
new file mode 100644
index 0000000..10ee5c3
--- /dev/null
+++ b/NIST_STRD/Misra1a.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1a (Misra1a.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data: 1 Response Variable (y = volume)
+ 1 Predictor Variable (x = pressure)
+ 14 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*(1-exp[-b2*x]) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 250 2.3894212918E+02 2.7070075241E+00
+ b2 = 0.0001 0.0005 5.5015643181E-04 7.2668688436E-06
+
+Residual Sum of Squares: 1.2455138894E-01
+Residual Standard Deviation: 1.0187876330E-01
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/NIST_STRD/Misra1b.dat b/NIST_STRD/Misra1b.dat
new file mode 100644
index 0000000..3005ca6
--- /dev/null
+++ b/NIST_STRD/Misra1b.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1b (Misra1b.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data: 1 Response (y = volume)
+ 1 Predictor (x = pressure)
+ 14 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1 * (1-(1+b2*x/2)**(-2)) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 300 3.3799746163E+02 3.1643950207E+00
+ b2 = 0.0001 0.0002 3.9039091287E-04 4.2547321834E-06
+
+Residual Sum of Squares: 7.5464681533E-02
+Residual Standard Deviation: 7.9301471998E-02
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/NIST_STRD/Misra1c.dat b/NIST_STRD/Misra1c.dat
new file mode 100644
index 0000000..6e7dbbc
--- /dev/null
+++ b/NIST_STRD/Misra1c.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1c (Misra1c.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption.
+
+
+
+
+
+
+
+Data: 1 Response (y = volume)
+ 1 Predictor (x = pressure)
+ 14 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1 * (1-(1+2*b2*x)**(-.5)) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 600 6.3642725809E+02 4.6638326572E+00
+ b2 = 0.0001 0.0002 2.0813627256E-04 1.7728423155E-06
+
+Residual Sum of Squares: 4.0966836971E-02
+Residual Standard Deviation: 5.8428615257E-02
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/NIST_STRD/Misra1d.dat b/NIST_STRD/Misra1d.dat
new file mode 100644
index 0000000..98351d8
--- /dev/null
+++ b/NIST_STRD/Misra1d.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1d (Misra1d.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data: 1 Response (y = volume)
+ 1 Predictor (x = pressure)
+ 14 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*b2*x*((1+b2*x)**(-1)) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 450 4.3736970754E+02 3.6489174345E+00
+ b2 = 0.0001 0.0003 3.0227324449E-04 2.9334354479E-06
+
+Residual Sum of Squares: 5.6419295283E-02
+Residual Standard Deviation: 6.8568272111E-02
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/NIST_STRD/Models b/NIST_STRD/Models
new file mode 100644
index 0000000..ddc9ab5
--- /dev/null
+++ b/NIST_STRD/Models
@@ -0,0 +1,215 @@
+Bennett5.dat:Model: Miscellaneous Class
+Bennett5.dat- 3 Parameters (b1 to b3)
+Bennett5.dat-
+Bennett5.dat- y = b1 * (b2+x)**(-1/b3) + e
+Bennett5.dat-
+Bennett5.dat-
+Bennett5.dat-
+--
+BoxBOD.dat:Model: Exponential Class
+BoxBOD.dat- 2 Parameters (b1 and b2)
+BoxBOD.dat-
+BoxBOD.dat- y = b1*(1-exp[-b2*x]) + e
+BoxBOD.dat-
+BoxBOD.dat-
+BoxBOD.dat-
+--
+Chwirut1.dat:Model: Exponential Class
+Chwirut1.dat- 3 Parameters (b1 to b3)
+Chwirut1.dat-
+Chwirut1.dat- y = exp[-b1*x]/(b2+b3*x) + e
+Chwirut1.dat-
+Chwirut1.dat-
+Chwirut1.dat-
+--
+Chwirut2.dat:Model: Exponential Class
+Chwirut2.dat- 3 Parameters (b1 to b3)
+Chwirut2.dat-
+Chwirut2.dat- y = exp(-b1*x)/(b2+b3*x) + e
+Chwirut2.dat-
+Chwirut2.dat-
+Chwirut2.dat-
+--
+DanWood.dat:Model: Miscellaneous Class
+DanWood.dat- 2 Parameters (b1 and b2)
+DanWood.dat-
+DanWood.dat- y = b1*x**b2 + e
+DanWood.dat-
+DanWood.dat-
+DanWood.dat-
+--
+ENSO.dat:Model: Miscellaneous Class
+ENSO.dat- 9 Parameters (b1 to b9)
+ENSO.dat-
+ENSO.dat- y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 )
+ENSO.dat- + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 )
+ENSO.dat- + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 ) + e
+ENSO.dat-
+--
+Eckerle4.dat:Model: Exponential Class
+Eckerle4.dat- 3 Parameters (b1 to b3)
+Eckerle4.dat-
+Eckerle4.dat- y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2] + e
+Eckerle4.dat-
+Eckerle4.dat-
+Eckerle4.dat-
+--
+Gauss1.dat:Model: Exponential Class
+Gauss1.dat- 8 Parameters (b1 to b8)
+Gauss1.dat-
+Gauss1.dat- y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+Gauss1.dat- + b6*exp( -(x-b7)**2 / b8**2 ) + e
+Gauss1.dat-
+Gauss1.dat-
+--
+Gauss2.dat:Model: Exponential Class
+Gauss2.dat- 8 Parameters (b1 to b8)
+Gauss2.dat-
+Gauss2.dat- y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+Gauss2.dat- + b6*exp( -(x-b7)**2 / b8**2 ) + e
+Gauss2.dat-
+Gauss2.dat-
+--
+Gauss3.dat:Model: Exponential Class
+Gauss3.dat- 8 Parameters (b1 to b8)
+Gauss3.dat-
+Gauss3.dat- y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+Gauss3.dat- + b6*exp( -(x-b7)**2 / b8**2 ) + e
+Gauss3.dat-
+Gauss3.dat-
+--
+Hahn1.dat:Model: Rational Class (cubic/cubic)
+Hahn1.dat- 7 Parameters (b1 to b7)
+Hahn1.dat-
+Hahn1.dat- y = (b1+b2*x+b3*x**2+b4*x**3) /
+Hahn1.dat- (1+b5*x+b6*x**2+b7*x**3) + e
+Hahn1.dat-
+Hahn1.dat-
+--
+Kirby2.dat:Model: Rational Class (quadratic/quadratic)
+Kirby2.dat- 5 Parameters (b1 to b5)
+Kirby2.dat-
+Kirby2.dat- y = (b1 + b2*x + b3*x**2) /
+Kirby2.dat- (1 + b4*x + b5*x**2) + e
+Kirby2.dat-
+Kirby2.dat-
+--
+Lanczos1.dat:Model: Exponential Class
+Lanczos1.dat- 6 Parameters (b1 to b6)
+Lanczos1.dat-
+Lanczos1.dat- y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+Lanczos1.dat-
+Lanczos1.dat-
+Lanczos1.dat-
+--
+Lanczos2.dat:Model: Exponential Class
+Lanczos2.dat- 6 Parameters (b1 to b6)
+Lanczos2.dat-
+Lanczos2.dat- y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+Lanczos2.dat-
+Lanczos2.dat-
+Lanczos2.dat-
+--
+Lanczos3.dat:Model: Exponential Class
+Lanczos3.dat- 6 Parameters (b1 to b6)
+Lanczos3.dat-
+Lanczos3.dat- y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+Lanczos3.dat-
+Lanczos3.dat-
+Lanczos3.dat-
+--
+MGH09.dat:Model: Rational Class (linear/quadratic)
+MGH09.dat- 4 Parameters (b1 to b4)
+MGH09.dat-
+MGH09.dat- y = b1*(x**2+x*b2) / (x**2+x*b3+b4) + e
+MGH09.dat-
+MGH09.dat-
+MGH09.dat-
+--
+MGH10.dat:Model: Exponential Class
+MGH10.dat- 3 Parameters (b1 to b3)
+MGH10.dat-
+MGH10.dat- y = b1 * exp[b2/(x+b3)] + e
+MGH10.dat-
+MGH10.dat-
+MGH10.dat-
+--
+MGH17.dat:Model: Exponential Class
+MGH17.dat- 5 Parameters (b1 to b5)
+MGH17.dat-
+MGH17.dat- y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5] + e
+MGH17.dat-
+MGH17.dat-
+MGH17.dat-
+--
+Misra1a.dat:Model: Exponential Class
+Misra1a.dat- 2 Parameters (b1 and b2)
+Misra1a.dat-
+Misra1a.dat- y = b1*(1-exp[-b2*x]) + e
+Misra1a.dat-
+Misra1a.dat-
+Misra1a.dat-
+--
+Misra1b.dat:Model: Miscellaneous Class
+Misra1b.dat- 2 Parameters (b1 and b2)
+Misra1b.dat-
+Misra1b.dat- y = b1 * (1-(1+b2*x/2)**(-2)) + e
+Misra1b.dat-
+Misra1b.dat-
+Misra1b.dat-
+--
+Misra1c.dat:Model: Miscellaneous Class
+Misra1c.dat- 2 Parameters (b1 and b2)
+Misra1c.dat-
+Misra1c.dat- y = b1 * (1-(1+2*b2*x)**(-.5)) + e
+Misra1c.dat-
+Misra1c.dat-
+Misra1c.dat-
+--
+Misra1d.dat:Model: Miscellaneous Class
+Misra1d.dat- 2 Parameters (b1 and b2)
+Misra1d.dat-
+Misra1d.dat- y = b1*b2*x*((1+b2*x)**(-1)) + e
+Misra1d.dat-
+Misra1d.dat-
+Misra1d.dat-
+--
+Nelson.dat:Model: Exponential Class
+Nelson.dat- 3 Parameters (b1 to b3)
+Nelson.dat-
+Nelson.dat- log[y] = b1 - b2*x1 * exp[-b3*x2] + e
+Nelson.dat-
+Nelson.dat-
+Nelson.dat-
+--
+Rat42.dat:Model: Exponential Class
+Rat42.dat- 3 Parameters (b1 to b3)
+Rat42.dat-
+Rat42.dat- y = b1 / (1+exp[b2-b3*x]) + e
+Rat42.dat-
+Rat42.dat-
+Rat42.dat-
+--
+Rat43.dat:Model: Exponential Class
+Rat43.dat- 4 Parameters (b1 to b4)
+Rat43.dat-
+Rat43.dat- y = b1 / ((1+exp[b2-b3*x])**(1/b4)) + e
+Rat43.dat-
+Rat43.dat-
+Rat43.dat-
+--
+Roszman1.dat:Model: Miscellaneous Class
+Roszman1.dat- 4 Parameters (b1 to b4)
+Roszman1.dat-
+Roszman1.dat- pi = 3.141592653589793238462643383279E0
+Roszman1.dat- y = b1 - b2*x - arctan[b3/(x-b4)]/pi + e
+Roszman1.dat-
+Roszman1.dat-
+--
+Thurber.dat:Model: Rational Class (cubic/cubic)
+Thurber.dat- 7 Parameters (b1 to b7)
+Thurber.dat-
+Thurber.dat- y = (b1 + b2*x + b3*x**2 + b4*x**3) /
+Thurber.dat- (1 + b5*x + b6*x**2 + b7*x**3) + e
+Thurber.dat-
+Thurber.dat-
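
The Models index above makes the shared layout of these files visible: every
StRD file keeps its parameter lines in the block beginning at line 41 and its
data from line 61 on. That regularity permits one generic reader for the whole
directory; the sketch below is an assumption-laden illustration (the function
name read_nist_strd is mine, and it assumes the parameter block always falls
within lines 41 to 60, which holds for the headers in this commit):

    import numpy as np

    def read_nist_strd(path):
        # Returns (starts, certified, data): starts is an (nparams, 2)
        # array holding the two certified start vectors, certified the
        # certified parameter values, data the response/predictor columns.
        with open(path) as fh:
            lines = fh.readlines()
        starts, certified = [], []
        for line in lines[40:60]:            # scan file lines 41-60
            parts = line.split()
            if len(parts) >= 5 and parts[0].startswith('b') and parts[1] == '=':
                starts.append([float(parts[2]), float(parts[3])])
                certified.append(float(parts[4]))
        data = np.loadtxt(path, skiprows=60)  # data always begin at line 61
        return np.array(starts), np.array(certified), data

    starts, certified, data = read_nist_strd('NIST_STRD/Misra1a.dat')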
diff --git a/NIST_STRD/Nelson.dat b/NIST_STRD/Nelson.dat
new file mode 100644
index 0000000..2393829
--- /dev/null
+++ b/NIST_STRD/Nelson.dat
@@ -0,0 +1,188 @@
+NIST/ITL StRD
+Dataset Name: Nelson (Nelson.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 188)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a study involving
+ the analysis of performance degradation data from
+ accelerated tests, published in IEEE Transactions
+              on Reliability. The response variable is dielectric
+              breakdown strength in kilovolts, and the predictor
+              variables are time in weeks and temperature in degrees
+              Celsius.
+
+
+Reference: Nelson, W. (1981).
+ Analysis of Performance-Degradation Data.
+ IEEE Transactions on Reliability.
+ Vol. 2, R-30, No. 2, pp. 149-155.
+
+Data: 1 Response (y = dielectric breakdown strength)
+ 2 Predictors (x1 = time; x2 = temperature)
+ 128 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ log[y] = b1 - b2*x1 * exp[-b3*x2] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 2 2.5 2.5906836021E+00 1.9149996413E-02
+ b2 = 0.0001 0.000000005 5.6177717026E-09 6.1124096540E-09
+ b3 = -0.01 -0.05 -5.7701013174E-02 3.9572366543E-03
+
+Residual Sum of Squares: 3.7976833176E+00
+Residual Standard Deviation: 1.7430280130E-01
+Degrees of Freedom: 125
+Number of Observations: 128
+
+
+
+
+
+
+
+
+
+
+
+Data: y x1 x2
+ 15.00E0 1E0 180E0
+ 17.00E0 1E0 180E0
+ 15.50E0 1E0 180E0
+ 16.50E0 1E0 180E0
+ 15.50E0 1E0 225E0
+ 15.00E0 1E0 225E0
+ 16.00E0 1E0 225E0
+ 14.50E0 1E0 225E0
+ 15.00E0 1E0 250E0
+ 14.50E0 1E0 250E0
+ 12.50E0 1E0 250E0
+ 11.00E0 1E0 250E0
+ 14.00E0 1E0 275E0
+ 13.00E0 1E0 275E0
+ 14.00E0 1E0 275E0
+ 11.50E0 1E0 275E0
+ 14.00E0 2E0 180E0
+ 16.00E0 2E0 180E0
+ 13.00E0 2E0 180E0
+ 13.50E0 2E0 180E0
+ 13.00E0 2E0 225E0
+ 13.50E0 2E0 225E0
+ 12.50E0 2E0 225E0
+ 12.50E0 2E0 225E0
+ 12.50E0 2E0 250E0
+ 12.00E0 2E0 250E0
+ 11.50E0 2E0 250E0
+ 12.00E0 2E0 250E0
+ 13.00E0 2E0 275E0
+ 11.50E0 2E0 275E0
+ 13.00E0 2E0 275E0
+ 12.50E0 2E0 275E0
+ 13.50E0 4E0 180E0
+ 17.50E0 4E0 180E0
+ 17.50E0 4E0 180E0
+ 13.50E0 4E0 180E0
+ 12.50E0 4E0 225E0
+ 12.50E0 4E0 225E0
+ 15.00E0 4E0 225E0
+ 13.00E0 4E0 225E0
+ 12.00E0 4E0 250E0
+ 13.00E0 4E0 250E0
+ 12.00E0 4E0 250E0
+ 13.50E0 4E0 250E0
+ 10.00E0 4E0 275E0
+ 11.50E0 4E0 275E0
+ 11.00E0 4E0 275E0
+ 9.50E0 4E0 275E0
+ 15.00E0 8E0 180E0
+ 15.00E0 8E0 180E0
+ 15.50E0 8E0 180E0
+ 16.00E0 8E0 180E0
+ 13.00E0 8E0 225E0
+ 10.50E0 8E0 225E0
+ 13.50E0 8E0 225E0
+ 14.00E0 8E0 225E0
+ 12.50E0 8E0 250E0
+ 12.00E0 8E0 250E0
+ 11.50E0 8E0 250E0
+ 11.50E0 8E0 250E0
+ 6.50E0 8E0 275E0
+ 5.50E0 8E0 275E0
+ 6.00E0 8E0 275E0
+ 6.00E0 8E0 275E0
+ 18.50E0 16E0 180E0
+ 17.00E0 16E0 180E0
+ 15.30E0 16E0 180E0
+ 16.00E0 16E0 180E0
+ 13.00E0 16E0 225E0
+ 14.00E0 16E0 225E0
+ 12.50E0 16E0 225E0
+ 11.00E0 16E0 225E0
+ 12.00E0 16E0 250E0
+ 12.00E0 16E0 250E0
+ 11.50E0 16E0 250E0
+ 12.00E0 16E0 250E0
+ 6.00E0 16E0 275E0
+ 6.00E0 16E0 275E0
+ 5.00E0 16E0 275E0
+ 5.50E0 16E0 275E0
+ 12.50E0 32E0 180E0
+ 13.00E0 32E0 180E0
+ 16.00E0 32E0 180E0
+ 12.00E0 32E0 180E0
+ 11.00E0 32E0 225E0
+ 9.50E0 32E0 225E0
+ 11.00E0 32E0 225E0
+ 11.00E0 32E0 225E0
+ 11.00E0 32E0 250E0
+ 10.00E0 32E0 250E0
+ 10.50E0 32E0 250E0
+ 10.50E0 32E0 250E0
+ 2.70E0 32E0 275E0
+ 2.70E0 32E0 275E0
+ 2.50E0 32E0 275E0
+ 2.40E0 32E0 275E0
+ 13.00E0 48E0 180E0
+ 13.50E0 48E0 180E0
+ 16.50E0 48E0 180E0
+ 13.60E0 48E0 180E0
+ 11.50E0 48E0 225E0
+ 10.50E0 48E0 225E0
+ 13.50E0 48E0 225E0
+ 12.00E0 48E0 225E0
+ 7.00E0 48E0 250E0
+ 6.90E0 48E0 250E0
+ 8.80E0 48E0 250E0
+ 7.90E0 48E0 250E0
+ 1.20E0 48E0 275E0
+ 1.50E0 48E0 275E0
+ 1.00E0 48E0 275E0
+ 1.50E0 48E0 275E0
+ 13.00E0 64E0 180E0
+ 12.50E0 64E0 180E0
+ 16.50E0 64E0 180E0
+ 16.00E0 64E0 180E0
+ 11.00E0 64E0 225E0
+ 11.50E0 64E0 225E0
+ 10.50E0 64E0 225E0
+ 10.00E0 64E0 225E0
+ 7.27E0 64E0 250E0
+ 7.50E0 64E0 250E0
+ 6.70E0 64E0 250E0
+ 7.60E0 64E0 250E0
+ 1.50E0 64E0 275E0
+ 1.00E0 64E0 275E0
+ 1.20E0 64E0 275E0
+ 1.20E0 64E0 275E0
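
Nelson differs from the other entries in two ways: the model is written for
log(y) rather than y, and there are two predictors. A sketch of the
corresponding residual, under the usual assumptions about file location and the
fixed data offset:

    import numpy as np
    from lmfit import Parameters, minimize

    def resid(pars, x1, x2, y):
        v = pars.valuesdict()
        model = v['b1'] - v['b2'] * x1 * np.exp(-v['b3'] * x2)
        return model - np.log(y)     # the model predicts log(y), not y

    data = np.loadtxt('NIST_STRD/Nelson.dat', skiprows=60)
    y, x1, x2 = data[:, 0], data[:, 1], data[:, 2]

    pars = Parameters()
    pars.add('b1', value=2.5)        # Start 2 values from the header
    pars.add('b2', value=5e-9)
    pars.add('b3', value=-0.05)

    out = minimize(resid, pars, args=(x1, x2, y))
    print(out.params['b1'].value)    # certified: 2.5906836021E+00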
diff --git a/NIST_STRD/Rat42.dat b/NIST_STRD/Rat42.dat
new file mode 100644
index 0000000..aba7cde
--- /dev/null
+++ b/NIST_STRD/Rat42.dat
@@ -0,0 +1,69 @@
+NIST/ITL StRD
+Dataset Name: Rat42 (Rat42.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 69)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This model and data are an example of fitting
+ sigmoidal growth curves taken from Ratkowsky (1983).
+ The response variable is pasture yield, and the
+ predictor variable is growing time.
+
+
+Reference: Ratkowsky, D.A. (1983).
+ Nonlinear Regression Modeling.
+ New York, NY: Marcel Dekker, pp. 61 and 88.
+
+
+
+
+
+Data: 1 Response (y = pasture yield)
+ 1 Predictor (x = growing time)
+ 9 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = b1 / (1+exp[b2-b3*x]) + e
+
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 100 75 7.2462237576E+01 1.7340283401E+00
+ b2 = 1 2.5 2.6180768402E+00 8.8295217536E-02
+ b3 = 0.1 0.07 6.7359200066E-02 3.4465663377E-03
+
+Residual Sum of Squares: 8.0565229338E+00
+Residual Standard Deviation: 1.1587725499E+00
+Degrees of Freedom: 6
+Number of Observations: 9
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 8.930E0 9.000E0
+ 10.800E0 14.000E0
+ 18.590E0 21.000E0
+ 22.330E0 28.000E0
+ 39.350E0 42.000E0
+ 56.110E0 57.000E0
+ 61.730E0 63.000E0
+ 64.620E0 70.000E0
+ 67.080E0 79.000E0
diff --git a/NIST_STRD/Rat43.dat b/NIST_STRD/Rat43.dat
new file mode 100644
index 0000000..f1616fb
--- /dev/null
+++ b/NIST_STRD/Rat43.dat
@@ -0,0 +1,75 @@
+NIST/ITL StRD
+Dataset Name: Rat43 (Rat43.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 44)
+ Certified Values (lines 41 to 49)
+ Data (lines 61 to 75)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This model and data are an example of fitting
+ sigmoidal growth curves taken from Ratkowsky (1983).
+ The response variable is the dry weight of onion bulbs
+ and tops, and the predictor variable is growing time.
+
+
+Reference: Ratkowsky, D.A. (1983).
+ Nonlinear Regression Modeling.
+ New York, NY: Marcel Dekker, pp. 62 and 88.
+
+
+
+
+
+Data: 1 Response (y = onion bulb dry weight)
+ 1 Predictor (x = growing time)
+ 15 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 4 Parameters (b1 to b4)
+
+ y = b1 / ((1+exp[b2-b3*x])**(1/b4)) + e
+
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 100 700 6.9964151270E+02 1.6302297817E+01
+ b2 = 10 5 5.2771253025E+00 2.0828735829E+00
+ b3 = 1 0.75 7.5962938329E-01 1.9566123451E-01
+ b4 = 1 1.3 1.2792483859E+00 6.8761936385E-01
+
+Residual Sum of Squares: 8.7864049080E+03
+Residual Standard Deviation: 2.8262414662E+01
+Degrees of Freedom: 9
+Number of Observations: 15
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 16.08E0 1.0E0
+ 33.83E0 2.0E0
+ 65.80E0 3.0E0
+ 97.20E0 4.0E0
+ 191.55E0 5.0E0
+ 326.20E0 6.0E0
+ 386.87E0 7.0E0
+ 520.53E0 8.0E0
+ 590.03E0 9.0E0
+ 651.92E0 10.0E0
+ 724.93E0 11.0E0
+ 699.56E0 12.0E0
+ 689.96E0 13.0E0
+ 637.56E0 14.0E0
+ 717.41E0 15.0E0
diff --git a/NIST_STRD/Roszman1.dat b/NIST_STRD/Roszman1.dat
new file mode 100644
index 0000000..da22b1a
--- /dev/null
+++ b/NIST_STRD/Roszman1.dat
@@ -0,0 +1,85 @@
+NIST/ITL StRD
+Dataset Name: Roszman1 (Roszman1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 44)
+ Certified Values (lines 41 to 49)
+ Data (lines 61 to 85)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ quantum defects in iodine atoms. The response
+ variable is the number of quantum defects, and the
+ predictor variable is the excited energy state.
+ The argument to the ARCTAN function is in radians.
+
+Reference: Roszman, L., NIST (19??).
+ Quantum Defects for Sulfur I Atom.
+
+
+
+
+
+
+Data: 1 Response (y = quantum defect)
+ 1 Predictor (x = excited state energy)
+ 25 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 4 Parameters (b1 to b4)
+
+ pi = 3.141592653589793238462643383279E0
+ y = b1 - b2*x - arctan[b3/(x-b4)]/pi + e
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 0.1 0.2 2.0196866396E-01 1.9172666023E-02
+ b2 = -0.00001 -0.000005 -6.1953516256E-06 3.2058931691E-06
+ b3 = 1000 1200 1.2044556708E+03 7.4050983057E+01
+ b4 = -100 -150 -1.8134269537E+02 4.9573513849E+01
+
+Residual Sum of Squares: 4.9484847331E-04
+Residual Standard Deviation: 4.8542984060E-03
+Degrees of Freedom: 21
+Number of Observations: 25
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 0.252429 -4868.68
+ 0.252141 -4868.09
+ 0.251809 -4867.41
+ 0.297989 -3375.19
+ 0.296257 -3373.14
+ 0.295319 -3372.03
+ 0.339603 -2473.74
+ 0.337731 -2472.35
+ 0.333820 -2469.45
+ 0.389510 -1894.65
+ 0.386998 -1893.40
+ 0.438864 -1497.24
+ 0.434887 -1495.85
+ 0.427893 -1493.41
+ 0.471568 -1208.68
+ 0.461699 -1206.18
+ 0.461144 -1206.04
+ 0.513532 -997.92
+ 0.506641 -996.61
+ 0.505062 -996.31
+ 0.535648 -834.94
+ 0.533726 -834.66
+ 0.568064 -710.03
+ 0.612886 -530.16
+ 0.624169 -464.17
diff --git a/NIST_STRD/Thurber.dat b/NIST_STRD/Thurber.dat
new file mode 100644
index 0000000..b868a4d
--- /dev/null
+++ b/NIST_STRD/Thurber.dat
@@ -0,0 +1,97 @@
+NIST/ITL StRD
+Dataset Name: Thurber (Thurber.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 47)
+ Certified Values (lines 41 to 52)
+ Data (lines 61 to 97)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ semiconductor electron mobility. The response
+ variable is a measure of electron mobility, and the
+ predictor variable is the natural log of the density.
+
+
+Reference: Thurber, R., NIST (197?).
+ Semiconductor electron mobility modeling.
+
+
+
+
+
+
+Data: 1 Response Variable (y = electron mobility)
+ 1 Predictor Variable (x = log[density])
+ 37 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Rational Class (cubic/cubic)
+ 7 Parameters (b1 to b7)
+
+ y = (b1 + b2*x + b3*x**2 + b4*x**3) /
+ (1 + b5*x + b6*x**2 + b7*x**3) + e
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1000 1300 1.2881396800E+03 4.6647963344E+00
+ b2 = 1000 1500 1.4910792535E+03 3.9571156086E+01
+ b3 = 400 500 5.8323836877E+02 2.8698696102E+01
+ b4 = 40 75 7.5416644291E+01 5.5675370270E+00
+ b5 = 0.7 1 9.6629502864E-01 3.1333340687E-02
+ b6 = 0.3 0.4 3.9797285797E-01 1.4984928198E-02
+ b7 = 0.03 0.05 4.9727297349E-02 6.5842344623E-03
+
+Residual Sum of Squares: 5.6427082397E+03
+Residual Standard Deviation: 1.3714600784E+01
+Degrees of Freedom: 30
+Number of Observations: 37
+
+
+
+
+
+
+
+Data: y x
+ 80.574E0 -3.067E0
+ 84.248E0 -2.981E0
+ 87.264E0 -2.921E0
+ 87.195E0 -2.912E0
+ 89.076E0 -2.840E0
+ 89.608E0 -2.797E0
+ 89.868E0 -2.702E0
+ 90.101E0 -2.699E0
+ 92.405E0 -2.633E0
+ 95.854E0 -2.481E0
+ 100.696E0 -2.363E0
+ 101.060E0 -2.322E0
+ 401.672E0 -1.501E0
+ 390.724E0 -1.460E0
+ 567.534E0 -1.274E0
+ 635.316E0 -1.212E0
+ 733.054E0 -1.100E0
+ 759.087E0 -1.046E0
+ 894.206E0 -0.915E0
+ 990.785E0 -0.714E0
+ 1090.109E0 -0.566E0
+ 1080.914E0 -0.545E0
+ 1122.643E0 -0.400E0
+ 1178.351E0 -0.309E0
+ 1260.531E0 -0.109E0
+ 1273.514E0 -0.103E0
+ 1288.339E0 0.010E0
+ 1327.543E0 0.119E0
+ 1353.863E0 0.377E0
+ 1414.509E0 0.790E0
+ 1425.208E0 0.963E0
+ 1421.384E0 1.006E0
+ 1442.962E0 1.115E0
+ 1464.350E0 1.572E0
+ 1468.705E0 1.841E0
+ 1447.894E0 2.047E0
+ 1457.628E0 2.200E0
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..019a3da
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,180 @@
+Metadata-Version: 2.1
+Name: lmfit
+Version: 1.2.1
+Summary: Least-Squares Minimization with Bounds and Constraints
+Home-page: https://lmfit.github.io//lmfit-py/
+Author: LMFit Development Team
+Author-email: matt.newville@gmail.com
+License: BSD 3-Clause
+Project-URL: Source, https://github.com/lmfit/lmfit-py
+Project-URL: Changelog, https://lmfit.github.io/lmfit-py/whatsnew.html
+Project-URL: Documentation, https://lmfit.github.io/lmfit-py/
+Project-URL: Tracker, https://github.com/lmfit/lmfit-py/issues
+Keywords: curve-fitting,least-squares minimization
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Topic :: Scientific/Engineering
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+Provides-Extra: dev
+Provides-Extra: doc
+Provides-Extra: test
+Provides-Extra: all
+License-File: LICENSE
+License-File: AUTHORS.txt
+
+LMfit-py
+========
+
+.. image:: https://dev.azure.com/lmfit/lmfit-py/_apis/build/status/lmfit.lmfit-py?branchName=master
+ :target: https://dev.azure.com/lmfit/lmfit-py/_build/latest?definitionId=1&branchName=master
+
+.. image:: https://codecov.io/gh/lmfit/lmfit-py/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/lmfit/lmfit-py
+
+.. image:: https://img.shields.io/pypi/v/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/pypi/dm/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/badge/docs-read-brightgreen
+ :target: https://lmfit.github.io/lmfit-py/
+
+.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg
+ :target: https://doi.org/10.5281/zenodo.598352
+
+.. _LMfit google mailing list: https://groups.google.com/group/lmfit-py
+.. _Github Discussions: https://github.com/lmfit/lmfit-py/discussions
+.. _Github Issues: https://github.com/lmfit/lmfit-py/issues
+
+
+..
+ Note: the Zenodo target should be
+ https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py
+ but see https://github.com/lmfit/lmfit-py/discussions/862
+
+
+Overview
+---------
+
+The lmfit Python library provides tools for non-linear least-squares
+minimization and curve fitting. The goal is to make these optimization
+algorithms more flexible, more comprehensible, and easier to use well, with the
+key feature of casting variables in minimization and fitting routines as named
+parameters that can have many attributes besides just a current value.
+
+LMfit is a pure Python package, built on top of SciPy and NumPy, and so is
+easy to install with ``pip install lmfit``.
+
+For questions, comments, and suggestions, please use the `LMfit google mailing
+list`_ or `Github discussions`_. For software issues and bugs, use `Github
+Issues`_, but please read `Contributing.md <.github/CONTRIBUTING.md>`_ before
+creating an Issue.
+
+
+Parameters and Minimization
+------------------------------
+
+LMfit provides optimization routines similar to (and based on) those from
+``scipy.optimize``, but with a simple, flexible approach to parameterizing a
+model for fitting to data using named parameters. These named Parameters can be
+held fixed or freely adjusted in the fit, or held between lower and upper
+bounds. Parameters can also be constrained as a simple mathematical expression
+of other Parameters.
+
+A Parameters object (which acts like a Python dictionary) contains named
+parameters, and can be built with::
+
+ import lmfit
+ fit_params = lmfit.Parameters()
+ fit_params['amp'] = lmfit.Parameter(value=1.2)
+ fit_params['cen'] = lmfit.Parameter(value=40.0, vary=False)
+ fit_params['wid'] = lmfit.Parameter(value=4, min=0)
+ fit_params['fwhm'] = lmfit.Parameter(expr='wid*2.355')
+
+or using the equivalent::
+
+ fit_params = lmfit.create_params(amp=1.2,
+ cen={'value':40, 'vary':False},
+ wid={'value': 4, 'min':0},
+ fwhm={'expr': 'wid*2.355'})
+
+
+
+In the general minimization case (see below for Curve-fitting), the user will
+also write an objective function to be minimized (in the least-squares sense)
+with its first argument being this Parameters object, and additional positional
+and keyword arguments as desired::
+
+ def myfunc(params, x, data, someflag=True):
+ amp = params['amp'].value
+ cen = params['cen'].value
+ wid = params['wid'].value
+ ...
+ return residual_array
+
+For each call of this function, the values for the ``params`` may have changed,
+subject to the bounds and constraint settings for each Parameter. The function
+should return the residual (i.e., ``data-model``) array to be minimized.
+
+The advantage here is that the function to be minimized does not have to be
+changed if different bounds or constraints are placed on the fitting Parameters.
+The fitting model (as described in ``myfunc``) is instead written in terms of
+the physical parameters of the system, and remains independent of what is
+actually varied in the fit. In addition, the choice of which parameters are
+adjusted and which are fixed happens at run-time, so the user can easily
+change what is varied and what constraints are placed on the parameters
+during real-time data analysis.
+
+To perform the fit, the user calls::
+
+ result = lmfit.minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
+
+After the fit, a ``MinimizerResult`` instance is returned that holds the results
+of the fit (e.g., fitting statistics and optimized parameters). The dictionary
+``result.params`` contains the best-fit values, estimated standard deviations,
+and correlations with other variables in the fit.
+
+By default, the underlying fit algorithm is the Levenberg-Marquardt algorithm
+with numerically-calculated derivatives from MINPACK's lmdif function, as used
+by ``scipy.optimize.leastsq``. Most other solvers that are present in ``scipy``
+(e.g., Nelder-Mead, differential_evolution, basin-hopping, and more) are also
+supported.
+
+
+Curve-Fitting with lmfit.Model
+----------------------------------
+
+One of the most common uses of least-squares minimization is curve fitting,
+which minimizes ``data-model`` or ``(data-model)*weights``. Using
+``lmfit.minimize`` as above, the objective function would take ``data`` and
+``weights``, calculate the model, and then return the value of
+``(data-model)*weights``.
+
+To simplify this, and make curve-fitting more flexible, lmfit provides a Model
+class that wraps a *model function* that represents the model (without the data
+or weights). Parameters are then automatically found from the named arguments
+of the model function. In addition, simple model functions can be readily
+combined and reused, and several common model functions are included in lmfit.
+
+Exploration of Confidence Intervals
+-------------------------------------
+
+Lmfit always tries to estimate uncertainties in fitting parameters and the
+correlations between them. It does this even for those methods where the
+corresponding ``scipy.optimize`` routines do not estimate uncertainties. Lmfit
+also provides methods to explicitly explore and evaluate the confidence
+intervals in fit results.
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..9dab9f7
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,168 @@
+LMfit-py
+========
+
+.. image:: https://dev.azure.com/lmfit/lmfit-py/_apis/build/status/lmfit.lmfit-py?branchName=master
+ :target: https://dev.azure.com/lmfit/lmfit-py/_build/latest?definitionId=1&branchName=master
+
+.. image:: https://codecov.io/gh/lmfit/lmfit-py/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/lmfit/lmfit-py
+
+.. image:: https://img.shields.io/pypi/v/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/pypi/dm/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/badge/docs-read-brightgreen
+ :target: https://lmfit.github.io/lmfit-py/
+
+.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg
+ :target: https://doi.org/10.5281/zenodo.598352
+
+.. _LMfit google mailing list: https://groups.google.com/group/lmfit-py
+.. _Github Discussions: https://github.com/lmfit/lmfit-py/discussions
+.. _Github Issues: https://github.com/lmfit/lmfit-py/issues
+
+
+..
+ Note: the Zenodo target should be
+ https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py
+ but see https://github.com/lmfit/lmfit-py/discussions/862
+
+
+Overview
+---------
+
+The lmfit Python library provides tools for non-linear least-squares
+minimization and curve fitting. The goal is to make these optimization
+algorithms more flexible, more comprehensible, and easier to use well, with the
+key feature of casting variables in minimization and fitting routines as named
+parameters that can have many attributes besides just a current value.
+
+LMfit is a pure Python package, built on top of SciPy and NumPy, and so is
+easy to install with ``pip install lmfit``.
+
+For questions, comments, and suggestions, please use the `LMfit google mailing
+list`_ or `Github discussions`_. For software issues and bugs, use `Github
+Issues`_, but please read `Contributing.md <.github/CONTRIBUTING.md>`_ before
+creating an Issue.
+
+
+Parameters and Minimization
+------------------------------
+
+LMfit provides optimization routines similar to (and based on) those from
+``scipy.optimize``, but with a simple, flexible approach to parameterizing a
+model for fitting to data using named parameters. These named Parameters can be
+held fixed or freely adjusted in the fit, or held between lower and upper
+bounds. Parameters can also be constrained as a simple mathematical expression
+of other Parameters.
+
+A Parameters object (which acts like a Python dictionary) contains named
+parameters, and can be built with::
+
+ import lmfit
+ fit_params = lmfit.Parameters()
+ fit_params['amp'] = lmfit.Parameter(value=1.2)
+ fit_params['cen'] = lmfit.Parameter(value=40.0, vary=False)
+ fit_params['wid'] = lmfit.Parameter(value=4, min=0)
+ fit_params['fwhm'] = lmfit.Parameter(expr='wid*2.355')
+
+or using the equivalent::
+
+ fit_params = lmfit.create_params(amp=1.2,
+ cen={'value':40, 'vary':False},
+ wid={'value': 4, 'min':0},
+ fwhm={'expr': 'wid*2.355'})
+
+
+
+In the general minimization case (see below for Curve-fitting), the user will
+also write an objective function to be minimized (in the least-squares sense)
+with its first argument being this Parameters object, and additional positional
+and keyword arguments as desired::
+
+ def myfunc(params, x, data, someflag=True):
+ amp = params['amp'].value
+ cen = params['cen'].value
+ wid = params['wid'].value
+ ...
+ return residual_array
+
+For each call of this function, the values for the ``params`` may have changed,
+subject to the bounds and constraint settings for each Parameter. The function
+should return the residual (i.e., ``data-model``) array to be minimized.
+
+The advantage here is that the function to be minimized does not have to be
+changed if different bounds or constraints are placed on the fitting Parameters.
+The fitting model (as described in ``myfunc``) is instead written in terms of
+the physical parameters of the system, and remains independent of what is
+actually varied in the fit. In addition, the choice of which parameters are
+adjusted and which are fixed happens at run-time, so the user can easily
+change what is varied and what constraints are placed on the parameters
+during real-time data analysis.
+
+To perform the fit, the user calls::
+
+ result = lmfit.minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
+
+After the fit, a ``MinimizerResult`` instance is returned that holds the results
+of the fit (e.g., fitting statistics and optimized parameters). The dictionary
+``result.params`` contains the best-fit values, estimated standard deviations,
+and correlations with other variables in the fit.
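+
+For example, the best-fit values and uncertainties can be inspected with (a
+minimal sketch, continuing from the fit above)::
+
+    print(result.params['amp'].value, result.params['amp'].stderr)
+    print(lmfit.fit_report(result))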
+
+By default, the underlying fit algorithm is the Levenberg-Marquardt algorithm
+with numerically-calculated derivatives from MINPACK's lmdif function, as used
+by ``scipy.optimize.leastsq``. Most other solvers that are present in ``scipy``
+(e.g., Nelder-Mead, differential_evolution, basin-hopping, and more) are also
+supported.
+
+
+Curve-Fitting with lmfit.Model
+----------------------------------
+
+One of the most common uses of least-squares minimization is curve fitting,
+which minimizes ``data-model`` or ``(data-model)*weights``. Using
+``lmfit.minimize`` as above, the objective function would take ``data`` and
+``weights``, calculate the model, and then return the value of
+``(data-model)*weights``.
+
+To simplify this, and make curve-fitting more flexible, lmfit provides a Model
+class that wraps a *model function* that represents the model (without the data
+or weights). Parameters are then automatically found from the named arguments
+of the model function. In addition, simple model functions can be readily
+combined and reused, and several common model functions are included in lmfit.
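+
+As an illustrative sketch (with a hypothetical ``decay`` model function,
+assuming arrays ``xdata`` and ``ydata`` hold the data)::
+
+    import numpy as np
+    from lmfit import Model
+
+    def decay(x, amp, tau):
+        return amp * np.exp(-x / tau)
+
+    model = Model(decay)
+    params = model.make_params(amp=5, tau=2)
+    result = model.fit(ydata, params, x=xdata)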
+
+Exploration of Confidence Intervals
+-------------------------------------
+
+Lmfit always tries to estimate uncertainties in fitting parameters and the
+correlations between them. It does this even for those methods where the
+corresponding ``scipy.optimize`` routines do not estimate uncertainties. Lmfit
+also provides methods to explicitly explore and evaluate the confidence
+intervals in fit results.
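+
+A minimal sketch (assuming ``mini`` is the ``lmfit.Minimizer`` instance used
+for the fit and ``result`` is its output; both names are hypothetical)::
+
+    ci = lmfit.conf_interval(mini, result)
+    lmfit.report_ci(ci)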
diff --git a/asv_benchmarking/README.md b/asv_benchmarking/README.md
new file mode 100644
index 0000000..58bb076
--- /dev/null
+++ b/asv_benchmarking/README.md
@@ -0,0 +1,9 @@
+Benchmarking with asv (air-speed-velocity)
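+
+Typical usage (assuming `asv` is installed):
+
+```sh
+asv run
+asv publish
+asv preview
+```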
diff --git a/asv_benchmarking/asv.conf.json b/asv_benchmarking/asv.conf.json
new file mode 100644
index 0000000..e4299e6
--- /dev/null
+++ b/asv_benchmarking/asv.conf.json
@@ -0,0 +1,75 @@
+{
+ // The version of the config file format. Do not change, unless
+ // you know what you are doing.
+ "version": 1,
+
+ // The name of the project being benchmarked
+ "project": "lmfit-py",
+
+ // The project's homepage
+ "project_url": "http://lmfit.github.io/lmfit-py",
+
+ // The URL or local path of the source code repository for the
+ // project being benchmarked
+ "repo": "http://github.com/lmfit/lmfit-py/",
+
+ // List of branches to benchmark. If not provided, defaults to "master"
+ // (for git) or "tip" (for mercurial).
+ "branches": ["master"], // for git
+ // "branches": ["tip"], // for mercurial
+
+ // The DVCS being used. If not set, it will be automatically
+ // determined from "repo" by looking at the protocol in the URL
+ // (if remote), or by looking for special directories, such as
+ // ".git" (if local).
+ "dvcs": "git",
+
+ // The tool to use to create environments. May be "conda",
+ // "virtualenv" or other value depending on the plugins in use.
+ // If missing or the empty string, the tool will be automatically
+ // determined by looking for tools on the PATH environment
+ // variable.
+ "environment_type": "conda",
+
+ // the base URL to show a commit for the project.
+ "show_commit_url": "http://github.com/lmfit/lmfit-py/commit/",
+
+ // The Pythons you'd like to test against. If not provided, defaults
+ // to the current version of Python used to run `asv`.
+ "pythons": ["2.7", "3.6"],
+
+ // The matrix of dependencies to test. Each key is the name of a
+ // package (in PyPI) and the values are version numbers. An empty
+ // list indicates to just test against the default (latest)
+ // version.
+ "matrix": {
+ "pip+emcee": [""],
+ "scipy": ["0.18", "0.19"],
+ "six": ["1.10.0"],
+ },
+
+ // The directory (relative to the current directory) that benchmarks are
+ // stored in. If not provided, defaults to "benchmarks"
+ "benchmark_dir": "benchmarks",
+
+ // The directory (relative to the current directory) to cache the Python
+ // environments in. If not provided, defaults to "env"
+ // "env_dir": "env",
+
+
+ // The directory (relative to the current directory) that raw benchmark
+ // results are stored in. If not provided, defaults to "results".
+ "results_dir": "results",
+
+ // The directory (relative to the current directory) that the html tree
+ // should be written to. If not provided, defaults to "html".
+ "html_dir": "html",
+
+ // The number of characters to retain in the commit hashes.
+ // "hash_length": 8,
+
+ // `asv` will cache wheels of the recent builds in each
+ // environment, making them faster to install next time. This is
+ // number of builds to keep, per environment.
+ // "wheel_cache_size": 0
+}
diff --git a/asv_benchmarking/benchmarks/__init__.py b/asv_benchmarking/benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/asv_benchmarking/benchmarks/__init__.py
diff --git a/asv_benchmarking/benchmarks/benchmarks.py b/asv_benchmarking/benchmarks/benchmarks.py
new file mode 100644
index 0000000..46557a1
--- /dev/null
+++ b/asv_benchmarking/benchmarks/benchmarks.py
@@ -0,0 +1,136 @@
+# Benchmarking scripts for lmfit
+
+from copy import deepcopy
+
+import numpy as np
+
+from lmfit import Minimizer, Parameters, conf_interval, minimize
+
+
+def obj_func(params, x, data):
+ """ decaying sine wave, subtract data"""
+ amp = params['amp'].value
+ shift = params['shift'].value
+ omega = params['omega'].value
+ decay = params['decay'].value
+ model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+
+class MinimizeSuite:
+ """
+ Benchmarks using minimize() and least-squares
+ """
+ def setup(self):
+ pass
+
+ def time_minimize(self):
+ np.random.seed(201)
+ x = np.linspace(0, 15, 601)
+
+ data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=len(x), scale=0.3))
+ params = Parameters()
+ params.add('amp', value=1, min=0, max=100)
+ params.add('decay', value=0.0, min=0, max=10)
+ params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
+ params.add('omega', value=1.0, min=0, max=10)
+
+ return minimize(obj_func, params, args=(x, data))
+
+ def time_minimize_withnan(self):
+ np.random.seed(201)
+ x = np.linspace(0, 15, 601)
+ x[53] = np.nan
+
+ data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=len(x), scale=0.3))
+ params = Parameters()
+ params.add('amp', value=1, min=0, max=100)
+ params.add('decay', value=0.0, min=0, max=10)
+ params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
+ params.add('omega', value=1.0, min=0, max=10)
+
+ return minimize(obj_func, params, args=(x, data), nan_policy='omit')
+
+ def time_minimize_large(self):
+ np.random.seed(201)
+ x = np.linspace(0, 19, 70001)
+ data = (5. * np.sin(0.6*x - 0.1) * np.exp(-x*x*0.0165) +
+ np.random.normal(size=len(x), scale=0.3))
+ params = Parameters()
+ params.add('amp', value=1, min=0, max=100)
+ params.add('decay', value=0.0, min=0, max=10)
+ params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
+ params.add('omega', value=0.40, min=0, max=10)
+
+ return minimize(obj_func, params, args=(x, data))
+
+ def time_confinterval(self):
+ np.random.seed(0)
+ x = np.linspace(0.3, 10, 100)
+ y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
+
+ p = Parameters()
+ p.add_many(('a', 0.1), ('b', 1))
+
+ def residual(p):
+ a = p['a'].value
+ b = p['b'].value
+
+ return 1/(a*x)+b-y
+
+ minimizer = Minimizer(residual, p)
+ out = minimizer.leastsq()
+ return conf_interval(minimizer, out)
+
+
+class MinimizerClassSuite:
+ """
+ Benchmarks for the Minimizer class
+ """
+ def setup(self):
+ self.x = np.linspace(1, 10, 250)
+ np.random.seed(0)
+ self.y = (3.0 * np.exp(-self.x / 2)
+ - 5.0 * np.exp(-(self.x - 0.1) / 10.)
+ + 0.1 * np.random.randn(len(self.x)))
+
+ self.p = Parameters()
+ self.p.add_many(('a1', 4., True, 0., 10.),
+ ('a2', 4., True, -10., 10.),
+ ('t1', 3., True, 0.01, 10.),
+ ('t2', 3., True, 0.01, 20.))
+
+ self.p_emcee = deepcopy(self.p)
+ self.p_emcee.add('noise', 0.2, True, 0.001, 1.)
+
+ self.mini_de = Minimizer(Minimizer_Residual,
+ self.p,
+ fcn_args=(self.x, self.y),
+ kws={'seed': 1,
+ 'polish': False,
+ 'maxiter': 100})
+
+ self.mini_emcee = Minimizer(Minimizer_lnprob,
+ self.p_emcee,
+ fcn_args=(self.x, self.y))
+
+ def time_differential_evolution(self):
+ return self.mini_de.minimize(method='differential_evolution')
+
+ def time_emcee(self):
+ return self.mini_emcee.emcee(self.p_emcee, steps=100, seed=1)
+
+
+def Minimizer_Residual(p, x, y):
+ v = p.valuesdict()
+ return (v['a1'] * np.exp(-x / v['t1'])
+ + v['a2'] * np.exp(-(x - 0.1) / v['t2'])
+ - y)
+
+
+def Minimizer_lnprob(p, x, y):
+ noise = p['noise'].value
+ return -0.5 * np.sum((Minimizer_Residual(p, x, y) / noise)**2
+ + np.log(2 * np.pi * noise**2))
diff --git a/asv_benchmarking/run_benchmark_code.py b/asv_benchmarking/run_benchmark_code.py
new file mode 100644
index 0000000..ffdff31
--- /dev/null
+++ b/asv_benchmarking/run_benchmark_code.py
@@ -0,0 +1,13 @@
+from benchmarks.benchmarks import MinimizerClassSuite, MinimizeSuite
+
+mtest = MinimizeSuite()
+mtest.setup()
+out = mtest.time_minimize()
+out = mtest.time_minimize_large()
+out = mtest.time_minimize_withnan()
+out = mtest.time_confinterval()
+
+mtest = MinimizerClassSuite()
+mtest.setup()
+out = mtest.time_differential_evolution()
+out = mtest.time_emcee()
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
new file mode 100644
index 0000000..d3f2a95
--- /dev/null
+++ b/azure-pipelines.yml
@@ -0,0 +1,300 @@
+# Python package
+# Create and test a Python package on multiple Python versions.
+# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
+# https://docs.microsoft.com/azure/devops/pipelines/languages/python
+
+stages:
+
+- stage: check_codestyle
+ jobs:
+ - job: pre_commit
+ pool:
+ vmImage: 'ubuntu-latest'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '3.11'
+ - script: |
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install Python build tools and dependencies'
+ - script: |
+ python -m build
+ python -m pip install '.[dev]'
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ pre-commit install ; pre-commit run --all-files
+ displayName: 'Run pre-commit hooks'
+ - script: |
+ check-wheel-contents dist/*.whl
+ displayName: 'Run check-wheel-contents'
+ - script: |
+ twine check dist/*
+ displayName: "Run twine check"
+
+- stage: build_documentation
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job: build_documentation
+ pool:
+ vmImage: 'ubuntu-latest'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '3.11'
+ - script: |
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install Python build tools'
+ - script: |
+ python -m build
+ python -m pip install '.[doc]'
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ sudo apt-get update && sudo apt-get install -qq -y texlive-latex-extra latexmk
+ displayName: 'Install TeX Live'
+ - script: |
+ cd doc ; make all
+ displayName: 'Build the documentation'
+
+- stage: tests_minimum_dependencies
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job:
+ pool:
+ vmImage: 'ubuntu-latest'
+ strategy:
+ matrix:
+ Python37:
+ python.version: '3.7'
+ Python38:
+ python.version: '3.8'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '$(python.version)'
+ displayName: 'Use Python $(python.version)'
+ - script: |
+ sudo apt-get update && sudo apt-get install -yq --no-install-suggests --no-install-recommends \
+ libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache \
+ swig libmpc-dev
+ displayName: 'Install dependencies'
+ - script: |
+ python -m pip install --upgrade build pip wheel
+ python -m pip install asteval==0.9.28 numpy==1.19.0 scipy==1.6.0 uncertainties==3.1.4
+ displayName: 'Install minimum required version of dependencies'
+ - script: |
+ python -m build
+ python -m pip install ".[test]"
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ python -m pip list
+ displayName: 'List installed Python packages'
+ - script: |
+ python -m pip install pytest-azurepipelines
+ pytest
+ displayName: 'Run test-suite and collect coverage'
+ - script: |
+ curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --import # One-time step
+ curl -Os https://uploader.codecov.io/latest/linux/codecov
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
+ gpg --verify codecov.SHA256SUM.sig codecov.SHA256SUM
+ shasum -a 256 -c codecov.SHA256SUM
+ chmod +x codecov
+ displayName: 'Download and verify codecov uploader'
+ - script: |
+ ./codecov -v -f "coverage.xml"
+ displayName: 'Upload to codecov.io'
+
+- stage: tests_latest_dependencies
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job:
+ pool:
+ vmImage: 'ubuntu-latest'
+ strategy:
+ matrix:
+ Python37:
+ python.version: '3.7'
+ Python38:
+ python.version: '3.8'
+ Python39:
+ python.version: '3.9'
+ Python310:
+ python.version: '3.10'
+ Python311:
+ python.version: '3.11'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '$(python.version)'
+ displayName: 'Use Python $(python.version)'
+ - script: |
+ sudo apt-get update && sudo apt-get install -yq --no-install-suggests --no-install-recommends \
+ libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache \
+ swig libmpc-dev
+ displayName: 'Install dependencies'
+ - script: |
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install latest available version of Python dependencies'
+ - script: |
+ python -m build
+ python -m pip install '.[all]'
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ python -m pip list
+ displayName: 'List installed Python packages'
+ - script: |
+ python -m pip install pytest-azurepipelines
+ pytest
+ displayName: 'Run test-suite and collect coverage'
+ - script: |
+ curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --import # One-time step
+ curl -Os https://uploader.codecov.io/latest/linux/codecov
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
+ gpg --verify codecov.SHA256SUM.sig codecov.SHA256SUM
+ shasum -a 256 -c codecov.SHA256SUM
+ chmod +x codecov
+ displayName: 'Download and verify codecov uploader'
+ - script: |
+ ./codecov -v -f "coverage.xml"
+ displayName: 'Upload to codecov.io'
+
+# Python 3.11 on Windows currently fails to build pycairo
+- stage: test_Windows_latest
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job:
+ pool:
+ vmImage: 'windows-latest'
+ strategy:
+ matrix:
+ Python310:
+ python.version: '3.10'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '$(python.version)'
+ displayName: 'Use Python $(python.version)'
+ - script: |
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install latest available version of Python dependencies'
+ - script: |
+ python -m build
+ python -m pip install .[all]
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ python -m pip list
+ displayName: 'List installed Python packages'
+ - script: |
+ python -m pip install pytest-azurepipelines
+ pytest
+ displayName: 'Run test-suite'
+ - powershell:
+ cd doc ; .\make.bat html
+ displayName: 'Build the HTML documentation'
+
+- stage: test_macOS_latest
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job:
+ pool:
+ vmImage: 'macos-latest'
+ strategy:
+ matrix:
+ Python311:
+ python.version: '3.11'
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '$(python.version)'
+ displayName: 'Use Python $(python.version)'
+ - script: |
+ python -m pip install --upgrade build pip setuptools wheel
+ displayName: 'Install latest available version of Python dependencies'
+ - script: |
+ python -m build
+ python -m pip install '.[all]'
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ python -m pip list
+ displayName: 'List installed Python packages'
+ - script: |
+ python -m pip install pytest-azurepipelines
+ pytest
+ displayName: 'Run test-suite and collect coverage'
+
+- stage: development_version
+ dependsOn: check_codestyle
+ condition: succeededOrFailed()
+ jobs:
+ - job: Python312_dev
+ pool:
+ vmImage: 'ubuntu-latest'
+ steps:
+ - script: |
+ sudo add-apt-repository ppa:deadsnakes/nightly
+ sudo apt-get update && sudo apt-get install -y --no-install-recommends python3.12-dev python3.12-venv
+ displayName: Install Python development version from the deadsnakes PPA
+ - script: |
+ sudo apt-get update && sudo apt-get install -yq --no-install-suggests --no-install-recommends \
+ libatlas-base-dev liblapack-dev gfortran libgmp-dev libmpfr-dev libsuitesparse-dev ccache \
+ swig libmpc-dev
+ displayName: 'Install dependencies'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ ##curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+ ##python3.12 get-pip.py --user
+ python3.12 -m ensurepip --upgrade
+ pip3.12 install -U build pip setuptools wheel pybind11 cython || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Install build, pip, setuptools, wheel, pybind11, and cython'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ export numpy_version=1.24.3
+ wget https://github.com/numpy/numpy/releases/download/v${numpy_version}/numpy-${numpy_version}.tar.gz
+ tar xzvf numpy-${numpy_version}.tar.gz
+ cd numpy-${numpy_version}
+ python3.12 setup.py install --user || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Install latest available version of NumPy'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ pip3.12 install -U pythran || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Install pythran'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ export scipy_version=1.10.1
+ wget https://github.com/scipy/scipy/releases/download/v${scipy_version}/scipy-${scipy_version}.tar.gz
+ tar xzvf scipy-${scipy_version}.tar.gz
+ cd scipy-${scipy_version}
+ python3.12 setup.py install --user || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Install latest available version of SciPy'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ # remove numdifftools for now as it pulls in statsmodels, that wants to build with NumPy 1.14.5
+ pip3.12 install asteval uncertainties dill emcee flaky pytest pytest-cov || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Install latest available version of Python dependencies'
+ - script: |
+ python3.12 -m build
+ python3.12 -m pip install '.[test]' --user || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Build wheel/sdist and install lmfit'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ pip3.12 list || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'List installed Python packages'
+ - script: |
+ export PATH=/home/vsts/.local/bin:$PATH
+ pip3.12 install pytest-azurepipelines
+ cd $(Agent.BuildDirectory)/s/tests
+ pytest || echo -e "\043#vso[task.logissue type=warning;] Allowed failure for development version!!"
+ displayName: 'Run test-suite'
diff --git a/changelog b/debian/changelog
index 03186ed..03186ed 100644
--- a/changelog
+++ b/debian/changelog
diff --git a/control b/debian/control
index 9f46275..9f46275 100644
--- a/control
+++ b/debian/control
diff --git a/copyright b/debian/copyright
index 36eacdb..36eacdb 100644
--- a/copyright
+++ b/debian/copyright
diff --git a/patches/0004-jupyter_sphinx-is-not-yet-working-on-Debian.patch b/debian/patches/0004-jupyter_sphinx-is-not-yet-working-on-Debian.patch
index 02d7be7..02d7be7 100644
--- a/patches/0004-jupyter_sphinx-is-not-yet-working-on-Debian.patch
+++ b/debian/patches/0004-jupyter_sphinx-is-not-yet-working-on-Debian.patch
diff --git a/patches/deactivate_test.patch b/debian/patches/deactivate_test.patch
index 625550a..625550a 100644
--- a/patches/deactivate_test.patch
+++ b/debian/patches/deactivate_test.patch
diff --git a/patches/series b/debian/patches/series
index 9ae3e28..9ae3e28 100644
--- a/patches/series
+++ b/debian/patches/series
diff --git a/python-lmfit-doc.doc-base b/debian/python-lmfit-doc.doc-base
index 8ca28f3..8ca28f3 100644
--- a/python-lmfit-doc.doc-base
+++ b/debian/python-lmfit-doc.doc-base
diff --git a/python3-lmfit.remove b/debian/python3-lmfit.remove
index 0ba55b6..0ba55b6 100644
--- a/python3-lmfit.remove
+++ b/debian/python3-lmfit.remove
diff --git a/rules b/debian/rules
index f05654b..f05654b 100755
--- a/rules
+++ b/debian/rules
diff --git a/source/format b/debian/source/format
index 163aaf8..163aaf8 100644
--- a/source/format
+++ b/debian/source/format
diff --git a/tests/control b/debian/tests/control
index 11b44e5..11b44e5 100644
--- a/tests/control
+++ b/debian/tests/control
diff --git a/upstream/metadata b/debian/upstream/metadata
index 488b63d..488b63d 100644
--- a/upstream/metadata
+++ b/debian/upstream/metadata
diff --git a/watch b/debian/watch
index 4087808..4087808 100644
--- a/watch
+++ b/debian/watch
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..ecd46cb
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,127 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXBUILD = sphinx-build
+SPHINX_OPTS = -W
+SPHINX_DEBUGOPTS = --keep-going -n
+BUILDDIR = _build
+
+# Internal variables.
+PAPER =
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+SPHINX_OUTPUT = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER))
+ALLSPHINXOPTS = $(SPHINX_OUTPUT) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest latexpdf htmlzip
+.PHONY: all pdf gallery debug
+
+html: gallery
+ cp sphinx/ext_mathjax.py extensions.py
+ $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/html
+ @echo
+ @echo "html build finished: $(BUILDDIR)/html."
+
+debug: gallery
+ cp sphinx/ext_mathjax.py extensions.py
+ $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_DEBUGOPTS) . $(BUILDDIR)/html
+ @echo
+ @echo "html build finished: $(BUILDDIR)/html."
+
+gallery: examples/index.rst
+
+examples/index.rst:
+ ./doc_examples_to_gallery.py
+
+htmlzip: html
+ cp sphinx/ext_mathjax.py extensions.py
+ $(SPHINXBUILD) -b html $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/lmfit_doc
+ cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc
+
+epub: gallery
+ cp sphinx/ext_imgmath.py extensions.py
+ $(SPHINXBUILD) -b epub $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/epub
+ mkdir -p $(BUILDDIR)/html
+ cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/.
+
+pdf: latex
+ cp sphinx/ext_imgmath.py extensions.py
+ cd $(BUILDDIR)/latex && make all-pdf
+ mkdir -p $(BUILDDIR)/html
+ cp -pr $(BUILDDIR)/latex/lmfit.pdf $(BUILDDIR)/html/.
+
+all: html htmlzip epub pdf
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)
+ -rm -f extensions.py
+ -rm -f *.dat *.sav *.csv
+ -rm -rf examples/*
+ -rm -rf ../examples/documentation
+ -rm -rf __pycache__
+
+dirhtml: gallery
+ $(SPHINXBUILD) -b dirhtml $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle: gallery
+ $(SPHINXBUILD) -b pickle $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json: gallery
+ $(SPHINXBUILD) -b json $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp: gallery
+ $(SPHINXBUILD) -b htmlhelp $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+latex: gallery
+ cp sphinx/ext_imgmath.py extensions.py
+ $(SPHINXBUILD) -b latex $(SPHINX_OUTPUT) $(SPHINX_OPTS) . _build/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in _build/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+latexpdf:
+ cp sphinx/ext_imgmath.py extensions.py
+ $(SPHINXBUILD) -b latex $(SPHINX_OUTPUT) $(SPHINX_OPTS) . _build/latex
+ @echo "Running LaTeX files through pdflatex..."
+ make -C _build/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in _build/latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(SPHINX_OUTPUT) $(SPHINX_OPTS) . $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/_static/empty b/doc/_static/empty
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/doc/_static/empty
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
new file mode 100644
index 0000000..5359efa
--- /dev/null
+++ b/doc/_templates/indexsidebar.html
@@ -0,0 +1,19 @@
+<h3 style="text-align:center;">Getting LMFIT</h3>
+<p>Current version: <b><a href="whatsnew.html">{{ release }}</a></b></p>
+<p>Install: &nbsp; <tt>pip install lmfit</tt>
+<p>Download: &nbsp; <a href="https://pypi.python.org/pypi/lmfit/">PyPI</a>
+<p>Develop: &nbsp; <a href="https://github.com/lmfit/lmfit-py/">GitHub</a> <br>
+
+<h3 style="text-align:center;">Questions?</h3>
+
+&nbsp; <a href="faq.html">Frequently Asked Questions</a><br>
+&nbsp; <a href="https://groups.google.com/group/lmfit-py">Mailing List</a><br>
+&nbsp; <a href="support.html">Getting Help</a><br>
+
+<h3 style="text-align:center;">Static, off-line docs</h3>
+
+&nbsp; &nbsp; &nbsp;[ <a href="lmfit.pdf">PDF</a>
+| <a href="lmfit.epub">EPUB</a>
+| <a href="lmfit_doc.zip">HTML (zip)</a> ]
+
+<p>
diff --git a/doc/bounds.rst b/doc/bounds.rst
new file mode 100644
index 0000000..4acfc01
--- /dev/null
+++ b/doc/bounds.rst
@@ -0,0 +1,92 @@
+.. _bounds_chapter:
+
+=====================
+Bounds Implementation
+=====================
+
+.. _MINPACK-1: https://en.wikipedia.org/wiki/MINPACK
+.. _MINUIT: https://en.wikipedia.org/wiki/MINUIT
+.. _leastsqbound: https://github.com/jjhelmus/leastsqbound-scipy
+
+This section describes the implementation of :class:`Parameter` bounds.
+The `MINPACK-1`_ implementation used in :scipydoc:`optimize.leastsq` for
+the Levenberg-Marquardt algorithm does not explicitly support bounds on
+parameters, and expects to be able to fully explore the available range of
+values for any Parameter. Simply placing hard constraints (that is,
+resetting the value when it exceeds the desired bounds) prevents the
+algorithm from determining the partial derivatives, and leads to unstable
+results.
+
+Instead of placing such hard constraints, bounded parameters are
+mathematically transformed using the formulation devised (and documented)
+for `MINUIT`_. This is implemented following (and borrowing heavily from)
+the `leastsqbound`_ package from J. J. Helmus. Parameter values are mapped from
+internally used, freely variable values :math:`P_{\rm internal}` to bounded
+parameters :math:`P_{\rm bounded}`. When both ``min`` and ``max`` bounds
+are specified, the mapping is:
+
+.. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ P_{\rm internal} &=& \arcsin\big(\frac{2 (P_{\rm bounded} - {\rm min})}{({\rm max} - {\rm min})} - 1\big) \\
+ P_{\rm bounded} &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2}
+ \end{eqnarray*}
+
+With only an upper limit ``max`` supplied, but ``min`` left unbounded, the
+mapping is:
+
+.. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ P_{\rm internal} &=& \sqrt{({\rm max} - P_{\rm bounded} + 1)^2 - 1} \\
+ P_{\rm bounded} &=& {\rm max} + 1 - \sqrt{P_{\rm internal}^2 + 1}
+ \end{eqnarray*}
+
+With only a lower limit ``min`` supplied, but ``max`` left unbounded, the
+mapping is:
+
+.. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ P_{\rm internal} &=& \sqrt{(P_{\rm bounded} - {\rm min} + 1)^2 - 1} \\
+ P_{\rm bounded} &=& {\rm min} - 1 + \sqrt{P_{\rm internal}^2 + 1}
+ \end{eqnarray*}
+
+With these mappings, the value for the bounded Parameter cannot exceed the
+specified bounds, though the internally varied value can be freely varied.
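+
+As a small illustration (a sketch with hypothetical helper names, not
+lmfit's internal code), the two-sided mapping above can be written with
+NumPy as::
+
+    import numpy as np
+
+    def to_internal(p_bounded, pmin, pmax):
+        # map a value in [pmin, pmax] onto the unconstrained internal variable
+        return np.arcsin(2*(p_bounded - pmin)/(pmax - pmin) - 1)
+
+    def to_bounded(p_internal, pmin, pmax):
+        # map the freely-varying internal value back into [pmin, pmax]
+        return pmin + (np.sin(p_internal) + 1)*(pmax - pmin)/2.0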
+
+It bears repeating that code from `leastsqbound`_ was adopted to implement
+the transformation described above. The challenging part (thanks again to
+Jonathan J. Helmus!) here is to re-transform the covariance matrix so that
+the uncertainties can be estimated for bounded Parameters. This is done
+by using the derivative :math:`dP_{\rm internal}/dP_{\rm bounded}` from
+the equations above to re-scale the Jacobian matrix before constructing
+the covariance matrix from it. Tests show that this re-scaling of the
+covariance matrix works quite well, and that uncertainties estimated for
+bounded Parameters are quite reasonable. Of course, if the best-fit value
+is very close to a boundary, the derivative-estimated uncertainty and
+correlations for that parameter may not be reliable.
+
+The `MINUIT`_ documentation recommends caution in using bounds. Setting
+bounds can certainly increase the number of function evaluations (and so
+computation time), and in some cases may cause some instabilities, as the
+range of acceptable parameter values is not fully explored. On the other
+hand, preliminary tests suggest that using ``max`` and ``min`` to set
+clearly outlandish bounds does not greatly affect performance or results.
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
new file mode 100644
index 0000000..ea0d6f9
--- /dev/null
+++ b/doc/builtin_models.rst
@@ -0,0 +1,922 @@
+.. _builtin_models_chapter:
+
+===================================================
+Built-in Fitting Models in the :mod:`models` module
+===================================================
+
+.. module:: lmfit.models
+
+Lmfit provides several built-in fitting models in the :mod:`models` module.
+These pre-defined models each subclass from the :class:`~lmfit.model.Model` class of the
+previous chapter and wrap relatively well-known functional forms, such as
+Gaussian, Lorentzian, and Exponential that are used in a wide range of
+scientific domains. In fact, all the models are based on simple, plain
+Python functions defined in the :mod:`~lmfit.lineshapes` module. In addition to
+wrapping a function into a :class:`~lmfit.model.Model`, these models also provide a
+:meth:`~lmfit.model.Model.guess` method that is intended to give a reasonable
+set of starting values from a data array that closely approximates the
+data to be fit.
+
+As shown in the previous chapter, a key feature of the :class:`~lmfit.model.Model` class
+is that models can easily be combined to give a composite
+:class:`~lmfit.model.CompositeModel`. Thus, while some of the models listed here may
+seem pretty trivial (notably, :class:`ConstantModel` and :class:`LinearModel`),
+the main point of having these is to be able to use them in composite models. For
+example, a Lorentzian plus a linear background might be represented as:
+
+.. jupyter-execute::
+ :hide-output:
+
+ from lmfit.models import LinearModel, LorentzianModel
+
+ peak = LorentzianModel()
+ background = LinearModel()
+ model = peak + background
+
+Almost all the models listed below are one-dimensional, with an independent
+variable named ``x``. Many of these models represent a function with a
+distinct peak, and so share common features. To maintain uniformity,
+common parameter names are used whenever possible. Thus, most models have
+a parameter called ``amplitude`` that represents the overall intensity (or
+area) of a peak or function and a ``sigma`` parameter that gives a
+characteristic width.
+
+After a list of built-in models, a few examples of their use are given.
+
+Peak-like models
+----------------
+
+There are many peak-like models available. These include
+:class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel`,
+:class:`PseudoVoigtModel`, and some less commonly used variations. Most of
+these models are *unit-normalized* and share the same parameter names so
+that you can easily switch between models and interpret the results. The
+``amplitude`` parameter is the multiplicative factor for the
+unit-normalized peak lineshape, and so will represent the strength of that
+peak or the area under that curve. The ``center`` parameter will be the
+centroid ``x`` value. The ``sigma`` parameter is the characteristic width
+of the peak, with many functions using :math:`(x-\mu)/\sigma` where
+:math:`\mu` is the centroid value. Most of these peak functions will have
+two additional parameters derived from and constrained by the other
+parameters. The first of these is ``fwhm`` which will hold the estimated
+"Full Width at Half Max" for the peak, which is often easier to compare
+between different models than ``sigma``. The second of these is ``height``
+which will contain the maximum value of the peak, typically the value at
+:math:`x = \mu`. Finally, each of these models has a :meth:`guess` method
+that uses data to make a fairly crude but usually sufficient guess for the
+value of ``amplitude``, ``center``, and ``sigma``, and sets a lower bound
+of 0 on the value of ``sigma``.
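+
+For example, a Gaussian peak might be fit with (a minimal sketch, assuming
+arrays ``x`` and ``y`` hold the data)::
+
+    from lmfit.models import GaussianModel
+
+    peak = GaussianModel()
+    params = peak.guess(y, x=x)
+    result = peak.fit(y, params, x=x)
+    print(result.params['fwhm'].value, result.params['height'].value)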
+
+:class:`GaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: GaussianModel
+
+:class:`LorentzianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: LorentzianModel
+
+:class:`SplitLorentzianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SplitLorentzianModel
+
+:class:`VoigtModel`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: VoigtModel
+
+:class:`PseudoVoigtModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: PseudoVoigtModel
+
+:class:`MoffatModel`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: MoffatModel
+
+:class:`Pearson4Model`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Pearson4Model
+
+:class:`Pearson7Model`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Pearson7Model
+
+:class:`StudentsTModel`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: StudentsTModel
+
+:class:`BreitWignerModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: BreitWignerModel
+
+:class:`LognormalModel`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: LognormalModel
+
+:class:`DampedOscillatorModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: DampedOscillatorModel
+
+:class:`DampedHarmonicOscillatorModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: DampedHarmonicOscillatorModel
+
+:class:`ExponentialGaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ExponentialGaussianModel
+
+:class:`SkewedGaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SkewedGaussianModel
+
+:class:`SkewedVoigtModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SkewedVoigtModel
+
+:class:`ThermalDistributionModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ThermalDistributionModel
+
+:class:`DoniachModel`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: DoniachModel
+
+
+Linear and Polynomial Models
+----------------------------
+
+These models correspond to polynomials of some degree. Of course, lmfit is
+a very inefficient way to do linear regression (see :numpydoc:`polyfit`
+or :scipydoc:`stats.linregress`), but these models may be useful as one
+of many components of a composite model. The SplineModel below corresponds
+to a cubic spline.
+
+
+:class:`ConstantModel`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ConstantModel
+
+:class:`LinearModel`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: LinearModel
+
+:class:`QuadraticModel`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: QuadraticModel
+
+:class:`PolynomialModel`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: PolynomialModel
+
+:class:`SplineModel`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SplineModel
+
+
+
+Periodic Models
+---------------
+
+These models correspond to periodic functions.
+
+:class:`SineModel`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SineModel
+
+
+Step-like models
+----------------
+
+Two models represent step-like functions, and share many characteristics.
+
+:class:`StepModel`
+~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: StepModel
+
+:class:`RectangleModel`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RectangleModel
+
+
+Exponential and Power law models
+--------------------------------
+
+:class:`ExponentialModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ExponentialModel
+
+:class:`PowerLawModel`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: PowerLawModel
+
+
+Two dimensional Peak-like models
+--------------------------------
+
+The one example of a two-dimensional peak is a two-dimensional Gaussian.
+
+:class:`Gaussian2dModel`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Gaussian2dModel
+
+User-defined Models
+-------------------
+
+.. _asteval: https://newville.github.io/asteval/
+
+As shown in the previous chapter (:ref:`model_chapter`), it is fairly
+straightforward to build fitting models from parametrized Python functions.
+The number of model classes listed so far in the present chapter should
+make it clear that this process is not too difficult. Still, it is
+sometimes desirable to build models from a user-supplied function. This
+may be especially true if model-building is built-in to some larger library
+or application for fitting in which the user may not be able to easily
+build and use a new model from Python code.
+
+
+The :class:`ExpressionModel` allows a model to be built from a
+user-supplied expression. This uses the `asteval`_ module also used for
+mathematical constraints as discussed in :ref:`constraints_chapter`.
+
+
+:class:`ExpressionModel`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ExpressionModel
+
+Since the point of this model is that an arbitrary expression will be
+supplied, the determination of the parameter names for the model
+happens when the model is created. To do this, the expression is parsed,
+and all symbol names are found. Names that are already known (there are
+over 500 function and value names in the asteval namespace, including most
+Python built-ins, more than 200 functions inherited from NumPy, and more
+than 20 common lineshapes defined in the :mod:`lineshapes` module) are not
+converted to parameters. Unrecognized names are expected to be names of either
+parameters or independent variables. If ``independent_vars`` is the
+default value of ``None``, and if the expression contains a variable named
+``x``, that will be used as the independent variable. Otherwise,
+``independent_vars`` must be given.
+
+For example, if one creates an :class:`ExpressionModel` as:
+
+.. jupyter-execute::
+
+ from lmfit.models import ExpressionModel
+
+ mod = ExpressionModel('off + amp * exp(-x/x0) * sin(x*phase)')
+
+The name ``exp`` will be recognized as the exponential function, so the model
+will be interpreted to have parameters named ``off``, ``amp``, ``x0`` and
+``phase``. In addition, ``x`` will be assumed to be the sole independent variable.
+In general, there is no obvious way to set default parameter values or
+parameter hints for bounds, so this will have to be handled explicitly.
+
+To evaluate this model, you might do the following:
+
+.. jupyter-execute::
+
+ from numpy import exp, linspace, sin
+
+ x = linspace(0, 10, 501)
+ params = mod.make_params(off=0.25, amp=1.0, x0=2.0, phase=0.04)
+ y = mod.eval(params, x=x)
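+
+Bounds can then be set on the resulting parameters by hand (a minimal
+sketch; the bound values here are arbitrary):
+
+.. jupyter-execute::
+
+    params['x0'].set(min=1.e-8)       # assumed: decay constant must be positive
+    params['off'].set(min=-1, max=1)  # assumed: bounds for the offset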
+
+While many custom models can be built with a single line expression
+(especially since the names of the lineshapes like ``gaussian``, ``lorentzian``
+and so on, as well as many NumPy functions, are available), more complex
+models will inevitably require multi-line functions. You can include
+such Python code with the ``init_script`` argument. The text of this script
+is evaluated when the model is initialized (and before the actual
+expression is parsed), so that you can define functions to be used
+in your expression.
+
+As a probably unphysical example, to make a model that is the derivative of
+a Gaussian function times the logarithm of a Lorentzian function, you could
+define this in a script:
+
+.. jupyter-execute::
+
+ script = """
+ def mycurve(x, amp, cen, sig):
+ loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
+ gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
+ return log(loren) * gradient(gauss) / gradient(x)
+ """
+
+and then use this with :class:`ExpressionModel` as:
+
+.. jupyter-execute::
+
+ mod = ExpressionModel('mycurve(x, height, mid, wid)', init_script=script,
+ independent_vars=['x'])
+
+As above, this will interpret the parameter names to be ``height``, ``mid``,
+and ``wid``, and build a model that can be used to fit data.
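+
+As a quick check (a sketch re-using the ``x`` array defined earlier), this
+model can be evaluated like any other:
+
+.. jupyter-execute::
+    :hide-output:
+
+    params = mod.make_params(height=1.0, mid=5.0, wid=2.0)
+    y = mod.eval(params, x=x)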
+
+
+Example 1: Fit Peak data to Gaussian, Lorentzian, and Voigt profiles
+--------------------------------------------------------------------
+
+Here, we will fit data to three similar lineshapes, in order to decide which
+might be the better model. We will start with a Gaussian profile, as in
+the previous chapter, but use the built-in :class:`GaussianModel` instead
+of writing one ourselves. This is a slightly different version from the
+one in the previous example in that the parameter names are different and
+have built-in default values. We will simply use:
+
+.. jupyter-execute::
+ :hide-output:
+
+ from numpy import loadtxt
+
+ from lmfit.models import GaussianModel
+
+ data = loadtxt('test_peak.dat')
+ x = data[:, 0]
+ y = data[:, 1]
+
+ mod = GaussianModel()
+
+ pars = mod.guess(y, x=x)
+ out = mod.fit(y, pars, x=x)
+
+ print(out.fit_report(min_correl=0.25))
+
+which prints out the results:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(out.fit_report(min_correl=0.25))
+
+We see a few interesting differences from the results of the previous
+chapter. First, the parameter names are longer. Second, there are ``fwhm``
+and ``height`` parameters, to give the full-width-at-half-maximum and
+maximum peak height, respectively. And third, the automated initial guesses
+are pretty good. A plot of the fit:
+
+.. jupyter-execute::
+ :hide-code:
+
+ import matplotlib as mpl
+ mpl.rcParams['figure.dpi'] = 150
+ %matplotlib inline
+ %config InlineBackend.figure_format = 'svg'
+
+ import matplotlib.pyplot as plt
+ plt.plot(x, y, '-')
+ plt.plot(x, out.best_fit, '-', label='Gaussian Model')
+ plt.legend()
+ plt.show()
+
+shows a decent match to the data -- the fit worked with no explicit setting
+of initial parameter values. Looking more closely, the fit is not perfect,
+especially in the tails of the peak, suggesting that a different peak
+shape, with longer tails, should be used. Perhaps a Lorentzian would be
+better? To do this, we simply replace ``GaussianModel`` with
+``LorentzianModel``:
+
+.. jupyter-execute::
+
+ from lmfit.models import LorentzianModel
+
+ mod = LorentzianModel()
+
+with the rest of the script as above. Perhaps predictably, the first thing
+we try gives results that are worse, as judged by the fit statistics:
+
+.. jupyter-execute::
+ :hide-code:
+
+ pars = mod.guess(y, x=x)
+ out = mod.fit(y, pars, x=x)
+ print(out.fit_report(min_correl=0.25))
+
+and also by visual inspection of the fit to the data (figure below).
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, '-')
+ plt.plot(x, out.best_fit, '-', label='Lorentzian Model')
+ plt.legend()
+ plt.show()
+
+The tails are now too big, and the value for :math:`\chi^2` almost doubled.
+A Voigt model does a better job. Using :class:`VoigtModel`, this is as simple as using:
+
+.. jupyter-execute::
+
+ from lmfit.models import VoigtModel
+
+ mod = VoigtModel()
+
+with all the rest of the script as above. This gives:
+
+.. jupyter-execute::
+ :hide-code:
+
+ pars = mod.guess(y, x=x)
+ out = mod.fit(y, pars, x=x)
+ print(out.fit_report(min_correl=0.25))
+
+which has a much better value for :math:`\chi^2` and the other
+goodness-of-fit measures, and an obviously better match to the data as seen
+in the figure below (left).
+
+.. jupyter-execute::
+ :hide-code:
+
+ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+ axes[0].plot(x, y, '-')
+ axes[0].plot(x, out.best_fit, '-', label='Voigt Model\ngamma constrained')
+ axes[0].legend()
+ # free gamma parameter
+ pars['gamma'].set(value=0.7, vary=True, expr='')
+ out_gamma = mod.fit(y, pars, x=x)
+ axes[1].plot(x, y, '-')
+ axes[1].plot(x, out_gamma.best_fit, '-', label='Voigt Model\ngamma unconstrained')
+ axes[1].legend()
+ plt.show()
+
+Fit to peak with Voigt model (left) and Voigt model with ``gamma``
+varying independently of ``sigma`` (right).
+
+Can we do better? The Voigt function has a :math:`\gamma` parameter
+(``gamma``) that can be distinct from ``sigma``. The default behavior used
+above constrains ``gamma`` to have exactly the same value as ``sigma``. If
+we allow these to vary separately, does the fit improve? To do this, we
+have to change the ``gamma`` parameter from a constrained expression and
+give it a starting value using something like::
+
+ mod = VoigtModel()
+ pars = mod.guess(y, x=x)
+ pars['gamma'].set(value=0.7, vary=True, expr='')
+
+which gives:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(out_gamma.fit_report(min_correl=0.25))
+
+and the fit shown on the right above.
+
+Comparing the two fits with the Voigt function, we see that :math:`\chi^2`
+is definitely improved with a separately varying ``gamma`` parameter. In
+addition, the two values for ``gamma`` and ``sigma`` differ significantly
+-- well outside the estimated uncertainties. More compelling, reduced
+:math:`\chi^2` is improved even though a fourth variable has been added to
+the fit. In the simplest statistical sense, this suggests that ``gamma``
+is a significant variable in the model. In addition, we can use either the
+Akaike or the Bayesian Information Criterion (see
+:ref:`information_criteria_label`) to assess how much more likely the model
+with variable ``gamma`` is to explain the data than the model with ``gamma``
+fixed to the value of ``sigma``. According to theory,
+:math:`\exp(-(\rm{AIC1}-\rm{AIC0})/2)` gives the probability that a model with
+AIC1 is more likely than a model with AIC0. For the two models here, with
+AIC values of -1436 and -1324 (Note: if we had more carefully set the value
+for ``weights`` based on the noise in the data, these values might be
+positive, but their difference would be roughly the same), this says that
+the model with ``gamma`` fixed to ``sigma`` has a probability less than 5.e-25
+of being the better model.
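+
+To make this concrete (a minimal sketch using the ``aic`` attributes of the
+two fit results computed above):
+
+.. jupyter-execute::
+
+    import numpy as np
+
+    # probability that the gamma-constrained model is the better one
+    print(np.exp((out_gamma.aic - out.aic) / 2))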
+
+
+Example 2: Fit data to a Composite Model with pre-defined models
+----------------------------------------------------------------
+
+Here, we repeat the point made at the end of the last chapter that
+instances of :class:`~lmfit.model.Model` class can be added together to make a
+*composite model*. By using the large number of built-in models available,
+it is therefore very simple to build models that contain multiple peaks and
+various backgrounds. An example of a simple fit to a noisy step function
+plus a constant:
+
+.. jupyter-execute:: ../examples/doc_builtinmodels_stepmodel.py
+ :hide-output:
+
+After constructing step-like data, we first create a :class:`StepModel`
+telling it to use the ``erf`` form (see details above), and a
+:class:`ConstantModel`. We set initial values, in one case using the data
+and the :meth:`guess` method for the initial step function parameters, and
+:meth:`make_params` arguments for the constant component.
+After making a composite model, we run :meth:`fit` and report the
+results, which gives:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(out.fit_report())
+
+with a plot of
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y)
+ plt.plot(x, out.init_fit, '--', label='initial fit')
+ plt.plot(x, out.best_fit, '-', label='best fit')
+ plt.legend()
+ plt.show()
+
+
+Example 3: Fitting Multiple Peaks -- and using Prefixes
+-------------------------------------------------------
+
+.. _NIST StRD: https://itl.nist.gov/div898/strd/nls/nls_main.shtml
+
+As shown above, many of the models have similar parameter names. For
+composite models, this could lead to a problem of having parameters for
+different parts of the model having the same name. To overcome this, each
+:class:`~lmfit.model.Model` can have a ``prefix`` attribute (normally set to a blank
+string) that will be put at the beginning of each parameter name. To
+illustrate, we fit one of the classic datasets from the `NIST StRD`_ suite
+involving a decaying exponential and two Gaussians.
+
+.. jupyter-execute:: ../examples/doc_builtinmodels_nistgauss.py
+ :hide-output:
+
+where we give a separate prefix to each model (they all have an
+``amplitude`` parameter). The ``prefix`` values are attached transparently
+to the models.
+
+Note that the calls to :meth:`make_params` used the bare name, without the
+prefix. We could have used the prefixes, but because we used the
+individual models ``gauss1`` and ``gauss2``, there was no need.
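+
+To illustrate the naming (a standalone sketch, separate from the script
+above), giving a model a ``prefix`` changes its parameter names but not the
+argument names of the underlying function:
+
+.. jupyter-execute::
+
+    from lmfit.models import GaussianModel
+
+    gmodel = GaussianModel(prefix='g1_')
+    print(gmodel.param_names)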
+
+Note also in the example here that we explicitly set bounds on many of the
+parameter values.
+
+The fit results printed out are:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(out.fit_report())
+
+We get a very good fit to this problem (described at the NIST site as of
+average difficulty, but the tests there are generally deliberately challenging) by
+applying reasonable initial guesses and putting modest but explicit bounds
+on the parameter values. The overall fit is shown on the left, with its individual
+components displayed on the right:
+
+.. jupyter-execute::
+ :hide-code:
+
+ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+ axes[0].plot(x, y)
+ axes[0].plot(x, init, '--', label='initial fit')
+ axes[0].plot(x, out.best_fit, '-', label='best fit')
+ axes[0].legend()
+
+ comps = out.eval_components(x=x)
+ axes[1].plot(x, y)
+ axes[1].plot(x, comps['g1_'], '--', label='Gaussian component 1')
+ axes[1].plot(x, comps['g2_'], '--', label='Gaussian component 2')
+ axes[1].plot(x, comps['exp_'], '--', label='Exponential component')
+ axes[1].legend()
+
+ plt.show()
+
+One final point on setting initial values. From looking at the data
+itself, we can see the two Gaussian peaks are reasonably well separated but
+do overlap. Furthermore, we can tell that the initial guess for the
+decaying exponential component was poorly estimated because we used the
+full data range. We can improve the initial parameter estimates by using
+this observation, defining an :func:`index_of` function to limit the data range.
+That is, with::
+
+ def index_of(arrval, value):
+ """Return index of array *at or below* value."""
+ if value < min(arrval):
+ return 0
+ return max(np.where(arrval <= value)[0])
+
+
+ ix1 = index_of(x, 75)
+ ix2 = index_of(x, 135)
+ ix3 = index_of(x, 175)
+
+ exp_mod.guess(y[:ix1], x=x[:ix1])
+ gauss1.guess(y[ix1:ix2], x=x[ix1:ix2])
+ gauss2.guess(y[ix2:ix3], x=x[ix2:ix3])
+
+
+.. jupyter-execute:: ../examples/doc_builtinmodels_nistgauss2.py
+ :hide-code:
+ :hide-output:
+
+we can get a better initial estimate (see below).
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y)
+ plt.plot(x, out.init_fit, '--', label='initial fit')
+ plt.plot(x, out.best_fit, '-', label='best fit')
+ plt.legend()
+
+ plt.show()
+
+The fit converges to the same answer, giving identical values
+(to the precision printed out in the report), but in fewer steps,
+and without any bounds on parameters at all:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(out.fit_report())
+
+This script is in the file ``doc_builtinmodels_nistgauss2.py`` in the examples folder,
+and the figure above shows an improved initial estimate of the data.
+
+
+Example 4: Using a Spline Model
+--------------------------------
+
+In the example above, the two peaks might represent the interesting part of
+the data, and the exponential decay could be viewed as a "background" which
+might be due to other physical effects or part of some response of the
+instrumentation used to make the measurement. That is, the background might
+be well understood to follow an exponential decay, as in the example above,
+and so is easily included in the full analysis. As the results
+above show, there is some -- but not huge -- correlation of the parameters
+between the peak amplitudes and the decay of the exponential function.
+That means that it is helpful to include all of those components in a
+single fit, as the uncertainties in the peak amplitudes (which would be
+interpreted as "line strength" or "area") will reflect some of the
+uncertainty in how well we modeled the background.
+
+Sometimes a background is more complex or at least has a less obvious
+functional form. In these cases, it can be useful to use a *spline* to
+model part of the curve. Just for completeness, a spline is a piecewise
+continuous polynomial function (typically made of cubic polynomials) that
+has a series of ``x`` values known as "knots" at which the highest order
+derivative is allowed to be discontinuous. By adding more knots, the
+spline function has more flexibility to follow a particular function.
+
+As an example (see the example file "doc_builtinmodels_splinemodel.py"), we
+start with data with a single peak and a background that is hard to
+characterize clearly as either a simple decay or an oscillatory structure.
+
+.. jupyter-execute::
+ :hide-output:
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from lmfit.models import SplineModel, GaussianModel
+
+ data = np.loadtxt('test_splinepeak.dat')
+ x = data[:, 0]
+ y = data[:, 1]
+
+ plt.plot(x, y, label='data')
+ plt.legend()
+ plt.show()
+
+which shows (figure below):
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, label='data')
+ plt.legend()
+ plt.show()
+
+
+There is definitely a peak there, so we could start with building a model
+for a Gaussian peak, say with:
+
+.. jupyter-execute::
+ :hide-output:
+
+ model = GaussianModel(prefix='peak_')
+ params = model.make_params(amplitude=8, center=16, sigma=1)
+
+
+To account for that changing background, we'll use a spline, but need to
+know where to put the "knots". Picking points away from the peak makes
+sense -- we don't want to fit the peak -- but we want it to have some
+flexibility near the peak. Let's try spacing knot points at ``x=1, 3, ...,
+13``, then skip over the peak at around ``x=16`` and then pick up knot
+points at ``x=19, 21, 23, 25``.
+
+.. jupyter-execute::
+ :hide-output:
+
+ knot_xvals = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25])
+
+ bkg = SplineModel(prefix='bkg_', xknots=knot_xvals)
+ params.update(bkg.guess(y, x))
+
+
+Note that we used ``bkg.guess()`` to guess the initial values of the spline
+parameters and then updated the ``params`` Parameters object with these 11
+parameters to account for the spline. These will be very close to the ``y``
+values at the knot ``x`` values. The precise definition of the spline knot
+parameters is not "the y-values through which the resulting spline curve
+goes", but these values are pretty good estimates for the resulting spline
+values. You'll see below that these initial values are close.
+
+With a spline background defined, we can create a composite model, and run
+a fit.
+
+.. jupyter-execute::
+ :hide-output:
+
+ model = model + bkg
+
+ params['peak_amplitude'].min = 0
+ params['peak_center'].min = 10
+ params['peak_center'].max = 20
+
+ out = model.fit(y, params, x=x)
+ print(out.fit_report(min_correl=0.3))
+
+You'll see that we first set some "sanity bounds" on the peak parameters to
+prevent the fit of the peak from going completely wrong. This really is not
+necessary in this case, but it is often a reasonable thing to do -- the
+general advice is to be generous with the bounds, not overly restrictive.
+
+This fit will print out a report of
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(out.fit_report(min_correl=0.3))
+
+
+From this we can make a few observations. First, the correlation between
+the "spline" parameters and the "peak" parameters is noticeable, but not
+extremely high -- that's good, and the estimated uncertainties do account
+for this correlation. The spline components are correlated with each other
+(especially with their N-1 and N+1 neighbors). Second, we can see
+that the initial values for the background spline parameters are pretty
+good.
+
+We can plot the results and fit components with
+
+.. jupyter-execute::
+ :hide-output:
+
+ comps = out.eval_components()
+ plt.plot(x, out.best_fit, label='best fit')
+ plt.plot(x, comps['bkg_'], label='background')
+ plt.plot(x, comps['peak_'], label='peak')
+ plt.legend()
+
+which will generate the plot shown below:
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, label='data')
+ plt.plot(x, out.best_fit, label='best fit')
+ plt.plot(x, comps['bkg_'], label='background')
+ plt.plot(x, comps['peak_'], label='peak')
+ plt.legend()
+ plt.show()
+
+
+If you're interested in seeing the locations of the knots, you might do
+
+.. jupyter-execute::
+ :hide-output:
+
+ knot_yvals = np.array([o.value for o in out.params.values() if o.name.startswith('bkg')])
+    plt.plot(knot_xvals, knot_yvals, 'o', color='black', label='spline knot values')
+
+which will be shown as
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, label='data')
+ plt.plot(x, out.best_fit, label='best fit')
+ plt.plot(x, comps['bkg_'], label='background')
+ plt.plot(x, comps['peak_'], label='peak')
+ knot_yvals = np.array([o.value for o in out.params.values() if o.name.startswith('bkg')])
+    plt.plot(knot_xvals, knot_yvals, 'o', color='black', label='spline knot values')
+
+ plt.legend()
+ plt.show()
+
+
+You might be interested in assessing what impact the selection of the
+knots has on the resulting peak intensity. For example, you might try some
+of the following sets of knot values:
+
+.. jupyter-execute::
+ :hide-output:
+
+ knot_xvals1 = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25])
+ knot_xvals2 = np.array([1, 3, 5, 7, 9, 11, 13, 16, 19, 21, 23, 25])
+ knot_xvals3 = np.array([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25])
+
+
+and re-run the fit with these different sets of knot points. The results
+are shown in the table below.
+
+
+.. _models_spline_results-table:
+
+ Table of Peak amplitudes with varying spline points
+
+ +-------------------+------+----------------------------------------+
+ | spline x points | N | Peak amplitude value and uncertainty |
+ +===================+======+========================================+
+ | knot_xvals1 | 11 | 12.223 (0.295) |
+ +-------------------+------+----------------------------------------+
+ | knot_xvals2 | 12 | 11.746 (0.594) |
+ +-------------------+------+----------------------------------------+
+ | knot_xvals3 | 13 | 12.052 (0.872) |
+ +-------------------+------+----------------------------------------+
+
+Adding more spline points, especially near the peak center around ``x=16.4``,
+can impact the measurement of the amplitude, but the uncertainty increases
+dramatically enough to mostly cover the same range of values. This is an
+interesting case of adding more parameters to a fit and having the
+uncertainties in the fitted parameters get worse. The interested reader is
+encouraged to explore the fit reports and plot these different cases.
+
+
+Finally, the basic case above used 11 spline points to fit the baseline.
+In fact, it would be reasonable to ask whether that is enough parameters
+to fit the full spectrum. Imposing that there is also a Gaussian peak
+nearby makes the spline fit only the background, but without the Gaussian,
+the spline could fit the full curve. By way of example, we'll just try
+increasing the number of spline points to fit this data:
+
+.. jupyter-execute::
+ :hide-output:
+
+ plt.plot(x, y, 'o', label='data')
+ for nknots in (10, 15, 20, 25):
+ model = SplineModel(prefix='bkg_', xknots=np.linspace(0, 25, nknots))
+ params = model.guess(y, x)
+ out = model.fit(y, params, x=x)
+ plt.plot(x, out.best_fit, label=f'best-fit ({nknots} knots)')
+
+ plt.legend()
+ plt.show()
+
+
+
+which will show the fit below:
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, 'o', label='data')
+ for nknots in (10, 15, 20, 25):
+ model = SplineModel(prefix='bkg_', xknots=np.linspace(0, 25, nknots))
+ params = model.guess(y, x)
+ out = model.fit(y, params, x=x)
+ plt.plot(x, out.best_fit, label=f'best-fit ({nknots} knots)')
+
+ plt.legend()
+ plt.show()
+
+
+By itself, 10 knots does not give a very good fit, but 25 knots or more
+does give a very good fit to the peak. This should give some confidence
+that the fit with 11 parameters for the background spline is acceptable,
+but also give some reason to be careful in selecting the number of spline
+points to use.
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..93969c6
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,180 @@
+# lmfit documentation build configuration file
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+from datetime import date
+import os
+import sys
+import warnings
+
+import lmfit
+
+# -------------------------- General configuration --------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.append(os.path.abspath(os.path.join('.')))
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+
+# we want to swap sphinx.ext.mathjax and sphinx.ext.pngmath depending on the
+# type of documentation we build
+from extensions import extensions
+
+# sphinx.ext.napoleon settings
+napoleon_google_docstring = False
+
+# sphinx.ext.autodoc settings
+autoclass_content = 'both'
+
+# sphinx.ext.intersphinx settings
+intersphinx_mapping = {'py': ('https://docs.python.org/3', None),
+ 'numpy': ('https://numpy.org/doc/stable/', None),
+                       'scipy': ('https://docs.scipy.org/doc/scipy/', None),
+ 'matplotlib': ('https://matplotlib.org/stable/', None),
+ 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
+ 'sympy': ('https://docs.sympy.org/latest/', None),
+ }
+
+# sphinx.ext.extlinks settings
+extlinks = {
+ 'scipydoc': ('https://docs.scipy.org/doc/scipy/reference/generated/scipy.%s.html', 'scipy.%s'),
+    'numpydoc': ('https://numpy.org/doc/stable/reference/generated/numpy.%s.html', 'numpy.%s'),
+ }
+
+# sphinx.ext.imgmath settings
+imgmath_image_format = 'svg'
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = {'.rst': 'restructuredtext'}
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'lmfit'
+copyright = f'{date.today().year}, Matthew Newville, Till Stensitzki, Renee Otten, and others'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+version = release = lmfit.__version__.split('.post')[0]
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = False
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['sphinx/theme']
+html_theme = 'sphinx13'
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+html_short_title = 'Minimization and Curve-Fitting for Python'
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
+
+html_domain_indices = False
+html_use_index = True
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'lmfitdoc'
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'lmfit.tex',
+ 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
+ 'Matthew Newville, Till Stensitzki, Renee Otten, and others', 'manual'),
+]
+
+# configuration for jupyter_sphinx
+package_path = os.path.abspath('../..')
+os.environ['PYTHONPATH'] = ':'.join((package_path, os.environ.get('PYTHONPATH', '')))
+
+# Sphinx-gallery configuration
+sphinx_gallery_conf = {
+ 'examples_dirs': '../examples',
+ 'gallery_dirs': 'examples',
+ 'filename_pattern': r'(\\|/)documentation|(\\|/)example_',
+ 'ignore_pattern': r'(\\|/)doc_',
+ 'ignore_repr_types': r'matplotlib',
+ 'image_srcset': ["3x"],
+}
+
+# remove certain RuntimeWarnings from examples
+warnings.filterwarnings("ignore", category=RuntimeWarning,
+ message="overflow encountered")
+
+# Suppress "WARNING: unknown mimetype for _static/empty"
+suppress_warnings = ['epub.unknown_project_files']
diff --git a/doc/confidence.rst b/doc/confidence.rst
new file mode 100644
index 0000000..abb57c9
--- /dev/null
+++ b/doc/confidence.rst
@@ -0,0 +1,406 @@
+.. _confidence_chapter:
+
+Calculation of confidence intervals
+===================================
+
+.. module:: lmfit.confidence
+
+The lmfit :mod:`confidence` module allows you to explicitly calculate
+confidence intervals for variable parameters. For most models, it is not
+necessary since the estimation of the standard error from the estimated
+covariance matrix is normally quite good.
+
+But for some models, the sum of two exponentials for example, the approximation
+begins to fail. For this case, lmfit has the function :func:`conf_interval`
+to calculate confidence intervals directly. This is substantially slower
+than using the errors estimated from the covariance matrix, but the results
+are more robust.
+
+
+Method used for calculating confidence intervals
+------------------------------------------------
+
+The F-test is used to compare our null model, which is the best fit we have
+found, with an alternate model, where one of the parameters is fixed to a
+specific value. The value is changed until the difference between :math:`\chi^2_0`
+and :math:`\chi^2_{f}` can't be explained by the loss of a degree of freedom
+within a certain confidence.
+
+.. math::
+
+ F(P_{fix},N-P) = \left(\frac{\chi^2_f}{\chi^2_{0}}-1\right)\frac{N-P}{P_{fix}}
+
+``N`` is the number of data points and ``P`` the number of parameters of the null model.
+:math:`P_{fix}` is the number of fixed parameters (or, more precisely, the
+difference in the number of parameters between our null model and the
+alternate model).
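+
+This is the comparison lmfit performs internally; a hand-rolled sketch of
+the resulting confidence level, with made-up numbers for the two chi-square
+values, might look like:
+
+.. jupyter-execute::
+
+    from scipy.stats import f
+
+    nfree, nfix = 98, 1            # N - P, and number of fixed parameters
+    chi2_0, chi2_f = 1.20, 1.45    # hypothetical chi-square values
+
+    fval = (chi2_f/chi2_0 - 1) * nfree/nfix
+    print(f.cdf(fval, nfix, nfree))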
+
+Adding a log-likelihood method is under consideration.
+
+A basic example
+---------------
+
+First we create an example problem:
+
+.. jupyter-execute::
+
+ import numpy as np
+
+ import lmfit
+
+ x = np.linspace(0.3, 10, 100)
+ np.random.seed(0)
+ y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)
+ pars = lmfit.Parameters()
+ pars.add_many(('a', 0.1), ('b', 1))
+
+
+ def residual(p):
+ return 1/(p['a']*x) + p['b'] - y
+
+
+Before we can generate the confidence intervals, we have to run a fit, so
+that the automated estimate of the standard errors can be used as a
+starting point:
+
+.. jupyter-execute::
+
+ mini = lmfit.Minimizer(residual, pars)
+ result = mini.minimize()
+
+ print(lmfit.fit_report(result.params))
+
+Now it is just a simple function call to calculate the confidence
+intervals:
+
+.. jupyter-execute::
+
+ ci = lmfit.conf_interval(mini, result)
+ lmfit.printfuncs.report_ci(ci)
+
+This shows the best-fit values for the parameters in the ``_BEST_`` column,
+and parameter values that are at the varying confidence levels given by
+steps in :math:`\sigma`. As we can see, the estimated error is almost the
+same, and the uncertainties are well behaved: Going from 1-:math:`\sigma`
+(68% confidence) to 3-:math:`\sigma` (99.7% confidence) uncertainties is
+fairly linear. It can also be seen that the errors are fairly symmetric
+around the best fit value. For this problem, it is not necessary to
+calculate confidence intervals, and the estimates of the uncertainties from
+the covariance matrix are sufficient.
+
+Working without standard error estimates
+----------------------------------------
+
+Sometimes the estimation of the standard errors from the covariance
+matrix fails, especially if values are near given bounds. Hence, to
+find the confidence intervals in these cases, it is necessary to set
+the errors by hand. Note that the standard error is only used to find an
+upper limit for each value, hence the exact value is not important.
+
+To set the step-size to 10% of the initial value we loop through all
+parameters and set it manually:
+
+.. jupyter-execute::
+
+ for p in result.params:
+ result.params[p].stderr = abs(result.params[p].value * 0.1)
+
+
+.. _label-confidence-chi2_maps:
+
+Calculating and visualizing maps of :math:`\chi^2`
+--------------------------------------------------
+
+The estimated values for the :math:`1-\sigma` standard error calculated by
+default for each fit include the effects of correlation between pairs of
+variables, but assumes the uncertainties are symmetric. While it doesn't
+exactly say what the values of the :math:`n-\sigma` uncertainties would be, the
+implication is that the :math:`n-\sigma` error is simply :math:`n\sigma`.
+
+The :func:`conf_interval` function described above improves on these
+automatically (and quickly) calculated uncertainties by explicitly finding
+:math:`n-\sigma` confidence levels in both directions -- it does not assume
+that the uncertainties are symmetric. This function also takes into account the
+correlations between pairs of variables, but it does not convey this
+information very well.
+
+For even further exploration of the confidence levels of parameter values, it
+can be useful to calculate maps of :math:`\chi^2` values for pairs of
+variables around their best fit values and visualize these as contour plots.
+Typically, pairs of variables will have elliptical contours of constant
+:math:`n-\sigma` level, with highly-correlated pairs of variables having high
+ratios of major to minor axes.
+
+The :func:`conf_interval2d` function can calculate 2-d arrays or maps of either
+probability or :math:`\delta \chi^2 = \chi^2 - \chi_{\mathrm{best}}^2` for any
+pair of variables. Visualizing these can help better understand the nature of
+the uncertainties and correlations between parameters. To illustrate this,
+we'll start with an example fit to data to which we deliberately add
+components not accounted for in the model, and slightly non-Gaussian noise -- a
+constructed but "real-world" example:
+
+.. jupyter-execute::
+
+ # <examples/doc_confidence_chi2_maps.py>
+ import matplotlib.pyplot as plt
+ import numpy as np
+
+ from lmfit import conf_interval, conf_interval2d, report_ci
+ from lmfit.lineshapes import gaussian
+ from lmfit.models import GaussianModel, LinearModel
+
+ sigma_levels = [1, 2, 3]
+
+ rng = np.random.default_rng(seed=102)
+
+ #########################
+ # set up data -- deliberately adding imperfections and
+ # a small amount of non-Gaussian noise
+ npts = 501
+ x = np.linspace(1, 100, num=npts)
+ noise = rng.normal(scale=0.3, size=npts) + 0.2*rng.f(3, 9, size=npts)
+ y = (gaussian(x, amplitude=83, center=47., sigma=5.)
+ + 0.02*x + 4 + 0.25*np.cos((x-20)/8.0) + noise)
+
+ mod = GaussianModel() + LinearModel()
+ params = mod.make_params(amplitude=100, center=50, sigma=5,
+                             slope=0, intercept=2)
+ out = mod.fit(y, params, x=x)
+ print(out.fit_report())
+
+ #########################
+    # run conf_interval, print report
+ ci = conf_interval(out, out, sigmas=sigma_levels)
+
+ print("## Confidence Report:")
+ report_ci(ci)
+
+The reports show that we obtained a pretty good fit, and that the automated
+estimates of the uncertainties are actually pretty good -- agreeing to the
+second decimal place. But we also see that some of the uncertainties do become
+noticeably asymmetric at high :math:`n-\sigma` levels.
+
+We'll plot this data and fit, and then further explore these uncertainties
+using :func:`conf_interval2d`:
+
+.. jupyter-execute::
+
+ #########################
+ # plot initial fit
+ colors = ('#2030b0', '#b02030', '#207070')
+ fig, axes = plt.subplots(2, 3, figsize=(15, 9.5))
+
+ axes[0, 0].plot(x, y, 'o', markersize=3, label='data', color=colors[0])
+ axes[0, 0].plot(x, out.best_fit, label='fit', color=colors[1])
+ axes[0, 0].set_xlabel('x')
+ axes[0, 0].set_ylabel('y')
+ axes[0, 0].legend()
+
+ aix, aiy = 0, 0
+ nsamples = 30
+ for pairs in (('sigma', 'amplitude'), ('intercept', 'amplitude'),
+ ('slope', 'intercept'), ('slope', 'center'), ('sigma', 'center')):
+ xpar, ypar = pairs
+ print("Generating chi-square map for ", pairs)
+ c_x, c_y, dchi2_mat = conf_interval2d(out, out, xpar, ypar,
+ nsamples, nsamples,
+ nsigma=3.5, chi2_out=True)
+ # sigma matrix: sigma increases chi_square
+ # from chi_square_best
+ # to chi_square + sigma**2 * reduced_chi_square
+ # so: sigma = sqrt(dchi2 / reduced_chi_square)
+ sigma_mat = np.sqrt(abs(dchi2_mat)/out.redchi)
+
+ # you could calculate the matrix of probabilities from sigma as:
+        # prob_mat = scipy.special.erf(sigma_mat/np.sqrt(2))
+
+ aix += 1
+ if aix == 2:
+ aix = 0
+ aiy += 1
+ ax = axes[aix, aiy]
+
+ cnt = ax.contour(c_x, c_y, sigma_mat, levels=sigma_levels, colors=colors,
+ linestyles='-')
+        ax.clabel(cnt, inline=True, fmt=r"$\sigma=%.0f$", fontsize=13)
+
+        # draw boxes for estimated uncertainties:
+ # dotted : scaled stderr from initial fit
+ # dashed : values found from conf_interval()
+ xv = out.params[xpar].value
+ xs = out.params[xpar].stderr
+ yv = out.params[ypar].value
+ ys = out.params[ypar].stderr
+
+ cix = ci[xpar]
+ ciy = ci[ypar]
+ nc = len(sigma_levels)
+ for i in sigma_levels:
+ # dotted line: scaled stderr
+ ax.plot((xv-i*xs, xv+i*xs, xv+i*xs, xv-i*xs, xv-i*xs),
+ (yv-i*ys, yv-i*ys, yv+i*ys, yv+i*ys, yv-i*ys),
+ linestyle='dotted', color=colors[i-1])
+
+ # dashed line: refined uncertainties from conf_interval
+ xsp, xsm = cix[nc+i][1], cix[nc-i][1]
+ ysp, ysm = ciy[nc+i][1], ciy[nc-i][1]
+ ax.plot((xsm, xsp, xsp, xsm, xsm), (ysm, ysm, ysp, ysp, ysm),
+ linestyle='dashed', color=colors[i-1])
+
+ ax.set_xlabel(xpar)
+ ax.set_ylabel(ypar)
+ ax.grid(True, color='#d0d0d0')
+ plt.show()
+ # <end examples/doc_confidence_chi2_maps.py>
+
+Here we made contours for the :math:`n-\sigma` levels from the 2-D array of
+:math:`\chi^2` by noting that the :math:`n-\sigma` level will have
+:math:`\chi^2` increased by :math:`n^2\chi_\nu^2` where :math:`\chi_\nu^2` is
+reduced chi-square.
+
+The dotted boxes show both the scaled values of the standard errors from the
+initial fit, and the dashed boxes show the confidence levels from
+:meth:`conf_interval`. You can see that the notion of increasing
+:math:`\chi^2` by :math:`\chi_\nu^2` works very well, and that there is a small
+asymmetry in the uncertainties for the ``amplitude`` and ``sigma`` parameters.
+
+
+.. _label-confidence-advanced:
+
+An advanced example for evaluating confidence intervals
+-------------------------------------------------------
+
+Now we look at a problem where calculating the error from the approximated
+covariance can lead to a misleading result -- the same double exponential
+problem shown in :ref:`label-emcee`. In fact, such a problem is particularly
+hard for the Levenberg-Marquardt method, so we first estimate the results
+using the slower but robust Nelder-Mead method. We can then compare the
+uncertainties computed (if the ``numdifftools`` package is installed) with
+those estimated using Levenberg-Marquardt around the previously found
+solution. We can also compare to the results of using ``emcee``.
+
+
+.. jupyter-execute::
+ :hide-code:
+
+ import warnings
+ warnings.filterwarnings(action="ignore")
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+ mpl.rcParams['figure.dpi'] = 150
+ %matplotlib inline
+ %config InlineBackend.figure_format = 'svg'
+
+.. jupyter-execute:: ../examples/doc_confidence_advanced.py
+ :hide-output:
+
+which will report:
+
+.. jupyter-execute::
+ :hide-code:
+
+ lmfit.report_fit(out2.params, min_correl=0.5)
+ print('')
+ lmfit.printfuncs.report_ci(ci)
+
+Again we called :func:`conf_interval`, this time with tracing and only for
+1- and 2-:math:`\sigma`. Comparing these two different estimates, we see
+that the estimate for ``a1`` is reasonably well approximated from the
+covariance matrix, but the estimates for ``a2``, and especially for ``t1``
+and ``t2``, are very asymmetric and that going from 1 :math:`\sigma` (68%
+confidence) to 2 :math:`\sigma` (95% confidence) is not very predictable.
+
+Plots of the confidence region are shown in the figures below for ``a1`` and
+``t2`` (left), and ``a2`` and ``t2`` (right):
+
+.. jupyter-execute::
+ :hide-code:
+
+ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+ cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a1', 't2', 30, 30)
+ ctp = axes[0].contourf(cx, cy, grid, np.linspace(0, 1, 11))
+ fig.colorbar(ctp, ax=axes[0])
+ axes[0].set_xlabel('a1')
+ axes[0].set_ylabel('t2')
+
+ cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a2', 't2', 30, 30)
+ ctp = axes[1].contourf(cx, cy, grid, np.linspace(0, 1, 11))
+ fig.colorbar(ctp, ax=axes[1])
+ axes[1].set_xlabel('a2')
+ axes[1].set_ylabel('t2')
+
+ plt.show()
+
+Neither of these plots is very much like an ellipse, which is implicitly
+assumed by the approach using the covariance matrix. The plots actually
+look quite a bit like those found with MCMC and shown in the "corner plot"
+in :ref:`label-emcee`. In fact, comparing the confidence interval results
+here with the results for the 1- and 2-:math:`\sigma` error estimated with
+``emcee``, we can see that the agreement is pretty good and that the
+asymmetry in the parameter distributions is reflected well in the
+asymmetry of the uncertainties.
+
+The trace returned as the optional second argument from
+:func:`conf_interval` contains a dictionary for each variable parameter.
+The values are dictionaries with arrays of values for each variable, and an
+array of the corresponding probabilities. This can be used to show the
+dependence between two
+parameters:
+
+.. jupyter-execute::
+ :hide-output:
+
+ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+ cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'], trace['a1']['prob']
+ cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'], trace['t2']['prob']
+
+ axes[0].scatter(cx1, cy1, c=prob, s=30)
+ axes[0].set_xlabel('a1')
+ axes[0].set_ylabel('t2')
+
+ axes[1].scatter(cx2, cy2, c=prob2, s=30)
+ axes[1].set_xlabel('t2')
+ axes[1].set_ylabel('a1')
+
+ plt.show()
+
+which shows the trace of values:
+
+.. jupyter-execute::
+ :hide-code:
+
+ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+ cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'], trace['a1']['prob']
+ cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'], trace['t2']['prob']
+ axes[0].scatter(cx1, cy1, c=prob, s=30)
+ axes[0].set_xlabel('a1')
+ axes[0].set_ylabel('t2')
+ axes[1].scatter(cx2, cy2, c=prob2, s=30)
+ axes[1].set_xlabel('t2')
+ axes[1].set_ylabel('a1')
+ plt.show()
+
+As an alternative/complement to the confidence intervals, the :meth:`Minimizer.emcee`
+method uses Markov Chain Monte Carlo to sample the posterior probability distribution.
+These distributions demonstrate the range of solutions that the data supports and we
+refer to :ref:`label-emcee` where this methodology was used on the same problem.
+
+Credible intervals (the Bayesian equivalent of the frequentist confidence
+interval) can be obtained with this method. MCMC can be used for model
+selection, to determine outliers, to marginalize over nuisance parameters, etcetera.
+For example, you may have fractionally underestimated the uncertainties on a
+dataset. MCMC can be used to estimate the true level of uncertainty on each
+data point. A tutorial on the possibilities offered by MCMC can be found at [1]_.
+
+.. [1] https://jakevdp.github.io/blog/2014/03/11/frequentism-and-bayesianism-a-practical-intro/
+
+
+Confidence Interval Functions
+-----------------------------
+
+.. autofunction:: lmfit.conf_interval
+
+.. autofunction:: lmfit.conf_interval2d
+
+.. autofunction:: lmfit.ci_report
diff --git a/doc/constraints.rst b/doc/constraints.rst
new file mode 100644
index 0000000..45e838f
--- /dev/null
+++ b/doc/constraints.rst
@@ -0,0 +1,216 @@
+.. _constraints_chapter:
+
+==============================
+Using Mathematical Constraints
+==============================
+
+.. _asteval: https://newville.github.io/asteval/
+
+Being able to fix variables to a constant value or place upper and lower
+bounds on their values can greatly simplify modeling real data. These
+capabilities are key to lmfit's Parameters. In addition, it is sometimes
+highly desirable to place mathematical constraints on parameter values.
+For example, one might want to require that two Gaussian peaks have the
+same width, or have amplitudes that are constrained to add to some value.
+Of course, one could rewrite the objective or model function to place such
+requirements, but this is somewhat error-prone, and limits the flexibility
+so that exploring constraints becomes laborious.
+
+To simplify the setting of constraints, Parameters can be assigned a
+mathematical expression of other Parameters, builtin constants, and builtin
+mathematical functions that will be used to determine its value. The
+expressions used for constraints are evaluated using the `asteval`_ module,
+which uses Python syntax, and evaluates the constraint expressions in a safe
+and isolated namespace.
+
+This approach to mathematical constraints allows one to not have to write a
+separate model function for two Gaussians where the two ``sigma`` values are
+forced to be equal, or where amplitudes are related. Instead, one can write a
+more general two Gaussian model (perhaps using :class:`GaussianModel`) and
+impose such constraints on the Parameters for a particular fit.
+
+
+Overview
+========
+
+Just as one can place bounds on a Parameter, or keep it fixed during the
+fit, so too can one place mathematical constraints on parameters. The way
+this is done with lmfit is to write a Parameter as a mathematical
+expression of the other parameters and a set of pre-defined operators and
+functions. The constraint expressions are simple Python statements,
+allowing one to place constraints like:
+
+.. jupyter-execute::
+
+ from lmfit import Parameters
+
+ pars = Parameters()
+ pars.add('frac_curve1', value=0.5, min=0, max=1)
+ pars.add('frac_curve2', expr='1-frac_curve1')
+
+as the value of the ``frac_curve1`` parameter is updated at each step in the
+fit, the value of ``frac_curve2`` will be updated so that the two values are
+constrained to add to 1.0. Of course, such a constraint could be placed in
+the fitting function, but the use of such constraints allows the end-user
+to modify the model of a more general-purpose fitting function.
+
+Nearly any valid mathematical expression can be used, and a variety of
+built-in functions are available for flexible modeling.
+
+Supported Operators, Functions, and Constants
+=============================================
+
+The mathematical expressions used to define constrained Parameters need to
+be valid Python expressions. As you would expect, the operators ``+``, ``-``,
+``*``, ``/``, and ``**``, are supported. In fact, a much more complete set can
+be used, including Python's bit- and logical operators::
+
+ +, -, *, /, **, &, |, ^, <<, >>, %, and, or,
+ ==, >, >=, <, <=, !=, ~, not, is, is not, in, not in
+
+
+The values for ``e`` (2.7182818...) and ``pi`` (3.1415926...) are available,
+as are several supported mathematical and trigonometric functions::
+
+ abs, acos, acosh, asin, asinh, atan, atan2, atanh, ceil,
+ copysign, cos, cosh, degrees, exp, fabs, factorial,
+ floor, fmod, frexp, fsum, hypot, isinf, isnan, ldexp,
+ log, log10, log1p, max, min, modf, pow, radians, sin,
+ sinh, sqrt, tan, tanh, trunc
+
+
+In addition, all Parameter names will be available in the mathematical
+expressions. Thus, with parameters for a few peak-like functions:
+
+.. jupyter-execute::
+
+ pars = Parameters()
+ pars.add('amp_1', value=0.5, min=0, max=1)
+ pars.add('cen_1', value=2.2)
+ pars.add('wid_1', value=0.2)
+
+The following expressions are all valid:
+
+.. jupyter-execute::
+
+ pars.add('amp_2', expr='(2.0 - amp_1**2)')
+ pars.add('wid_2', expr='sqrt(pi)*wid_1')
+ pars.add('cen_2', expr='cen_1 * wid_2 / max(wid_1, 0.001)')
+
+In fact, almost any valid Python expression is allowed. A notable example
+is that Python's 1-line *if expression* is supported:
+
+.. jupyter-execute::
+
+ pars.add('param_a', value=1)
+ pars.add('param_b', value=2)
+ pars.add('test_val', value=100)
+
+ pars.add('bounded', expr='param_a if test_val/2. > 100 else param_b')
+
+which is equivalent to the more familiar:
+
+.. jupyter-execute::
+
+ if pars['test_val'].value/2. > 100:
+ bounded = pars['param_a'].value
+ else:
+ bounded = pars['param_b'].value
+
+Using Inequality Constraints
+============================
+
+A rather common question is how to set up constraints
+that use an inequality, say, :math:`x + y \le 10`. This
+can be done with algebraic constraints by recasting the
+problem, as :math:`x + y = \delta` and :math:`\delta \le
+10`. That is, first, allow :math:`x` to be held by the
+freely varying parameter ``x``. Next, define a parameter
+``delta`` to be variable with a maximum value of 10, and
+define parameter ``y`` as ``delta - x``:
+
+.. jupyter-execute::
+
+ pars = Parameters()
+ pars.add('x', value=5, vary=True)
+ pars.add('delta', value=5, max=10, vary=True)
+ pars.add('y', expr='delta-x')
+
+The essential point is that an inequality still implies
+that a variable (here, ``delta``) is needed to describe the
+constraint. The secondary point is that upper and lower
+bounds can be used as part of the inequality to make the
+definitions more convenient.
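+
+As a quick check (a sketch using the parameters just defined), the sum
+always equals ``delta``, and so can never exceed its upper bound:
+
+.. jupyter-execute::
+
+    pars['x'].value = 8
+    print(pars['x'].value + pars['y'].value)   # equals delta, which is <= 10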
+
+
+Advanced usage of Expressions in lmfit
+======================================
+
+The expression used in a constraint is converted to a
+Python `Abstract Syntax Tree
+<https://docs.python.org/library/ast.html>`_, which is an
+intermediate version of the expression -- a syntax-checked,
+partially compiled expression. Among other things, this
+means that Python's own parser is used to parse and convert
+the expression into something that can easily be evaluated
+within Python. It also means that the symbols in the
+expressions can point to any Python object.
+
+In fact, the use of Python's AST allows a nearly full version of Python to
+be supported, without using Python's built-in :meth:`eval` function. The
+`asteval`_ module actually supports most Python syntax, including for- and
+while-loops, conditional expressions, and user-defined functions. There
+are several unsupported Python constructs, most notably the class
+statement, so that new classes cannot be created, and the import statement,
+which helps make the `asteval`_ module safe from malicious use.
+
+One important feature of the `asteval`_ module is that you can add
+domain-specific functions into it, for later use in constraint
+expressions. To do this, you would use the :attr:`_asteval` attribute of
+the :class:`Parameters` class, which contains a complete AST interpreter.
+The `asteval`_ interpreter uses a flat namespace, implemented as a single
+dictionary. That means you can preload any Python symbol into the namespace
+for the constraints, for example this Lorentzian function:
+
+.. jupyter-execute::
+
+ def mylorentzian(x, amp, cen, wid):
+ "lorentzian function: wid = half-width at half-max"
+ return (amp / (1 + ((x-cen) / wid)**2))
+
+
+You can add this user-defined function to the `asteval`_ interpreter of
+the :class:`Parameters` class:
+
+.. jupyter-execute::
+
+ from lmfit import Parameters
+
+ pars = Parameters()
+ pars._asteval.symtable['lorentzian'] = mylorentzian
+
+and then initialize the :class:`Minimizer` class with this parameter set:
+
+.. jupyter-execute::
+
+ from lmfit import Minimizer
+
+
+ def userfcn(x, params):
+ pass
+
+
+ fitter = Minimizer(userfcn, pars)
+
+Alternatively, one can first initialize the :class:`Minimizer` class and
+add the function to the `asteval`_ interpreter of :attr:`Minimizer.params`
+afterwards:
+
+.. jupyter-execute::
+
+ pars = Parameters()
+ fitter = Minimizer(userfcn, pars)
+ fitter.params._asteval.symtable['lorentzian'] = mylorentzian
+
+In both cases the user-defined :meth:`lorentzian` function can now be
+used in constraint expressions.
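+
+For instance (a minimal self-contained sketch), a parameter can now be tied
+to the value of that function at a chosen point:
+
+.. jupyter-execute::
+
+    pars = Parameters()
+    pars._asteval.symtable['lorentzian'] = mylorentzian
+    pars.add('amp', value=10, vary=True)
+    pars.add('height_at_zero', expr='lorentzian(0, amp, 0.2, 0.5)')
+    print(pars['height_at_zero'].value)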
diff --git a/doc/contents.rst b/doc/contents.rst
new file mode 100644
index 0000000..1c244b4
--- /dev/null
+++ b/doc/contents.rst
@@ -0,0 +1,21 @@
+:orphan:
+
+Contents
+========
+
+.. toctree::
+ :maxdepth: 3
+
+ intro
+ installation
+ whatsnew
+ support
+ faq
+ parameters
+ fitting
+ model
+ builtin_models
+ confidence
+ bounds
+ constraints
+ examples/index
diff --git a/doc/doc_examples_to_gallery.py b/doc/doc_examples_to_gallery.py
new file mode 100755
index 0000000..4cfeb5b
--- /dev/null
+++ b/doc/doc_examples_to_gallery.py
@@ -0,0 +1,71 @@
+#! /usr/bin/env python
+
+"""
+Process the examples in the documentation for inclusion in the Gallery:
+
+- create a "documentation" directory within "examples"
+- add a README.txt file
+- copy the examples from the documentation, but remove the "doc_" from the
+ filename
+- add the required docstring to the files for proper rendering
+- copy the data files
+
+"""
+
+import os
+from pathlib import Path
+import shlex
+from shutil import copy2
+import subprocess
+
+
+def copy_data_files(src_dir, dest_dir):
+ """Copy files with datafile extension from src_dir to dest_dir."""
+ data_file_extension = [".dat", ".csv", ".sav"]
+
+ for file in src_dir.glob("*"):
+ if file.suffix in data_file_extension:
+ copy2(file, dest_dir)
+
+
+doc_dir = Path(__file__).parent.absolute()
+
+examples_dir = doc_dir.parent / "examples"
+files = examples_dir.glob("doc[_]*.py")
+
+examples_documentation_dir = examples_dir / "documentation"
+examples_documentation_dir.mkdir(exist_ok=True)
+
+
+scripts_to_run = []
+
+(examples_documentation_dir / "README.txt").write_text(
+ "Examples from the documentation\n"
+ "===============================\n\n"
+ "Below are all the examples that are part of the lmfit documentation."
+)
+
+for fn in files:
+
+ script_text = fn.read_text()
+
+ gallery_file = examples_documentation_dir / fn.name[4:]
+ msg = "" # add optional message f
+ gallery_file.write_text(f'"""\n{fn.name}\n{"=" * len(fn.name)}\n\n'
+ f'{msg}\n"""\n{script_text}')
+
+ # make sure the saved Models and ModelResult are available
+ if "save" in fn.name:
+ scripts_to_run.append(gallery_file)
+
+copy_data_files(examples_dir, examples_documentation_dir)
+
+os.chdir(examples_documentation_dir)
+
+for script in scripts_to_run:
+ subprocess.run(shlex.split(f"python {script.as_posix()}"), check=True)
+
+os.chdir(doc_dir)
+
+# data files for the other Gallery examples
+copy_data_files(examples_documentation_dir, doc_dir)
diff --git a/doc/faq.rst b/doc/faq.rst
new file mode 100644
index 0000000..6b92543
--- /dev/null
+++ b/doc/faq.rst
@@ -0,0 +1,325 @@
+.. _faq_chapter:
+
+==========================
+Frequently Asked Questions
+==========================
+
+A list of common questions.
+
+What's the best way to ask for help or submit a bug report?
+===========================================================
+
+See :ref:`support_chapter`.
+
+
+Why did my script break when upgrading from lmfit 0.8.3 to 0.9.0?
+=================================================================
+
+See :ref:`whatsnew_090_label`.
+
+
+I get import errors from IPython
+================================
+
+If you see something like::
+
+ from IPython.html.widgets import Dropdown
+
+ ImportError: No module named 'widgets'
+
+then you need to install the ``ipywidgets`` package, try: ``pip install ipywidgets``.
+
+
+How can I fit multi-dimensional data?
+=====================================
+
+The fitting routines accept data arrays that are one-dimensional and double
+precision. So you need to convert the data and model (or the value
+returned by the objective function) to be one-dimensional. A simple way to
+do this is to use :numpydoc:`ndarray.flatten`, for example::
+
+ def residual(params, x, data=None):
+ ...
+ resid = calculate_multidim_residual()
+ return resid.flatten()
+
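+As a more complete (hypothetical) sketch, a residual for fitting a plane
+``z = a*x + b*y + c`` to gridded two-dimensional data might look like::
+
+    import numpy as np
+
+
+    def residual_2d(params, x, y, data):
+        # x, y, and data are 2-D arrays on the same grid
+        model = params['a']*x + params['b']*y + params['c']
+        # flatten to the 1-D, double-precision array the solvers expect
+        return (data - model).flatten()
+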
+
+How can I fit multiple data sets?
+=================================
+
+As above, the fitting routines accept data arrays that are one-dimensional
+and double precision. So you need to convert the sets of data and models
+(or the value returned by the objective function) to be one-dimensional. A
+simple way to do this is to use :numpydoc:`concatenate`. As an
+example, here is a residual function to simultaneously fit two lines to two
+different arrays. As a bonus, the two lines share the 'offset' parameter::
+
+ import numpy as np
+
+
+ def fit_function(params, x=None, dat1=None, dat2=None):
+ model1 = params['offset'] + x * params['slope1']
+ model2 = params['offset'] + x * params['slope2']
+
+ resid1 = dat1 - model1
+ resid2 = dat2 - model2
+ return np.concatenate((resid1, resid2))
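+
+A schematic call for such a simultaneous fit (with hypothetical data arrays
+``dat1`` and ``dat2``) would then be::
+
+    out = lmfit.minimize(fit_function, params,
+                         kws={'x': x, 'dat1': dat1, 'dat2': dat2})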
+
+
+How can I fit complex data?
+===========================
+
+As with working with multi-dimensional data, you need to convert your data
+and model (or the value returned by the objective function) to be double
+precision, floating point numbers. The simplest approach is to use
+:numpydoc:`ndarray.view`, perhaps like::
+
+ import numpy as np
+
+
+ def residual(params, x, data=None):
+ ...
+ resid = calculate_complex_residual()
+ return resid.view(float)
+
+Alternately, you can use the :class:`lmfit.Model` class to wrap a fit function
+that returns a complex vector. It will automatically apply the above
+prescription when calculating the residual. The benefit to this method
+is that you also get access to the plot routines from the ModelResult
+class, which are also complex-aware.
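+
+A minimal sketch of that approach, with an illustrative complex lineshape
+and hypothetical data, might be::
+
+    import lmfit
+
+
+    def complex_resonance(x, amp, cen, gamma):
+        # complex Lorentzian lineshape
+        return amp / (x - cen + 1j*gamma)
+
+    model = lmfit.Model(complex_resonance)
+    params = model.make_params(amp=1, cen=0, gamma=0.5)
+    # result = model.fit(complex_data, params, x=x)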
+
+
+How should I cite LMFIT?
+========================
+
+See https://dx.doi.org/10.5281/zenodo.11813
+
+
+I get errors from NaN in my fit. What can I do?
+================================================
+
+The solvers used by lmfit use NaN (see
+https://en.wikipedia.org/wiki/NaN) values as signals that the calculation
+cannot continue. If any value in the residual array (typically
+``(data-model)*weight``) is NaN, then calculations of chi-square or
+comparisons with other residual arrays to try to find a better fit will also
+give NaN and fail. There is no sensible way for lmfit or any of the
+optimization routines to know how to handle such NaN values. They
+indicate that numerical calculations are not sensible and must stop.
+
+This means that if your objective function (if using ``minimize``) or model
+function (if using ``Model``) generates a NaN, the fit will stop
+immediately. If your objective or model function generates a NaN, you
+really must handle that.
+
+
+``nan_policy``
+~~~~~~~~~~~~~~
+
+If you are using :class:`lmfit.Model` and the NaN values come from your
+data array and are meant to indicate missing values, or if you are using
+:func:`lmfit.minimize` with the same basic intention, then it might be
+possible to get a successful fit in spite of the NaN values. To do this,
+you can add a ``nan_policy='omit'`` argument to :func:`lmfit.minimize`, or
+when creating a :class:`lmfit.Model`, or when running
+:meth:`lmfit.Model.fit`.
+
+In order for this to be effective, the number of NaN values cannot ever
+change during the fit. If the NaN values come from the data and not the
+calculated model, that should be the case.
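+
+For example, any of these (schematic) forms enables the policy, with
+``objective``, ``model_function``, and the data names being illustrative::
+
+    import lmfit
+
+    result = lmfit.minimize(objective, params, args=(x, data), nan_policy='omit')
+
+    model = lmfit.Model(model_function, nan_policy='omit')
+    result = model.fit(data, params, x=x, nan_policy='omit')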
+
+
+Common sources of NaN
+~~~~~~~~~~~~~~~~~~~~~
+
+If you are seeing errors due to NaN values, you will need to figure out
+where they are coming from and eliminate them. It is sometimes difficult
+to tell what causes NaN values. Keep in mind that all values should be
+assumed to be either scalar values or numpy arrays of double precision real
+numbers when fitting. Some of the most likely causes of NaNs are:
+
+ * taking ``sqrt(x)`` or ``log(x)`` where ``x`` is negative.
+
+ * doing ``x**y`` where ``x`` is negative. Since ``y`` is real, there will
+ be a fractional component, and a negative number to a fractional
+ exponent is not a real number.
+
+ * doing ``x/y`` where both ``x`` and ``y`` are 0.
+
+If you use these very common constructs in your objective or model
+function, you should take some caution for what values you are passing
+these functions and operators. Many special functions have similar
+limitations and should also be viewed with some suspicion if NaNs are being
+generated.
+
+A related problem is the generation of Inf (Infinity in floating point),
+which generally comes from ``exp(x)`` where ``x`` has values greater than 700
+or so, so that the resulting value is greater than 1.e308. Inf is only
+slightly better than NaN. It will completely ruin the ability to do the
+fit. However, unlike NaN, it is also usually clear how to handle Inf, as
+you probably won't ever have values greater than 1.e308 and can therefore
+(usually) safely clip the argument passed to ``exp()`` to be smaller than
+about 700.
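+
+A simple guard (a minimal sketch) is to clip the argument before
+exponentiating::
+
+    import numpy as np
+
+
+    def safe_exp(x):
+        # clip to avoid overflow to Inf in double precision
+        return np.exp(np.clip(x, -700, 700))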
+
+
+.. _faq_params_stuck:
+
+Why are Parameter values sometimes stuck at initial values?
+===========================================================
+
+In order for a Parameter to be optimized in a fit, changing its value must
+have an impact on the fit residual (``data-model`` when curve fitting, for
+example). If a fit has not changed one or more of the Parameters, it means
+that changing those Parameters did not change the fit residual.
+
+Normally (that is, unless you specifically provide a function for
+calculating the derivatives, in which case you probably would not be asking
+this question ;)), the fitting process begins by making a very small change
+to each Parameter value to determine which way and how large of a change to
+make for the parameter: This is the derivative or Jacobian (change in
+residual per change in parameter value). By default, the change made for
+each variable Parameter is to multiply its value by (1.0+1.0e-8) or so
+(unless the value is below about 1.e-15, in which case it adds 1.0e-8). If
+that small change does not change the residual, then the value of the
+Parameter will not be updated.
+
+Parameter values that are "way off" are a common reason for Parameters
+being stuck at initial values. As an example, imagine fitting peak-like
+data with an ``x`` range of 0 to 10, a peak centered at 6, and a width of 1 or
+2 or so, as in the example at
+:ref:`sphx_glr_examples_documentation_model_gaussian.py`. A Gaussian
+function with an initial value for the peak center of 5 and an initial
+width of 5 will almost certainly find a good fit. An initial value of the
+peak center of -50 will end up being stuck with a "bad fit" because a small
+change in Parameters will still lead the modeled Gaussian to have no
+intensity over the actual range of the data. You should make sure that
+initial values for Parameters are reasonable enough to actually affect the
+fit. As it turns out in the example linked to above, changing the center
+value to any value between about 0 and 10 (that is, the data range) will
+result in a good fit.
+
+Another common cause for Parameters being stuck at initial values is when
+the initial value is at a boundary value. For this case, too, a small
+change in the initial value for the Parameter will still leave the value at
+the boundary value and not show any real change in the residual.
+
+If you're using bounds, make sure the initial values for the Parameters are
+not at the boundary values.
+
+Finally, one reason for a Parameter not to change is that it is actually
+being used as a discrete value. This is discussed below in :ref:`faq_discrete_params`.
+
+
+.. _faq_params_no_uncertainties:
+
+Why are uncertainties in Parameters sometimes not determined?
+=============================================================
+
+In order for Parameter uncertainties to be estimated, each variable
+Parameter must actually change the fit, and cannot be stuck at an initial
+value or at a boundary value. See :ref:`faq_params_stuck` for why values may
+not change from their initial values.
+
+
+.. _faq_discrete_params:
+
+Can Parameters be used for Array Indices or Discrete Values?
+=============================================================
+
+The short answer is "No": variables in all of the fitting methods used in
+``lmfit`` (and all of those available in ``scipy.optimize``) are treated as
+continuous values, and represented as double precision floating point
+values. As an important example, you cannot have a variable that is
+somehow constrained to be an integer.
+
+Still, it is a rather common question of how to fit data to a model that
+includes a breakpoint, perhaps
+
+ .. math::
+
+ f(x; x_0, a, b, c) =
+ \begin{cases}
+ c & \quad \text{for} \> x < x_0 \\
+ a + bx^2 & \quad \text{for} \> x > x_0
+ \end{cases}
+
+
+This can be implemented with a model function and used to fit data like this:
+
+.. jupyter-execute::
+
+ import numpy as np
+
+ import lmfit
+
+
+ def quad_off(x, x0, a, b, c):
+ model = a + b * x**2
+ model[np.where(x < x0)] = c
+ return model
+
+
+ x0 = 19
+ b = 0.02
+ a = 2.0
+ xdat = np.linspace(0, 100, 101)
+ ydat = a + b * xdat**2
+ ydat[np.where(xdat < x0)] = a + b * x0**2
+ ydat += np.random.normal(scale=0.1, size=xdat.size)
+
+ mod = lmfit.Model(quad_off)
+ pars = mod.make_params(x0=22, a=1, b=1, c=1)
+
+ result = mod.fit(ydat, pars, x=xdat)
+ print(result.fit_report())
+
+This will not result in a very good fit, as the value for ``x0`` cannot be
+found by making a small change in its value. Specifically,
+``model[np.where(x < x0)]`` will give the same result for ``x0=22`` and
+``x0=22.001``, and so that value is not changed during the fit.
+
+There are a couple of ways around this problem. First, you may be able to
+make the fit depend on ``x0`` in a way that is not just discrete. That
+depends on your model function. A second option is to treat the break not as a
+hard break but as a more gentle transition with a sigmoidal function, such
+as an error function. Like the break-point, these will go from 0 to 1, but
+more gently and with some finite value leaking into neighboring points.
+The amount of leakage or width of the step can also be adjusted.
+
+A simple modification of the above to use an error function would
+look like this and give better fit results:
+
+.. jupyter-execute::
+
+ import numpy as np
+ from scipy.special import erf
+
+ import lmfit
+
+
+ def quad_off(x, x0, a, b, c):
+ m1 = a + b * x**2
+ m2 = c * np.ones(len(x))
+ # step up from 0 to 1 at x0: (erf(x-x0)+1)/2
+ # step down from 1 to 0 at x0: (1-erf(x-x0))/2
+ model = m1 * (erf(x-x0)+1)/2 + m2 * (1-erf(x-x0))/2
+ return model
+
+
+ x0 = 19
+ b = 0.02
+ a = 2.0
+ xdat = np.linspace(0, 100, 101)
+ ydat = a + b * xdat**2
+ ydat[np.where(xdat < x0)] = a + b * x0**2
+ ydat += np.random.normal(scale=0.1, size=xdat.size)
+
+ mod = lmfit.Model(quad_off)
+ pars = mod.make_params(x0=22, a=1, b=1, c=1)
+
+ result = mod.fit(ydat, pars, x=xdat)
+ print(result.fit_report())
+
+The natural width of the error function is about 2 ``x`` units, but you can
+adjust this; for example, ``erf((x-x0)*2)`` gives a sharper transition.
diff --git a/doc/fitting.rst b/doc/fitting.rst
new file mode 100644
index 0000000..50a05c8
--- /dev/null
+++ b/doc/fitting.rst
@@ -0,0 +1,742 @@
+.. _minimize_chapter:
+
+.. module:: lmfit.minimizer
+
+=====================================
+Performing Fits and Analyzing Outputs
+=====================================
+
+As shown in the previous chapter, a simple fit can be performed with the
+:func:`minimize` function. For more sophisticated modeling, the
+:class:`Minimizer` class can be used to gain a bit more control, especially
+when using complicated constraints or comparing results from related fits.
+
+
+The :func:`minimize` function
+=============================
+
+The :func:`minimize` function is a wrapper around :class:`Minimizer` for
+running an optimization problem. It takes an objective function (the
+function that calculates the array to be minimized), a :class:`Parameters`
+object, and several optional arguments. See :ref:`fit-func-label` for
+details on writing the objective function.
+
+.. autofunction:: minimize
+
+.. _fit-func-label:
+
+Writing a Fitting Function
+==========================
+
+An important component of a fit is writing a function to be minimized --
+the *objective function*. Since this function will be called by other
+routines, there are fairly stringent requirements for its call signature
+and return value. In principle, your function can be any Python callable,
+but it must look like this:
+
+.. function:: func(params, *args, **kws):
+
+ Calculate objective residual to be minimized from parameters.
+
+ :param params: Parameters.
+ :type params: :class:`~lmfit.parameter.Parameters`
+ :param args: Positional arguments. Must match ``args`` argument to :func:`minimize`.
+ :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`.
+ :return: Residual array (generally ``data-model``) to be minimized in the least-squares sense.
+ :rtype: :numpydoc:`ndarray`. The length of this array cannot change between calls.
+
+
+A common use for the positional and keyword arguments would be to pass in other
+data needed to calculate the residual, including things as the data array,
+dependent variable, uncertainties in the data, and other data structures for the
+model calculation.
+
+The objective function should return the value to be minimized. For the
+Levenberg-Marquardt algorithm from :meth:`leastsq`, this returned value **must** be an
+array, with a length greater than or equal to the number of fitting variables in the
+model. For the other methods, the return value can either be a scalar or an array. If an
+array is returned, the sum of squares of the array will be sent to the underlying fitting
+method, effectively doing a least-squares optimization of the return values.
+
+Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable
+to unpack these to get numerical values at the top of the function. A
+simple way to do this is with :meth:`Parameters.valuesdict`, as shown below:
+
+.. jupyter-execute::
+
+ from numpy import exp, sign, sin, pi
+
+
+ def residual(pars, x, data=None, eps=None):
+ # unpack parameters: extract .value attribute for each parameter
+ parvals = pars.valuesdict()
+ period = parvals['period']
+ shift = parvals['shift']
+ decay = parvals['decay']
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+
+ if abs(period) < 1.e-10:
+ period = sign(period)*1.e-10
+
+ model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
+
+ if data is None:
+ return model
+ if eps is None:
+ return model - data
+ return (model-data) / eps
+
+In this example, ``x`` is a positional (required) argument, while the
+``data`` array is actually optional (so that the function returns the model
+calculation if the data is neglected). Also note that the model
+calculation will divide ``x`` by the value of the ``period`` Parameter. It
+might be wise to ensure this parameter cannot be 0. It would be possible
+to use bounds on the :class:`Parameter` to do this:
+
+.. jupyter-execute::
+ :hide-code:
+
+ from lmfit import Parameter, Parameters
+
+ params = Parameters()
+
+.. jupyter-execute::
+
+ params['period'] = Parameter(name='period', value=2, min=1.e-10)
+
+but putting this directly in the function with:
+
+.. jupyter-execute::
+ :hide-code:
+
+ period = 1
+
+.. jupyter-execute::
+
+ if abs(period) < 1.e-10:
+ period = sign(period)*1.e-10
+
+is also a reasonable approach. Similarly, one could place bounds on the
+``shift`` parameter so that it takes values only between ``-pi/2`` and ``pi/2``.
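+
+A minimal sketch of such a bound (similar in spirit to the wrapping done
+inside the function above) would be::
+
+    params['shift'] = Parameter(name='shift', value=0.0, min=-pi/2, max=pi/2)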
+
+.. _fit-methods-label:
+
+Choosing Different Fitting Methods
+==================================
+
+By default, the `Levenberg-Marquardt
+<https://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm>`_ algorithm is
+used for fitting. While often criticized, not least because it finds only a
+*local* minimum, this approach has some distinct advantages. These include
+being fast and well-behaved for most curve-fitting needs, and making it
+easy to estimate uncertainties for and correlations between pairs of fit
+variables, as discussed in :ref:`fit-results-label`.
+
+Alternative algorithms can also be used by providing the ``method``
+keyword to the :func:`minimize` function or :meth:`Minimizer.minimize`
+class as listed in the :ref:`Table of Supported Fitting Methods
+<fit-methods-table>`. If you have the ``numdifftools`` package installed, lmfit
+will try to estimate the covariance matrix and determine parameter
+uncertainties and correlations if ``calc_covar`` is ``True`` (default).
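+
+For example, to use the Nelder-Mead method with the objective function
+shown above, a schematic call would be::
+
+    out = minimize(residual, params, args=(x,), method='nelder')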
+
+.. _fit-methods-table:
+
+ Table of Supported Fitting Methods:
+
+ +--------------------------+------------------------------------------------------------------+
+ | Fitting Method | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` |
+ +==========================+==================================================================+
+ | Levenberg-Marquardt | ``leastsq`` or ``least_squares`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Nelder-Mead | ``nelder`` |
+ +--------------------------+------------------------------------------------------------------+
+ | L-BFGS-B | ``lbfgsb`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Powell | ``powell`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Conjugate Gradient | ``cg`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Newton-CG | ``newton`` |
+ +--------------------------+------------------------------------------------------------------+
+ | COBYLA | ``cobyla`` |
+ +--------------------------+------------------------------------------------------------------+
+ | BFGS | ``bfgs`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Truncated Newton | ``tnc`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Newton CG trust-region | ``trust-ncg`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Exact trust-region | ``trust-exact`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Newton GLTR trust-region | ``trust-krylov`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Constrained trust-region | ``trust-constr`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Dogleg | ``dogleg`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Sequential Linear | ``slsqp`` |
+ | Squares Programming | |
+ +--------------------------+------------------------------------------------------------------+
+ | Differential | ``differential_evolution`` |
+ | Evolution | |
+ +--------------------------+------------------------------------------------------------------+
+ | Brute force method | ``brute`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Basinhopping | ``basinhopping`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Adaptive Memory | ``ampgo`` |
+ | Programming for Global | |
+ | Optimization | |
+ +--------------------------+------------------------------------------------------------------+
+ | Simplicial Homology | ``shgo`` |
+ | Global Optimization | |
+ +--------------------------+------------------------------------------------------------------+
+ | Dual Annealing | ``dual_annealing`` |
+ +--------------------------+------------------------------------------------------------------+
+ | Maximum likelihood via | ``emcee`` |
+ | Monte-Carlo Markov Chain | |
+ +--------------------------+------------------------------------------------------------------+
+
+
+.. note::
+
+ The objective function for the Levenberg-Marquardt method **must**
+ return an array, with more elements than variables. All other methods
+ can return either a scalar value or an array. The Monte-Carlo Markov
+ Chain or ``emcee`` method has two different operating methods when the
+ objective function returns a scalar value. See the documentation for ``emcee``.
+
+
+.. warning::
+
+ Much of this documentation assumes that the Levenberg-Marquardt (``leastsq``)
+ method is used. Many of the fit statistics and estimates for uncertainties in
+ parameters discussed in :ref:`fit-results-label` are calculated unconditionally
+ only for this (and the ``least_squares``) method. Lmfit versions newer than 0.9.11
+ provide the capability to use ``numdifftools`` to estimate the covariance matrix
+ and calculate parameter uncertainties and correlations for other methods as
+ well.
+
+.. _fit-results-label:
+
+:class:`MinimizerResult` -- the optimization result
+===================================================
+
+.. versionadded:: 0.9.0
+
+An optimization with :func:`minimize` or :meth:`Minimizer.minimize`
+will return a :class:`MinimizerResult` object. This is an otherwise
+plain container object (that is, with no methods of its own) that
+simply holds the results of the minimization. These results will
+include several pieces of informational data such as status and error
+messages, fit statistics, and the updated parameters themselves.
+
+Importantly, the parameters passed in to :meth:`Minimizer.minimize`
+will not be changed. To find the best-fit values, uncertainties
+and so on for each parameter, one must use the
+:attr:`MinimizerResult.params` attribute. For example, to print the
+fitted values, bounds and other parameter attributes in a
+well-formatted text table, you can execute::
+
+ result.params.pretty_print()
+
+with ``result`` being a :class:`MinimizerResult` object. Note that the method
+:meth:`~lmfit.parameter.Parameters.pretty_print` accepts several arguments
+for customizing the output (e.g., column width, numeric format, etcetera).
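+
+For instance, a call like the following (argument names as in recent lmfit
+versions) would select and format a few columns::
+
+    result.params.pretty_print(colwidth=10, precision=4,
+                               columns=['value', 'stderr', 'min', 'max'])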
+
+.. autoclass:: MinimizerResult
+
+
+
+Goodness-of-Fit Statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _goodfit-table:
+
+ Table of Fit Results: These values, including the standard Goodness-of-Fit statistics,
+ are all attributes of the :class:`MinimizerResult` object returned by
+ :func:`minimize` or :meth:`Minimizer.minimize`.
+
++----------------------+----------------------------------------------------------------------------+
+| Attribute Name | Description / Formula |
++======================+============================================================================+
+| nfev | number of function evaluations |
++----------------------+----------------------------------------------------------------------------+
+| nvarys | number of variables in fit :math:`N_{\rm varys}` |
++----------------------+----------------------------------------------------------------------------+
+| ndata | number of data points: :math:`N` |
++----------------------+----------------------------------------------------------------------------+
+| nfree | degrees of freedom in fit: :math:`N - N_{\rm varys}` |
++----------------------+----------------------------------------------------------------------------+
+| residual | residual array, returned by the objective function: :math:`\{\rm Resid_i\}`|
++----------------------+----------------------------------------------------------------------------+
+| chisqr | chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2` |
++----------------------+----------------------------------------------------------------------------+
+| redchi | reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}` |
++----------------------+----------------------------------------------------------------------------+
+| aic | Akaike Information Criterion statistic (see below) |
++----------------------+----------------------------------------------------------------------------+
+| bic | Bayesian Information Criterion statistic (see below) |
++----------------------+----------------------------------------------------------------------------+
+| var_names | ordered list of variable parameter names used for init_vals and covar |
++----------------------+----------------------------------------------------------------------------+
+| covar | covariance matrix (with rows/columns using var_names) |
++----------------------+----------------------------------------------------------------------------+
+| init_vals | list of initial values for variable parameters |
++----------------------+----------------------------------------------------------------------------+
+| call_kws | dict of keyword arguments sent to underlying solver |
++----------------------+----------------------------------------------------------------------------+
+
+Note that the calculations of chi-square and reduced chi-square assume
+that the returned residual array is scaled properly to the
+uncertainties in the data. For these statistics to be meaningful, the
+person writing the function to be minimized **must** scale them properly.
+
+After a fit using the :meth:`leastsq` or :meth:`least_squares` method has
+completed successfully, standard errors for the fitted variables and
+correlations between pairs of fitted variables are automatically calculated from
+the covariance matrix. For other methods, the ``calc_covar`` parameter (default
+is ``True``) in the :class:`Minimizer` class determines whether or not to use the
+``numdifftools`` package to estimate the covariance matrix. The standard error
+(estimated :math:`1\sigma` error-bar) goes into the :attr:`stderr` attribute of
+the Parameter. The correlations with all other variables will be put into the
+:attr:`correl` attribute of the Parameter -- a dictionary with keys for all
+other Parameters and values of the corresponding correlation.
+
+In some cases, it may not be possible to estimate the errors and
+correlations. For example, if a variable actually has no practical effect
+on the fit, it will likely cause the covariance matrix to be singular,
+making standard errors impossible to estimate. Placing bounds on varied
+Parameters makes it more likely that errors cannot be estimated, as being
+near the maximum or minimum value makes the covariance matrix singular. In
+these cases, the :attr:`errorbars` attribute of the fit result
+(:class:`MinimizerResult` object) will be ``False``.
+
+
+.. _information_criteria_label:
+
+Akaike and Bayesian Information Criteria
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The :class:`MinimizerResult` includes the traditional chi-square and
+reduced chi-square statistics:
+
+.. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ \chi^2 &=& \sum_i^N r_i^2 \\
+ \chi^2_\nu &=& \chi^2 / (N-N_{\rm varys})
+ \end{eqnarray*}
+
+where :math:`r` is the residual array returned by the objective function
+(likely to be ``(data-model)/uncertainty`` for data modeling usages),
+:math:`N` is the number of data points (``ndata``), and :math:`N_{\rm
+varys}` is number of variable parameters.
+
+Also included are the `Akaike Information Criterion
+<https://en.wikipedia.org/wiki/Akaike_information_criterion>`_, and
+`Bayesian Information Criterion
+<https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ statistics,
+held in the ``aic`` and ``bic`` attributes, respectively. These give slightly
+different measures of the relative quality for a fit, trying to balance
+quality of fit with the number of variable parameters used in the fit.
+These are calculated as:
+
+.. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ {\rm aic} &=& N \ln(\chi^2/N) + 2 N_{\rm varys} \\
+ {\rm bic} &=& N \ln(\chi^2/N) + \ln(N) N_{\rm varys} \\
+ \end{eqnarray*}
+
+
+When comparing fits with different numbers of varying parameters, one
+typically selects the model with lowest reduced chi-square, Akaike
+information criterion, and/or Bayesian information criterion. Generally,
+the Bayesian information criterion is considered the most conservative of
+these statistics.
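+
+For example, when comparing two fits one might simply inspect these
+attributes of each fit result, where lower values are preferred::
+
+    print(f'AIC = {result.aic:.2f}, BIC = {result.bic:.2f}')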
+
+
+Uncertainties in Variable Parameters, and their Correlations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned above, when a fit is complete the uncertainties for fitted
+Parameters as well as the correlations between pairs of Parameters are
+usually calculated. This happens automatically either when using the
+default :meth:`leastsq` method, the :meth:`least_squares` method, or for
+most other fitting methods if the highly-recommended ``numdifftools``
+package is available. The estimated standard error (the :math:`1\sigma`
+uncertainty) for each variable Parameter will be contained in the
+:attr:`stderr`, while the :attr:`correl` attribute for each Parameter will
+contain a dictionary of the correlation with each other variable Parameter.
+
+These estimates of the uncertainties are done by inverting the Hessian
+matrix, which represents the second derivative of fit quality with respect
+to each variable parameter. There are situations for which the uncertainties
+cannot be estimated, which generally indicates that this matrix cannot be
+inverted because the fit is not actually sensitive to one of the variables.
+This can happen if a Parameter is stuck at an upper or lower bound, if the
+variable is simply not used by the fit, or if the value for the variable is
+such that it has no real influence on the fit.
+
+In principle, the scale of the uncertainties in the Parameters is closely
+tied to the goodness-of-fit statistics chi-square and reduced chi-square
+(``chisqr`` and ``redchi``). The standard errors or :math:`1 \sigma`
+uncertainties are those that increase chi-square by 1. Since a "good fit"
+should have ``redchi`` of around 1, this requires that the data
+uncertainties (and to some extent the sampling of the N data points) are
+correct. Unfortunately, it is often not the case that one has high-quality
+estimates of the data uncertainties (getting the data is hard enough!).
+Because of this common situation, the uncertainties reported and held in
+:attr:`stderr` are not those that increase chi-square by 1, but those that
+increase chi-square by reduced chi-square. This is equivalent to rescaling
+the uncertainty in the data such that reduced chi-square would be 1. To be
+clear, this rescaling is done by default because if reduced chi-square is
+far from 1, this rescaling often makes the reported uncertainties sensible,
+and if reduced chi-square is near 1 it does little harm. If you have good
+scaling of the data uncertainty and believe the scale of the residual
+array is correct, this automatic rescaling can be turned off using
+``scale_covar=False``.
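+
+A schematic call turning the rescaling off would be::
+
+    out = minimize(residual, params, args=(x, data), scale_covar=False)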
+
+Note that the simple (and fast!) approach to estimating uncertainties and
+correlations by inverting the second derivative matrix assumes that the
+components of the residual array (if, indeed, an array is used) are
+distributed around 0 with a normal (Gaussian) distribution, and that a map
+of probability distributions for pairs would be elliptical, where the size of
+the ellipse gives the uncertainty itself and the eccentricity of the
+ellipse gives the correlation. This simple approach to assessing
+uncertainties ignores outliers, highly asymmetric uncertainties, or complex
+correlations between Parameters. In fact, it is not too hard to come up
+with problems where such effects are important. Our experience is that the
+automated results are usually the right scale and quite reasonable as
+initial estimates, but a more thorough exploration of the Parameter space
+using the tools described in :ref:`label-emcee` and
+:ref:`label-confidence-advanced` can give a more complete understanding of
+the distributions and relations between Parameters.
+
+
+.. _fit-reports-label:
+
+Getting and Printing Fit Reports
+================================
+
+.. currentmodule:: lmfit.printfuncs
+
+.. autofunction:: fit_report
+
+An example using this to write out a fit report would be:
+
+.. jupyter-execute:: ../examples/doc_fitting_withreport.py
+ :hide-output:
+
+which would give as output:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(fit_report(out))
+
+To be clear, you can get at all of these values from the fit result ``out``
+and ``out.params``. For example, a crude printout of the best fit variables
+and standard errors could be done as
+
+.. jupyter-execute::
+
+ print('-------------------------------')
+ print('Parameter Value Stderr')
+ for name, param in out.params.items():
+ print(f'{name:7s} {param.value:11.5f} {param.stderr:11.5f}')
+
+
+.. _fit-itercb-label:
+
+Using an Iteration Callback Function
+====================================
+
+.. currentmodule:: lmfit.minimizer
+
+An iteration callback function is a function to be called at each
+iteration, just after the objective function is called. The iteration
+callback allows user-supplied code to be run at each iteration, and can
+be used to abort a fit.
+
+.. function:: iter_cb(params, iter, resid, *args, **kws):
+
+ User-supplied function to be run at each iteration.
+
+ :param params: Parameters.
+ :type params: :class:`~lmfit.parameter.Parameters`
+ :param iter: Iteration number.
+ :type iter: int
+ :param resid: Residual array.
+ :type resid: numpy.ndarray
+ :param args: Positional arguments. Must match ``args`` argument to :func:`minimize`
+ :param kws: Keyword arguments. Must match ``kws`` argument to :func:`minimize`
+ :return: Iteration abort flag.
+ :rtype: None for normal behavior, any value like ``True`` to abort the fit.
+
+
+Normally, the iteration callback would have no return value or return
+``None``. To abort a fit, have this function return a value that is
+``True`` (including any non-zero integer). The fit will also abort if any
+exception is raised in the iteration callback. When a fit is aborted this
+way, the parameters will have the values from the last iteration. The fit
+statistics are not likely to be meaningful, and uncertainties will not be computed.
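+
+A minimal sketch of a callback that reports progress and aborts after a
+fixed number of iterations (all names here are illustrative)::
+
+    def my_callback(params, iter, resid, *args, **kws):
+        print(f'iteration {iter}, sum of squares = {(resid**2).sum():.4g}')
+        # returning a value that is True aborts the fit
+        return iter > 500
+
+    out = minimize(residual, params, args=(x, data), iter_cb=my_callback)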
+
+
+.. _fit-minimizer-label:
+
+Using the :class:`Minimizer` class
+==================================
+
+.. currentmodule:: lmfit.minimizer
+
+For full control of the fitting process, you will want to create a
+:class:`Minimizer` object.
+
+.. autoclass :: Minimizer
+
+The Minimizer object has a few public methods:
+
+.. automethod:: Minimizer.minimize
+
+.. automethod:: Minimizer.leastsq
+
+.. automethod:: Minimizer.least_squares
+
+.. automethod:: Minimizer.scalar_minimize
+
+.. automethod:: Minimizer.prepare_fit
+
+.. automethod:: Minimizer.brute
+
+For more information, check the examples in ``examples/lmfit_brute_example.ipynb``.
+
+.. automethod:: Minimizer.basinhopping
+
+.. automethod:: Minimizer.ampgo
+
+.. automethod:: Minimizer.shgo
+
+.. automethod:: Minimizer.dual_annealing
+
+.. automethod:: Minimizer.emcee
+
+
+.. _label-emcee:
+
+:meth:`Minimizer.emcee` - calculating the posterior probability distribution of parameters
+==========================================================================================
+
+:meth:`Minimizer.emcee` can be used to obtain the posterior probability
+distribution of parameters, given a set of experimental data. Note that this
+method does *not* actually perform a fit at all. Instead, it explores
+parameter space to determine the probability distributions for the parameters,
+but without an explicit goal of attempting to refine the solution. It should
+not be used for fitting, but it is a useful method to more thoroughly
+explore the parameter space around the solution after a fit has been done and
+thereby get an improved understanding of the probability distribution for the
+parameters. It may be able to refine your estimate of the most likely values
+for a set of parameters, but it will not iteratively find a good solution to
+the minimization problem. To use this method effectively, you should first
+use another minimization method and then use this method to explore the
+parameter space around those best-fit values.
+
+To illustrate this, we'll use an example problem of fitting data to a
+double exponential decay function, with a modest amount of Gaussian noise
+added to the data. Note that this example is the same problem used in
+:ref:`label-confidence-advanced` for evaluating confidence intervals in the
+parameters, which is a similar goal to the one here.
+
+.. jupyter-execute::
+ :hide-code:
+
+ import warnings
+ warnings.filterwarnings(action="ignore")
+
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+ mpl.rcParams['figure.dpi'] = 150
+ %matplotlib inline
+ %config InlineBackend.figure_format = 'svg'
+
+
+.. jupyter-execute::
+
+ import matplotlib.pyplot as plt
+ import numpy as np
+
+ import lmfit
+
+ x = np.linspace(1, 10, 250)
+ np.random.seed(0)
+ y = 3.0 * np.exp(-x / 2) - 5.0 * np.exp(-(x - 0.1) / 10.) + 0.1 * np.random.randn(x.size)
+
+Create a Parameter set for the initial guesses:
+
+.. jupyter-execute::
+
+ p = lmfit.Parameters()
+ p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3., True))
+
+ def residual(p):
+ v = p.valuesdict()
+ return v['a1'] * np.exp(-x / v['t1']) + v['a2'] * np.exp(-(x - 0.1) / v['t2']) - y
+
+Solving with :func:`minimize` gives the Maximum Likelihood solution. Note
+that we use the robust Nelder-Mead method here. The default Levenberg-Marquardt
+method seems to have difficulty with exponential decays, though it can refine
+the solution if starting near the solution:
+
+.. jupyter-execute::
+
+ mi = lmfit.minimize(residual, p, method='nelder', nan_policy='omit')
+ lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
+
+and plotting the fit using the Maximum Likelihood solution gives the graph below:
+
+.. jupyter-execute::
+
+ plt.plot(x, y, 'o')
+ plt.plot(x, residual(mi.params) + y, label='best fit')
+ plt.legend()
+ plt.show()
+
+Note that the fit here (for which the ``numdifftools`` package is installed)
+does estimate and report uncertainties in the parameters and correlations for
+the parameters, and reports the correlation of parameters ``a2`` and ``t2`` to
+be very high. As we'll see, these estimates are pretty good, but when faced
+with such high correlation, it can be helpful to get the full probability
+distribution for the parameters. MCMC methods are very good for this.
+
+Furthermore, we wish to deal with the data uncertainty. This is called
+marginalisation of a nuisance parameter. ``emcee`` requires a function that
+returns the log-posterior probability. The log-posterior probability is a sum
+of the log-prior probability and log-likelihood functions. The log-prior
+probability is assumed to be zero if all the parameters are within their
+bounds and ``-np.inf`` if any of the parameters are outside their bounds.
+
+If the objective function returns an array of unweighted residuals (i.e.,
+``data-model``) as is the case here, you can use ``is_weighted=False`` as an
+argument for ``emcee``. In that case, ``emcee`` will automatically add/use the
+``__lnsigma`` parameter to estimate the true uncertainty in the data. To
+place boundaries on this parameter one can do:
+
+.. jupyter-execute::
+
+ mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))
+
+Now we have to set up the minimizer and do the sampling (again, just to be
+clear, this is *not* doing a fit):
+
+.. jupyter-execute::
+ :hide-output:
+
+ res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300, steps=1000, thin=20,
+ params=mi.params, is_weighted=False, progress=False)
+
+As mentioned in the Notes for :meth:`Minimizer.emcee`, the ``is_weighted``
+argument will be ignored if your objective function returns a float instead of
+an array. For the documentation we set ``progress=False``; the default is to
+print a progress bar to the terminal if the ``tqdm`` package is installed.
+
+The success of the method (i.e., whether or not the sampling went well) can be
+assessed by checking the integrated autocorrelation time and/or the acceptance
+fraction of the walkers. For this specific example the autocorrelation time
+could not be estimated because the "chain is too short". Instead, we plot the
+acceptance fraction per walker and its mean value suggests that the sampling
+worked as intended (as a rule of thumb the value should be between 0.2 and
+0.5).
+
+.. jupyter-execute::
+
+ plt.plot(res.acceptance_fraction, 'o')
+ plt.xlabel('walker')
+ plt.ylabel('acceptance fraction')
+ plt.show()
+
+With the results from ``emcee``, we can visualize the posterior distributions
+for the parameters using the ``corner`` package:
+
+.. jupyter-execute::
+
+ import corner
+
+ emcee_plot = corner.corner(res.flatchain, labels=res.var_names,
+ truths=list(res.params.valuesdict().values()))
+
+The values reported in the :class:`MinimizerResult` are the medians of the
+probability distributions and a 1 :math:`\sigma` quantile, estimated as half
+the difference between the 15.8 and 84.2 percentiles. Printing these values:
+
+
+.. jupyter-execute::
+
+ print('median of posterior probability distribution')
+ print('--------------------------------------------')
+ lmfit.report_fit(res.params)
+
+You can see that this recovered the right uncertainty level on the data. Note
+that these values agree pretty well with the results, uncertainties and
+correlations found by the fit and using ``numdifftools`` to estimate the
+covariance matrix. That is, even though the parameters ``a2``, ``t1``, and
+``t2`` are all highly correlated and do not display perfectly Gaussian
+probability distributions, the probability distributions found by explicitly
+sampling the parameter space are not so far from elliptical as to make the
+simple (and much faster) estimates from inverting the covariance matrix
+completely invalid.
+
+As mentioned above, the result from ``emcee`` reports the median values, which
+are not necessarily the same as the Maximum Likelihood Estimate. To obtain
+the values for the Maximum Likelihood Estimation (MLE) we find the location in
+the chain with the highest probability:
+
+.. jupyter-execute::
+
+ highest_prob = np.argmax(res.lnprob)
+ hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
+ mle_soln = res.chain[hp_loc]
+ for i, par in enumerate(p):
+ p[par].value = mle_soln[i]
+
+
+ print('\nMaximum Likelihood Estimation from emcee ')
+ print('-------------------------------------------------')
+ print('Parameter MLE Value Median Value Uncertainty')
+ fmt = ' {:5s} {:11.5f} {:11.5f} {:11.5f}'.format
+ for name, param in p.items():
+ print(fmt(name, param.value, res.params[name].value,
+ res.params[name].stderr))
+
+
+Here the differences between the MLE and median values are seen to be below 0.5%,
+and well within the estimated 1-:math:`\sigma` uncertainty.
+
+Finally, we can use the samples from ``emcee`` to work out the 1- and
+2-:math:`\sigma` error estimates.
+
+.. jupyter-execute::
+
+ print('\nError estimates from emcee:')
+ print('------------------------------------------------------')
+ print('Parameter -2sigma -1sigma median +1sigma +2sigma')
+
+ for name in p.keys():
+ quantiles = np.percentile(res.flatchain[name],
+ [2.275, 15.865, 50, 84.135, 97.725])
+ median = quantiles[2]
+ err_m2 = quantiles[0] - median
+ err_m1 = quantiles[1] - median
+ err_p1 = quantiles[3] - median
+ err_p2 = quantiles[4] - median
+ fmt = ' {:5s} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format
+ print(fmt(name, err_m2, err_m1, median, err_p1, err_p2))
+
+And we see that the initial estimates for the 1-:math:`\sigma` standard error
+using ``numdifftools`` were not too bad. We'll return to this example
+problem in :ref:`label-confidence-advanced` and use a different method to
+calculate the 1- and 2-:math:`\sigma` error bars.
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..bd95e08
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,66 @@
+.. lmfit documentation master file,
+
+Non-Linear Least-Squares Minimization and Curve-Fitting for Python
+==================================================================
+
+.. _Levenberg-Marquardt: https://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
+.. _scipy.optimize: https://docs.scipy.org/doc/scipy/reference/optimize.html
+.. _lmfit GitHub repository: https://github.com/lmfit/lmfit-py
+
+Lmfit provides a high-level interface to non-linear optimization and curve
+fitting problems for Python. It builds on and extends many of the
+optimization methods of `scipy.optimize`_. Initially inspired by (and
+named for) extending the `Levenberg-Marquardt`_ method from
+:scipydoc:`optimize.leastsq`, lmfit now provides a number of useful
+enhancements to optimization and data fitting problems, including:
+
+ * Using :class:`~lmfit.parameter.Parameter` objects instead of plain
+ floats as variables. A :class:`~lmfit.parameter.Parameter` has a value
+ that can be varied during the fit or kept at a fixed value. It can
+ have upper and/or lower bounds. A Parameter can even have a value that
+ is constrained by an algebraic expression of other Parameter values.
+ As a Python object, a Parameter can also have attributes such as a
+ standard error, after a fit that can estimate uncertainties.
+
+ * Ease of changing fitting algorithms. Once a fitting model is set up,
+ one can change the fitting algorithm used to find the optimal solution
+ without changing the objective function.
+
+ * Improved estimation of confidence intervals. While
+ :scipydoc:`optimize.leastsq` will automatically calculate
+ uncertainties and correlations from the covariance matrix, the accuracy
+ of these estimates is sometimes questionable. To help address this,
+ lmfit has functions to explicitly explore parameter space and determine
+ confidence levels even for the most difficult cases. Additionally, lmfit
+ will use the ``numdifftools`` package (if installed) to estimate parameter
+ uncertainties and correlations for algorithms that do not natively
+ support this in SciPy.
+
+ * Improved curve-fitting with the :class:`~lmfit.model.Model` class. This
+ extends the capabilities of :scipydoc:`optimize.curve_fit`, allowing
+ you to turn a function that models your data into a Python class
+ that helps you parametrize and fit data with that model.
+
+ * Many :ref:`built-in models <builtin_models_chapter>` for common
+ lineshapes are included and ready to use.
+
+The lmfit package is Free software, using an Open Source license. The
+software and this document are works in progress. If you are interested in
+participating in this effort please use the `lmfit GitHub repository`_.
+
+.. toctree::
+ :maxdepth: 2
+
+ intro
+ installation
+ support
+ faq
+ parameters
+ fitting
+ model
+ builtin_models
+ confidence
+ bounds
+ constraints
+ whatsnew
+ examples/index
diff --git a/doc/installation.rst b/doc/installation.rst
new file mode 100644
index 0000000..46963a8
--- /dev/null
+++ b/doc/installation.rst
@@ -0,0 +1,136 @@
+============================
+Downloading and Installation
+============================
+
+.. _lmfit github repository: https://github.com/lmfit/lmfit-py
+.. _python: https://python.org
+.. _scipy: https://scipy.org/scipylib/index.html
+.. _numpy: https://numpy.org/
+.. _pytest: https://pytest.org/
+.. _pytest-cov: https://github.com/pytest-dev/pytest-cov
+.. _emcee: https://emcee.readthedocs.io/
+.. _pandas: https://pandas.pydata.org/
+.. _jupyter: https://jupyter.org/
+.. _matplotlib: https://matplotlib.org/
+.. _dill: https://github.com/uqfoundation/dill
+.. _asteval: https://github.com/newville/asteval
+.. _uncertainties: https://github.com/lebigot/uncertainties
+.. _numdifftools: https://github.com/pbrod/numdifftools
+.. _contributing.md: https://github.com/lmfit/lmfit-py/blob/master/.github/CONTRIBUTING.md
+.. _corner: https://github.com/dfm/corner.py
+.. _sphinx: https://www.sphinx-doc.org
+.. _jupyter_sphinx: https://jupyter-sphinx.readthedocs.io
+.. _ipykernel: https://github.com/ipython/ipykernel
+.. _sphinxcontrib-svg2pdfconverter: https://github.com/missinglinkelectronics/sphinxcontrib-svg2pdfconverter
+.. _cairosvg: https://cairosvg.org/
+.. _Pillow: https://python-pillow.org/
+.. _sphinx-gallery: https://sphinx-gallery.github.io/stable/index.html
+.. _flaky: https://github.com/box/flaky
+.. _SymPy: https://www.sympy.org/
+.. _Latexmk: https://ctan.org/pkg/latexmk/
+
+Prerequisites
+~~~~~~~~~~~~~
+
+Lmfit works with `Python`_ versions 3.7 and higher. Version
+0.9.15 is the final version to support Python 2.7.
+
+Lmfit requires the following Python packages, with versions given:
+ * `NumPy`_ version 1.19 or higher.
+ * `SciPy`_ version 1.6 or higher.
+ * `asteval`_ version 0.9.28 or higher.
+ * `uncertainties`_ version 3.1.4 or higher.
+
+All of these are readily available on PyPI, and are installed
+automatically if installing with ``pip install lmfit``.
+
+In order to run the test suite, the `pytest`_, `pytest-cov`_, and `flaky`_
+packages are required. Some functionality requires the `emcee`_ (version 3+),
+`corner`_, `pandas`_, `Jupyter`_, `matplotlib`_, `dill`_, or `numdifftools`_
+packages. These are not installed automatically, but we highly recommend each
+of them.
+
+For building the documentation and generating the examples gallery, `matplotlib`_,
+`emcee`_ (version 3+), `corner`_, `Sphinx`_, `sphinx-gallery`_, `jupyter_sphinx`_,
+`ipykernel`_, `Pillow`_, and `SymPy`_ are required. For generating the PDF documentation,
+the Python packages `sphinxcontrib-svg2pdfconverter`_ and `cairosvg`_ are also required,
+as well as the LaTeX package `Latexmk`_ (which is included by default in some
+LaTeX distributions).
+
+Please refer to ``setup.cfg`` under ``options.extras_require`` for a list of all
+dependencies that are needed if you want to participate in the development of lmfit.
+You can install all these dependencies automatically by doing ``pip install lmfit[all]``,
+or select only a subset (e.g., ``dev``, ``doc``, or ``test``).
+
+Please note: the "original" ``python setup.py install`` is deprecated, but we will
+provide a shim ``setup.py`` file for as long as ``Python`` and/or ``setuptools``
+allow the use of this legacy command.
+
+Downloads
+~~~~~~~~~
+
+The latest stable version of lmfit is |release| and is available from `PyPI
+<https://pypi.python.org/pypi/lmfit/>`_. Check the :ref:`whatsnew_chapter` for
+a list of changes compared to earlier releases.
+
+Installation
+~~~~~~~~~~~~
+
+The easiest way to install lmfit is with::
+
+ pip install lmfit
+
+For Anaconda Python, lmfit is not an official package, but several
+Anaconda channels provide it, allowing installation with (for example)::
+
+ conda install -c conda-forge lmfit
+
+
+Development Version
+~~~~~~~~~~~~~~~~~~~
+
+To get the latest development version from the `lmfit GitHub repository`_, use::
+
+ git clone https://github.com/lmfit/lmfit-py.git
+
+and install using::
+
+ pip install --upgrade build pip setuptools wheel
+
+to install the required build dependencies and then do::
+
+ python -m build
+ pip install ".[all]"
+
+to generate the wheel and install ``lmfit`` with all its dependencies.
+
+We welcome all contributions to lmfit! If you cloned the repository for this
+purpose, please read `CONTRIBUTING.md`_ for more detailed instructions.
+
+Testing
+~~~~~~~
+
+A battery of test scripts that can be run with the `pytest`_ testing framework
+is distributed with lmfit in the ``tests`` folder. These are automatically run
+as part of the development process.
+For any release or any master branch from the git repository, running ``pytest``
+should run all of these tests to completion without errors or failures.
+
+Many of the examples in this documentation are distributed with lmfit in the
+``examples`` folder, and should also run for you. Some of these examples assume
+that `matplotlib`_ has been installed and is working correctly.
+
+Acknowledgements
+~~~~~~~~~~~~~~~~
+
+.. literalinclude:: ../AUTHORS.txt
+ :language: none
+
+
+Copyright, Licensing, and Re-distribution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The LMFIT-py code is distributed under the following license:
+
+.. literalinclude:: ../LICENSE
+ :language: none
diff --git a/doc/intro.rst b/doc/intro.rst
new file mode 100644
index 0000000..79bee1b
--- /dev/null
+++ b/doc/intro.rst
@@ -0,0 +1,220 @@
+.. _intro_chapter:
+
+=====================================================
+Getting started with Non-Linear Least-Squares Fitting
+=====================================================
+
+The lmfit package provides simple tools to help you build complex fitting
+models for non-linear least-squares problems and apply these models to real
+data. This section gives an overview of the concepts and describes how to
+set up and perform simple fits. Some basic knowledge of Python, NumPy, and
+modeling data are assumed -- this is not a tutorial on why or how to
+perform a minimization or fit data, but is rather aimed at explaining how
+to use lmfit to do these things.
+
+In order to do a non-linear least-squares fit of a model to data or for any
+other optimization problem, the main task is to write an *objective
+function* that takes the values of the fitting variables and calculates
+either a scalar value to be minimized or an array of values that are to be
+minimized, typically in the least-squares sense. For many data fitting
+processes, the latter approach is used, and the objective function should
+return an array of ``(data-model)``, perhaps scaled by some weighting factor
+such as the inverse of the uncertainty in the data. For such a problem,
+the chi-square (:math:`\chi^2`) statistic is often defined as:
+
+.. math::
+
+ \chi^2 = \sum_i^{N} \frac{[y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})]^2}{\epsilon_i^2}
+
+where :math:`y_i^{\rm meas}` is the set of measured data, :math:`y_i^{\rm
+model}({\bf{v}})` is the model calculation, :math:`{\bf{v}}` is the set of
+variables in the model to be optimized in the fit, and :math:`\epsilon_i`
+is the estimated uncertainty in the data.
+
+In a traditional non-linear fit, one writes an objective function that
+takes the variable values and calculates the residual array :math:`y^{\rm
+meas}_i - y_i^{\rm model}({\bf{v}})`, or the residual array scaled by the
+data uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm
+model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor.
+
+As a simple concrete example, one might want to model data with a decaying
+sine wave, and so write an objective function like this:
+
+.. jupyter-execute::
+
+ from numpy import exp, sin
+
+ def residual(variables, x, data, uncertainty):
+ """Model a decaying sine wave and subtract data."""
+ amp = variables[0]
+ phaseshift = variables[1]
+ freq = variables[2]
+ decay = variables[3]
+
+ model = amp * sin(x*freq + phaseshift) * exp(-x*x*decay)
+
+ return (data-model) / uncertainty
+
+To perform the minimization with :mod:`scipy.optimize`, one would do this:
+
+.. jupyter-execute::
+
+ from numpy import linspace, random
+ from scipy.optimize import leastsq
+
+ # generate synthetic data with noise
+ x = linspace(0, 100)
+ noise = random.normal(size=x.size, scale=0.2)
+ data = 7.5 * sin(x*0.22 + 2.5) * exp(-x*x*0.01) + noise
+
+ # generate experimental uncertainties
+ uncertainty = abs(0.16 + random.normal(size=x.size, scale=0.05))
+
+ variables = [10.0, 0.2, 3.0, 0.007]
+ out = leastsq(residual, variables, args=(x, data, uncertainty))
+
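+:func:`scipy.optimize.leastsq` returns the best-fit values as the first
+element of its result tuple, so a quick check of the result (a minimal
+sketch) might be:
+
+.. jupyter-execute::
+
+    fitted_vars, ier = out
+    print('best-fit values:', fitted_vars)
+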
+Though it is wonderful to be able to use Python for such optimization
+problems, and the SciPy library is robust and easy to use, the approach
+here is not terribly different from how one would do the same fit in C or
+Fortran. There are several practical challenges to using this approach,
+including:
+
+ a) The user has to keep track of the order of the variables, and their
+ meaning -- ``variables[0]`` is the ``amplitude``, ``variables[2]`` is the
+ ``frequency``, and so on, although there is no intrinsic meaning to this
+ order.
+ b) If the user wants to fix a particular variable (*not* vary it in the fit),
+ the residual function has to be altered to have fewer variables, and have
+ the corresponding constant value passed in some other way. While
+ reasonable for simple cases, this quickly becomes significant work for
+ more complex models, and greatly complicates modeling for people not
+ intimately familiar with the details of the fitting code.
+ c) There is no simple, robust way to put bounds on values for the variables,
+ or enforce mathematical relationships between the variables. While some
+ optimization methods in SciPy do provide bounds, they require bounds to
+ be set for all variables with separate arrays that are in the same
+ arbitrary order as variable values. Again, this is acceptable for small
+ or one-off cases, but becomes painful if the fitting model needs to
+ change.
+ d) In some cases, constraints can be placed on parameter values, but this is
+ a pretty opaque and complex process.
+
+While these shortcomings can be worked around with some effort, they are all
+essentially due to the use of arrays or lists to hold the variables.
+This closely matches the implementation of the underlying Fortran code, but
+does not fit very well with Python's rich selection of objects and data
+structures. The key concept in lmfit is to define and use :class:`Parameter`
+objects instead of plain floating point numbers as the variables for the
+fit. Using :class:`Parameter` objects (or the closely related
+:class:`Parameters` -- a dictionary of :class:`Parameter` objects), allows one
+to do the following:
+
+ a) forget about the order of variables and refer to Parameters
+ by meaningful names.
+ b) place bounds on Parameters as attributes, without worrying about
+ preserving the order of arrays for variables and boundaries, and without
+ relying on the solver to support bounds itself.
+ c) fix Parameters, without having to rewrite the objective function.
+ d) place algebraic constraints on Parameters.
+
+To illustrate the value of this approach, we can rewrite the above example
+for the decaying sine wave as:
+
+.. jupyter-execute::
+
+    from numpy import exp, sin
+
+    from lmfit import minimize, Parameters
+
+
+    def residual(params, x, data, uncertainty):
+        amp = params['amp']
+        phaseshift = params['phase']
+        freq = params['frequency']
+        decay = params['decay']
+
+        model = amp * sin(x*freq + phaseshift) * exp(-x*x*decay)
+
+        return (data-model) / uncertainty
+
+
+    params = Parameters()
+    params.add('amp', value=10)
+    params.add('decay', value=0.007)
+    params.add('phase', value=0.2)
+    params.add('frequency', value=3.0)
+
+    out = minimize(residual, params, args=(x, data, uncertainty))
+
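+A report of the fit statistics and best-fit values can be generated with
+lmfit's :func:`fit_report` function, for example:
+
+.. jupyter-execute::
+
+    from lmfit import fit_report
+
+    print(fit_report(out))
+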
+
+At first glance, we simply replaced a list of values with a dictionary, so that
+we can access Parameters by name. Just by itself, this is better as it allows
+separation of the objective function from the code using it.
+
+Note that creation of Parameters here could also be done as:
+
+.. versionadded:: 1.2.0
+
+.. jupyter-execute::
+
+ from lmfit import create_params
+
+ params = create_params(amp=10, decay=0.007, phase=0.2, frequency=3.0)
+
+
+where keyword/value pairs set Parameter names and their initial values.
+
+Whether using :func:`create_params` or :class:`Parameters`, the resulting
+``params`` object is an instance of :class:`Parameters`, which acts like a
+dictionary, with keys being the Parameter name and values being individual
+:class:`Parameter` objects. These :class:`Parameter` objects hold the value
+and several other attributes that control how a Parameter acts. For example,
+Parameters can be fixed or bounded; setting attributes to control this
+behavior can be done during definition, as with:
+
+
+.. jupyter-execute::
+
+ params = Parameters()
+ params.add('amp', value=10, vary=False)
+ params.add('decay', value=0.007, min=0.0)
+ params.add('phase', value=0.2)
+ params.add('frequency', value=3.0, max=10)
+
+
+Here ``vary=False`` will prevent the value from changing in the fit, and
+``min=0.0`` will set a lower bound on that parameter's value. The same thing
+can be accomplished by providing a dictionary of attribute values to
+:func:`create_params`:
+
+.. versionadded:: 1.2.0
+
+.. jupyter-execute::
+
+ params = create_params(amp={'value': 10, 'vary': False},
+ decay={'value': 0.007, 'min': 0},
+ phase=0.2,
+ frequency={'value': 3.0, 'max':10})
+
+Parameter attributes can also be modified after they have been created:
+
+.. jupyter-execute::
+
+ params['amp'].vary = False
+ params['decay'].min = 0.10
+
+Importantly, our objective function remains unchanged. This means the objective
+function can simply express the parametrized phenomenon to be calculated,
+accessing Parameter values by name, while the choice of which parameters are
+varied in the fit is made separately by the user.
+
+The ``params`` object can be copied and modified to make many user-level
+changes to the model and fitting process. Of course, most of the information
+about how your data is modeled goes into the objective function, but the
+approach here allows some external control; that is, control by the **user**
+performing the fit, instead of by the author of the objective function.
+
+Finally, in addition to the :class:`Parameters` approach to fitting data, lmfit
+allows switching optimization methods without changing the objective function,
+provides tools for generating fitting reports, and provides better
+determination of confidence levels for Parameters.
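+
+As an example of the first of these, switching to the Nelder-Mead solver
+requires only changing the ``method`` argument -- a minimal sketch reusing
+``residual`` and ``params`` from above:
+
+.. jupyter-execute::
+
+    out_nelder = minimize(residual, params, method='nelder',
+                          args=(x, data, uncertainty))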
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000..2822cb0
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,43 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=python -msphinx
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+set SPHINXOPTS=-W
+
+if "%1" == "" goto help
+
+if "%1" == "html" (
+ python doc_examples_to_gallery.py
+ copy %~dp0sphinx\ext_mathjax.py %~dp0extensions.py
+ %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+ goto end
+)
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The Sphinx module was not found. Make sure you have Sphinx installed,
+ echo.then set the SPHINXBUILD environment variable to point to the full
+ echo.path of the 'sphinx-build' executable. Alternatively you may add the
+ echo.Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+
+:end
+popd
diff --git a/doc/model.rst b/doc/model.rst
new file mode 100644
index 0000000..d59f3dc
--- /dev/null
+++ b/doc/model.rst
@@ -0,0 +1,1225 @@
+.. _model_chapter:
+
+===============================
+Modeling Data and Curve Fitting
+===============================
+
+.. module:: lmfit.model
+
+A common use of least-squares minimization is *curve fitting*, where one
+has a parametrized model function meant to explain some phenomena and wants
+to adjust the numerical values for the model so that it most closely
+matches some data. With :mod:`scipy`, such problems are typically solved
+with :scipydoc:`optimize.curve_fit`, which is a wrapper around
+:scipydoc:`optimize.leastsq`. Since lmfit's
+:func:`~lmfit.minimizer.minimize` is also a high-level wrapper around
+:scipydoc:`optimize.leastsq`, it can be used for curve-fitting problems.
+While it offers many benefits over :scipydoc:`optimize.leastsq`, using
+:func:`~lmfit.minimizer.minimize` for many curve-fitting problems still
+requires more effort than using :scipydoc:`optimize.curve_fit`.
+
+The :class:`Model` class in lmfit provides a simple and flexible approach
+to curve-fitting problems. Like :scipydoc:`optimize.curve_fit`, a
+:class:`Model` uses a *model function* -- a function that is meant to
+calculate a model for some phenomenon -- and then uses that to best match
+an array of supplied data. Beyond that similarity, its interface is rather
+different from :scipydoc:`optimize.curve_fit`, for example in that it uses
+:class:`~lmfit.parameter.Parameters`, but also offers several other
+important advantages.
+
+In addition to allowing you to turn any model function into a curve-fitting
+method, lmfit also provides canonical definitions for many known lineshapes
+such as Gaussian or Lorentzian peaks and Exponential decays that are widely
+used in many scientific domains. These are available in the :mod:`models`
+module that will be discussed in more detail in the next chapter
+(:ref:`builtin_models_chapter`). We mention it here as you may want to
+consult that list before writing your own model. For now, we focus on
+turning Python functions into high-level fitting models with the
+:class:`Model` class, and using these to fit data.
+
+
+Motivation and simple example: Fit data to Gaussian profile
+===========================================================
+
+Let's start with a simple and common example of fitting data to a Gaussian
+peak. As we will see, there is a built-in :class:`GaussianModel` class that
+can help do this, but here we'll build our own. We start with a simple
+definition of the model function:
+
+.. jupyter-execute::
+ :hide-code:
+
+ import matplotlib as mpl
+ mpl.rcParams['figure.dpi'] = 150
+ %matplotlib inline
+ %config InlineBackend.figure_format = 'svg'
+
+.. jupyter-execute::
+
+    from numpy import exp, linspace, random
+
+
+    def gaussian(x, amp, cen, wid):
+        return amp * exp(-(x-cen)**2 / wid)
+
+We want to use this function to fit to data :math:`y(x)` represented by the
+arrays ``y`` and ``x``. With :scipydoc:`optimize.curve_fit`, this would be:
+
+.. jupyter-execute::
+ :hide-output:
+
+ from scipy.optimize import curve_fit
+
+ x = linspace(-10, 10, 101)
+ y = gaussian(x, 2.33, 0.21, 1.51) + random.normal(0, 0.2, x.size)
+
+ init_vals = [1, 0, 1] # for [amp, cen, wid]
+ best_vals, covar = curve_fit(gaussian, x, y, p0=init_vals)
+
+That is, we create data, make an initial guess of the model values, and run
+:scipydoc:`optimize.curve_fit` with the model function, data arrays, and
+initial guesses. The results returned are the optimal values for the
+parameters and the covariance matrix. It's simple and useful, but it
+misses the benefits of lmfit.
+
+With lmfit, we create a :class:`Model` that wraps the ``gaussian`` model
+function, which automatically generates the appropriate residual function,
+and determines the corresponding parameter names from the function
+signature itself:
+
+.. jupyter-execute::
+
+ from lmfit import Model
+
+ gmodel = Model(gaussian)
+ print(f'parameter names: {gmodel.param_names}')
+ print(f'independent variables: {gmodel.independent_vars}')
+
+As you can see, the Model ``gmodel`` determined the names of the parameters
+and the independent variables. By default, the first argument of the
+function is taken as the independent variable, held in
+:attr:`independent_vars`, and the rest of the function's positional
+arguments (and, in certain cases, keyword arguments -- see below) are used
+for Parameter names. Thus, for the ``gaussian`` function above, the
+independent variable is ``x``, and the parameters are named ``amp``,
+``cen``, and ``wid``, all taken directly from the signature of the
+model function. As we will see below, you can modify the default
+assignment of independent variable / arguments and specify yourself what
+the independent variable is and which function arguments should be identified
+as parameter names.
+
+:class:`~lmfit.parameter.Parameters` are *not* created when the model is
+created. The model knows what the parameters should be named, but nothing about
+the scale and range of your data. To help you create Parameters for a Model,
+each model has a :meth:`make_params` method that will generate parameters with
+the expected names. You will have to do this, or make Parameters some other way
+(say, with :func:`~lmfit.parameter.create_params`), and assign initial values
+for all Parameters. You can also assign other attributes when doing this:
+
+.. jupyter-execute::
+
+ params = gmodel.make_params()
+
+This creates the :class:`~lmfit.parameter.Parameters` but does not
+automatically give them initial values since it has no idea what the scale
+should be. If left unspecified, the initial values will be ``-Inf``, which will
+generally fail to give useful results. You can set initial values for
+parameters with keyword arguments to :meth:`make_params`:
+
+.. jupyter-execute::
+
+ params = gmodel.make_params(cen=0.3, amp=3, wid=1.25)
+
+or assign them (and other parameter properties) after the
+:class:`~lmfit.parameter.Parameters` class has been created.
+
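+For instance, a short sketch of setting values and other attributes after the
+parameters have been created:
+
+.. jupyter-execute::
+
+    params = gmodel.make_params()
+    params['amp'].value = 3.0
+    params['cen'].value = 0.3
+    params['wid'].set(value=1.25, min=0)
+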
+A :class:`Model` has several methods associated with it. For example, one
+can use the :meth:`eval` method to evaluate the model or the :meth:`fit`
+method to fit data to this model with a :class:`Parameter` object. Both of
+these methods can take explicit keyword arguments for the parameter values.
+For example, one could use :meth:`eval` to calculate the predicted
+function:
+
+.. jupyter-execute::
+
+ x_eval = linspace(0, 10, 201)
+ y_eval = gmodel.eval(params, x=x_eval)
+
+or with:
+
+.. jupyter-execute::
+
+ y_eval = gmodel.eval(x=x_eval, cen=6.5, amp=100, wid=2.0)
+
+Admittedly, this is a slightly long-winded way to calculate a Gaussian
+function, given that you could have called your ``gaussian`` function
+directly. But now that the model is set up, we can use its :meth:`fit`
+method to fit this model to data, as with:
+
+.. jupyter-execute::
+
+ result = gmodel.fit(y, params, x=x)
+
+or with:
+
+.. jupyter-execute::
+
+ result = gmodel.fit(y, x=x, cen=0.5, amp=10, wid=2.0)
+
+Putting everything together, the script (included in the ``examples`` folder
+with the source code) is:
+
+.. jupyter-execute:: ../examples/doc_model_gaussian.py
+ :hide-output:
+
+which is pretty compact and to the point. The returned ``result`` will be
+a :class:`ModelResult` object. As we will see below, this has many
+components, including a :meth:`fit_report` method, which will show:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(result.fit_report())
+
+As the script shows, the result will also have :attr:`init_fit` for the fit
+with the initial parameter values and a :attr:`best_fit` for the fit with
+the best fit parameter values. These can be used to generate the following
+plot:
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, 'o')
+ plt.plot(x, result.init_fit, '--', label='initial fit')
+ plt.plot(x, result.best_fit, '-', label='best fit')
+ plt.legend()
+ plt.show()
+
+which shows the data in blue dots, the best fit as a solid green line, and
+the initial fit as a dashed orange line.
+
+Note that the model fitting was really performed with:
+
+.. jupyter-execute::
+
+ gmodel = Model(gaussian)
+ result = gmodel.fit(y, params, x=x, amp=5, cen=5, wid=1)
+
+These lines clearly express that we want to turn the ``gaussian`` function
+into a fitting model, and then fit the :math:`y(x)` data to this model,
+starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``. In
+addition, all the other features of lmfit are included:
+:class:`~lmfit.parameter.Parameters` can have bounds and constraints and
+the result is a rich object that can be reused to explore the model fit in
+detail.
+
+
+The :class:`Model` class
+========================
+
+The :class:`Model` class provides a general way to wrap a pre-defined
+function as a fitting model.
+
+.. autoclass:: Model
+
+
+:class:`Model` class Methods
+----------------------------
+
+.. automethod:: Model.eval
+
+.. automethod:: Model.fit
+
+.. automethod:: Model.guess
+
+.. automethod:: Model.make_params
+
+.. automethod:: Model.set_param_hint
+
+ See :ref:`model_param_hints_section`.
+
+.. automethod:: Model.print_param_hints
+
+
+:class:`Model` class Attributes
+-------------------------------
+
+.. attribute:: func
+
+ The model function used to calculate the model.
+
+.. attribute:: independent_vars
+
+ List of strings for names of the independent variables.
+
+.. attribute:: nan_policy
+
+ Describes what to do for NaNs that indicate missing values in the data.
+ The choices are:
+
+ * ``'raise'``: Raise a ``ValueError`` (default)
+ * ``'propagate'``: Do not check for NaNs or missing values. The fit will
+ try to ignore them.
+ * ``'omit'``: Remove NaNs or missing observations in data. If pandas is
+ installed, :func:`pandas.isnull` is used, otherwise
+ :func:`numpy.isnan` is used.
+
+.. attribute:: name
+
+ Name of the model, used only in the string representation of the
+ model. By default this will be taken from the model function.
+
+.. attribute:: opts
+
+ Extra keyword arguments to pass to model function. Normally this will
+ be determined internally and should not be changed.
+
+.. attribute:: param_hints
+
+ Dictionary of parameter hints. See :ref:`model_param_hints_section`.
+
+.. attribute:: param_names
+
+ List of strings of parameter names.
+
+.. attribute:: prefix
+
+ Prefix used for name-mangling of parameter names. The default is ``''``.
+ If a particular :class:`Model` has arguments ``amplitude``,
+ ``center``, and ``sigma``, these would become the parameter names.
+ Using a prefix of ``'g1_'`` would convert these parameter names to
+ ``g1_amplitude``, ``g1_center``, and ``g1_sigma``. This can be
+ essential to avoid name collision in composite models.
+
+
+Determining parameter names and independent variables for a function
+--------------------------------------------------------------------
+
+The :class:`Model` created from the supplied function ``func`` will create a
+:class:`~lmfit.parameter.Parameters` object, with parameter names inferred from
+the function arguments; a residual function is constructed automatically.
+
+By default, the independent variable is taken as the first argument to the
+function. You can, of course, explicitly set this, and will need to do so
+if the independent variable is not first in the list, or if there is actually
+more than one independent variable.
+
+If not specified, Parameters are constructed from all positional arguments
+and all keyword arguments that have a default value that is numerical, except
+the independent variable, of course. Importantly, the Parameters can be
+modified after creation. In fact, you will have to do this because none of the
+parameters have valid initial values. In addition, one can place bounds and
+constraints on Parameters, or fix their values.
+
+
+Explicitly specifying ``independent_vars``
+------------------------------------------
+
+As we saw for the Gaussian example above, creating a :class:`Model` from a
+function is fairly easy. Let's try another one:
+
+.. jupyter-execute::
+
+    import numpy as np
+    from lmfit import Model
+
+
+    def decay(t, tau, N):
+        return N*np.exp(-t/tau)
+
+
+    decay_model = Model(decay)
+    print(f'independent variables: {decay_model.independent_vars}')
+
+    params = decay_model.make_params()
+    print('\nParameters:')
+    for pname, par in params.items():
+        print(pname, par)
+
+Here, ``t`` is assumed to be the independent variable because it is the
+first argument to the function. The other function arguments are used to
+create parameters for the model.
+
+If you want ``tau`` to be the independent variable in the above example,
+you can say so:
+
+.. jupyter-execute::
+
+    decay_model = Model(decay, independent_vars=['tau'])
+    print(f'independent variables: {decay_model.independent_vars}')
+
+    params = decay_model.make_params()
+    print('\nParameters:')
+    for pname, par in params.items():
+        print(pname, par)
+
+You can also supply multiple values for multi-dimensional functions with
+multiple independent variables. In fact, the meaning of *independent
+variable* here is simple, and based on how :class:`Model` treats the
+arguments of the function you are modeling:
+
+independent variable
+ A function argument that is not a parameter or otherwise part of the
+ model, and that will be required to be explicitly provided as a
+ keyword argument for each fit with :meth:`Model.fit` or evaluation
+ with :meth:`Model.eval`.
+
+Note that independent variables are not required to be arrays, or even
+floating point numbers.
+
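+As a sketch of a model with two independent variables (the function and names
+here are purely illustrative):
+
+.. jupyter-execute::
+
+    def signal(t, x, amp, decay):
+        return amp * np.exp(-t/decay) * np.cos(x)
+
+
+    sig_model = Model(signal, independent_vars=['t', 'x'])
+    print(f'independent variables: {sig_model.independent_vars}')
+    print(f'parameters: {sig_model.param_names}')
+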
+
+Functions with keyword arguments
+--------------------------------
+
+If the model function has keyword arguments, these are turned into
+Parameters if the supplied default value is a valid number (but not
+``None``, ``True``, or ``False``).
+
+.. jupyter-execute::
+
+    def decay2(t, tau, N=10, check_positive=False):
+        if check_positive:
+            arg = abs(t)/max(1.e-9, abs(tau))
+        else:
+            arg = t/tau
+        return N*np.exp(-arg)
+
+
+    mod = Model(decay2)
+    params = mod.make_params()
+    print('Parameters:')
+    for pname, par in params.items():
+        print(pname, par)
+
+Here, even though ``N`` is a keyword argument to the function, it is turned
+into a parameter, with the default numerical value as its initial value.
+By default, it is permitted to be varied in the fit -- the 10 is taken as
+an initial value, not a fixed value. On the other hand, the
+``check_positive`` keyword argument was not converted to a parameter
+because it has a boolean default value. In some sense,
+``check_positive`` becomes like an independent variable to the model.
+However, because it has a default value it is not required to be given for
+each model evaluation or fit, as independent variables are.
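+
+That is (a small sketch continuing the example above), ``check_positive`` can
+be passed by keyword when evaluating the model:
+
+.. jupyter-execute::
+
+    params = mod.make_params(tau=2.0)
+    t = np.linspace(0, 5, 11)
+    y_default = mod.eval(params, t=t)
+    y_checked = mod.eval(params, t=t, check_positive=True)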
+
+Defining a ``prefix`` for the Parameters
+----------------------------------------
+
+As we will see in the next chapter when combining models, it is sometimes
+necessary to decorate the parameter names in the model, but still have them
+be correctly used in the underlying model function. This would be
+necessary, for example, if two parameters in a composite model (see
+:ref:`composite_models_section` or examples in the next chapter) would have
+the same name. To avoid this, we can add a ``prefix`` to the
+:class:`Model` which will automatically do this mapping for us.
+
+.. jupyter-execute::
+
+    def myfunc(x, amplitude=1, center=0, sigma=1):
+        # function definition, for now just ``pass``
+        pass
+
+
+    mod = Model(myfunc, prefix='f1_')
+    params = mod.make_params()
+    print('Parameters:')
+    for pname, par in params.items():
+        print(pname, par)
+
+You would refer to these parameters as ``f1_amplitude`` and so forth, and
+the model will know to map these to the ``amplitude`` argument of ``myfunc``.
+
+
+Initializing model parameter values
+-----------------------------------
+
+As mentioned above, creating a model does not automatically create the
+corresponding :class:`~lmfit.parameter.Parameters`. These can be created with
+either the :func:`create_params` function, or the :meth:`Model.make_params`
+method of the corresponding instance of :class:`Model`.
+
+When creating Parameters, each parameter is created with an invalid initial
+value of ``-Inf`` if it is not set explicitly. That is to say, parameter values
+**must** be initialized in order for the model to evaluate a finite result or
+be used in a fit. There are a few different ways to do this:
+
+ 1. You can supply initial values in the definition of the model function.
+ 2. You can initialize the parameters when creating parameters with :meth:`Model.make_params`.
+ 3. You can create a Parameters object with :class:`Parameters` or :func:`create_params`.
+ 4. You can supply initial values for the parameters when calling
+ :meth:`Model.eval` or :meth:`Model.fit` methods.
+
+Generally, using the :meth:`Model.make_params` method is recommended. The methods
+described above can be mixed, allowing you to overwrite initial values at any point
+in the process of defining and using the model.
+
+
+Initializing values in the function definition
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To supply initial values for parameters in the definition of the model
+function, you can simply supply a default value:
+
+.. jupyter-execute::
+
+    def myfunc(x, a=1, b=0):
+        return a*x + 10*a - b
+
+instead of using:
+
+.. jupyter-execute::
+
+    def myfunc(x, a, b):
+        return a*x + 10*a - b
+
+This has the advantage of working at the function level -- all parameters
+with keywords can be treated as options. It also means that some default
+initial value will always be available for the parameter.
+
+
+Initializing values with :meth:`Model.make_params`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When creating parameters with :meth:`Model.make_params` you can specify initial
+values. To do this, use keyword arguments for the parameter names. You can
+either set initial values as numbers (floats or ints) or as dictionaries with
+keywords of (``value``, ``vary``, ``min``, ``max``, ``expr``, ``brute_step``,
+and ``is_init_value``) to specify these parameter attributes.
+
+.. jupyter-execute::
+
+ mod = Model(myfunc)
+
+ # simply supply initial values
+ pars = mod.make_params(a=3, b=0.5)
+
+ # supply initial values, attributes for bounds, etcetera:
+ pars_bounded = mod.make_params(a=dict(value=3, min=0),
+ b=dict(value=0.5, vary=False))
+
+
+Creating a :class:`Parameters` object directly
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can also create your own Parameters directly using :func:`create_params`.
+This is independent of using the :class:`Model` class, but is essentially
+equivalent to :meth:`Model.make_params` except with less checking of errors for
+model prefixes and so on.
+
+.. jupyter-execute::
+
+ from lmfit import create_params
+
+ mod = Model(myfunc)
+
+ # simply supply initial values
+ pars = create_params(a=3, b=0.5)
+
+ # supply initial values and attributes for bounds, etc:
+ pars_bounded = create_params(a=dict(value=3, min=0),
+ b=dict(value=0.5, vary=False))
+
+Because less error checking is done, :meth:`Model.make_params` should probably
+be preferred when using Models.
+
+
+Initializing parameter values for a model with keyword arguments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Finally, you can explicitly supply initial values when using a model. That
+is, as with :meth:`Model.make_params`, you can include values as keyword
+arguments to either the :meth:`Model.eval` or :meth:`Model.fit` methods:
+
+.. jupyter-execute::
+
+ x = linspace(0, 10, 100)
+ y_eval = mod.eval(x=x, a=7.0, b=-2.0)
+ y_sim = y_eval + random.normal(0, 0.2, x.size)
+ out = mod.fit(y_sim, pars, x=x, a=3.0, b=0.0)
+
+These approaches to initialization provide many opportunities for setting
+initial values for parameters. The methods can be combined, so that you
+can set parameter hints but then change the initial value explicitly with
+:meth:`Model.fit`.
+
+.. _model_param_hints_section:
+
+Using parameter hints
+---------------------
+
+After a model has been created, but prior to creating parameters with
+:meth:`Model.make_params`, you can define parameter hints for that model. This
+allows you to set other parameter attributes for bounds, whether it is varied in
+the fit, or set a default constraint expression for a parameter. You can also
+set the initial value, but that is not really the intention of the method,
+which is rather to let you describe properties of the idealized Model: for
+example, that some values may not make sense for some parameters, or that
+some parameters might be a small change from another parameter and so should
+be fixed or constrained by default.
+
+To set a parameter hint, you can use :meth:`Model.set_param_hint`,
+as with:
+
+.. jupyter-execute::
+
+ mod = Model(myfunc)
+ mod.set_param_hint('bounded_parameter', min=0, max=1.0)
+ pars = mod.make_params()
+
+Parameter hints are stored in a model's :attr:`param_hints` attribute,
+which is simply a nested dictionary:
+
+.. jupyter-execute::
+
+    print('Parameter hints:')
+    for pname, par in mod.param_hints.items():
+        print(pname, par)
+
+You can change this dictionary directly or use the :meth:`Model.set_param_hint`
+method. Either way, these parameter hints are used by :meth:`Model.make_params`
+when making parameters.
+
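+Editing the dictionary directly is equivalent to calling
+:meth:`Model.set_param_hint`; a small sketch:
+
+.. jupyter-execute::
+
+    mod.param_hints['bounded_parameter']['max'] = 2.0
+    pars = mod.make_params()
+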
+Parameter hints also allow you to create new parameters. This can be useful to
+make derived parameters with constraint expressions. For example, to get the
+full-width at half maximum of a Gaussian model, one could use a parameter hint
+of:
+
+.. jupyter-execute::
+
+ mod = Model(gaussian)
+ mod.set_param_hint('wid', min=0)
+ mod.set_param_hint('fwhm', expr='2.3548*wid')
+ params = mod.make_params(amp={'value': 10, 'min':0.1, 'max':2000},
+ cen=5.5, wid=1.25)
+ params.pretty_print()
+
+With that definition, the value (and uncertainty) of the ``fwhm`` parameter
+will be reported in the output of any fit done with that model.
+
+.. _model_data_coercion_section:
+
+Data Types for data and independent data with ``Model``
+-------------------------------------------------------------
+
+The model as defined by your model function will use the independent
+variable(s) you specify to best match the data you provide. The model is meant
+to be an abstract representation for data, but when you do a fit with
+:meth:`Model.fit`, you really need to pass in values for the data to be modeled
+and the independent data used to calculate that data.
+
+The mathematical solvers used by ``lmfit`` all work exclusively with
+1-dimensional numpy arrays of datatype (dtype) ``float64``. The result of the
+calculation ``(model-data)*weights`` -- combining the output of your model
+function with the data and weights you pass in -- *will be coerced* to a
+1-dimensional ndarray with dtype ``float64`` when it is passed to the solver.
+
+If the data you pass to :meth:`Model.fit` is not an ndarray of dtype
+``float64`` but is instead a tuple of numbers, a list of numbers, or a
+``pandas.Series``, it will be coerced into an ndarray. If your data is a list,
+tuple, or Series of complex numbers, it *will be coerced* to an ndarray with
+dtype ``complex128``.
+
+If your data is a numpy array of dtype ``float32``, it *will not be coerced* to
+``float64``, as we assume this was an intentional choice. That may make all of
+the calculations done in your model function run in single precision, which may
+make fits less sensitive, but the values will be converted to ``float64``
+before being sent to the solver, so the fit should work.
+
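+If you want full double precision throughout the model calculation, you can
+convert such data yourself before fitting -- a one-line sketch, with ``ydata``
+standing in for your ``float32`` array:
+
+.. jupyter-execute::
+
+    import numpy as np
+
+    ydata = np.arange(10, dtype='float32')  # stand-in for real float32 data
+    ydata = ydata.astype('float64')         # explicit conversion to float64
+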
+The independent data for models using ``Model`` are meant to be truly
+independent, and are **not** required to be strictly numerical or objects that
+are easily converted to arrays of numbers. That is, independent data for a
+model could be a dictionary, an instance of a user-defined class, or other type
+of structured data. You can use independent data any way you want in your
+model function.
+
+But, as with almost all the examples given here, independent data is often also
+a 1-dimensional array of values, say ``x``, and a simple view of the fit would be
+to plot the data ``y`` as a function of ``x``. Again, this is not required, but
+it is very common. Because of this very common usage, if your independent data
+is a tuple or list of numbers or ``pandas.Series``, it *will be coerced* to be
+an ndarray of dtype ``float64``. But as with the primary data, if your
+independent data is an ndarray of some different dtype (``float32``,
+``uint16``, etc), it *will not be coerced* to ``float64``, as we assume this
+was intentional.
+
+.. note::
+
+ Data and independent data that are tuples or lists of numbers, or
+ ``pandas.Series`` will be coerced to an ndarray of dtype ``float64`` before
+ passing to the model function. Data with other dtypes (or independent data
+ of other object types such as dicts) will not be coerced to ``float64``.
+
+
+.. _model_saveload_sec:
+
+Saving and Loading Models
+-------------------------
+
+.. versionadded:: 0.9.8
+
+It is sometimes desirable to save a :class:`Model` for later use outside of
+the code used to define the model. Lmfit provides a :func:`save_model`
+function that will save a :class:`Model` to a file. There is also a
+companion :func:`load_model` function that can read this file and
+reconstruct a :class:`Model` from it.
+
+Saving a model turns out to be somewhat challenging. The main issue is that
+Python is not normally able to *serialize* a function (such as the model
+function making up the heart of the Model) in a way that can be
+reconstructed into a callable Python object. The ``dill`` package can
+sometimes serialize functions, but with the limitation that it can be used
+only in the same version of Python. In addition, class methods used as
+model functions will not retain the rest of the class attributes and
+methods, and so may not be usable. With all those warnings, it should be
+emphasized that if you are willing to save or reuse the definition of the
+model function as Python code, then saving the Parameters and the rest of the
+components that make up a model presents no problem.
+
+If the ``dill`` package is installed, the model function will also be saved
+using it. But because saving the model function is not always reliable,
+saving a model will always save the *name* of the model function. The
+:func:`load_model` takes an optional :attr:`funcdefs` argument that can
+contain a dictionary of function definitions with the function names as
+keys and function objects as values. If one of the dictionary keys matches
+the saved name, the corresponding function object will be used as the model
+function. If it is not found by name, and if ``dill`` was used to save
+the model, and if ``dill`` is available at run-time, the ``dill``-encoded
+function will be tried. Note that this approach will generally allow
+you to save a model that can be used by another installation of the
+same version of Python, but may not work across Python versions. For preserving
+fits for extended periods of time (say, archiving for documentation of
+scientific results), we strongly encourage you to save the full Python code
+used for the model function and fit process.
+
+
+.. autofunction:: save_model
+
+.. autofunction:: load_model
+
+As a simple example, one can save a model as:
+
+.. jupyter-execute:: ../examples/doc_model_savemodel.py
+
+To load that later, one might do:
+
+.. jupyter-execute:: ../examples/doc_model_loadmodel.py
+ :hide-output:
+
+See also :ref:`modelresult_saveload_sec`.
+
+The :class:`ModelResult` class
+==============================
+
+A :class:`ModelResult` (which had been called ``ModelFit`` prior to version
+0.9) is the object returned by :meth:`Model.fit`. It is a subclass of
+:class:`~lmfit.minimizer.Minimizer`, and so contains many of the fit results.
+Of course, it knows the :class:`Model` and the set of
+:class:`~lmfit.parameter.Parameters` used in the fit, and it has methods to
+evaluate the model, to fit the data (or re-fit the data with changes to
+the parameters, or fit with different or modified data) and to print out a
+report for that fit.
+
+While a :class:`Model` encapsulates your model function, it is fairly
+abstract and does not contain the parameters or data used in a particular
+fit. A :class:`ModelResult` *does* contain parameters and data as well as
+methods to alter and re-do fits. Thus the :class:`Model` is the idealized
+model while the :class:`ModelResult` is the messier, more complex (but perhaps
+more useful) object that represents a fit with a set of parameters to data
+with a model.
+
+
+A :class:`ModelResult` has several attributes holding values for fit
+results, and several methods for working with fits. These include
+statistics inherited from :class:`~lmfit.minimizer.Minimizer` useful for
+comparing different models, including ``chisqr``, ``redchi``, ``aic``,
+and ``bic``.
+
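+For instance, using the ``result`` from the Gaussian fit earlier in this
+chapter, these statistics can be printed and compared directly:
+
+.. jupyter-execute::
+
+    print(f'chi-square: {result.chisqr:.3f}, reduced chi-square: {result.redchi:.3f}')
+    print(f'AIC: {result.aic:.3f}, BIC: {result.bic:.3f}')
+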
+.. autoclass:: ModelResult
+
+
+:class:`ModelResult` methods
+----------------------------
+
+.. automethod:: ModelResult.eval
+
+.. automethod:: ModelResult.eval_components
+
+.. automethod:: ModelResult.fit
+
+.. automethod:: ModelResult.fit_report
+
+.. automethod:: ModelResult.summary
+
+.. automethod:: ModelResult.conf_interval
+
+.. automethod:: ModelResult.ci_report
+
+.. automethod:: ModelResult.eval_uncertainty
+
+.. automethod:: ModelResult.plot
+
+.. automethod:: ModelResult.plot_fit
+
+.. automethod:: ModelResult.plot_residuals
+
+
+:class:`ModelResult` attributes
+-------------------------------
+
+.. attribute:: aic
+
+ Floating point best-fit Akaike Information Criterion statistic
+ (see :ref:`fit-results-label`).
+
+.. attribute:: best_fit
+
+ numpy.ndarray result of model function, evaluated at provided
+ independent variables and with best-fit parameters.
+
+.. attribute:: best_values
+
+ Dictionary with parameter names as keys, and best-fit values as values.
+
+.. attribute:: bic
+
+ Floating point best-fit Bayesian Information Criterion statistic
+ (see :ref:`fit-results-label`).
+
+.. attribute:: chisqr
+
+ Floating point best-fit chi-square statistic (see :ref:`fit-results-label`).
+
+.. attribute:: ci_out
+
+ Confidence interval data (see :ref:`confidence_chapter`) or ``None`` if
+ the confidence intervals have not been calculated.
+
+.. attribute:: covar
+
+ numpy.ndarray (square) covariance matrix returned from fit.
+
+.. attribute:: data
+
+ numpy.ndarray of data to compare to model.
+
+.. attribute:: dely
+
+ numpy.ndarray of estimated uncertainties in the ``y`` values of the model
+ from :meth:`ModelResult.eval_uncertainty` (see :ref:`eval_uncertainty_sec`).
+
+.. attribute:: dely_comps
+
+ a dictionary of estimated uncertainties in the ``y`` values of the model
+ components, from :meth:`ModelResult.eval_uncertainty` (see
+ :ref:`eval_uncertainty_sec`).
+
+.. attribute:: errorbars
+
+ Boolean for whether error bars were estimated by fit.
+
+.. attribute:: ier
+
+ Integer returned code from :scipydoc:`optimize.leastsq`.
+
+.. attribute:: init_fit
+
+ numpy.ndarray result of model function, evaluated at provided
+ independent variables and with initial parameters.
+
+.. attribute:: init_params
+
+ Initial parameters.
+
+.. attribute:: init_values
+
+ Dictionary with parameter names as keys, and initial values as values.
+
+.. attribute:: iter_cb
+
+ Optional callable function, to be called at each fit iteration. This
+ must take arguments of ``(params, iter, resid, *args, **kws)``, where
+ ``params`` will have the current parameter values, ``iter`` the
+ iteration, ``resid`` the current residual array, and ``*args`` and
+ ``**kws`` as passed to the objective function. See :ref:`fit-itercb-label`.
+
+.. attribute:: jacfcn
+
+ Optional callable function, to be called to calculate Jacobian array.
+
+.. attribute:: lmdif_message
+
+ String message returned from :scipydoc:`optimize.leastsq`.
+
+.. attribute:: message
+
+ String message returned from :func:`~lmfit.minimizer.minimize`.
+
+.. attribute:: method
+
+ String naming fitting method for :func:`~lmfit.minimizer.minimize`.
+
+.. attribute:: call_kws
+
+ Dict of keyword arguments actually sent to the underlying solver with
+ :func:`~lmfit.minimizer.minimize`.
+
+.. attribute:: model
+
+ Instance of :class:`Model` used for model.
+
+.. attribute:: ndata
+
+ Integer number of data points.
+
+.. attribute:: nfev
+
+ Integer number of function evaluations used for fit.
+
+.. attribute:: nfree
+
+ Integer number of free parameters in fit.
+
+.. attribute:: nvarys
+
+ Integer number of independent, freely varying variables in fit.
+
+.. attribute:: params
+
+ Parameters used in fit; will contain the best-fit values.
+
+.. attribute:: redchi
+
+ Floating point reduced chi-square statistic (see :ref:`fit-results-label`).
+
+.. attribute:: residual
+
+ numpy.ndarray for residual.
+
+.. attribute:: rsquared
+
+ Floating point :math:`R^2` statistic, defined for data :math:`y` and best-fit model :math:`f` as
+
+.. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ R^2 &=& 1 - \frac{\sum_i (y_i - f_i)^2}{\sum_i (y_i - \bar{y})^2}
+ \end{eqnarray*}
+
+.. attribute:: scale_covar
+
+ Boolean flag for whether to automatically scale covariance matrix.
+
+.. attribute:: success
+
+ Boolean value of whether fit succeeded.
+
+.. attribute:: userargs
+
+ Positional arguments passed to :meth:`Model.fit`: a tuple of (``y``, ``weights``).
+
+.. attribute:: userkws
+
+ Keyword arguments passed to :meth:`Model.fit`: a dict, which will have independent data arrays such as ``x``.
+
+
+.. attribute:: weights
+
+ numpy.ndarray (or ``None``) of weighting values to be used in fit. If not
+ ``None``, it will be used as a multiplicative factor of the residual
+ array, so that ``weights*(data - fit)`` is minimized in the
+ least-squares sense.
+
+.. _eval_uncertainty_sec:
+
+Calculating uncertainties in the model function
+-----------------------------------------------
+
+We return to the first example above and ask not only for the
+uncertainties in the fitted parameters but also for the range of values
+that those uncertainties imply for the model function itself. We can use the
+:meth:`ModelResult.eval_uncertainty` method of the model result object to
+evaluate the uncertainty in the model with a specified level for
+:math:`\sigma`.
+
+That is, adding:
+
+.. jupyter-execute:: ../examples/doc_model_gaussian.py
+ :hide-output:
+ :hide-code:
+
+.. jupyter-execute::
+ :hide-output:
+
+ dely = result.eval_uncertainty(sigma=3)
+ plt.fill_between(x, result.best_fit-dely, result.best_fit+dely, color="#ABABAB",
+ label=r'3-$\sigma$ uncertainty band')
+
+to the example fit to the Gaussian at the beginning of this chapter will
+give 3-:math:`\sigma` bands for the best-fit Gaussian, and produce the
+figure below.
+
+.. jupyter-execute::
+ :hide-code:
+
+ plt.plot(x, y, 'o')
+ plt.plot(x, result.init_fit, '--', label='initial fit')
+ plt.plot(x, result.best_fit, '-', label='best fit')
+ plt.fill_between(x, result.best_fit-dely, result.best_fit+dely, color="#ABABAB",
+ label=r'3-$\sigma$ uncertainty band')
+ plt.legend()
+ plt.show()
+
+
+.. versionadded:: 1.0.4
+
+If the model is a composite built from multiple components, the
+:meth:`ModelResult.eval_uncertainty` method will evaluate the uncertainty of
+both the full model (often the sum of multiple components) as well as the
+uncertainty in each component. The uncertainty of the full model will be held in
+``result.dely``, and the uncertainties for each component will be held in the dictionary
+``result.dely_comps``, with keys that are the component prefixes.
+
+An example script shows how the uncertainties in components of a composite
+model can be calculated and used:
+
+.. jupyter-execute:: ../examples/doc_model_uncertainty2.py
+
+
+.. _modelresult_saveload_sec:
+
+Saving and Loading ModelResults
+-------------------------------
+
+.. versionadded:: 0.9.8
+
+As with saving models (see section :ref:`model_saveload_sec`), it is
+sometimes desirable to save a :class:`ModelResult`, either for later use or
+to organize and compare different fit results. Lmfit provides a
+:func:`save_modelresult` function that will save a :class:`ModelResult` to
+a file. There is also a companion :func:`load_modelresult` function that
+can read this file and reconstruct a :class:`ModelResult` from it.
+
+As discussed in section :ref:`model_saveload_sec`, there are challenges to
+saving model functions that may make it difficult to restore a saved
+:class:`ModelResult` in a way that can be used to perform a fit.
+Use of the optional :attr:`funcdefs` argument is generally the most
+reliable way to ensure that a loaded :class:`ModelResult` can be used to
+evaluate the model function or redo the fit.
+
+.. autofunction:: save_modelresult
+
+.. autofunction:: load_modelresult
+
+An example of saving a :class:`ModelResult` is:
+
+.. jupyter-execute:: ../examples/doc_model_savemodelresult.py
+ :hide-output:
+
+To load that later, one might do:
+
+.. jupyter-execute:: ../examples/doc_model_loadmodelresult.py
+ :hide-output:
+
+.. index:: Composite models
+
+.. _composite_models_section:
+
+Composite Models : adding (or multiplying) Models
+=================================================
+
+One of the more interesting features of the :class:`Model` class is that
+Models can be added together or combined with basic algebraic operations
+(add, subtract, multiply, and divide) to give a composite model. The
+composite model will have parameters from each of the component models,
+with all parameters being available to influence the whole model. This
+ability to combine models will become even more useful in the next chapter,
+when pre-built subclasses of :class:`Model` are discussed. For now, we'll
+consider a simple example, and build a model of a Gaussian plus a line, as
+to model a peak with a background. For such a simple problem, we could just
+build a model that included both components:
+
+.. jupyter-execute::
+
+    from numpy import exp, pi, sqrt
+
+
+    def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
+        """line + 1-d gaussian"""
+        gauss = (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2))
+        line = slope*x + intercept
+        return gauss + line
+
+and use that with:
+
+.. jupyter-execute::
+
+ mod = Model(gaussian_plus_line)
+
+But we already have a function for a gaussian, and maybe we'll discover
+that a linear background isn't sufficient, which would mean the model
+function would have to be changed.
+
+Instead, lmfit allows models to be combined into a :class:`CompositeModel`.
+As an alternative to including a linear background in our model function,
+we could define a linear function:
+
+.. jupyter-execute::
+
+    def line(x, slope, intercept):
+        """a line"""
+        return slope*x + intercept
+
+and build a composite model with just:
+
+.. jupyter-execute::
+
+ mod = Model(gaussian) + Model(line)
+
+This model has parameters for both component models, and can be used as:
+
+.. jupyter-execute:: ../examples/doc_model_two_components.py
+ :hide-output:
+
+which prints out the results:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(result.fit_report())
+
+and shows the plot on the left.
+
+.. jupyter-execute::
+ :hide-code:
+
+ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+ axes[0].plot(x, y, 'o')
+ axes[0].plot(x, result.init_fit, '--', label='initial fit')
+ axes[0].plot(x, result.best_fit, '-', label='best fit')
+ axes[0].legend()
+
+ comps = result.eval_components()
+ axes[1].plot(x, y, 'o')
+ axes[1].plot(x, comps['gaussian'], '--', label='Gaussian component')
+ axes[1].plot(x, comps['line'], '--', label='Line component')
+ axes[1].legend()
+ plt.show()
+
+On the left, the data is shown as blue dots, the total fit as a solid green
+line, and the initial fit as an orange dashed line. The figure on the right
+again shows the data as blue dots, the Gaussian component as an orange
+dashed line and the linear component as a green dashed line. It is created
+using the following code:
+
+.. jupyter-execute::
+ :hide-output:
+
+ comps = result.eval_components()
+ plt.plot(x, y, 'o')
+ plt.plot(x, comps['gaussian'], '--', label='Gaussian component')
+ plt.plot(x, comps['line'], '--', label='Line component')
+
+The components were generated after the fit using the
+:meth:`ModelResult.eval_components` method of the ``result``, which returns
+a dictionary of the components, using keys of the model name
+(or ``prefix`` if that is set). This will use the parameter values in
+``result.params`` and the independent variables (``x``) used during the
+fit. Note that while the :class:`ModelResult` held in ``result`` does store the
+best parameters and the best estimate of the model in ``result.best_fit``,
+the original model and parameters in ``pars`` are left unaltered.
+
+You can apply this composite model to other data sets, or evaluate the
+model at other values of ``x``. You may want to do this to give a finer or
+coarser spacing of data points, or to extrapolate the model outside the
+fitting range. This can be done with:
+
+.. jupyter-execute::
+
+ xwide = linspace(-5, 25, 3001)
+ predicted = mod.eval(result.params, x=xwide)
+
+In this example, the argument names for the model functions do not overlap.
+If they had, the ``prefix`` argument to :class:`Model` would have allowed
+us to identify which parameter went with which component model. As we will
+see in the next chapter, using composite models with the built-in models
+provides a simple way to build up complex models.
+
+.. autoclass:: CompositeModel(left, right, op[, **kws])
+
+Note that when using built-in Python binary operators, a
+:class:`CompositeModel` will automatically be constructed for you. That is,
+doing:
+
+.. jupyter-execute::
+ :hide-code:
+
+    def fcn1(x, a):
+        pass
+
+    def fcn2(x, b):
+        pass
+
+    def fcn3(x, c):
+        pass
+
+.. jupyter-execute::
+
+ mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
+
+will create a :class:`CompositeModel`. Here, ``left`` will be ``Model(fcn1)``,
+``op`` will be :meth:`operator.add`, and ``right`` will be another
+CompositeModel that has a ``left`` attribute of ``Model(fcn2)``, an ``op`` of
+:meth:`operator.mul`, and a ``right`` of ``Model(fcn3)``.
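+
+You can inspect that structure directly through those attributes -- a quick
+sketch:
+
+.. jupyter-execute::
+
+    print('left: ', mod.left)
+    print('op:   ', mod.op)
+    print('right:', mod.right)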
+
+To use a binary operator other than ``+``, ``-``, ``*``, or ``/`` you can
+explicitly create a :class:`CompositeModel` with the appropriate binary
+operator. For example, to convolve two models, you could define a simple
+convolution function, perhaps as:
+
+.. jupyter-execute::
+
+    import numpy as np
+
+    def convolve(dat, kernel):
+        """simple convolution of two arrays"""
+        npts = min(len(dat), len(kernel))
+        pad = np.ones(npts)
+        tmp = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
+        out = np.convolve(tmp, kernel, mode='valid')
+        noff = int((len(out) - npts) / 2)
+        return (out[noff:])[:npts]
+
+which extends the data in both directions so that the convolving kernel
+function gives a valid result over the data range. Because this function
+takes two array arguments and returns an array, it can be used as the
+binary operator. A full script using this technique is here:
+
+.. jupyter-execute:: ../examples/doc_model_composite.py
+ :hide-output:
+
+which prints out the results:
+
+.. jupyter-execute::
+ :hide-code:
+
+ print(result.fit_report())
+
+and shows the plots:
+
+.. jupyter-execute::
+ :hide-code:
+
+ fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+ axes[0].plot(x, y, 'o')
+ axes[0].plot(x, result.init_fit, '--', label='initial fit')
+ axes[0].plot(x, result.best_fit, '-', label='best fit')
+ axes[0].legend()
+ axes[1].plot(x, y, 'o')
+ axes[1].plot(x, 10*comps['jump'], '--', label='Jump component')
+ axes[1].plot(x, 10*comps['gaussian'], '-', label='Gaussian component')
+ axes[1].legend()
+ plt.show()
+
+Using composite models with built-in or custom operators allows you to
+build complex models from testable sub-components.
diff --git a/doc/parameters.rst b/doc/parameters.rst
new file mode 100644
index 0000000..5d37552
--- /dev/null
+++ b/doc/parameters.rst
@@ -0,0 +1,130 @@
+.. _parameters_chapter:
+
+.. module:: lmfit.parameter
+
+==========================================
+:class:`Parameter` and :class:`Parameters`
+==========================================
+
+This chapter describes the :class:`Parameter` object, which is a key concept of
+lmfit.
+
+A :class:`Parameter` is the quantity to be optimized in all minimization
+problems, replacing the plain floating point number used in the
+optimization routines from :mod:`scipy.optimize`. A :class:`Parameter` has
+a value that can either be varied in the fit or held at a fixed value, and
+can have lower and/or upper bounds placed on the value. It can even have a
+value that is constrained by an algebraic expression of other Parameter
+values. Since :class:`Parameter` objects live outside the core
+optimization routines, they can be used in **all** optimization routines
+from :mod:`scipy.optimize`. By using :class:`Parameter` objects instead of
+plain variables, the objective function does not have to be modified to
+reflect every change of what is varied in the fit, or whether bounds can be
+applied. This simplifies the writing of models, allowing general models
+that describe the phenomenon and giving the user more flexibility in using
+and testing variations of that model.
+
+Whereas a :class:`Parameter` expands on an individual floating point
+variable, the optimization methods actually still need an ordered group of
+floating point variables. In the :mod:`scipy.optimize` routines this is
+required to be a one-dimensional :numpydoc:`ndarray`. In lmfit, this one-dimensional
+array is replaced by a :class:`Parameters` object, which works as an
+ordered dictionary of :class:`Parameter` objects with a few additional
+features and methods. That is, while the concept of a :class:`Parameter`
+is central to lmfit, one normally creates and interacts with a
+:class:`Parameters` instance that contains many :class:`Parameter` objects.
+For example, the objective functions you write for lmfit will take an
+instance of :class:`Parameters` as their first argument. A table of
+parameter values, bounds, and other attributes can be printed using
+:meth:`Parameters.pretty_print`.
+
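+A quick sketch of building a :class:`Parameters` instance and printing such a
+table:
+
+.. jupyter-execute::
+
+    from lmfit import Parameters
+
+    params = Parameters()
+    params.add('amp', value=10, min=0)
+    params.add('decay', value=0.05, max=1.0)
+    params.pretty_print()
+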
+
+The :class:`Parameter` class
+============================
+
+.. autoclass:: Parameter
+
+ See :ref:`bounds_chapter` for details on the math used to implement the
+ bounds with :attr:`min` and :attr:`max`.
+
+ The :attr:`expr` attribute can contain a mathematical expression that will
+ be used to compute the value for the Parameter at each step in the fit.
+ See :ref:`constraints_chapter` for more details and examples of this
+ feature.
+
+ .. index:: Removing a Constraint Expression
+
+ .. automethod:: set
+
+
+The :class:`Parameters` class
+=============================
+
+.. autoclass:: Parameters
+
+ .. automethod:: add
+
+ .. automethod:: add_many
+
+ .. automethod:: pretty_print
+
+ .. automethod:: valuesdict
+
+ .. automethod:: dumps
+
+ .. automethod:: dump
+
+ .. automethod:: eval
+
+ .. automethod:: loads
+
+ .. automethod:: load
+
+
+.. _dumpload_warning:
+
+.. warning::
+
+   Parameters saved along with user-added functions to the ``_asteval``
+   interpreter using :meth:`dump` and :meth:`dumps` may not be easily
+   recovered with the :meth:`load` and :meth:`loads` methods. See
+   :ref:`model_saveload_sec` for further discussion.
+
+
+The :func:`create_params` function
+==================================
+
+The :func:`create_params` function is probably the easiest method for making
+:class:`Parameters` objects, as it allows defining Parameter names by keyword
+with values either being the numerical initial value for the Parameter or being
+a dictionary with keyword/value pairs for ``value`` as well as other Parameter
+attributes such as ``min``, ``max``, ``expr``, and so forth.
+
+.. autofunction:: create_params
+
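+For instance, a short sketch mixing plain initial values with attribute
+dictionaries:
+
+.. jupyter-execute::
+
+    from lmfit import create_params
+
+    pars = create_params(amp=5.0,
+                         decay={'value': 0.02, 'min': 0},
+                         offset={'value': 1.0, 'vary': False})
+    pars.pretty_print()
+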
+
+Simple Example
+==============
+
+A basic example making use of :class:`~lmfit.parameter.Parameters` and the
+:func:`~lmfit.minimizer.minimize` function (discussed in the next chapter)
+might look like this:
+
+.. jupyter-execute:: ../examples/doc_parameters_basic.py
+ :hide-output:
+
+
+Here, the objective function explicitly unpacks each Parameter value. This
+can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
+which would make the objective function ``fcn2min`` above look like:
+
+.. jupyter-execute::
+
+    def fcn2min(params, x, data):
+        """Model a decaying sine wave and subtract data."""
+        v = params.valuesdict()
+
+        model = v['amp'] * np.sin(x*v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+        return model - data
+
+The results are identical, and the difference is a stylistic choice.
diff --git a/doc/sphinx/ext_imgmath.py b/doc/sphinx/ext_imgmath.py
new file mode 100644
index 0000000..64b7aaa
--- /dev/null
+++ b/doc/sphinx/ext_imgmath.py
@@ -0,0 +1,12 @@
+# Sphinx extensions for generating EPUB/PDF output
+
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.extlinks',
+ 'sphinx.ext.imgmath',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.todo',
+ 'IPython.sphinxext.ipython_console_highlighting',
+ 'jupyter_sphinx',
+ 'sphinx_gallery.gen_gallery',
+ 'sphinxcontrib.cairosvgconverter']
diff --git a/doc/sphinx/ext_mathjax.py b/doc/sphinx/ext_mathjax.py
new file mode 100644
index 0000000..bd91142
--- /dev/null
+++ b/doc/sphinx/ext_mathjax.py
@@ -0,0 +1,11 @@
+# Sphinx extensions for generating HTML output
+
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.extlinks',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.todo',
+ 'IPython.sphinxext.ipython_console_highlighting',
+ 'jupyter_sphinx',
+ 'sphinx_gallery.gen_gallery']
diff --git a/doc/sphinx/theme/sphinx13/basic_layout.html b/doc/sphinx/theme/sphinx13/basic_layout.html
new file mode 100644
index 0000000..b2a6453
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/basic_layout.html
@@ -0,0 +1,212 @@
+{#
+ basic/layout.html
+ ~~~~~~~~~~~~~~~~~
+
+ Master layout template for Sphinx themes.
+
+ :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- block doctype -%}{%- if html5_doctype %}
+<!DOCTYPE html>
+{%- else %}
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+{%- endif %}{%- endblock %}
+{%- set reldelim1 = reldelim1 is not defined and ' &#187;' or reldelim1 %}
+{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
+{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
+ (sidebars != []) %}
+{# URL root should never be #, then all links are fragments #}
+{%- if not embedded and docstitle %}
+ {%- set titlesuffix = " &#8212; "|safe + docstitle|e %}
+{%- else %}
+ {%- set titlesuffix = "" %}
+{%- endif %}
+
+{%- macro relbar() %}
+ <div class="related" role="navigation" aria-label="related navigation">
+ <h3>{{ _('Navigation') }}</h3>
+ <ul>
+ {%- for rellink in rellinks %}
+ <li class="right" {% if loop.first %}style="margin-right: 10px"{% endif %}>
+ <a href="{{ pathto(rellink[0])|e }}" title="{{ rellink[1]|striptags|e }}"
+ {{ accesskey(rellink[2]) }}>{{ rellink[3] }}</a>
+ {%- if not loop.first %}{{ reldelim2 }}{% endif %}</li>
+ {%- endfor %}
+ {%- block rootrellink %}
+ <li class="nav-item nav-item-0"><a href="{{ pathto(root_doc)|e }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
+ {%- endblock %}
+ {%- for parent in parents %}
+ <li class="nav-item nav-item-{{ loop.index }}"><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li>
+ {%- endfor %}
+ {%- block relbaritems %} {% endblock %}
+ </ul>
+ </div>
+{%- endmacro %}
+
+{%- macro sidebar() %}
+ {%- if render_sidebar %}
+ <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
+ <div class="sphinxsidebarwrapper">
+ {%- block sidebarlogo %}
+ {%- if logo_url %}
+ <p class="logo"><a href="{{ pathto(root_doc)|e }}">
+ <img class="logo" src="{{ logo_url|e }}" alt="Logo"/>
+ </a></p>
+ {%- endif %}
+ {%- endblock %}
+ {%- if sidebars != None %}
+ {#- new style sidebar: explicitly include/exclude templates #}
+ {%- for sidebartemplate in sidebars %}
+ {%- include sidebartemplate %}
+ {%- endfor %}
+ {%- else %}
+ {#- old style sidebars: using blocks -- should be deprecated #}
+ {%- block sidebartoc %}
+ {%- include "localtoc.html" %}
+ {%- endblock %}
+ {%- block sidebarrel %}
+ {%- include "relations.html" %}
+ {%- endblock %}
+ {%- block sidebarsourcelink %}
+ {%- include "sourcelink.html" %}
+ {%- endblock %}
+ {%- if customsidebar %}
+ {%- include customsidebar %}
+ {%- endif %}
+ {%- block sidebarsearch %}
+ {%- include "searchbox.html" %}
+ {%- endblock %}
+ {%- endif %}
+ </div>
+ </div>
+ {%- endif %}
+{%- endmacro %}
+
+{%- macro script() %}
+ {%- for js in script_files %}
+ {{ js_tag(js) }}
+ {%- endfor %}
+{%- endmacro %}
+
+{%- macro css() %}
+ {%- for css in css_files %}
+ {%- if css|attr("filename") %}
+ {{ css_tag(css) }}
+ {%- else %}
+ <link rel="stylesheet" href="{{ pathto(css, 1)|e }}" type="text/css" />
+ {%- endif %}
+ {%- endfor %}
+{%- endmacro %}
+
+{%- if html_tag %}
+{{ html_tag }}
+{%- else %}
+<html{% if not html5_doctype %} xmlns="http://www.w3.org/1999/xhtml"{% endif %}{% if language is not none %} lang="{{ language }}"{% endif %}>
+{%- endif %}
+ <head>
+ {%- if not html5_doctype and not skip_ua_compatible %}
+ <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+ {%- endif %}
+ {%- if use_meta_charset or html5_doctype %}
+ <meta charset="{{ encoding }}" />
+ {%- else %}
+ <meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" />
+ {%- endif %}
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+ {{- metatags }}
+ {%- block htmltitle %}
+ <title>{{ title|striptags|e }}{{ titlesuffix }}</title>
+ {%- endblock %}
+ {%- block css %}
+ {{- css() }}
+ {%- endblock %}
+ {%- if not embedded %}
+ {%- block scripts %}
+ {{- script() }}
+ {%- endblock %}
+ {%- if pageurl %}
+ <link rel="canonical" href="{{ pageurl|e }}" />
+ {%- endif %}
+ {%- if use_opensearch %}
+ <link rel="search" type="application/opensearchdescription+xml"
+ title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
+ href="{{ pathto('_static/opensearch.xml', 1) }}"/>
+ {%- endif %}
+ {%- if favicon_url %}
+ <link rel="shortcut icon" href="{{ favicon_url|e }}"/>
+ {%- endif %}
+ {%- endif %}
+{%- block linktags %}
+ {%- if hasdoc('about') %}
+ <link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
+ {%- endif %}
+ {%- if hasdoc('genindex') %}
+ <link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
+ {%- endif %}
+ {%- if hasdoc('search') %}
+ <link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
+ {%- endif %}
+ {%- if hasdoc('copyright') %}
+ <link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
+ {%- endif %}
+ {%- if next %}
+ <link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
+ {%- endif %}
+ {%- if prev %}
+ <link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
+ {%- endif %}
+{%- endblock %}
+{%- block extrahead %} {% endblock %}
+ </head>
+ {%- block body_tag %}<body>{% endblock %}
+{%- block header %}{% endblock %}
+
+{%- block relbar1 %}{{ relbar() }}{% endblock %}
+
+{%- block content %}
+ {%- block sidebar1 %} {# possible location for sidebar #} {% endblock %}
+
+ <div class="document">
+ {%- block document %}
+ <div class="documentwrapper">
+ {%- if render_sidebar %}
+ <div class="bodywrapper">
+ {%- endif %}
+ <div class="body" role="main">
+ {% block body %} {% endblock %}
+ <div class="clearer"></div>
+ </div>
+ {%- if render_sidebar %}
+ </div>
+ {%- endif %}
+ </div>
+ {%- endblock %}
+
+ {%- block sidebar2 %}{{ sidebar() }}{% endblock %}
+ <div class="clearer"></div>
+ </div>
+{%- endblock %}
+
+{%- block relbar2 %}{{ relbar() }}{% endblock %}
+
+{%- block footer %}
+ <div class="footer" role="contentinfo">
+ {%- if show_copyright %}
+ {%- if hasdoc('copyright') %}
+ {% trans path=pathto('copyright'), copyright=copyright|e %}&#169; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
+ {%- else %}
+ {% trans copyright=copyright|e %}&#169; Copyright {{ copyright }}.{% endtrans %}
+ {%- endif %}
+ {%- endif %}
+ {%- if last_updated %}
+ {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
+ {%- endif %}
+ {%- if show_sphinx %}
+ {% trans sphinx_version=sphinx_version|e %}Created using <a href="https://www.sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}
+ {%- endif %}
+ </div>
+{%- endblock %}
+ </body>
+</html>
diff --git a/doc/sphinx/theme/sphinx13/layout.html b/doc/sphinx/theme/sphinx13/layout.html
new file mode 100644
index 0000000..5285367
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/layout.html
@@ -0,0 +1,83 @@
+{#
+ sphinxdoc/layout.html
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx layout template for the sphinxdoc theme.
+
+ :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- extends "./basic_layout.html" %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
+{% block extrahead %}
+ <link href='https://fonts.googleapis.com/css?family=Open+Sans:300,400,700'
+ rel='stylesheet' type='text/css' />
+{{ super() }}
+{%- if not embedded %}
+ <style type="text/css">
+ table.right { float: right; margin-left: 20px; }
+ table.right td { border: 1px solid #ccc; }
+ </style>
+ <script>
+ // intelligent scrolling of the sidebar content
+ $(window).scroll(function() {
+ var sb = $('.sphinxsidebarwrapper');
+ var win = $(window);
+ var sbh = sb.height();
+ var offset = $('.sphinxsidebar').position()['top'];
+ var wintop = win.scrollTop();
+ var winbot = wintop + win.innerHeight();
+ var curtop = sb.position()['top'];
+ var curbot = curtop + sbh;
+ // does sidebar fit in window?
+ if (sbh < win.innerHeight()) {
+ // yes: easy case -- always keep at the top
+ sb.css('top', $u.min([$u.max([0, wintop - offset - 10]),
+ $(document).height() - sbh - 200]));
+ } else {
+ // no: only scroll if top/bottom edge of sidebar is at
+ // top/bottom edge of window
+ if (curtop > wintop && curbot > winbot) {
+ sb.css('top', $u.max([wintop - offset - 10, 0]));
+ } else if (curtop < wintop && curbot < winbot) {
+ sb.css('top', $u.min([winbot - sbh - offset - 20,
+ $(document).height() - sbh - 200]));
+ }
+ }
+ });
+ </script>
+{%- endif %}
+{% endblock %}
+
+{% block rootrellink %}
+ <li>[ <a href="{{ pathto('intro') }}">intro</a> |</li>
+ <li><a href="{{ pathto('parameters') }}">parameters</a> |</li>
+ <li><a href="{{ pathto('fitting') }}">minimize</a> |</li>
+ <li><a href="{{ pathto('model') }}">model</a> |</li>
+ <li><a href="{{ pathto('builtin_models') }}">built-in models</a> |</li>
+ <li><a href="{{ pathto('confidence') }}">confidence intervals</a> |</li>
+ <li><a href="{{ pathto('bounds') }}">bounds</a> |</li>
+ <li><a href="{{ pathto('constraints') }}">constraints</a> ]</li>
+{% endblock %}
+
+{% block header %}
+<div class="pageheader">
+ <ul>
+ <li><a href="{{ pathto('contents') }}">Contents</a></li>
+ <li><a href="{{ pathto('examples/index') }}">Examples</a></li>
+ <li><a href="{{ pathto('installation') }}">Installation</a></li>
+ <li><a href="{{ pathto('faq') }}">FAQ</a></li>
+ <li><a href="{{ pathto('support') }}">Support</a></li>
+ <li><a href="https://github.com/lmfit/lmfit-py">Develop</a></li>
+ </ul>
+ <div>
+ <a href="{{ pathto('index') }}">
+ <img src="{{ pathto('_static/lmfitheader.png', 1) }}" alt="LMFIT" />
+ </a>
+ </div>
+</div>
+{% endblock %}
diff --git a/doc/sphinx/theme/sphinx13/static/bodybg.png b/doc/sphinx/theme/sphinx13/static/bodybg.png
new file mode 100644
index 0000000..6f667b9
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/static/bodybg.png
Binary files differ
diff --git a/doc/sphinx/theme/sphinx13/static/footerbg.png b/doc/sphinx/theme/sphinx13/static/footerbg.png
new file mode 100644
index 0000000..d1bcb00
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/static/footerbg.png
Binary files differ
diff --git a/doc/sphinx/theme/sphinx13/static/headerbg.png b/doc/sphinx/theme/sphinx13/static/headerbg.png
new file mode 100644
index 0000000..5225049
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/static/headerbg.png
Binary files differ
diff --git a/doc/sphinx/theme/sphinx13/static/listitem.png b/doc/sphinx/theme/sphinx13/static/listitem.png
new file mode 100644
index 0000000..f7f814d
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/static/listitem.png
Binary files differ
diff --git a/doc/sphinx/theme/sphinx13/static/lmfitheader.png b/doc/sphinx/theme/sphinx13/static/lmfitheader.png
new file mode 100644
index 0000000..23dc998
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/static/lmfitheader.png
Binary files differ
diff --git a/doc/sphinx/theme/sphinx13/static/relbg.png b/doc/sphinx/theme/sphinx13/static/relbg.png
new file mode 100644
index 0000000..68a9b77
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/static/relbg.png
Binary files differ
diff --git a/doc/sphinx/theme/sphinx13/static/sphinx13.css b/doc/sphinx/theme/sphinx13/static/sphinx13.css
new file mode 100644
index 0000000..c8fb2e5
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/static/sphinx13.css
@@ -0,0 +1,443 @@
+/*
+ * sphinx13.css
+ * ~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- sphinx13 theme.
+ *
+ * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ text-align: center;
+ background-image: url(bodybg.png);
+ color: black;
+ padding: 0;
+ border-right: 1px solid #0a507a;
+ border-left: 1px solid #0a507a;
+
+ margin: 0 auto;
+ min-width: 780px;
+ max-width: 1080px;
+}
+
+.pageheader {
+ background-image: url(headerbg.png);
+ text-align: left;
+ padding: 10px 15px;
+}
+
+.pageheader ul {
+ float: right;
+ color: white;
+ list-style-type: none;
+ padding-left: 0;
+ margin-top: 30px;
+ margin-right: 10px;
+}
+
+.pageheader li {
+ float: left;
+ margin: 0 0 0 10px;
+}
+
+.pageheader li a {
+ border-radius: 1px;
+ padding: 8px 12px;
+ color: #f9f9f0;
+ text-shadow: 0 0 5px rgba(0, 0, 0, 0.5);
+}
+
+.pageheader li a:hover {
+ background-color: #f9f9f0;
+ color: #0a507a;
+ text-shadow: none;
+}
+
+div.document {
+ background-color: white;
+ text-align: left;
+}
+
+div.bodywrapper {
+ margin: 0 240px 0 0;
+ border-right: 1px solid #0a507a;
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+ font-size: 1em;
+ color: white;
+}
+
+div.related ul {
+ background-image: url(relbg.png);
+ height: 1.9em;
+ border-top: 1px solid #002e50;
+ border-bottom: 1px solid #002e50;
+}
+
+div.related ul li {
+ margin: 0 5px 0 0;
+ padding: 0;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: #f9f9f0;
+ text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);
+}
+
+div.related ul li a:hover {
+ color: white;
+ /*text-decoration: underline;*/
+ text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5);
+}
+
+div.sphinxsidebarwrapper {
+ position: relative;
+ top: 0px;
+ padding: 0;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0 15px 15px 0;
+ width: 210px;
+ float: right;
+ font-size: 1em;
+ text-align: left;
+ max-height: 0px;
+}
+
+div.sphinxsidebar .logo {
+ font-size: 1.8em;
+ color: #0A507A;
+ font-weight: 300;
+ text-align: center;
+}
+
+div.sphinxsidebar .logo img {
+ vertical-align: middle;
+}
+
+div.sphinxsidebar .download a img {
+ vertical-align: middle;
+}
+
+div.subscribeformwrapper {
+ display: block;
+ overflow: auto;
+ margin-bottom: 1.2em;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #aaa;
+ font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+}
+
+div.sphinxsidebar .subscribeform {
+ margin-top: 0;
+}
+
+div.sphinxsidebar .subscribeform input {
+ border: 1px solid #aaa;
+ font-size: 0.9em;
+ float: left;
+ padding: 0.25em 0.5em;
+ box-sizing: border-box;
+}
+
+div.sphinxsidebar .subscribeform input[type="text"] {
+ width: 60%;
+}
+
+div.sphinxsidebar .subscribeform input[type="submit"] {
+ width: 40%;
+ border-left: none;
+}
+
+div.sphinxsidebar h3 {
+ font-size: 1.5em;
+ border-top: 1px solid #0a507a;
+ margin-top: 1em;
+ margin-bottom: 0.5em;
+ padding-top: 0.5em;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 1.2em;
+ margin-bottom: 0;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin-right: -15px;
+ margin-left: -15px;
+ padding-right: 14px;
+ padding-left: 14px;
+ color: #333;
+ font-weight: 300;
+ /*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+ margin-top: 0.5em;
+ border: none;
+}
+
+div.sphinxsidebar h3 a {
+ color: #333;
+}
+
+div.sphinxsidebar ul {
+ color: #444;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+ list-style-image: url(listitem.png);
+}
+
+div.footer {
+ background-image: url(footerbg.png);
+ color: #ccc;
+ text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8);
+ padding: 3px 8px 3px 0;
+ clear: both;
+ font-size: 0.8em;
+ text-align: right;
+}
+
+/* no need to make a visible link to Sphinx on the Sphinx page */
+div.footer a {
+ color: #ccc;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: #A2881D;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #E1C13F;
+}
+
+div.body a {
+ text-decoration: underline;
+}
+
+h1 {
+ margin: 10px 0 0 0;
+ font-size: 2.4em;
+ color: #0A507A;
+ font-weight: 300;
+}
+
+h2 {
+ margin: 1em 0 0.2em 0;
+ font-size: 1.5em;
+ font-weight: 300;
+ padding: 0;
+ color: #174967;
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.3em;
+ font-weight: 300;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ text-decoration: none;
+}
+
+div.body h1 a tt, div.body h2 a tt, div.body h3 a tt, div.body h4 a tt, div.body h5 a tt, div.body h6 a tt {
+ color: #0A507A !important;
+ font-size: inherit !important;
+}
+
+a.headerlink {
+ color: #0A507A !important;
+ font-size: 12px;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none !important;
+ float: right;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+/* avoid font-size when :mod: role in headings */
+h1 code, h2 code, h3 code, h4 code {
+ font-size: inherit;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 14px;
+ letter-spacing: -0.02em;
+}
+
+table.deprecated code.literal {
+ word-break: break-all;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border: 1px solid #ddd;
+ border-radius: 2px;
+ color: #333;
+ padding: 1px 0.2em;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+a tt {
+ border: 0;
+ color: #a2881d;
+}
+
+a tt:hover {
+ color: #e1c13f;
+}
+
+pre {
+ font-family: 'Consolas', 'Courier New', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 13px;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ border-radius: 2px;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 0px 7px;
+ border: 1px solid #ccc;
+ margin-left: 1em;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ border-radius: 2px;
+ background-color: #f7f7f7;
+ padding: 0;
+}
+
+div.admonition > p, div.warning > p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition > pre, div.warning > pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition > p.admonition-title,
+div.warning > p.admonition-title {
+ margin-top: 0.5em;
+ font-weight: bold;
+}
+
+div.warning {
+ border: 1px solid #940000;
+}
+
+div.admonition > ul,
+div.admonition > ol,
+div.warning > ul,
+div.warning > ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+div.admonition div.highlight {
+ background: none;
+}
+
+.viewcode-back {
+ font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+ background-color: #f4debf;
+ border-top: 1px solid #ac9;
+ border-bottom: 1px solid #ac9;
+}
+
+.contentstable {
+ margin-left: 30px;
+ margin: 0 auto;
+ table-layout: fixed;
+}
diff --git a/doc/sphinx/theme/sphinx13/theme.conf b/doc/sphinx/theme/sphinx13/theme.conf
new file mode 100644
index 0000000..876b198
--- /dev/null
+++ b/doc/sphinx/theme/sphinx13/theme.conf
@@ -0,0 +1,4 @@
+[theme]
+inherit = basic
+stylesheet = sphinx13.css
+pygments_style = trac
diff --git a/doc/support.rst b/doc/support.rst
new file mode 100644
index 0000000..c6b6434
--- /dev/null
+++ b/doc/support.rst
@@ -0,0 +1,30 @@
+.. _support_chapter:
+
+============
+Getting Help
+============
+
+.. _mailing list: https://groups.google.com/group/lmfit-py
+.. _github issues: https://github.com/lmfit/lmfit-py/issues
+
+If you have questions, comments, or suggestions for LMFIT, please use the
+`mailing list`_. This provides an on-line conversation that is both well
+archived and easily searched with standard web searches. If you
+find a bug in the code or documentation, use `GitHub Issues`_ to submit a
+report. If you have an idea for how to solve the problem and are familiar
+with Python and GitHub, submitting a GitHub Pull Request would be greatly
+appreciated.
+
+If you are unsure whether to use the mailing list or the Issue tracker,
+please start a conversation on the `mailing list`_. That is, the problem
+you're having may or may not be due to a bug. If it is due to a bug,
+creating an Issue from the conversation is easy. If it is not a bug, the
+problem will be discussed and then the Issue will be closed. While one
+*can* search through closed Issues on GitHub, they are harder to find
+there, and the conversations are less useful to others later.
+Starting the conversation on the mailing list with "How do I do this?" or
+"Why didn't this work?" instead of "This should work and doesn't" is
+generally preferred, and will better help others with similar questions.
+Of course, there is not always an obvious way to decide if something is a
+Question or an Issue, and we will try our best to engage in all
+discussions.
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
new file mode 100644
index 0000000..0d427cd
--- /dev/null
+++ b/doc/whatsnew.rst
@@ -0,0 +1,558 @@
+.. _whatsnew_chapter:
+
+=============
+Release Notes
+=============
+
+.. _lmfit GitHub repository: https://github.com/lmfit/lmfit-py
+
+This section discusses changes between versions, especially changes
+significant to the use and behavior of the library. This is not meant
+to be a comprehensive list of changes. For such a complete record,
+consult the `lmfit GitHub repository`_.
+
+.. _whatsnew_121_label:
+
+Version 1.2.1 Release Notes (May 02, 2023)
+=================================================
+
+Bug fixes/enhancements:
+
+- fixed bug in ``Model.make_params()`` for initial parameter values that were
+ not recognized as floats such as ``np.Int64``. (Issue #871; PR #872)
+
+- explicitly set ``maxfun`` for ``l-bfgs-b`` method when setting
+ ``maxiter``. (Issue #864; Discussion #865; PR #866)
+
+.. _whatsnew_120_label:
+
+Version 1.2.0 Release Notes (April 05, 2023)
+=================================================
+
+New features:
+
+- add ``create_params`` function (PR #844)
+- add ``chi2_out`` and ``nsigma`` options to ``conf_interval2d()``
+- add ``ModelResult.summary()`` to return many fit statistics and attributes as a JSON-able dict.
+- add ``correl_table()`` function to ``lmfit.printfuncs`` and ``correl_mode`` option to ``fit_report()`` and
+ ``ModelResult.fit_report()`` to optionally display an RST-formatted table of the correlation matrix.
+
+Bug fixes/enhancements:
+
+- fix bug when setting ``param.vary=True`` for a constrained parameter (Issue #859; PR #860)
+- fix bug in reported uncertainties for constrained parameters by better propagating uncertainties (Issue #855; PR #856)
+- Coercing of user input data and independent data for ``Model`` to float64 ndarrays is somewhat less aggressive and
+ will not increase the precision of numpy ndarrays (see :ref:`model_data_coercion_section` for details). The resulting
+ calculation from a model or objective function is more aggressively coerced to float64. (Issue #850; PR #853)
+- the default value of ``epsfcn`` is increased to 1.e-10 to allow for handling of data with precision less than float64
+ (Issue #850; PR #853)
+- fix ``conf_interval2d`` to use "increase chi-square by sigma**2*reduced chi-square" to give the ``sigma``-level
+ probabilities (Issue #848; PR #852)
+- fix reading of older ``ModelResult`` (Issue #845; included in PR #844)
+- fix deepcopy of ``Parameters`` and user data (mguhyo; PR #837)
+- improve ``Model.make_params`` and ``create_params`` to take optional dict of Parameter attributes (PR #844)
+- fix reporting of ``nfev`` from ``least_squares`` to better reflect actual number of function calls (Issue #842; PR #844)
+- fix bug in ``Model.eval`` when mixing parameters and keyword arguments (PR #844, #839)
+- re-adds ``residual`` to saved ``Model`` result (PR #844, #830)
+- ``ConstantModel`` and ``ComplexConstantModel`` will return an ndarray of the same shape as the independent variable
+ ``x`` (JeppeKlitgaard, Issue #840; PR #841)
+- update tests for latest versions of NumPy and SciPy.
+- many fixes of doc typos and updates of dependencies, pre-commit hooks, and CI.
+
+.. _whatsnew_110_label:
+
+Version 1.1.0 Release Notes (November 27, 2022)
+=================================================
+
+New features:
+
+- add ``Pearson4Model`` (@lellid; PR #800)
+- add ``SplineModel`` (PR #804)
+- add R^2 ``rsquared`` statistic to fit outputs and reports for Model fits (Issue #803; PR #810)
+- add calculation of ``dely`` for model components of composite models (Issue #761; PR #826)
+
+Bug fixes/enhancements:
+
+- make sure variable ``spercent`` is always defined in ``params_html_table`` functions (reported by @MySlientWind; Issue #768, PR #770)
+- always initialize the variables ``success`` and ``covar`` in the ``MinimizerResult`` (reported by Marc W. Pound; PR #771)
+- build package following PEP517/PEP518; use ``pyproject.toml`` and ``setup.cfg``; leave ``setup.py`` for now (PR #777)
+- components used to create a ``CompositeModel`` can now have different independent variables (@Julian-Hochhaus; Discussion #787; PR #788)
+- fixed function definition for ``StepModel(form='linear')``, was not consistent with the other ones (@matpompili; PR #794)
+- fixed height factor for ``Gaussian2dModel``, was not correct (@matpompili; PR #795)
+- for covariances with negative diagonal elements, we set the covariance to ``None`` (PR #813)
+- fixed linear mode for ``RectangleModel`` (@arunpersaud; Issue #815; PR #816)
+- report correct initial values for parameters with bounds (Issue #820; PR #821)
+- allow recalculation of confidence intervals (@jagerber48; PR #798)
+- include 'residual' in JSON output of ModelResult.dumps (@mac01021; PR #830)
+- supports and is tested against Python 3.11; updated minimum required version of SciPy, NumPy, and asteval (PR #832)
+
+Deprecations:
+
+- remove support for Python 3.6 which reached EOL on 2021-12-23 (PR #790)
+
+
+.. _whatsnew_103_label:
+
+Version 1.0.3 Release Notes (October 14, 2021)
+==============================================
+
+Potentially breaking change:
+
+- argument ``x`` is now required for the ``guess`` method of Models (Issue #747; PR #748)
+
+To get reasonable estimates for starting values one should always supply both ``x`` and ``y`` values; previously, it would sometimes
+work when providing only ``data`` (i.e., y-values). With the change above, ``x`` is now required in the ``guess`` method call, so scripts
+might need to be updated to supply ``x`` explicitly.
+
+Bug fixes/enhancements:
+
+- do not overwrite user-specified figure titles in Model.plot() functions and allow setting with ``title`` keyword argument (PR #711)
+- preserve Parameters subclass in deepcopy (@jenshnielsen; PR #719)
+- coerce ``data`` and ``independent_vars`` to NumPy array with ``dtype=float64`` or ``dtype=complex128`` where applicable (Issues #723 and #728)
+- fix collision between parameter names in built-in models and user-specified parameters (Issue #710 and PR #732)
+- correct error message in PolynomialModel (@kremeyer; PR #737)
+- improved handling of altered JSON data (Issue #739; PR #740, reported by Matthew Giammar)
+- map ``max_nfev`` to ``maxiter`` when using ``differential_evolution`` (PR #749, reported by Olivier B.)
+- correct use of noise versus experimental uncertainty in the documentation (PR #751, reported by Andrés Zelcer)
+- specify return type of ``eval`` method more precisely and allow for plotting of (Complex)ConstantModel by coercing their
+ ``float``, ``int``, or ``complex`` return value to a ``numpy.ndarray`` (Issue #684 and PR #754)
+- fix ``dho`` (Damped Harmonic Oscillator) lineshape (PR #755; @rayosborn)
+- reset ``Minimizer._abort`` to ``False`` before starting a new fit (Issue #756 and PR #757; @azelcer)
+- fix typo in ``guess_from_peak2d`` (@ivan-usovl; PR #758)
+
+Various:
+
+- update asteval dependency to >= 0.9.22 to avoid DeprecationWarnings from NumPy v1.20.0 (PR #707)
+- remove incorrectly spelled ``DonaichModel`` and ``donaich`` lineshape, deprecated in version 1.0.1 (PR #707)
+- remove occurrences of OrderedDict throughout the code; dict is order-preserving since Python 3.6 (PR #713)
+- update the contributing instructions (PR #718; @martin-majlis)
+- (again) defer import of matplotlib to when it is needed (@zobristnicholas; PR #721)
+- fix description of ``name`` argument in ``Parameters.add`` (@kristianmeyerr; PR #725)
+- update dependencies, make sure a functional development environment is installed on Windows (Issue #712)
+- use ``setuptools_scm`` for version info instead of ``versioneer`` (PR #729)
+- transition to using ``f-strings`` (PR #730)
+- mark ``test_manypeaks_speed.py`` as flaky to avoid intermittent test failures (repeat up to 5 times; PR #745)
+- update scipy dependency to >= 1.4.0 (PR #751)
+- improvement to output of examples in sphinx-gallery and use higher resolution figures (PR #753)
+- remove deprecated functions ``lmfit.printfuncs.report_errors`` and ``asteval`` argument in ``Parameters`` class (PR #759)
+
+
+.. _whatsnew_102_label:
+
+Version 1.0.2 Release Notes (February 7, 2021)
+==============================================
+
+Version 1.0.2 officially supports Python 3.9 and has dropped support for Python 3.5. The minimum version
+of the following dependencies were updated: asteval>=0.9.21, numpy>=1.18, and scipy>=1.3.
+
+New features:
+
+- added two-dimensional Gaussian lineshape and model (PR #642; @mpmdean)
+- all built-in models are now registered in ``lmfit.models.lmfit_models``; new Model class attribute ``valid_forms`` (PR #663; @rayosborn)
+- added a SineModel (PR #676; @lneuhaus)
+- add the ``run_mcmc_kwargs`` argument to ``Minimizer.emcee`` to pass to the ``emcee.EnsembleSampler.run_mcmc`` function (PR #694; @rbnvrw)
+
+Bug fixes:
+
+- ``ModelResult.eval_uncertainty`` should use provided Parameters (PR #646)
+- center in lognormal model can be negative (Issue #644, PR #645; @YoshieraHuang)
+- restore best-fit values after calculation of covariance matrix (Issue #655, PR #657)
+- add helper-function ``not_zero`` to prevent ZeroDivisionError in lineshapes and use in exponential lineshape (Issue #631, PR #664; @s-weigand)
+- save ``last_internal_values`` and use to restore internal values if fit is aborted (PR #667)
+- dumping a fit using the ``lbfgsb`` method now works, convert bytes to string if needed (Issue #677, PR #678; @leonfoks)
+- fix use of callable Jacobian for scalar methods (PR #681; @mstimberg)
+- preserve float/int types when encoding for JSON (PR #696; @jedzill4)
+- better support for saving/loading of ExpressionModels and assure that ``init_params`` and ``init_fit`` are set when loading a ``ModelResult`` (PR #706)
+
+Various:
+
+- update minimum dependencies (PRs #688, #693)
+- improvements in coding style, docstrings, CI, and test coverage (PRs #647, #649, #650, #653, #654; #685, #668, #689)
+- fix typo in Oscillator (PR #658; @flothesof)
+- add example using SymPy (PR #662)
+- allow better custom pool for emcee() (Issue #666, PR #667)
+- update NIST Strd reference functions and tests (PR #670)
+- make building of documentation cross-platform (PR #673; @s-weigand)
+- relax module name check in ``test_check_ast_errors`` for Python 3.9 (Issue #674, PR #675; @mwhudson)
+- fix/update layout of documentation, now uses the sphinx13 theme (PR #687)
+- fixed DeprecationWarnings reported by NumPy v1.20.0 (PR #699)
+- increase value of ``tiny`` and check for it in bounded parameters to avoid "parameter not moving from initial value" (Issue #700, PR #701)
+- add ``max_nfev`` to ``basinhopping`` and ``brute`` (now supported everywhere in lmfit) and set to more uniform default values (PR #701)
+- use Azure Pipelines for CI, drop Travis (PRs #696 and #702)
+
+
+.. _whatsnew_101_label:
+
+Version 1.0.1 Release Notes
+============================
+
+**Version 1.0.1 is the last release that supports Python 3.5**. All newer versions will
+require Python 3.6+ so that we can use f-strings and rely on dictionaries being ordered.
+
+New features:
+
+- added thermal distribution model and lineshape (PR #620; @mpmdean)
+- introduced a new argument ``max_nfev`` to uniformly specify the maximum number of function evaluations (PR #610; see the sketch after this list)
+ **Please note: all other arguments (e.g., ``maxfev``, ``maxiter``, ...) will no longer be passed to the underlying
+ solver. A warning will be emitted stating that one should use ``max_nfev``.**
+- the attribute ``call_kws`` was added to the ``MinimizerResult`` class and contains the keyword arguments that are
+ supplied to the solver in SciPy.
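+
+A sketch of the uniform ``max_nfev`` argument named above (``objfunc`` and
+``params`` are illustrative)::
+
+ from lmfit import minimize
+
+ result = minimize(objfunc, params, method='nelder', max_nfev=2000)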
+
+Bug fixes:
+
+- fixes to the ``load`` and ``__setstate__`` methods of the Parameter class
+- fixed failure of ModelResult.dump() due to missing attributes (Issue #611, PR #623; @mpmdean)
+- ``guess_from_peak`` function now also works correctly with decreasing x-values or when using
+ pandas (PRs #627 and #629; @mpmdean)
+- the ``Parameter.set()`` method now correctly first updates the boundaries and then the value (Issue #636, PR #637; @arunpersaud)
+
+Various:
+
+- fixed typo for the use of expressions in the documentation (Issue #610; @jkrogager)
+- removal of PY2-compatibility and unused code and improved test coverage (PRs #619, #631, and #633)
+- removed deprecated ``isParameter`` function and automatic conversion of an ``uncertainties`` object (PR #626)
+- inaccurate FWHM calculations were removed from built-in models, others labeled as estimates (Issue #616 and PR #630)
+- corrected spelling mistake for the Doniach lineshape and model (Issue #634; @rayosborn)
+- removed unsupported/untested code for IPython notebooks in lmfit/ui/*
+
+
+.. _whatsnew_100_label:
+
+Version 1.0.0 Release Notes
+============================
+
+**Version 1.0.0 supports Python 3.5, 3.6, 3.7, and 3.8**
+
+New features:
+
+- no new features are introduced in 1.0.0.
+
+Improvements:
+
+- support for Python 2 and use of the ``six`` package are removed. (PR #612)
+
+Various:
+
+- documentation updates to clarify the use of ``emcee``. (PR #614)
+
+
+.. _whatsnew_0915_label:
+
+Version 0.9.15 Release Notes
+============================
+
+**Version 0.9.15 is the last release that supports Python 2.7**; it now also fully supports Python 3.8.
+
+New features, improvements, and bug fixes:
+
+- move application of parameter bounds to setter instead of getter (PR #587)
+- add support for non-array Jacobian types in least_squares (Issue #588, @ezwelty in PR #589)
+- add more information (i.e., acor and acceptance_fraction) about emcee fit (@j-zimmermann in PR #593)
+- "name" is now a required positional argument for Parameter class, update the magic methods (PR #595)
+- fix nvars count and bound handling in confidence interval calculations (Issue #597, PR #598)
+- support Python 3.8; requires asteval >= 0.9.16 (PR #599)
+- only support emcee version 3 (i.e., no PTSampler anymore) (PR #600)
+- fix and refactor prob_func in confidence interval calculations (PR #604)
+- fix adding Parameters with custom user-defined symbols (Issue #607, PR #608; thanks to @gbouvignies for the report)
+
+Various:
+
+- bump requirements to LTS version of SciPy/NumPy and code clean-up (PR #591)
+- documentation updates (PR #596, and others)
+- improve test coverage and Travis CI updates (PR #595, and others)
+- update pre-commit hooks and configuration in setup.cfg
+
+To be deprecated:
+
+- function Parameter.isParameter and conversion from uncertainties.core.Variable to value in _getval (PR #595)
+
+.. _whatsnew_0914_label:
+
+Version 0.9.14 Release Notes
+============================
+
+New features:
+
+- the global optimizers ``shgo`` and ``dual_annealing`` (new in SciPy v1.2) are now supported (Issue #527; PRs #545 and #556)
+- ``eval`` method added to the Parameter class (PR #550 by @zobristnicholas)
+- avoid ZeroDivisionError in ``printfuncs.params_html_table`` (PR #552 by @aaristov and PR #559)
+- add parallelization to ``brute`` method (PR #564, requires SciPy v1.3)
+
+Bug fixes:
+
+- consider only varying parameters when reporting potential issues with calculating errorbars (PR #549) and compare
+ ``value`` to both ``min`` and ``max`` (PR #571)
+- guard against division by zero in lineshape functions and ``FWHM`` and ``height`` expression calculations (PR #545)
+- fix issues with restoring a saved Model (Issue #553; PR #554)
+- always set ``result.method`` for ``emcee`` algorithm (PR #558)
+- more careful adding of parameters to handle out-of-order constraint expressions (Issue #560; PR #561)
+- make sure all parameters in Model.guess() use prefixes (PRs #567 and #569)
+- use ``inspect.signature`` for PY3 to support wrapped functions (Issue #570; PR #576)
+- fix ``result.nfev`` for ``brute`` method when using parallelization (Issue #578; PR #579)
+
+Various:
+
+- remove ``missing`` from the Model class (replaced by ``nan_policy``) and the ``drop`` option for ``nan_policy``
+ (replaced by ``omit``); both deprecated since 0.9 (PR #565).
+- deprecate 'report_errors' in printfuncs.py (PR #571)
+- updates to the documentation to use ``jupyter-sphinx`` to include examples/output (PRs #573 and #575)
+- include a Gallery with examples in the documentation using ``sphinx-gallery`` (PR #574 and #583)
+- improve test-coverage (PRs #571, #572 and #585)
+- add/clarify warning messages when NaN values are detected (PR #586)
+- several updates to docstrings (Issue #584; PR #583, and others)
+- update pre-commit hooks and several docstrings
+
+.. _whatsnew_0913_label:
+
+Version 0.9.13 Release Notes
+============================
+
+New features:
+
+- Clearer warning message in fit reports when uncertainties should but cannot be estimated, including guesses of which Parameters to examine (#521, #543)
+- SplitLorentzianModel and split_lorentzian function (#523)
+- HTML representations for Parameter, MinimizerResult, and Model so that they can be printed better with Jupyter (#524, #548)
+- support parallelization for differential evolution (#526)
+
+Bug fixes:
+
+- delay import of matplotlib (and so, the selection of its backend) as late as possible (#528, #529)
+- fix for saving, loading, and reloading ModelResults (#534)
+- fix to leastsq to report the best-fit values, not the values tried last (#535, #536)
+- fix synchronization of all parameter values on Model.guess() (#539, #542)
+- improve deprecation warnings for outdated nan_policy keywords (#540)
+- fix for edge case in gformat() (#547)
+
+Project management:
+
+- using pre-commit framework to improve and enforce coding style (#533)
+- added code coverage report to github main page
+- updated docs, github templates, added several tests.
+- dropped support and testing for Python 3.4.
+
+.. _whatsnew_0912_label:
+
+Version 0.9.12 Release Notes
+============================
+
+The lmfit package is now licensed under BSD-3.
+
+New features:
+
+- SkewedVoigtModel was added as built-in model (Issue #493)
+- Parameter uncertainties and correlations are reported for least_squares
+- Plotting of complex-valued models is now handled in ModelResult class (PR #503)
+- A model's independent variable is allowed to be an object (Issue #492)
+- Added ``usersyms`` to Parameters() initialization to make it easier to add custom functions and symbols (Issue #507; see the sketch after this list)
+- the ``numdifftools`` package can be used to calculate parameter uncertainties and correlations for all solvers that do not natively support this (PR #506)
+- ``emcee`` can now be used as method keyword-argument to Minimizer.minimize and minimize function, which allows for using ``emcee`` in the Model class (PR #512; see ``examples/example_emcee_with_Model.py``)
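+
+A sketch of the ``usersyms`` option mentioned above (the function name
+``myfunc`` is illustrative)::
+
+ import numpy as np
+ from lmfit import Parameters
+
+ pars = Parameters(usersyms={'myfunc': np.sin})
+ pars.add('phase', value=0.5)
+ pars.add('signal', expr='myfunc(phase)')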
+
+(Bug)fixes:
+
+- asteval errors are now flushed after raising (Issue #486)
+- max_time and evaluation time for ExpressionModel increased to 1 hour (Issue #489)
+- loading a saved ModelResult now restores all attributes (Issue #491)
+- development versions of scipy and emcee are now supported (Issue #497 and PR #496)
+- ModelResult.eval() no longer overwrites the userkws dictionary (Issue #499)
+- running the test suite requires ``pytest`` only (Issue #504)
+- improved FWHM calculation for VoigtModel (PR #514)
+
+
+.. _whatsnew_0910_label:
+
+.. _Andrea Gavana: http://infinity77.net/global_optimization/index.html
+.. _AMPGO paper: http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf
+
+Version 0.9.10 Release Notes
+============================
+Two new global algorithms were added: basinhopping and AMPGO.
+Basinhopping wraps the method present in ``scipy``, and more information
+can be found in the documentation (:func:`~lmfit.minimizer.Minimizer.basinhopping`
+and :scipydoc:`optimize.basinhopping`).
+The Adaptive Memory Programming for Global Optimization (AMPGO) algorithm
+was adapted from Python code written by `Andrea Gavana`_. A more detailed
+explanation of the algorithm is available in the `AMPGO paper`_ and specifics
+for lmfit can be found in the :func:`~lmfit.minimizer.Minimizer.ampgo` function.
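+
+A sketch of selecting these solvers by name (``objfunc`` and ``params`` are
+illustrative)::
+
+ from lmfit import minimize
+
+ result = minimize(objfunc, params, method='basinhopping')
+ result = minimize(objfunc, params, method='ampgo')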
+
+Lmfit uses the external uncertainties (https://github.com/lebigot/uncertainties)
+package (available on PyPI), instead of distributing its own fork.
+
+An ``AbortFitException`` is now raised when the fit is aborted by the user (i.e., by
+using ``iter_cb``).
+
+Bugfixes:
+
+- all exceptions are now caught when trying to import matplotlib
+- simplify and fix corner-case errors when testing closeness of large integers
+
+
+.. _whatsnew_099_label:
+
+Version 0.9.9 Release Notes
+===========================
+Lmfit now uses the asteval (https://github.com/newville/asteval) package
+instead of distributing its own copy. The minimum required asteval version
+is 0.9.12, which is available on PyPI. If you see import errors related to
+asteval, please make sure that you actually have the latest version installed.
+
+
+.. _whatsnew_096_label:
+
+Version 0.9.6 Release Notes
+===========================
+
+Support for SciPy 0.14 has been dropped: SciPy 0.15 is now required. This
+is especially important for lmfit maintenance, as it means we can now rely
+on SciPy having code for differential evolution and do not need to keep a
+local copy.
+
+A brute force method was added, which can be used either with
+:meth:`Minimizer.brute` or using the ``method='brute'`` option to
+:meth:`Minimizer.minimize`. This method requires finite bounds on
+all varying parameters, or that parameters have a finite
+``brute_step`` attribute set to specify the step size.
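+
+A sketch of both requirements (the parameter names and ``objfunc`` are
+illustrative)::
+
+ from lmfit import minimize
+
+ params['sigma'].set(min=0.1, max=10.0)  # finite bounds, or
+ params['amp'].set(brute_step=0.5)       # a finite brute_step
+ result = minimize(objfunc, params, method='brute')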
+
+Custom cost functions can now be used for the scalar minimizers using the
+``reduce_fcn`` option.
+
+Many improvements to documentation and docstrings in the code were made.
+As part of that effort, all API documentation in this main Sphinx
+documentation now derives from the docstrings.
+
+Uncertainties in the resulting best-fit for a model can now be calculated
+from the uncertainties in the model parameters.
+
+Parameters have two new attributes: ``brute_step``, to specify the step
+size when using the ``brute`` method, and ``user_data``, which is unused but
+can be used to hold additional information the user may desire. This will
+be preserved on copy and pickling.
+
+Several bug fixes and cleanups.
+
+Versioneer was updated to 0.18.
+
+Tests can now be run either with nose or pytest.
+
+
+.. _whatsnew_095_label:
+
+Version 0.9.5 Release Notes
+===========================
+
+Support for Python 2.6 and SciPy 0.13 has been dropped.
+
+.. _whatsnew_094_label:
+
+Version 0.9.4 Release Notes
+===========================
+
+Some support for the new ``least_squares`` routine from SciPy 0.17 has been
+added.
+
+
+Parameters can now be used directly in floating point or array expressions,
+so that the Parameter value does not need ``sigma = params['sigma'].value``.
+The older, explicit usage still works, but the docs, samples, and tests
+have been updated to use the simpler usage.
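+
+A sketch of the simpler usage (names illustrative)::
+
+ sigma = params['sigma']  # a Parameter object
+ fwhm = 2.3548 * sigma    # used directly in arithmetic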
+
+Support for Python 2.6 and SciPy 0.13 is now explicitly deprecated and will
+be dropped in version 0.9.5.
+
+.. _whatsnew_093_label:
+
+Version 0.9.3 Release Notes
+===========================
+
+Models involving complex numbers have been improved.
+
+The ``emcee`` module can now be used for uncertainty estimation.
+
+Many bug fixes, and an important fix for performance slowdown on getting
+parameter values.
+
+ASV benchmarking code added.
+
+
+.. _whatsnew_090_label:
+
+Version 0.9.0 Release Notes
+===========================
+
+This upgrade makes an important, non-backward-compatible change to the way
+many fitting scripts and programs will work. Scripts that work with
+version 0.8.3 will not work with version 0.9.0 and vice versa. The change
+was not made lightly or without ample discussion, and is really an
+improvement. Modifying scripts that did work with 0.8.3 to work with 0.9.0
+is easy, but needs to be done.
+
+
+
+Summary
+~~~~~~~
+
+The upgrade from 0.8.3 to 0.9.0 introduced the :class:`MinimizerResult`
+class (see :ref:`fit-results-label`) which is now used to hold the return
+value from :func:`minimize` and :meth:`Minimizer.minimize`. This returned
+object contains many goodness of fit statistics, and holds the optimized
+parameters from the fit. Importantly, the parameters passed into
+:func:`minimize` and :meth:`Minimizer.minimize` are no longer modified by
+the fit. Instead, a copy of the passed-in parameters is made, which is
+changed by the fit and returned as the :attr:`params` attribute of the resulting
+:class:`MinimizerResult`.
+
+
+Impact
+~~~~~~
+
+This upgrade means that a script that does::
+
+ my_pars = Parameters()
+ my_pars.add('amp', value=300.0, min=0)
+ my_pars.add('center', value=5.0, min=0, max=10)
+ my_pars.add('decay', value=1.0, vary=False)
+
+ result = minimize(objfunc, my_pars)
+
+will still work, but that ``my_pars`` will **NOT** be changed by the fit.
+Instead, ``my_pars`` is copied to an internal set of parameters that is
+changed in the fit, and this copy is then put in ``result.params``. To
+look at fit results, use ``result.params``, not ``my_pars``.
+
+This has the effect that ``my_pars`` will still hold the starting parameter
+values, while all of the results from the fit are held in the ``result``
+object returned by :func:`minimize`.
+
+If you want to do a quick initial fit and then refine that result with a
+different fitting method, you can do something like::
+
+ result1 = minimize(objfunc, my_pars, method='nelder')
+ result1.params['decay'].vary = True
+ result2 = minimize(objfunc, result1.params, method='leastsq')
+
+and have access to all of the starting parameters ``my_pars``, the result of the
+first fit ``result1``, and the result of the final fit ``result2``.
+
+
+
+Discussion
+~~~~~~~~~~
+
+The main goals for making this change were to:
+
+1. give a better return value to :func:`minimize` and
+ :meth:`Minimizer.minimize` that can hold all of the information
+ about a fit. By having the return value be an instance of the
+ :class:`MinimizerResult` class, it can hold an arbitrary amount of
+ information that is easily accessed by attribute name, and even
+ be given methods. Using objects is good!
+
+2. limit or even eliminate the amount of "state information" a
+ :class:`Minimizer` holds. By state information, we mean how much of
+ the previous fit is remembered after a fit is done. Keeping (and
+ especially using) such information about a previous fit means that
+ a :class:`Minimizer` might give different results even for the same
+ problem if run a second time. While it's desirable to be able to
+ adjust a set of :class:`Parameters` and re-run a fit to get an improved
+ result, doing this by changing an internal attribute
+ (:attr:`Minimizer.params`) has the undesirable side-effect of not
+ being able to "go back", and makes it somewhat cumbersome to keep
+ track of changes made while adjusting parameters and re-running fits.
diff --git a/examples/NIST_Gauss2.dat b/examples/NIST_Gauss2.dat
new file mode 100644
index 0000000..cd177bb
--- /dev/null
+++ b/examples/NIST_Gauss2.dat
@@ -0,0 +1,310 @@
+# NIST/ITL StRD
+# Dataset Name: Gauss2 (Gauss2.dat)
+#
+# File Format: ASCII
+# Starting Values (lines 41 to 48)
+# Certified Values (lines 41 to 53)
+# Data (lines 61 to 310)
+#
+# Procedure: Nonlinear Least Squares Regression
+#
+# Description: The data are two slightly-blended Gaussians on a
+# decaying exponential baseline plus normally
+# distributed zero-mean noise with variance = 6.25.
+#
+# Reference: Rust, B., NIST (1996).
+#
+#
+#
+#
+#
+#
+#
+#
+#
+# Data: 1 Response (y)
+# 1 Predictor (x)
+# 250 Observations
+# Lower Level of Difficulty
+# Generated Data
+#
+# Model: Exponential Class
+# 8 Parameters (b1 to b8)
+#
+# y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+# + b6*exp( -(x-b7)**2 / b8**2 ) + e
+#
+#
+# Starting values Certified Values
+#
+# Start 1 Start 2 Parameter Standard Deviation
+# b1 = 96.0 98.0 9.9018328406E+01 5.3748766879E-01
+# b2 = 0.009 0.0105 1.0994945399E-02 1.3335306766E-04
+# b3 = 103.0 103.0 1.0188022528E+02 5.9217315772E-01
+# b4 = 106.0 105.0 1.0703095519E+02 1.5006798316E-01
+# b5 = 18.0 20.0 2.3578584029E+01 2.2695595067E-01
+# b6 = 72.0 73.0 7.2045589471E+01 6.1721965884E-01
+# b7 = 151.0 150.0 1.5327010194E+02 1.9466674341E-01
+# b8 = 18.0 20.0 1.9525972636E+01 2.6416549393E-01
+#
+# Residual Sum of Squares: 1.2475282092E+03
+# Residual Standard Deviation: 2.2704790782E+00
+# Degrees of Freedom: 242
+# Number of Observations: 250
+#
+#
+#
+#
+#
+#
+# Data: y x
+ 97.58776 1.000000
+ 97.76344 2.000000
+ 96.56705 3.000000
+ 92.52037 4.000000
+ 91.15097 5.000000
+ 95.21728 6.000000
+ 90.21355 7.000000
+ 89.29235 8.000000
+ 91.51479 9.000000
+ 89.60966 10.000000
+ 86.56187 11.00000
+ 85.55316 12.00000
+ 87.13054 13.00000
+ 85.67940 14.00000
+ 80.04851 15.00000
+ 82.18925 16.00000
+ 87.24081 17.00000
+ 80.79407 18.00000
+ 81.28570 19.00000
+ 81.56940 20.00000
+ 79.22715 21.00000
+ 79.43275 22.00000
+ 77.90195 23.00000
+ 76.75468 24.00000
+ 77.17377 25.00000
+ 74.27348 26.00000
+ 73.11900 27.00000
+ 73.84826 28.00000
+ 72.47870 29.00000
+ 71.92292 30.00000
+ 66.92176 31.00000
+ 67.93835 32.00000
+ 69.56207 33.00000
+ 69.07066 34.00000
+ 66.53983 35.00000
+ 63.87883 36.00000
+ 69.71537 37.00000
+ 63.60588 38.00000
+ 63.37154 39.00000
+ 60.01835 40.00000
+ 62.67481 41.00000
+ 65.80666 42.00000
+ 59.14304 43.00000
+ 56.62951 44.00000
+ 61.21785 45.00000
+ 54.38790 46.00000
+ 62.93443 47.00000
+ 56.65144 48.00000
+ 57.13362 49.00000
+ 58.29689 50.00000
+ 58.91744 51.00000
+ 58.50172 52.00000
+ 55.22885 53.00000
+ 58.30375 54.00000
+ 57.43237 55.00000
+ 51.69407 56.00000
+ 49.93132 57.00000
+ 53.70760 58.00000
+ 55.39712 59.00000
+ 52.89709 60.00000
+ 52.31649 61.00000
+ 53.98720 62.00000
+ 53.54158 63.00000
+ 56.45046 64.00000
+ 51.32276 65.00000
+ 53.11676 66.00000
+ 53.28631 67.00000
+ 49.80555 68.00000
+ 54.69564 69.00000
+ 56.41627 70.00000
+ 54.59362 71.00000
+ 54.38520 72.00000
+ 60.15354 73.00000
+ 59.78773 74.00000
+ 60.49995 75.00000
+ 65.43885 76.00000
+ 60.70001 77.00000
+ 63.71865 78.00000
+ 67.77139 79.00000
+ 64.70934 80.00000
+ 70.78193 81.00000
+ 70.38651 82.00000
+ 77.22359 83.00000
+ 79.52665 84.00000
+ 80.13077 85.00000
+ 85.67823 86.00000
+ 85.20647 87.00000
+ 90.24548 88.00000
+ 93.61953 89.00000
+ 95.86509 90.00000
+ 93.46992 91.00000
+ 105.8137 92.00000
+ 107.8269 93.00000
+ 114.0607 94.00000
+ 115.5019 95.00000
+ 118.5110 96.00000
+ 119.6177 97.00000
+ 122.1940 98.00000
+ 126.9903 99.00000
+ 125.7005 100.00000
+ 123.7447 101.00000
+ 130.6543 102.00000
+ 129.7168 103.00000
+ 131.8240 104.00000
+ 131.8759 105.00000
+ 131.9994 106.0000
+ 132.1221 107.0000
+ 133.4414 108.0000
+ 133.8252 109.0000
+ 133.6695 110.0000
+ 128.2851 111.0000
+ 126.5182 112.0000
+ 124.7550 113.0000
+ 118.4016 114.0000
+ 122.0334 115.0000
+ 115.2059 116.0000
+ 118.7856 117.0000
+ 110.7387 118.0000
+ 110.2003 119.0000
+ 105.17290 120.0000
+ 103.44720 121.0000
+ 94.54280 122.0000
+ 94.40526 123.0000
+ 94.57964 124.0000
+ 88.76605 125.0000
+ 87.28747 126.0000
+ 92.50443 127.0000
+ 86.27997 128.0000
+ 82.44307 129.0000
+ 80.47367 130.0000
+ 78.36608 131.0000
+ 78.74307 132.0000
+ 76.12786 133.0000
+ 79.13108 134.0000
+ 76.76062 135.0000
+ 77.60769 136.0000
+ 77.76633 137.0000
+ 81.28220 138.0000
+ 79.74307 139.0000
+ 81.97964 140.0000
+ 80.02952 141.0000
+ 85.95232 142.0000
+ 85.96838 143.0000
+ 79.94789 144.0000
+ 87.17023 145.0000
+ 90.50992 146.0000
+ 93.23373 147.0000
+ 89.14803 148.0000
+ 93.11492 149.0000
+ 90.34337 150.0000
+ 93.69421 151.0000
+ 95.74256 152.0000
+ 91.85105 153.0000
+ 96.74503 154.0000
+ 87.60996 155.0000
+ 90.47012 156.0000
+ 88.11690 157.0000
+ 85.70673 158.0000
+ 85.01361 159.0000
+ 78.53040 160.0000
+ 81.34148 161.0000
+ 75.19295 162.0000
+ 72.66115 163.0000
+ 69.85504 164.0000
+ 66.29476 165.0000
+ 63.58502 166.0000
+ 58.33847 167.0000
+ 57.50766 168.0000
+ 52.80498 169.0000
+ 50.79319 170.0000
+ 47.03490 171.0000
+ 46.47090 172.0000
+ 43.09016 173.0000
+ 34.11531 174.0000
+ 39.28235 175.0000
+ 32.68386 176.0000
+ 30.44056 177.0000
+ 31.98932 178.0000
+ 23.63330 179.0000
+ 23.69643 180.0000
+ 20.26812 181.0000
+ 19.07074 182.0000
+ 17.59544 183.0000
+ 16.08785 184.0000
+ 18.94267 185.0000
+ 18.61354 186.0000
+ 17.25800 187.0000
+ 16.62285 188.0000
+ 13.48367 189.0000
+ 15.37647 190.0000
+ 13.47208 191.0000
+ 15.96188 192.0000
+ 12.32547 193.0000
+ 16.33880 194.0000
+ 10.438330 195.0000
+ 9.628715 196.0000
+ 13.12268 197.0000
+ 8.772417 198.0000
+ 11.76143 199.0000
+ 12.55020 200.0000
+ 11.33108 201.0000
+ 11.20493 202.0000
+ 7.816916 203.0000
+ 6.800675 204.0000
+ 14.26581 205.0000
+ 10.66285 206.0000
+ 8.911574 207.0000
+ 11.56733 208.0000
+ 11.58207 209.0000
+ 11.59071 210.0000
+ 9.730134 211.0000
+ 11.44237 212.0000
+ 11.22912 213.0000
+ 10.172130 214.0000
+ 12.50905 215.0000
+ 6.201493 216.0000
+ 9.019605 217.0000
+ 10.80607 218.0000
+ 13.09625 219.0000
+ 3.914271 220.0000
+ 9.567886 221.0000
+ 8.038448 222.0000
+ 10.231040 223.0000
+ 9.367410 224.0000
+ 7.695971 225.0000
+ 6.118575 226.0000
+ 8.793207 227.0000
+ 7.796692 228.0000
+ 12.45065 229.0000
+ 10.61601 230.0000
+ 6.001003 231.0000
+ 6.765098 232.0000
+ 8.764653 233.0000
+ 4.586418 234.0000
+ 8.390783 235.0000
+ 7.209202 236.0000
+ 10.012090 237.0000
+ 7.327461 238.0000
+ 6.525136 239.0000
+ 2.840065 240.0000
+ 10.323710 241.0000
+ 4.790035 242.0000
+ 8.376431 243.0000
+ 6.263980 244.0000
+ 2.705892 245.0000
+ 8.362109 246.0000
+ 8.983507 247.0000
+ 3.362469 248.0000
+ 1.182678 249.0000
+ 4.875312 250.0000
diff --git a/examples/README.txt b/examples/README.txt
new file mode 100644
index 0000000..6c18f30
--- /dev/null
+++ b/examples/README.txt
@@ -0,0 +1,10 @@
+Examples gallery
+================
+
+Below are examples of the different things you can do with lmfit.
+Click on any image to see the complete source code and output.
+
+We encourage users (i.e., YOU) to submit user-guide-style, documented,
+and preferably self-contained examples of how you use lmfit for
+inclusion in this gallery! Please note that many of the examples
+below do *not* yet follow these guidelines.
diff --git a/examples/doc_builtinmodels_nistgauss.py b/examples/doc_builtinmodels_nistgauss.py
new file mode 100644
index 0000000..c206c75
--- /dev/null
+++ b/examples/doc_builtinmodels_nistgauss.py
@@ -0,0 +1,45 @@
+# <examples/doc_builtinmodels_nistgauss.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import ExponentialModel, GaussianModel
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+# decaying exponential background, with starting values guessed from the data
+exp_mod = ExponentialModel(prefix='exp_')
+pars = exp_mod.guess(y, x=x)
+
+gauss1 = GaussianModel(prefix='g1_')
+pars.update(gauss1.make_params(center=dict(value=105, min=75, max=125),
+ sigma=dict(value=15, min=0),
+ amplitude=dict(value=2000, min=0)))
+
+gauss2 = GaussianModel(prefix='g2_')
+pars.update(gauss2.make_params(center=dict(value=155, min=125, max=175),
+ sigma=dict(value=15, min=0),
+ amplitude=dict(value=2000, min=0)))
+
+# composite model: two Gaussians on a decaying exponential background
+mod = gauss1 + gauss2 + exp_mod
+
+init = mod.eval(pars, x=x)  # model evaluated with the initial parameter values
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report(correl_mode='table'))
+
+fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+axes[0].plot(x, y)
+axes[0].plot(x, init, '--', label='initial fit')
+axes[0].plot(x, out.best_fit, '-', label='best fit')
+axes[0].legend()
+
+comps = out.eval_components(x=x)
+axes[1].plot(x, y)
+axes[1].plot(x, comps['g1_'], '--', label='Gaussian component 1')
+axes[1].plot(x, comps['g2_'], '--', label='Gaussian component 2')
+axes[1].plot(x, comps['exp_'], '--', label='Exponential component')
+axes[1].legend()
+
+plt.show()
+# <end examples/doc_builtinmodels_nistgauss.py>
diff --git a/examples/doc_builtinmodels_nistgauss2.py b/examples/doc_builtinmodels_nistgauss2.py
new file mode 100644
index 0000000..80ce2f5
--- /dev/null
+++ b/examples/doc_builtinmodels_nistgauss2.py
@@ -0,0 +1,43 @@
+# <examples/doc_builtinmodels_nistgauss2.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import ExponentialModel, GaussianModel
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+exp_mod = ExponentialModel(prefix='exp_')
+gauss1 = GaussianModel(prefix='g1_')
+gauss2 = GaussianModel(prefix='g2_')
+
+
+def index_of(arrval, value):
+ """Return index of array *at or below* value."""
+ if value < min(arrval):
+ return 0
+ return max(np.where(arrval <= value)[0])
+
+
+ix1 = index_of(x, 75)
+ix2 = index_of(x, 135)
+ix3 = index_of(x, 175)
+
+pars1 = exp_mod.guess(y[:ix1], x=x[:ix1])
+pars2 = gauss1.guess(y[ix1:ix2], x=x[ix1:ix2])
+pars3 = gauss2.guess(y[ix2:ix3], x=x[ix2:ix3])
+
+pars = pars1 + pars2 + pars3
+mod = gauss1 + gauss2 + exp_mod
+
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report(min_correl=0.5))
+
+plt.plot(x, y)
+plt.plot(x, out.init_fit, '--', label='initial fit')
+plt.plot(x, out.best_fit, '-', label='best fit')
+plt.legend()
+plt.show()
+# <end examples/doc_builtinmodels_nistgauss2.py>
diff --git a/examples/doc_builtinmodels_peakmodels.py b/examples/doc_builtinmodels_peakmodels.py
new file mode 100644
index 0000000..8aa7daa
--- /dev/null
+++ b/examples/doc_builtinmodels_peakmodels.py
@@ -0,0 +1,62 @@
+# <examples/doc_builtinmodels_peakmodels.py>
+import matplotlib.pyplot as plt
+from numpy import loadtxt
+
+from lmfit.models import GaussianModel, LorentzianModel, VoigtModel
+
+data = loadtxt('test_peak.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+
+# Gaussian model
+mod = GaussianModel()
+pars = mod.guess(y, x=x)
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report(correl_mode='table'))
+
+plt.plot(x, y)
+plt.plot(x, out.best_fit, '-', label='Gaussian Model')
+plt.legend()
+plt.show()
+
+
+# Lorentzian model
+mod = LorentzianModel()
+pars = mod.guess(y, x=x)
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report(correl_mode='table'))
+
+plt.figure()
+plt.plot(x, y, '-')
+plt.plot(x, out.best_fit, '-', label='Lorentzian Model')
+plt.legend()
+plt.show()
+
+
+# Voigt model
+mod = VoigtModel()
+pars = mod.guess(y, x=x)
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report(correl_mode='table'))
+
+fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+
+axes[0].plot(x, y, '-')
+axes[0].plot(x, out.best_fit, '-', label='Voigt Model\ngamma constrained')
+axes[0].legend()
+
+# allow the gamma parameter to vary in the fit; by default it is
+# constrained to sigma, so that constraint must be cleared as well
+pars['gamma'].set(vary=True, expr='')
+out_gamma = mod.fit(y, pars, x=x)
+print(out_gamma.fit_report(correl_mode='table'))
+
+axes[1].plot(x, y, '-')
+axes[1].plot(x, out_gamma.best_fit, '-', label='Voigt Model\ngamma unconstrained')
+axes[1].legend()
+
+plt.show()
+# <end examples/doc_builtinmodels_peakmodels.py>
diff --git a/examples/doc_builtinmodels_splinemodel.py b/examples/doc_builtinmodels_splinemodel.py
new file mode 100644
index 0000000..fdb80b7
--- /dev/null
+++ b/examples/doc_builtinmodels_splinemodel.py
@@ -0,0 +1,61 @@
+# <examples/doc_builtinmodels_splinemodel.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import GaussianModel, SplineModel
+
+data = np.loadtxt('test_splinepeak.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+plt.plot(x, y, label='data')
+
+model = GaussianModel(prefix='peak_')
+params = model.make_params(amplitude=dict(value=8, min=0),
+ center=dict(value=16, min=5, max=25),
+ sigma=dict(value=1, min=0))
+
+# make a background spline with knots evenly spaced over the background,
+# but sort of skipping over where the peak is
+knot_xvals3 = np.array([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25])
+knot_xvals2 = np.array([1, 3, 5, 7, 9, 11, 13, 16, 19, 21, 23, 25]) # noqa: E241
+knot_xvals1 = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25]) # noqa: E241
+
+bkg = SplineModel(prefix='bkg_', xknots=knot_xvals1)
+params.update(bkg.guess(y, x))
+
+model = model + bkg
+
+plt.plot(x, model.eval(params, x=x), label='initial')
+
+out = model.fit(y, params, x=x)
+print(out.fit_report(min_correl=0.3))
+comps = out.eval_components()
+
+plt.plot(x, out.best_fit, label='best fit')
+plt.plot(x, comps['bkg_'], label='background')
+plt.plot(x, comps['peak_'], label='peak')
+
+knot_yvals = np.array([o.value for o in out.params.values() if o.name.startswith('bkg')])
+plt.plot(knot_xvals1, knot_yvals, 'o', color='black', label='spline knots values')
+plt.legend()
+plt.show()
+
+
+# knot positions | peak amplitude (value, stderr)
+# 11, 13, 19, 21 | 12.223 0.295
+# 11, 13, 16, 19, 21 | 11.746 0.594
+# 11, 13, 15, 17, 19, 21 | 12.052 0.872
+
+
+plt.plot(x, y, 'o', label='data')
+
+for nknots in (10, 15, 20, 25, 30):
+ model = SplineModel(prefix='bkg_', xknots=np.linspace(0, 25, nknots))
+ params = model.guess(y, x)
+ out = model.fit(y, params, x=x)
+ plt.plot(x, out.best_fit, label=f'best-fit ({nknots} knots)')
+plt.legend()
+plt.show()
+
+# <end examples/doc_builtinmodels_splinemodel.py>
diff --git a/examples/doc_builtinmodels_stepmodel.py b/examples/doc_builtinmodels_stepmodel.py
new file mode 100644
index 0000000..8f4f716
--- /dev/null
+++ b/examples/doc_builtinmodels_stepmodel.py
@@ -0,0 +1,30 @@
+# <examples/doc_builtinmodels_stepmodel.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import LinearModel, StepModel
+
+x = np.linspace(0, 10, 201)
+y = np.ones_like(x)
+y[:48] = 0.0
+y[48:77] = np.arange(77-48)/(77.0-48)
+np.random.seed(0)
+y = 110.2 * (y + 9e-3*np.random.randn(x.size)) + 12.0 + 2.22*x
+
+step_mod = StepModel(form='erf', prefix='step_')
+line_mod = LinearModel(prefix='line_')
+
+pars = line_mod.make_params(intercept=y.min(), slope=0)
+pars += step_mod.guess(y, x=x, center=2.5)
+
+mod = step_mod + line_mod
+out = mod.fit(y, pars, x=x)
+
+print(out.fit_report())
+
+plt.plot(x, y)
+plt.plot(x, out.init_fit, '--', label='initial fit')
+plt.plot(x, out.best_fit, '-', label='best fit')
+plt.legend()
+plt.show()
+# <end examples/doc_builtinmodels_stepmodel.py>
diff --git a/examples/doc_confidence_advanced.py b/examples/doc_confidence_advanced.py
new file mode 100644
index 0000000..8b9c263
--- /dev/null
+++ b/examples/doc_confidence_advanced.py
@@ -0,0 +1,67 @@
+# <examples/doc_confidence_advanced.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+x = np.linspace(1, 10, 250)
+np.random.seed(0)
+y = 3.0*np.exp(-x/2) - 5.0*np.exp(-(x-0.1)/10.) + 0.1*np.random.randn(x.size)
+
+p = lmfit.create_params(a1=4, a2=4, t1=3, t2=3)
+
+
+def residual(p):
+ return p['a1']*np.exp(-x/p['t1']) + p['a2']*np.exp(-(x-0.1)/p['t2']) - y
+
+
+# create Minimizer
+mini = lmfit.Minimizer(residual, p, nan_policy='propagate')
+
+# first solve with Nelder-Mead algorithm
+out1 = mini.minimize(method='Nelder')
+
+# then solve with Levenberg-Marquardt using the
+# Nelder-Mead solution as a starting point
+out2 = mini.minimize(method='leastsq', params=out1.params)
+
+lmfit.report_fit(out2.params, min_correl=0.5)
+
+ci, trace = lmfit.conf_interval(mini, out2, sigmas=[1, 2], trace=True)
+lmfit.printfuncs.report_ci(ci)
+
+# plot data and best fit
+plt.figure()
+plt.plot(x, y)
+plt.plot(x, residual(out2.params) + y, '-')
+plt.show()
+
+# plot confidence intervals (a1 vs t2 and a2 vs t2)
+fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a1', 't2', 30, 30)
+ctp = axes[0].contourf(cx, cy, grid, np.linspace(0, 1, 11))
+fig.colorbar(ctp, ax=axes[0])
+axes[0].set_xlabel('a1')
+axes[0].set_ylabel('t2')
+
+cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a2', 't2', 30, 30)
+ctp = axes[1].contourf(cx, cy, grid, np.linspace(0, 1, 11))
+fig.colorbar(ctp, ax=axes[1])
+axes[1].set_xlabel('a2')
+axes[1].set_ylabel('t2')
+plt.show()
+
+# plot dependence between two parameters
+fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'], trace['a1']['prob']
+cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'], trace['t2']['prob']
+
+axes[0].scatter(cx1, cy1, c=prob, s=30)
+axes[0].set_xlabel('a1')
+axes[0].set_ylabel('t2')
+
+axes[1].scatter(cx2, cy2, c=prob2, s=30)
+axes[1].set_xlabel('t2')
+axes[1].set_ylabel('a1')
+plt.show()
+# <end examples/doc_confidence_advanced.py>
diff --git a/examples/doc_confidence_basic.py b/examples/doc_confidence_basic.py
new file mode 100644
index 0000000..b002fce
--- /dev/null
+++ b/examples/doc_confidence_basic.py
@@ -0,0 +1,24 @@
+# <examples/doc_confidence_basic.py>
+import numpy as np
+
+import lmfit
+
+x = np.linspace(0.3, 10, 100)
+np.random.seed(0)
+y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)
+
+pars = lmfit.create_params(a=0.1, b=1)
+
+
+def residual(p):
+ return 1/(p['a']*x) + p['b'] - y
+
+
+mini = lmfit.Minimizer(residual, pars)
+result = mini.minimize()
+
+print(lmfit.fit_report(result.params))
+
+ci = lmfit.conf_interval(mini, result)
+lmfit.printfuncs.report_ci(ci)
+# <end examples/doc_confidence_basic.py>
diff --git a/examples/doc_confidence_chi2_maps.py b/examples/doc_confidence_chi2_maps.py
new file mode 100644
index 0000000..b537fda
--- /dev/null
+++ b/examples/doc_confidence_chi2_maps.py
@@ -0,0 +1,117 @@
+# <examples/doc_confidence_chi2_maps.py>
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit import conf_interval, conf_interval2d, report_ci
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel, LinearModel
+
+sigma_levels = [1, 2, 3]
+
+rng = np.random.default_rng(seed=102)
+
+#########################
+# set up data -- deliberately adding imperfections and
+# a small amount of non-Gaussian noise
+npts = 501
+x = np.linspace(1, 100, num=npts)
+
+noise = rng.normal(scale=0.3, size=npts) + 0.2*rng.f(3, 9, size=npts)
+
+y = (gaussian(x, amplitude=83, center=47., sigma=5.)
+ + 0.02*x + 4 + 0.25*np.cos((x-20)/8.0) + noise)
+
+mod = GaussianModel() + LinearModel()
+params = mod.make_params(amplitude=100, center=50, sigma=5,
+                         slope=0, intercept=2)
+
+out = mod.fit(y, params, x=x)
+print(out.fit_report(correl_mode='table'))
+
+#########################
+# run conf_interval, print report
+ci = conf_interval(out, out, sigmas=sigma_levels)
+
+print("## Confidence Report:")
+report_ci(ci)
+
+#########################
+# plot initial fit
+colors = ('#2030b0', '#b02030', '#207070')
+fig, axes = plt.subplots(2, 3, figsize=(15, 9.5))
+
+
+axes[0, 0].plot(x, y, 'o', markersize=3, label='data', color=colors[0])
+axes[0, 0].plot(x, out.best_fit, label='fit', color=colors[1])
+axes[0, 0].set_xlabel('x')
+axes[0, 0].set_ylabel('y')
+axes[0, 0].legend()
+
+
+aix, aiy = 0, 0
+nsamples = 50
+explicitly_calculate_sigma = True
+
+for pairs in (('sigma', 'amplitude'), ('intercept', 'amplitude'),
+ ('slope', 'intercept'), ('slope', 'center'), ('sigma', 'center')):
+
+ xpar, ypar = pairs
+ if explicitly_calculate_sigma:
+ print("Generating chi-square map for ", pairs)
+ c_x, c_y, chi2_mat = conf_interval2d(out, out, xpar, ypar,
+ nsamples, nsamples, nsigma=3.5,
+ chi2_out=True)
+ # explicitly calculate sigma matrix: sigma increases chi_square
+ # from chi_square_best
+ # to chi_square + sigma**2 * reduced_chi_square
+ # so: sigma = sqrt((chi2-chi2_best)/ reduced_chi_square)
+ chi2_min = chi2_mat.min()
+ sigma_mat = np.sqrt((chi2_mat-chi2_min)/out.redchi)
+    else:
+        # or, skip chi2_out and let conf_interval2d return the map directly:
+        print("Generating sigma map for ", pairs)
+        c_x, c_y, sigma_mat = conf_interval2d(out, out, xpar, ypar,
+                                              nsamples, nsamples, nsigma=3.5)
+
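+    # advance to the next free panel, filling each column top to bottom
+    # and skipping axes[0, 0], which already shows the data and fit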
+ aix += 1
+ if aix == 2:
+ aix = 0
+ aiy += 1
+ ax = axes[aix, aiy]
+
+ cnt = ax.contour(c_x, c_y, sigma_mat, levels=sigma_levels, colors=colors,
+ linestyles='-')
+ ax.clabel(cnt, inline=True, fmt=r"$\sigma=%.0f$", fontsize=13)
+
+    # draw boxes for estimated uncertainties:
+ # dotted : scaled stderr from initial fit
+ # dashed : values found from conf_interval()
+ xv = out.params[xpar].value
+ xs = out.params[xpar].stderr
+ yv = out.params[ypar].value
+ ys = out.params[ypar].stderr
+
+ cix = ci[xpar]
+ ciy = ci[ypar]
+
+ nc = len(sigma_levels)
+ for i in sigma_levels:
+ # dotted line: scaled stderr
+ ax.plot((xv-i*xs, xv+i*xs, xv+i*xs, xv-i*xs, xv-i*xs),
+ (yv-i*ys, yv-i*ys, yv+i*ys, yv+i*ys, yv-i*ys),
+ linestyle='dotted', color=colors[i-1])
+
+ # dashed line: refined uncertainties from conf_interval
+ xsp, xsm = cix[nc+i][1], cix[nc-i][1]
+ ysp, ysm = ciy[nc+i][1], ciy[nc-i][1]
+ ax.plot((xsm, xsp, xsp, xsm, xsm), (ysm, ysm, ysp, ysp, ysm),
+ linestyle='dashed', color=colors[i-1])
+
+ ax.set_xlabel(xpar)
+ ax.set_ylabel(ypar)
+ ax.grid(True, color='#d0d0d0')
+
+plt.show()
+# <end examples/doc_confidence_chi2_maps.py>
diff --git a/examples/doc_fitting_emcee.py b/examples/doc_fitting_emcee.py
new file mode 100644
index 0000000..94b6761
--- /dev/null
+++ b/examples/doc_fitting_emcee.py
@@ -0,0 +1,107 @@
+# <examples/doc_fitting_emcee.py>
+import numpy as np
+
+import lmfit
+
+try:
+ import matplotlib.pyplot as plt
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+try:
+ import corner
+ HASCORNER = True
+except ImportError:
+ HASCORNER = False
+
+x = np.linspace(1, 10, 250)
+np.random.seed(0)
+y = (3.0*np.exp(-x/2) - 5.0*np.exp(-(x-0.1) / 10.) +
+ 0.1*np.random.randn(x.size))
+
+p = lmfit.Parameters()
+p.add_many(('a1', 4), ('a2', 4), ('t1', 3), ('t2', 3., True))
+
+
+def residual(p):
+ v = p.valuesdict()
+ return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1) / v['t2']) - y
+
+
+mi = lmfit.minimize(residual, p, method='nelder', nan_policy='omit')
+lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
+if HASPYLAB:
+ plt.figure()
+ plt.plot(x, y, 'o')
+ plt.plot(x, residual(mi.params) + y, label='best fit')
+ plt.legend()
+ plt.show()
+
+# Place bounds on the ln(sigma) parameter that emcee will automatically add
+# to estimate the true uncertainty in the data since is_weighted=False
+mi.params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2))
+
+res = lmfit.minimize(residual, method='emcee', nan_policy='omit', burn=300,
+ steps=1000, thin=20, params=mi.params, is_weighted=False,
+ progress=False)
+
+if HASPYLAB and HASCORNER:
+ emcee_corner = corner.corner(res.flatchain, labels=res.var_names,
+ truths=list(res.params.valuesdict().values()))
+ plt.show()
+
+if HASPYLAB:
+ plt.plot(res.acceptance_fraction, 'o')
+ plt.xlabel('walker')
+ plt.ylabel('acceptance fraction')
+ plt.show()
+
+if hasattr(res, "acor"):
+ print("Autocorrelation time for the parameters:")
+ print("----------------------------------------")
+ for i, par in enumerate(p):
+ print(par, res.acor[i])
+
+print("\nmedian of posterior probability distribution")
+print('--------------------------------------------')
+lmfit.report_fit(res.params)
+
+
+# find the maximum likelihood solution
+highest_prob = np.argmax(res.lnprob)
+hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
+mle_soln = res.chain[hp_loc]
+for i, par in enumerate(p):
+ p[par].value = mle_soln[i]
+
+print('\nMaximum Likelihood Estimation from emcee ')
+print('-------------------------------------------------')
+print('Parameter MLE Value Median Value Uncertainty')
+fmt = ' {:5s} {:11.5f} {:11.5f} {:11.5f}'.format
+for name, param in p.items():
+ print(fmt(name, param.value, res.params[name].value,
+ res.params[name].stderr))
+
+if HASPYLAB:
+ plt.figure()
+ plt.plot(x, y, 'o')
+ plt.plot(x, residual(mi.params) + y, label='Nelder-Mead')
+ plt.plot(x, residual(res.params) + y, '--', label='emcee')
+ plt.legend()
+ plt.show()
+
+print('\nError Estimates from emcee ')
+print('------------------------------------------------------')
+print('Parameter -2sigma -1sigma median +1sigma +2sigma ')
+
+for name in p.keys():
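+    # percentiles of a normal distribution at -2, -1, +1, and +2 sigma:
+    # 50 - 47.725, 50 - 34.135, 50 + 34.135, and 50 + 47.725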
+    quantiles = np.percentile(res.flatchain[name],
+                              [2.275, 15.865, 50, 84.135, 97.725])
+ median = quantiles[2]
+ err_m2 = quantiles[0] - median
+ err_m1 = quantiles[1] - median
+ err_p1 = quantiles[3] - median
+ err_p2 = quantiles[4] - median
+ fmt = ' {:5s} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f}'.format
+    print(fmt(name, err_m2, err_m1, median, err_p1, err_p2))
+# <end examples/doc_fitting_emcee.py>
diff --git a/examples/doc_fitting_withreport.py b/examples/doc_fitting_withreport.py
new file mode 100644
index 0000000..5049ac2
--- /dev/null
+++ b/examples/doc_fitting_withreport.py
@@ -0,0 +1,35 @@
+# <examples/doc_fitting_withreport.py>
+from numpy import exp, linspace, pi, random, sign, sin
+
+from lmfit import create_params, fit_report, minimize
+
+p_true = create_params(amp=14.0, period=5.46, shift=0.123, decay=0.032)
+
+
+def residual(pars, x, data=None):
+ """Model a decaying sine wave and subtract data."""
+ vals = pars.valuesdict()
+ amp = vals['amp']
+ per = vals['period']
+ shift = vals['shift']
+ decay = vals['decay']
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+ model = amp * sin(shift + x/per) * exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return model - data
+
+
+random.seed(0)
+x = linspace(0.0, 250., 1001)
+noise = random.normal(scale=0.7215, size=x.size)
+data = residual(p_true, x) + noise
+
+fit_params = create_params(amp=13, period=2, shift=0, decay=0.02)
+
+out = minimize(residual, fit_params, args=(x,), kws={'data': data})
+
+print(fit_report(out))
+# <end examples/doc_fitting_withreport.py>
diff --git a/examples/doc_model_composite.py b/examples/doc_model_composite.py
new file mode 100644
index 0000000..d5d3a34
--- /dev/null
+++ b/examples/doc_model_composite.py
@@ -0,0 +1,66 @@
+# <examples/doc_model_composite.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit import CompositeModel, Model
+from lmfit.lineshapes import gaussian, step
+
+# create data from broadened step
+x = np.linspace(0, 10, 201)
+y = step(x, amplitude=12.5, center=4.5, sigma=0.88, form='erf')
+np.random.seed(0)
+y = y + np.random.normal(scale=0.35, size=x.size)
+
+
+def jump(x, mid):
+ """Heaviside step function."""
+ o = np.zeros(x.size)
+ imid = max(np.where(x <= mid)[0])
+ o[imid:] = 1.0
+ return o
+
+
+def convolve(arr, kernel):
+ """Simple convolution of two arrays."""
+ npts = min(arr.size, kernel.size)
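+    # pad both ends with the edge values so the 'valid' convolution
+    # below can be trimmed back to the original npts points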
+ pad = np.ones(npts)
+ tmp = np.concatenate((pad*arr[0], arr, pad*arr[-1]))
+ out = np.convolve(tmp, kernel, mode='valid')
+ noff = int((len(out) - npts) / 2)
+ return out[noff:noff+npts]
+
+
+# create Composite Model using the custom convolution operator
+mod = CompositeModel(Model(jump), Model(gaussian), convolve)
+
+# create parameters for model. Note that 'mid' and 'center' will be highly
+# correlated. Since 'mid' is used as an integer index, it will be very
+# hard to fit, so we fix its value
+pars = mod.make_params(amplitude=dict(value=1, min=0),
+ center=3.5,
+ sigma=dict(value=1.5, min=0),
+ mid=dict(value=4, vary=False))
+
+# fit this model to data array y
+result = mod.fit(y, params=pars, x=x)
+
+print(result.fit_report())
+
+# generate components
+comps = result.eval_components(x=x)
+
+# plot results
+fig, axes = plt.subplots(1, 2, figsize=(12.8, 4.8))
+
+axes[0].plot(x, y, 'bo')
+axes[0].plot(x, result.init_fit, 'k--', label='initial fit')
+axes[0].plot(x, result.best_fit, 'r-', label='best fit')
+axes[0].legend()
+
+axes[1].plot(x, y, 'bo')
+axes[1].plot(x, 10*comps['jump'], 'k--', label='Jump component')
+axes[1].plot(x, 10*comps['gaussian'], 'r-', label='Gaussian component')
+axes[1].legend()
+
+plt.show()
+# <end examples/doc_model_composite.py>
diff --git a/examples/doc_model_gaussian.py b/examples/doc_model_gaussian.py
new file mode 100644
index 0000000..e06e631
--- /dev/null
+++ b/examples/doc_model_gaussian.py
@@ -0,0 +1,27 @@
+# <examples/doc_model_gaussian.py>
+import matplotlib.pyplot as plt
+from numpy import exp, loadtxt, pi, sqrt
+
+from lmfit import Model
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+
+def gaussian(x, amp, cen, wid):
+ """1-d gaussian: gaussian(x, amp, cen, wid)"""
+ return (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2))
+
+
+gmodel = Model(gaussian)
+result = gmodel.fit(y, x=x, amp=5, cen=5, wid=1)
+
+print(result.fit_report())
+
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.legend()
+plt.show()
+# <end examples/doc_model_gaussian.py>
diff --git a/examples/doc_model_loadmodel.py b/examples/doc_model_loadmodel.py
new file mode 100644
index 0000000..7dafb77
--- /dev/null
+++ b/examples/doc_model_loadmodel.py
@@ -0,0 +1,33 @@
+# <examples/doc_model_loadmodel.py>
+import os
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.model import load_model
+
+if not os.path.exists('sinemodel.sav'):
+ os.system(f"{sys.executable} doc_model_savemodel.py")
+
+
+def mysine(x, amp, freq, shift):
+ return amp * np.sin(x*freq + shift)
+
+
+data = np.loadtxt('sinedata.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+model = load_model('sinemodel.sav', funcdefs={'mysine': mysine})
+params = model.make_params(amp=dict(value=3, min=0),
+ freq=0.52,
+ shift=dict(value=0, min=-1, max=1))
+
+result = model.fit(y, params, x=x)
+print(result.fit_report())
+
+plt.plot(x, y, 'o')
+plt.plot(x, result.best_fit, '-')
+plt.show()
+# <end examples/doc_model_loadmodel.py>
diff --git a/examples/doc_model_loadmodelresult.py b/examples/doc_model_loadmodelresult.py
new file mode 100644
index 0000000..fddbceb
--- /dev/null
+++ b/examples/doc_model_loadmodelresult.py
@@ -0,0 +1,23 @@
+# <examples/doc_model_loadmodelresult.py>
+import os
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.model import load_modelresult
+
+if not os.path.exists('gauss_modelresult.sav'):
+ os.system(f"{sys.executable} doc_model_savemodelresult.py")
+
+data = np.loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+result = load_modelresult('gauss_modelresult.sav')
+print(result.fit_report())
+
+plt.plot(x, y, 'o')
+plt.plot(x, result.best_fit, '-')
+plt.show()
+# <end examples/doc_model_loadmodelresult.py>
diff --git a/examples/doc_model_loadmodelresult2.py b/examples/doc_model_loadmodelresult2.py
new file mode 100644
index 0000000..115c985
--- /dev/null
+++ b/examples/doc_model_loadmodelresult2.py
@@ -0,0 +1,23 @@
+# <examples/doc_model_loadmodelresult2.py>
+import os
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.model import load_modelresult
+
+if not os.path.exists('nistgauss_modelresult.sav'):
+ os.system(f"{sys.executable} doc_model_savemodelresult2.py")
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+result = load_modelresult('nistgauss_modelresult.sav')
+print(result.fit_report())
+
+plt.plot(x, y, 'o')
+plt.plot(x, result.best_fit, '-')
+plt.show()
+# <end examples/doc_model_loadmodelresult2.py>
diff --git a/examples/doc_model_savemodel.py b/examples/doc_model_savemodel.py
new file mode 100644
index 0000000..9071866
--- /dev/null
+++ b/examples/doc_model_savemodel.py
@@ -0,0 +1,15 @@
+# <examples/doc_model_savemodel.py>
+import numpy as np
+
+from lmfit.model import Model, save_model
+
+
+def mysine(x, amp, freq, shift):
+ return amp * np.sin(x*freq + shift)
+
+
+sinemodel = Model(mysine)
+pars = sinemodel.make_params(amp=1, freq=0.25, shift=0)
+
+save_model(sinemodel, 'sinemodel.sav')
+# <end examples/doc_model_savemodel.py>
diff --git a/examples/doc_model_savemodelresult.py b/examples/doc_model_savemodelresult.py
new file mode 100644
index 0000000..3a8cecb
--- /dev/null
+++ b/examples/doc_model_savemodelresult.py
@@ -0,0 +1,17 @@
+# <examples/doc_model_savemodelresult.py>
+import numpy as np
+
+from lmfit.model import save_modelresult
+from lmfit.models import GaussianModel
+
+data = np.loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+gmodel = GaussianModel()
+result = gmodel.fit(y, x=x, amplitude=5, center=5, sigma=1)
+
+save_modelresult(result, 'gauss_modelresult.sav')
+
+print(result.fit_report())
+# <end examples/doc_model_savemodelresult.py>
diff --git a/examples/doc_model_savemodelresult2.py b/examples/doc_model_savemodelresult2.py
new file mode 100644
index 0000000..73a3bc5
--- /dev/null
+++ b/examples/doc_model_savemodelresult2.py
@@ -0,0 +1,33 @@
+# <examples/doc_model_savemodelresult2.py>
+import numpy as np
+
+from lmfit.model import save_modelresult
+from lmfit.models import ExponentialModel, GaussianModel
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+exp_mod = ExponentialModel(prefix='exp_')
+pars = exp_mod.guess(y, x=x)
+
+gauss1 = GaussianModel(prefix='g1_')
+pars.update(gauss1.make_params(center=dict(value=105, min=75, max=125),
+ sigma=dict(value=15, min=0),
+ amplitude=dict(value=2000, min=0)))
+
+gauss2 = GaussianModel(prefix='g2_')
+pars.update(gauss2.make_params(center=dict(value=155, min=125, max=175),
+ sigma=dict(value=15, min=0),
+ amplitude=dict(value=2000, min=0)))
+
+mod = gauss1 + gauss2 + exp_mod
+
+init = mod.eval(pars, x=x)
+
+result = mod.fit(y, pars, x=x)
+
+save_modelresult(result, 'nistgauss_modelresult.sav')
+
+print(result.fit_report())
+# <end examples/doc_model_savemodelresult2.py>
diff --git a/examples/doc_model_two_components.py b/examples/doc_model_two_components.py
new file mode 100644
index 0000000..d41dc39
--- /dev/null
+++ b/examples/doc_model_two_components.py
@@ -0,0 +1,34 @@
+# <examples/doc_model_two_components.py>
+import matplotlib.pyplot as plt
+from numpy import exp, loadtxt, pi, sqrt
+
+from lmfit import Model
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1] + 0.25*x - 1.0
+
+
+def gaussian(x, amp, cen, wid):
+ """1-d gaussian: gaussian(x, amp, cen, wid)"""
+ return (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2))
+
+
+def line(x, slope, intercept):
+ """a line"""
+ return slope*x + intercept
+
+
+mod = Model(gaussian) + Model(line)
+pars = mod.make_params(amp=5, cen=5, wid={'value': 1, 'min': 0},
+ slope=0, intercept=1)
+
+result = mod.fit(y, pars, x=x)
+print(result.fit_report())
+
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.legend()
+plt.show()
+# <end examples/doc_model_two_components.py>
diff --git a/examples/doc_model_uncertainty.py b/examples/doc_model_uncertainty.py
new file mode 100644
index 0000000..6aed14f
--- /dev/null
+++ b/examples/doc_model_uncertainty.py
@@ -0,0 +1,31 @@
+# <examples/doc_model_uncertainty.py>
+import matplotlib.pyplot as plt
+from numpy import exp, loadtxt, pi, sqrt
+
+from lmfit import Model
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+
+def gaussian(x, amp, cen, wid):
+ """1-d gaussian: gaussian(x, amp, cen, wid)"""
+ return (amp / (sqrt(2*pi) * wid)) * exp(-(x-cen)**2 / (2*wid**2))
+
+
+gmodel = Model(gaussian)
+result = gmodel.fit(y, x=x, amp=5, cen=5, wid=1)
+
+print(result.fit_report())
+
+dely = result.eval_uncertainty(sigma=3)
+
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.fill_between(x, result.best_fit-dely, result.best_fit+dely,
+ color="#ABABAB", label=r'3-$\sigma$ uncertainty band')
+plt.legend()
+plt.show()
+# <end examples/doc_model_uncertainty.py>
diff --git a/examples/doc_model_uncertainty2.py b/examples/doc_model_uncertainty2.py
new file mode 100644
index 0000000..840b93a
--- /dev/null
+++ b/examples/doc_model_uncertainty2.py
@@ -0,0 +1,79 @@
+# <examples/doc_model_uncertainty2.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import ExponentialModel, GaussianModel
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+model = (GaussianModel(prefix='g1_') +
+ GaussianModel(prefix='g2_') +
+ ExponentialModel(prefix='bkg_'))
+
+params = model.make_params(bkg_amplitude=100, bkg_decay=80,
+ g1_amplitude=3000,
+ g1_center=100,
+ g1_sigma=10,
+ g2_amplitude=3000,
+ g2_center=150,
+ g2_sigma=10)
+
+result = model.fit(y, params, x=x)
+print(result.fit_report(min_correl=0.5))
+
+comps = result.eval_components(x=x)
+dely = result.eval_uncertainty(sigma=3)
+
+fig, axes = plt.subplots(2, 2, figsize=(12.8, 9.6))
+
+axes[0][0].plot(x, y, 'o', color='#99002299', markersize=3, label='data')
+axes[0][0].plot(x, result.best_fit, '-', label='best fit')
+axes[0][0].plot(x, result.init_fit, '--', label='initial fit')
+axes[0][0].set_title('data, initial fit, and best-fit')
+axes[0][0].legend()
+
+axes[0][1].plot(x, y, 'o', color='#99002299', markersize=3, label='data')
+axes[0][1].plot(x, result.best_fit, '-', label='best fit')
+axes[0][1].fill_between(x, result.best_fit-dely, result.best_fit+dely,
+ color="#8A8A8A", label=r'3-$\sigma$ band')
+axes[0][1].set_title('data, best-fit, and uncertainty band')
+axes[0][1].legend()
+
+axes[1][0].plot(x, result.best_fit, '-', label=r'best fit, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ result.best_fit-result.dely,
+ result.best_fit+result.dely,
+ color="#8A8A8A")
+
+axes[1][0].plot(x, comps['bkg_'], label=r'background, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ comps['bkg_']-result.dely_comps['bkg_'],
+ comps['bkg_']+result.dely_comps['bkg_'],
+ color="#8A8A8A")
+
+axes[1][0].plot(x, comps['g1_'], label=r'Gaussian #1, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ comps['g1_']-result.dely_comps['g1_'],
+ comps['g1_']+result.dely_comps['g1_'],
+ color="#8A8A8A")
+
+axes[1][0].plot(x, comps['g2_'], label=r'Gaussian #2, 3-$\sigma$ band')
+axes[1][0].fill_between(x,
+ comps['g2_']-result.dely_comps['g2_'],
+ comps['g2_']+result.dely_comps['g2_'],
+ color="#8A8A8A")
+axes[1][0].set_title('model components with uncertainty bands')
+axes[1][0].legend()
+
+axes[1][1].plot(x, result.best_fit, '-', label='best fit')
+axes[1][1].plot(x, 10*result.dely, label=r'3-$\sigma$ total (x10)')
+axes[1][1].plot(x, 10*result.dely_comps['bkg_'], label=r'3-$\sigma$ background (x10)')
+axes[1][1].plot(x, 10*result.dely_comps['g1_'], label=r'3-$\sigma$ Gaussian #1 (x10)')
+axes[1][1].plot(x, 10*result.dely_comps['g2_'], label=r'3-$\sigma$ Gaussian #2 (x10)')
+axes[1][1].set_title('uncertainties for model components')
+axes[1][1].legend()
+
+plt.show()
+# <end examples/doc_model_uncertainty2.py>
diff --git a/examples/doc_model_with_iter_callback.py b/examples/doc_model_with_iter_callback.py
new file mode 100644
index 0000000..f789f02
--- /dev/null
+++ b/examples/doc_model_with_iter_callback.py
@@ -0,0 +1,36 @@
+# <examples/doc_model_with_iter_callback.py>
+import matplotlib.pyplot as plt
+from numpy import linspace, random
+
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel, LinearModel
+
+
+def per_iteration(pars, iteration, resid, *args, **kws):
+ print(" ITER ", iteration, [f"{p.name} = {p.value:.5f}" for p in pars.values()])
+
+
+x = linspace(0., 20, 401)
+y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+random.seed(2021)
+y = y - .20*x + 3.333 + random.normal(scale=0.23, size=x.size)
+
+mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+pars = mod.make_params(peak_amplitude=dict(value=3.0, min=0),
+ peak_center=dict(value=6.0, min=0, max=20),
+ peak_sigma=2.0,
+ bkg_intercept=0,
+ bkg_slope=0)
+
+out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
+
+plt.plot(x, y, '--')
+
+print(f'Nfev = {out.nfev}')
+print(out.fit_report())
+
+plt.plot(x, out.best_fit, '-', label='best fit')
+plt.legend()
+plt.show()
+# <end examples/doc_model_with_iter_callback.py>
diff --git a/examples/doc_model_with_nan_policy.py b/examples/doc_model_with_nan_policy.py
new file mode 100644
index 0000000..a58766c
--- /dev/null
+++ b/examples/doc_model_with_nan_policy.py
@@ -0,0 +1,33 @@
+# <examples/doc_model_with_nan_policy.py>
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import GaussianModel
+
+data = np.loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+y[44] = np.nan
+y[65] = np.nan
+
+# nan_policy = 'raise'
+# nan_policy = 'propagate'
+nan_policy = 'omit'
+
+gmodel = GaussianModel()
+result = gmodel.fit(y, x=x, amplitude=5, center=6, sigma=1,
+ nan_policy=nan_policy)
+
+print(result.fit_report())
+
+# make sure nans are removed for plotting:
+x_ = x[np.where(np.isfinite(y))]
+y_ = y[np.where(np.isfinite(y))]
+
+plt.plot(x_, y_, 'o')
+plt.plot(x_, result.init_fit, '--', label='initial fit')
+plt.plot(x_, result.best_fit, '-', label='best fit')
+plt.legend()
+plt.show()
+# <end examples/doc_model_with_nan_policy.py>
diff --git a/examples/doc_parameters_basic.py b/examples/doc_parameters_basic.py
new file mode 100644
index 0000000..eed2f1e
--- /dev/null
+++ b/examples/doc_parameters_basic.py
@@ -0,0 +1,55 @@
+# <examples/doc_parameters_basic.py>
+import numpy as np
+
+from lmfit import Minimizer, Parameters, create_params, report_fit
+
+# create data to be fitted
+x = np.linspace(0, 15, 301)
+np.random.seed(2021)
+data = (5.0 * np.sin(2.0*x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=x.size, scale=0.2))
+
+
+# define objective function: returns the array to be minimized
+def fcn2min(params, x, data):
+ """Model a decaying sine wave and subtract data."""
+ amp = params['amp']
+ shift = params['shift']
+ omega = params['omega']
+ decay = params['decay']
+ model = amp * np.sin(x*omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+
+# create a set of Parameters
+params = Parameters()
+params.add('amp', value=10, min=0)
+params.add('decay', value=0.1)
+params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
+params.add('omega', value=3.0)
+
+# ... or use
+params = create_params(amp=dict(value=10, min=0),
+ decay=0.1,
+ omega=3,
+ shift=dict(value=0, min=-np.pi/2, max=np.pi/2))
+
+# do fit, here with the default leastsq algorithm
+minner = Minimizer(fcn2min, params, fcn_args=(x, data))
+result = minner.minimize()
+
+# calculate final result
+final = data + result.residual
+
+# write error report
+report_fit(result)
+
+# try to plot results
+try:
+ import matplotlib.pyplot as plt
+ plt.plot(x, data, '+')
+ plt.plot(x, final)
+ plt.show()
+except ImportError:
+ pass
+# <end of examples/doc_parameters_basic.py>
diff --git a/examples/doc_parameters_valuesdict.py b/examples/doc_parameters_valuesdict.py
new file mode 100644
index 0000000..3c7cca5
--- /dev/null
+++ b/examples/doc_parameters_valuesdict.py
@@ -0,0 +1,46 @@
+# <examples/doc_parameters_valuesdict.py>
+import numpy as np
+
+from lmfit import Minimizer, create_params, report_fit
+
+# create data to be fitted
+x = np.linspace(0, 15, 301)
+np.random.seed(2021)
+data = (5.0 * np.sin(2.0*x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=x.size, scale=0.2))
+
+
+# define objective function: returns the array to be minimized
+def fcn2min(params, x, data):
+ """Model a decaying sine wave and subtract data."""
+ v = params.valuesdict()
+
+ model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+ return model - data
+
+
+# create a set of Parameters
+params = create_params(amp=dict(value=10, min=0),
+ decay=0.1,
+ omega=3.0,
+ shift=dict(value=0.0, min=-np.pi/2., max=np.pi/2))
+
+# do fit, here with the default leastsq algorithm
+minner = Minimizer(fcn2min, params, fcn_args=(x, data))
+result = minner.minimize()
+
+# calculate final result
+final = data + result.residual
+
+# write error report
+report_fit(result)
+
+# try to plot results
+try:
+ import matplotlib.pyplot as plt
+ plt.plot(x, data, '+')
+ plt.plot(x, final)
+ plt.show()
+except ImportError:
+ pass
+# <end of examples/doc_parameters_valuesdict.py>
diff --git a/examples/example_Model_interface.py b/examples/example_Model_interface.py
new file mode 100644
index 0000000..4a2bf36
--- /dev/null
+++ b/examples/example_Model_interface.py
@@ -0,0 +1,174 @@
+"""
+Fit using the Model interface
+=============================
+
+This notebook shows a simple example of using the ``lmfit.Model`` class. For
+more information please refer to:
+https://lmfit.github.io/lmfit-py/model.html#the-model-class.
+
+"""
+import numpy as np
+from pandas import Series
+
+from lmfit import Model, Parameter, report_fit
+
+###############################################################################
+# The ``Model`` class is a flexible, concise curve fitter. I will illustrate
+# fitting example data to an exponential decay.
+
+
+def decay(t, N, tau):
+ return N*np.exp(-t/tau)
+
+
+###############################################################################
+# The parameters are in no particular order. We'll need some example data. I
+# will use ``N=7`` and ``tau=3``, and add a little noise.
+t = np.linspace(0, 5, num=1000)
+np.random.seed(2021)
+data = decay(t, 7, 3) + np.random.randn(t.size)
+
+###############################################################################
+# **Simplest Usage**
+model = Model(decay, independent_vars=['t'])
+result = model.fit(data, t=t, N=10, tau=1)
+
+###############################################################################
+# The Model infers the parameter names by inspecting the arguments of the
+# function, ``decay``. Then I passed the independent variable, ``t``, and
+# initial guesses for each parameter. A residual function is automatically
+# defined, and a least-squares regression is performed.
+#
+# We can immediately see the best-fit values:
+print(result.values)
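+
+###############################################################################
+# As a quick sanity check (using the ``param_names`` and
+# ``independent_vars`` attributes that ``Model`` provides), the inferred
+# names can be printed directly:
+print(model.param_names)
+print(model.independent_vars)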
+
+###############################################################################
+# and use these best-fit parameters for plotting with the ``plot`` function:
+result.plot()
+
+###############################################################################
+# We can review the best-fit `Parameters` by accessing `result.params`:
+result.params.pretty_print()
+
+###############################################################################
+# More information about the fit is stored in the result, which is an
+# ``lmfit.MinimizerResult`` object (see:
+# https://lmfit.github.io/lmfit-py/fitting.html#lmfit.minimizer.MinimizerResult)
+
+###############################################################################
+# **Specifying Bounds and Holding Parameters Constant**
+#
+# Above, the ``Model`` class implicitly builds ``Parameter`` objects from
+# keyword arguments of ``fit`` that match the arguments of ``decay``. You can
+# build the ``Parameter`` objects explicitly; the following is equivalent.
+result = model.fit(data, t=t,
+ N=Parameter('N', value=10),
+ tau=Parameter('tau', value=1))
+report_fit(result.params)
+
+###############################################################################
+# By building ``Parameter`` objects explicitly, you can specify bounds
+# (``min``, ``max``) and set parameters constant (``vary=False``).
+result = model.fit(data, t=t,
+ N=Parameter('N', value=7, vary=False),
+ tau=Parameter('tau', value=1, min=0))
+report_fit(result.params)
+
+###############################################################################
+# **Defining Parameters in Advance**
+#
+# Passing parameters to ``fit`` can become unwieldy. As an alternative, you
+# can extract the parameters from ``model`` like so, set them individually,
+# and pass them to ``fit``.
+params = model.make_params(N=10, tau={'value': 1, 'min': 0})
+
+result = model.fit(data, params, t=t)
+report_fit(result.params)
+
+##############################################################################
+# Keyword arguments override ``params``, resetting ``value`` and all other
+# properties (``min``, ``max``, ``vary``).
+result = model.fit(data, params, t=t, tau=1)
+report_fit(result.params)
+
+###############################################################################
+# The input parameters are not modified by ``fit``. They can be reused,
+# retaining the same initial value. If you want to use the result of one fit
+# as the initial guess for the next, simply pass ``params=result.params``.
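+#
+# A minimal sketch of that chaining, reusing the objects defined above:
+#
+#     result2 = model.fit(data, params=result.params, t=t)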
+
+###############################################################################
+# #TODO/FIXME: not sure if there ever was a "helpful exception", but currently
+# #it raises a ``ValueError: The input contains nan values``.
+#
+# #*A Helpful Exception*
+#
+# #All this implicit magic makes it very easy for the user to neglect to set a
+# #parameter. The ``fit`` function checks for this and raises a helpful exception.
+
+# #result = model.fit(data, t=t, tau=1) # N unspecified
+
+###############################################################################
+# #An *extra* parameter that cannot be matched to the model function will
+# #throw a ``UserWarning``, but it will not raise, leaving open the possibility
+# #of unforeseen extensions calling for some parameters.
+
+###############################################################################
+# *Weighted Fits*
+#
+# Use the ``weights`` argument to perform a weighted fit. If you prefer to
+# think of the fit in terms of uncertainties ``sigma``, use ``weights=1/sigma``.
+weights = np.arange(len(data))
+result = model.fit(data, params, t=t, weights=weights)
+report_fit(result.params)
+
+###############################################################################
+# *Handling Missing Data*
+#
+# By default, attempting to fit data that includes a ``NaN``, which
+# conventionally indicates a "missing" observation, raises a lengthy exception.
+# You can choose to ``omit`` (i.e., skip over) missing values instead.
+data_with_holes = data.copy()
+data_with_holes[[5, 500, 700]] = np.nan # Replace arbitrary values with NaN.
+
+model = Model(decay, independent_vars=['t'], nan_policy='omit')
+result = model.fit(data_with_holes, params, t=t)
+report_fit(result.params)
+
+###############################################################################
+# If you don't want to ignore missing values, you can set the model to raise
+# proactively, checking for missing values before attempting the fit.
+#
+# Uncomment to see the error
+# #model = Model(decay, independent_vars=['t'], nan_policy='raise')
+# #result = model.fit(data_with_holes, params, t=t)
+#
+# The default setting is ``nan_policy='raise'``, which does check for NaNs and
+# raises an exception when present.
+#
+# Null-checking relies on ``pandas.isnull`` if it is available. If pandas
+# cannot be imported, it silently falls back on ``numpy.isnan``.
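+#
+# A minimal sketch of that fallback logic (illustrative only, not lmfit's
+# actual code):
+#
+#     try:
+#         from pandas import isnull
+#     except ImportError:
+#         from numpy import isnan as isnull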
+
+###############################################################################
+# *Data Alignment*
+#
+# Imagine a collection of time series data with different lengths. It would be
+# convenient to define one sufficiently long array ``t`` and use it for each
+# time series, regardless of length. ``pandas``
+# (https://pandas.pydata.org/pandas-docs/stable/) provides tools for aligning
+# indexed data. And, unlike most wrappers to ``scipy.leastsq``, ``Model`` can
+# handle pandas objects out of the box, using its data alignment features.
+#
+# Here I take just a slice of the ``data`` and fit it to the full ``t``. It is
+# automatically aligned to the correct section of ``t`` using the Series index.
+model = Model(decay, independent_vars=['t'])
+truncated_data = Series(data)[200:800] # data points 200-800
+t = Series(t) # all 1000 points
+result = model.fit(truncated_data, params, t=t)
+report_fit(result.params)
+
+###############################################################################
+# Data with missing entries and an unequal length still aligns properly.
+model = Model(decay, independent_vars=['t'], nan_policy='omit')
+truncated_data_with_holes = Series(data_with_holes)[200:800]
+result = model.fit(truncated_data_with_holes, params, t=t)
+report_fit(result.params)
diff --git a/examples/example_brute.py b/examples/example_brute.py
new file mode 100644
index 0000000..e0ceda9
--- /dev/null
+++ b/examples/example_brute.py
@@ -0,0 +1,392 @@
+"""
+Global minimization using the ``brute`` method (a.k.a. grid search)
+===================================================================
+
+"""
+###############################################################################
+# This notebook shows a simple example of using ``lmfit.minimize.brute`` that
+# uses the method with the same name from ``scipy.optimize``.
+#
+# The method computes the function’s value at each point of a multidimensional
+# grid in order to find the global minimum of the function. It behaves
+# identically to ``scipy.optimize.brute`` if finite bounds are given on
+# all varying parameters, but will also deal with non-bounded parameters
+# (see below).
+import copy
+
+from matplotlib.colors import LogNorm
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit import Minimizer, create_params, fit_report
+
+###############################################################################
+# Let's start with the example given in the documentation of SciPy:
+#
+# "We illustrate the use of brute to seek the global minimum of a function of
+# two variables that is given as the sum of a positive-definite quadratic and
+# two deep “Gaussian-shaped” craters. Specifically, define the objective
+# function ``f`` as the sum of three other functions, ``f = f1 + f2 + f3``. We
+# suppose each of these has a signature ``(z, *params)``, where ``z = (x, y)``,
+# and params and the functions are as defined below."
+#
+# First, we create a set of Parameters where all variables except ``x`` and
+# ``y`` are given fixed values.
+# Just as in the documentation we will do a grid search between ``-4`` and
+# ``4`` and use a stepsize of ``0.25``. The bounds can be set as usual with
+# the ``min`` and ``max`` attributes, and the stepsize is set using
+# ``brute_step``.
+
+params = create_params(a=dict(value=2, vary=False),
+ b=dict(value=3, vary=False),
+ c=dict(value=7, vary=False),
+ d=dict(value=8, vary=False),
+ e=dict(value=9, vary=False),
+ f=dict(value=10, vary=False),
+ g=dict(value=44, vary=False),
+ h=dict(value=-1, vary=False),
+ i=dict(value=2, vary=False),
+ j=dict(value=26, vary=False),
+ k=dict(value=1, vary=False),
+ l=dict(value=-2, vary=False),
+ scale=dict(value=0.5, vary=False),
+ x=dict(value=0.0, vary=True, min=-4, max=4, brute_step=0.25),
+ y=dict(value=0.0, vary=True, min=-4, max=4, brute_step=0.25))
+
+###############################################################################
+# Second, create the three functions and the objective function:
+
+
+def f1(p):
+ par = p.valuesdict()
+ return (par['a'] * par['x']**2 + par['b'] * par['x'] * par['y'] +
+ par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] +
+ par['f'])
+
+
+def f2(p):
+ par = p.valuesdict()
+ return (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 +
+ (par['y']-par['i'])**2) / par['scale']))
+
+
+def f3(p):
+ par = p.valuesdict()
+ return (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 +
+ (par['y']-par['l'])**2) / par['scale']))
+
+
+def f(params):
+ return f1(params) + f2(params) + f3(params)
+
+
+###############################################################################
+# Performing the actual grid search is done with:
+fitter = Minimizer(f, params)
+result = fitter.minimize(method='brute')
+
+###############################################################################
+# , which will step ``x`` and ``y`` from ``-4`` up to (but not including)
+# ``4`` in increments of ``0.25``.
+grid_x, grid_y = (np.unique(par.ravel()) for par in result.brute_grid)
+print(grid_x)
+
+###############################################################################
+# The objective function is evaluated on this grid, and the raw output from
+# ``scipy.optimize.brute`` is stored in the MinimizerResult as
+# ``brute_<parname>`` attributes. These attributes are:
+#
+# ``result.brute_x0`` -- A 1-D array containing the coordinates of a point at
+# which the objective function had its minimum value.
+print(result.brute_x0)
+
+###############################################################################
+# ``result.brute_fval`` -- Function value at the point ``x0``.
+print(result.brute_fval)
+
+###############################################################################
+# ``result.brute_grid`` -- Representation of the evaluation grid. It has the
+# same length as ``x0``.
+print(result.brute_grid)
+
+###############################################################################
+# ``result.brute_Jout`` -- Function values at each point of the evaluation
+# grid, i.e., ``Jout = func(*grid)``.
+print(result.brute_Jout)
+
+###############################################################################
+# **Reassuringly, the obtained results are identical to using the method in
+# SciPy directly!**
+
+###############################################################################
+# Example 2: fit of a decaying sine wave
+#
+# In this example, we will explain some of the options of the algorithm.
+#
+# We start off by generating some synthetic data with noise for a decaying sine
+# wave, define an objective function, and create/initialize a Parameter set.
+x = np.linspace(0, 15, 301)
+np.random.seed(7)
+noise = np.random.normal(size=x.size, scale=0.2)
+data = (5. * np.sin(2*x - 0.1) * np.exp(-x*x*0.025) + noise)
+plt.plot(x, data, 'o')
+plt.show()
+
+
+def fcn2min(params, x, data):
+ """Model decaying sine wave, subtract data."""
+ amp = params['amp']
+ shift = params['shift']
+ omega = params['omega']
+ decay = params['decay']
+ model = amp * np.sin(x*omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+
+###############################################################################
+# In contrast to the implementation in SciPy (as shown in the first example),
+# varying parameters do not need to have finite bounds in lmfit. However, if a
+# parameter does not have finite bounds, then it does need a ``brute_step``
+# attribute specified:
+params = create_params(amp=dict(value=7, min=2.5, brute_step=0.25),
+ decay=dict(value=0.05, brute_step=0.005),
+ shift=dict(value=0.0, min=-np.pi/2., max=np.pi/2),
+ omega=dict(value=3, max=5, brute_step=0.25))
+
+###############################################################################
+# Our initial parameter set is now defined as shown below and this will
+# determine how the grid is set up.
+params.pretty_print()
+
+###############################################################################
+# First, we initialize a ``Minimizer`` and perform the grid search:
+fitter = Minimizer(fcn2min, params, fcn_args=(x, data))
+result_brute = fitter.minimize(method='brute', Ns=25, keep=25)
+
+print(fit_report(result_brute))
+
+###############################################################################
+# We used two new parameters here: ``Ns`` and ``keep``. The parameter ``Ns``
+# determines the 'number of grid points along the axes' similarly to its usage
+# in SciPy. Together with ``brute_step``, ``min`` and ``max`` for a Parameter
+# it will dictate how the grid is set up:
+#
+# **(1)** finite bounds are specified ("SciPy implementation"): uses
+# ``brute_step`` if present (in the example above) or uses ``Ns`` to generate
+# the grid. The latter scenario that interpolates ``Ns`` points from ``min``
+# to ``max`` (inclusive), is here shown for the parameter ``shift``:
+par_name = 'shift'
+indx_shift = result_brute.var_names.index(par_name)
+grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
+
+###############################################################################
+# If finite bounds are not set for a certain parameter then the user **must**
+# specify ``brute_step`` - three more scenarios are considered here:
+#
+# **(2)** lower bound (``min``) and ``brute_step`` are specified:
+# ``range = (min, min + Ns * brute_step, brute_step)``
+par_name = 'amp'
+indx_shift = result_brute.var_names.index(par_name)
+grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
+
+###############################################################################
+# **(3)** upper bound (``max``) and ``brute_step`` are specified:
+# ``range = (max - Ns * brute_step, max, brute_step)``
+par_name = 'omega'
+indx_shift = result_brute.var_names.index(par_name)
+grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
+
+###############################################################################
+# **(4)** numerical value (``value``) and ``brute_step`` are specified:
+# ``range = (value - (Ns//2) * brute_step, value + (Ns//2) * brute_step, brute_step)``
+par_name = 'decay'
+indx_shift = result_brute.var_names.index(par_name)
+grid_shift = np.unique(result_brute.brute_grid[indx_shift].ravel())
+print(f"parameter = {par_name}\nnumber of steps = {len(grid_shift)}\ngrid = {grid_shift}")
+
+###############################################################################
+# The ``MinimizerResult`` contains all the usual best-fit parameters and
+# fitting statistics. For example, the optimal solution from the grid search
+# is given below together with a plot:
+print(fit_report(result_brute))
+
+###############################################################################
+plt.plot(x, data, 'o')
+plt.plot(x, data + fcn2min(result_brute.params, x, data), '--')
+plt.show()
+
+###############################################################################
+# We can see that this fit is already very good, which is what we should expect
+# since our ``brute`` force grid is sampled rather finely and encompasses the
+# "correct" values.
+#
+# In a more realistic, complicated example the ``brute`` method will be used
+# to get reasonable values for the parameters and perform another minimization
+# (e.g., using ``leastsq``) using those as starting values. That is where the
+# ``keep`` parameter comes into play: it determines the "number of best
+# candidates from the brute force method that are stored in the ``candidates``
+# attribute". In the example above we store the best-ranking 25 solutions (the
+# default value is ``50`` and storing all the grid points can be accomplished
+# by choosing ``all``). The ``candidates`` attribute contains the parameters
+# and ``chisqr`` from the brute force method as a ``namedtuple``,
+# ``('Candidate', ['params', 'score'])``, sorted on the (lowest) ``chisqr``
+# value. To access the values for a particular candidate one can use
+# ``result.candidates[#].params`` or ``result.candidates[#].score``, where a
+# lower # represents a better candidate. The ``show_candidates(#)`` method
+# uses ``pretty_print()`` to show a specific candidate-# or all candidates
+# when no number is specified.
+#
+# The optimal fit is, as usual, stored in the ``MinimizerResult.params``
+# attribute and is, therefore, identical to ``result_brute.show_candidates(1)``.
+result_brute.show_candidates(1)
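+
+###############################################################################
+# The same information can be accessed programmatically; each entry of the
+# ``candidates`` attribute holds ``params`` and a ``score``, with
+# ``candidates[0]`` being the best candidate found:
+print(result_brute.candidates[0].score)
+result_brute.candidates[0].params.pretty_print()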
+
+###############################################################################
+# In this case, the next-best scoring candidate has already a ``chisqr`` that
+# increased quite a bit:
+result_brute.show_candidates(2)
+
+###############################################################################
+# and is, therefore, much less likely. However, as noted above, in most
+# cases you'll want to do another minimization using the solutions from the
+# ``brute`` method as starting values. That can be easily accomplished as
+# shown in the code below, where we now perform a ``leastsq`` minimization
+# starting from the top-25 solutions and accept the solution if the ``chisqr``
+# is lower than the previously 'optimal' solution:
+best_result = copy.deepcopy(result_brute)
+
+for candidate in result_brute.candidates:
+ trial = fitter.minimize(method='leastsq', params=candidate.params)
+ if trial.chisqr < best_result.chisqr:
+ best_result = trial
+
+###############################################################################
+# From the ``leastsq`` minimization we obtain the following parameters for the
+# most optimal result:
+print(fit_report(best_result))
+
+###############################################################################
+# As expected, the parameters have not changed significantly, since they were
+# already very close to the "real" values, which can also be appreciated from
+# the plots below.
+plt.plot(x, data, 'o')
+plt.plot(x, data + fcn2min(result_brute.params, x, data), '-',
+ label='brute')
+plt.plot(x, data + fcn2min(best_result.params, x, data), '--',
+ label='brute followed by leastsq')
+plt.legend()
+
+
+###############################################################################
+# Finally, the results from the ``brute`` force grid-search can be visualized
+# using the rather lengthy Python function below (which might get incorporated
+# in lmfit at some point).
+def plot_results_brute(result, best_vals=True, varlabels=None,
+ output=None):
+ """Visualize the result of the brute force grid search.
+
+    The output file will display the chi-square value per parameter and contour
+    plots for all combinations of two parameters.
+
+ Inspired by the `corner` package (https://github.com/dfm/corner.py).
+
+ Parameters
+ ----------
+ result : :class:`~lmfit.minimizer.MinimizerResult`
+ Contains the results from the :meth:`brute` method.
+
+ best_vals : bool, optional
+ Whether to show the best values from the grid search (default is True).
+
+ varlabels : list, optional
+ If None (default), use `result.var_names` as axis labels, otherwise
+ use the names specified in `varlabels`.
+
+ output : str, optional
+        Name of the output PDF file (default is None).
+ """
+ npars = len(result.var_names)
+ _fig, axes = plt.subplots(npars, npars)
+
+ if not varlabels:
+ varlabels = result.var_names
+ if best_vals and isinstance(best_vals, bool):
+ best_vals = result.params
+
+ for i, par1 in enumerate(result.var_names):
+ for j, par2 in enumerate(result.var_names):
+
+ # parameter vs chi2 in case of only one parameter
+ if npars == 1:
+ axes.plot(result.brute_grid, result.brute_Jout, 'o', ms=3)
+ axes.set_ylabel(r'$\chi^{2}$')
+ axes.set_xlabel(varlabels[i])
+ if best_vals:
+ axes.axvline(best_vals[par1].value, ls='dashed', color='r')
+
+ # parameter vs chi2 profile on top
+ elif i == j and j < npars-1:
+ if i == 0:
+ axes[0, 0].axis('off')
+ ax = axes[i, j+1]
+ red_axis = tuple(a for a in range(npars) if a != i)
+ ax.plot(np.unique(result.brute_grid[i]),
+ np.minimum.reduce(result.brute_Jout, axis=red_axis),
+ 'o', ms=3)
+ ax.set_ylabel(r'$\chi^{2}$')
+ ax.yaxis.set_label_position("right")
+ ax.yaxis.set_ticks_position('right')
+ ax.set_xticks([])
+ if best_vals:
+ ax.axvline(best_vals[par1].value, ls='dashed', color='r')
+
+ # parameter vs chi2 profile on the left
+ elif j == 0 and i > 0:
+ ax = axes[i, j]
+ red_axis = tuple(a for a in range(npars) if a != i)
+ ax.plot(np.minimum.reduce(result.brute_Jout, axis=red_axis),
+ np.unique(result.brute_grid[i]), 'o', ms=3)
+ ax.invert_xaxis()
+ ax.set_ylabel(varlabels[i])
+ if i != npars-1:
+ ax.set_xticks([])
+ else:
+ ax.set_xlabel(r'$\chi^{2}$')
+ if best_vals:
+ ax.axhline(best_vals[par1].value, ls='dashed', color='r')
+
+ # contour plots for all combinations of two parameters
+ elif j > i:
+ ax = axes[j, i+1]
+ red_axis = tuple(a for a in range(npars) if a not in (i, j))
+ X, Y = np.meshgrid(np.unique(result.brute_grid[i]),
+ np.unique(result.brute_grid[j]))
+ lvls1 = np.linspace(result.brute_Jout.min(),
+ np.median(result.brute_Jout)/2.0, 7, dtype='int')
+ lvls2 = np.linspace(np.median(result.brute_Jout)/2.0,
+ np.median(result.brute_Jout), 3, dtype='int')
+ lvls = np.unique(np.concatenate((lvls1, lvls2)))
+ ax.contourf(X.T, Y.T, np.minimum.reduce(result.brute_Jout, axis=red_axis),
+ lvls, norm=LogNorm())
+ ax.set_yticks([])
+ if best_vals:
+ ax.axvline(best_vals[par1].value, ls='dashed', color='r')
+ ax.axhline(best_vals[par2].value, ls='dashed', color='r')
+ ax.plot(best_vals[par1].value, best_vals[par2].value, 'rs', ms=3)
+ if j != npars-1:
+ ax.set_xticks([])
+ else:
+ ax.set_xlabel(varlabels[i])
+ if j - i >= 2:
+ axes[i, j].axis('off')
+
+ if output is not None:
+ plt.savefig(output)
+
+
+###############################################################################
+# and finally, to generate the figure:
+plot_results_brute(result_brute, best_vals=True, varlabels=None)
+plt.show()
diff --git a/examples/example_complex_resonator_model.py b/examples/example_complex_resonator_model.py
new file mode 100644
index 0000000..161e444
--- /dev/null
+++ b/examples/example_complex_resonator_model.py
@@ -0,0 +1,125 @@
+"""
+Complex Resonator Model
+=======================
+
+This notebook shows how to fit the parameters of a complex resonator,
+using `lmfit.Model` and defining a custom `Model` class.
+
+Following Khalil et al. (https://arxiv.org/abs/1108.3117), we can model the
+forward transmission of a microwave resonator with total quality factor
+:math:`Q`, coupling quality factor :math:`Q_e`, and resonant frequency
+:math:`f_0` using:
+
+.. math::
+
+ S_{21}(f) = 1 - \\frac{Q Q_e^{-1}}{1+2jQ(f-f_0)/f_0}
+
+:math:`S_{21}` is thus a complex function of a real frequency.
+
+By allowing :math:`Q_e` to be complex, this model can take into account
+mismatches in the input and output transmission impedances.
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+###############################################################################
+# Since ``scipy.optimize`` and ``lmfit`` require real parameters, we represent
+# :math:`Q_e` as ``Q_e_real + 1j*Q_e_imag``.
+
+
+def linear_resonator(f, f_0, Q, Q_e_real, Q_e_imag):
+ Q_e = Q_e_real + 1j*Q_e_imag
+ return 1 - (Q * Q_e**-1 / (1 + 2j * Q * (f - f_0) / f_0))
+
+
+###############################################################################
+# The standard practice of defining an ``lmfit`` model is as follows:
+class ResonatorModel(lmfit.model.Model):
+ __doc__ = "resonator model" + lmfit.models.COMMON_INIT_DOC
+
+ def __init__(self, *args, **kwargs):
+ # pass in the defining equation so the user doesn't have to later
+ super().__init__(linear_resonator, *args, **kwargs)
+
+ self.set_param_hint('Q', min=0) # enforce Q is positive
+
+ def guess(self, data, f=None, **kwargs):
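+        """Estimate initial parameter values from the measured S21 data."""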
+ verbose = kwargs.pop('verbose', None)
+ if f is None:
+ return
+ argmin_s21 = np.abs(data).argmin()
+ fmin = f.min()
+ fmax = f.max()
+ f_0_guess = f[argmin_s21] # guess that the resonance is the lowest point
+ Q_min = 0.1 * (f_0_guess/(fmax-fmin)) # assume the user isn't trying to fit just a small part of a resonance curve
+ delta_f = np.diff(f) # assume f is sorted
+ min_delta_f = delta_f[delta_f > 0].min()
+ Q_max = f_0_guess/min_delta_f # assume data actually samples the resonance reasonably
+ Q_guess = np.sqrt(Q_min*Q_max) # geometric mean, why not?
+ Q_e_real_guess = Q_guess/(1-np.abs(data[argmin_s21]))
+ if verbose:
+ print(f"fmin={fmin}, fmax={fmax}, f_0_guess={f_0_guess}")
+ print(f"Qmin={Q_min}, Q_max={Q_max}, Q_guess={Q_guess}, Q_e_real_guess={Q_e_real_guess}")
+ params = self.make_params(Q=Q_guess, Q_e_real=Q_e_real_guess, Q_e_imag=0, f_0=f_0_guess)
+ params[f'{self.prefix}Q'].set(min=Q_min, max=Q_max)
+ params[f'{self.prefix}f_0'].set(min=fmin, max=fmax)
+ return lmfit.models.update_param_vals(params, self.prefix, **kwargs)
+
+
+###############################################################################
+# Now let's use the model to generate some fake data:
+resonator = ResonatorModel()
+true_params = resonator.make_params(f_0=100, Q=10000, Q_e_real=9000, Q_e_imag=-9000)
+
+f = np.linspace(99.95, 100.05, 100)
+true_s21 = resonator.eval(params=true_params, f=f)
+noise_scale = 0.02
+np.random.seed(123)
+measured_s21 = true_s21 + noise_scale*(np.random.randn(100) + 1j*np.random.randn(100))
+
+plt.plot(f, 20*np.log10(np.abs(measured_s21)))
+plt.ylabel('|S21| (dB)')
+plt.xlabel('MHz')
+plt.title('simulated measurement')
+
+###############################################################################
+# Try out the ``guess`` method we added:
+guess = resonator.guess(measured_s21, f=f, verbose=True)
+
+###############################################################################
+# And now fit the data using the ``guess``-ed values as a starting point:
+result = resonator.fit(measured_s21, params=guess, f=f, verbose=True)
+
+print(result.fit_report() + '\n')
+result.params.pretty_print()
+
+###############################################################################
+# Now we'll make some plots of the data and fit. Define a convenience function
+# for plotting complex quantities:
+
+
+def plot_ri(data, *args, **kwargs):
+ plt.plot(data.real, data.imag, *args, **kwargs)
+
+
+fit_s21 = resonator.eval(params=result.params, f=f)
+guess_s21 = resonator.eval(params=guess, f=f)
+
+plt.figure()
+plot_ri(measured_s21, '.')
+plot_ri(fit_s21, '.-', label='best fit')
+plot_ri(guess_s21, '--', label='initial fit')
+plt.legend()
+plt.xlabel('Re(S21)')
+plt.ylabel('Im(S21)')
+
+plt.figure()
+plt.plot(f, 20*np.log10(np.abs(measured_s21)), '.')
+plt.plot(f, 20*np.log10(np.abs(fit_s21)), '.-', label='best fit')
+plt.plot(f, 20*np.log10(np.abs(guess_s21)), '--', label='initial fit')
+plt.legend()
+plt.ylabel('|S21| (dB)')
+plt.xlabel('MHz')
diff --git a/examples/example_confidence_interval.py b/examples/example_confidence_interval.py
new file mode 100644
index 0000000..72582c3
--- /dev/null
+++ b/examples/example_confidence_interval.py
@@ -0,0 +1,142 @@
+"""
+Calculate Confidence Intervals
+==============================
+
+"""
+import matplotlib.pyplot as plt
+from numpy import argsort, exp, linspace, pi, random, sign, sin, unique
+from scipy.interpolate import interp1d
+
+from lmfit import (Minimizer, conf_interval, conf_interval2d, create_params,
+ report_ci, report_fit)
+
+###############################################################################
+# Define the residual function, specify "true" parameter values, and generate
+# a synthetic data set with some noise:
+
+
+def residual(pars, x, data=None):
+ argu = (x*pars['decay'])**2
+ shift = pars['shift']
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+ model = pars['amp']*sin(shift + x/pars['period']) * exp(-argu)
+ if data is None:
+ return model
+ return model - data
+
+
+p_true = create_params(amp=14.0, period=5.33, shift=0.123, decay=0.010)
+
+x = linspace(0.0, 250.0, 2500)
+random.seed(2021)
+noise = random.normal(scale=0.7215, size=x.size)
+data = residual(p_true, x) + noise
+
+###############################################################################
+# Create fitting parameters and set initial values:
+fit_params = create_params(amp=13.0, period=2, shift=0.0, decay=0.020)
+
+###############################################################################
+# Set-up the minimizer and perform the fit using ``leastsq`` algorithm, and
+# show the report:
+mini = Minimizer(residual, fit_params, fcn_args=(x,), fcn_kws={'data': data})
+out = mini.leastsq()
+
+fit = residual(out.params, x)
+report_fit(out)
+
+###############################################################################
+# Calculate the confidence intervals for parameters and display the results:
+ci, tr = conf_interval(mini, out, trace=True)
+
+report_ci(ci)
+
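+###############################################################################
+# The ``trace`` returned by ``conf_interval`` contains, for each fixed
+# parameter, the re-adjusted values of the other parameters and the
+# associated probability. The grid below scatters these traces, coloured and
+# sized by probability: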
+names = out.params.keys()
+i = 0
+gs = plt.GridSpec(4, 4)
+sx = {}
+sy = {}
+for fixed in names:
+ j = 0
+ for free in names:
+ if j in sx and i in sy:
+ ax = plt.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])
+ elif i in sy:
+ ax = plt.subplot(gs[i, j], sharey=sy[i])
+ sx[j] = ax
+ elif j in sx:
+ ax = plt.subplot(gs[i, j], sharex=sx[j])
+ sy[i] = ax
+ else:
+ ax = plt.subplot(gs[i, j])
+ sy[i] = ax
+ sx[j] = ax
+ if i < 3:
+ plt.setp(ax.get_xticklabels(), visible=False)
+ else:
+ ax.set_xlabel(free)
+
+ if j > 0:
+ plt.setp(ax.get_yticklabels(), visible=False)
+ else:
+ ax.set_ylabel(fixed)
+
+ res = tr[fixed]
+ prob = res['prob']
+ f = prob < 0.96
+
+ x, y = res[free], res[fixed]
+ ax.scatter(x[f], y[f], c=1-prob[f], s=25*(1-prob[f]+0.5))
+        ax.autoscale(enable=True, axis='both')
+ j += 1
+ i += 1
+
+
+###############################################################################
+# It is also possible to calculate the confidence regions for two fixed
+# parameters using the function ``conf_interval2d``:
+names = list(out.params.keys())
+
+plt.figure()
+for i in range(4):
+ for j in range(4):
+ indx = 16-j*4-i
+ ax = plt.subplot(4, 4, indx)
+ ax.ticklabel_format(style='sci', scilimits=(-2, 2), axis='y')
+
+ # set-up labels and tick marks
+ ax.tick_params(labelleft=False, labelbottom=False)
+ if indx in (2, 5, 9, 13):
+ plt.ylabel(names[j])
+ ax.tick_params(labelleft=True)
+ if indx == 1:
+ ax.tick_params(labelleft=True)
+ if indx in (13, 14, 15, 16):
+ plt.xlabel(names[i])
+ ax.tick_params(labelbottom=True)
+        for label in ax.get_xticklabels():
+            label.set_rotation(45)
+
+ if i != j:
+ x, y, m = conf_interval2d(mini, out, names[i], names[j], 20, 20)
+ plt.contourf(x, y, m, linspace(0, 1, 10))
+
+ x = tr[names[i]][names[i]]
+ y = tr[names[i]][names[j]]
+ pr = tr[names[i]]['prob']
+ s = argsort(x)
+ plt.scatter(x[s], y[s], c=pr[s], s=30, lw=1)
+
+ else:
+ x = tr[names[i]][names[i]]
+ y = tr[names[i]]['prob']
+
+ t, s = unique(x, True)
+ f = interp1d(t, y[s], 'slinear')
+ xn = linspace(x.min(), x.max(), 50)
+ plt.plot(xn, f(xn), lw=1)
+ plt.ylabel('prob')
+ ax.tick_params(labelleft=True)
+
+plt.tight_layout()
+plt.show()
diff --git a/examples/example_detect_outliers.py b/examples/example_detect_outliers.py
new file mode 100644
index 0000000..08ae5f5
--- /dev/null
+++ b/examples/example_detect_outliers.py
@@ -0,0 +1,96 @@
+"""
+Outlier detection via leave-one-out
+===================================
+
+Outliers can sometimes be identified by assessing the influence of each
+datapoint. To assess the influence of one point, we fit the dataset without the
+point and compare the result with the fit of the full dataset. The code below
+shows how to do this with lmfit. Note that the presented method is very basic.
+"""
+from collections import defaultdict
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+###############################################################################
+# Generate test data and model:
+x = np.linspace(0.3, 10, 100)
+np.random.seed(1)
+y = 1.0 / (0.1 * x) + 2.0 + 3 * np.random.randn(x.size)
+
+params = lmfit.Parameters()
+params.add_many(('a', 0.1), ('b', 1))
+
+
+def func(x, a, b):
+ return 1.0 / (a * x) + b
+
+
+###############################################################################
+# Make five points outliers:
+idx = np.random.randint(0, x.size, 5)
+y[idx] += 10 * np.random.randn(idx.size)
+
+###############################################################################
+# Fit the data:
+model = lmfit.Model(func, independent_vars=['x'])
+fit_result = model.fit(y, x=x, a=0.1, b=2)
+
+###############################################################################
+# and gives the plot and fitting results below:
+fit_result.plot_fit()
+plt.plot(x[idx], y[idx], 'o', label='outliers')
+plt.show()
+
+###############################################################################
+print(fit_result.fit_report())
+
+###############################################################################
+# Fit the dataset while omitting one data point at a time; ``best_vals`` and
+# ``stderrs`` collect the parameter values and the standard errors relative
+# to the full-dataset fit:
+best_vals = defaultdict(lambda: np.zeros(x.size))
+stderrs = defaultdict(lambda: np.zeros(x.size))
+chi_sq = np.zeros_like(x)
+for i in range(x.size):
+ idx2 = np.arange(0, x.size)
+ idx2 = np.delete(idx2, i)
+ tmp_x = x[idx2]
+ tmp = model.fit(y[idx2], x=tmp_x, a=fit_result.params['a'],
+ b=fit_result.params['b'])
+ chi_sq[i] = tmp.chisqr
+ for p in tmp.params:
+ tpar = tmp.params[p]
+ best_vals[p][i] = tpar.value
+ stderrs[p][i] = (tpar.stderr / fit_result.params[p].stderr)
+
+###############################################################################
+# Plot the relative change in ``chisqr`` when each point is omitted:
+fig, ax = plt.subplots()
+ax.plot(x, (fit_result.chisqr - chi_sq) / chi_sq)
+ax.scatter(x[idx], fit_result.chisqr / chi_sq[idx] - 1, color='r',
+ label='outlier')
+ax.set_ylabel(r'Relative red. $\chi^2$ change')
+ax.set_xlabel('x')
+ax.legend()
+
+###############################################################################
+# Plot the influence on the parameter value and error of each point:
+fig, axs = plt.subplots(4, figsize=(4, 7), sharex='col')
+axs[0].plot(x, best_vals['a'])
+axs[0].scatter(x[idx], best_vals['a'][idx], color='r', label='outlier')
+axs[0].set_ylabel('best a')
+
+axs[1].plot(x, best_vals['b'])
+axs[1].scatter(x[idx], best_vals['b'][idx], color='r', label='outlier')
+axs[1].set_ylabel('best b')
+
+axs[2].plot(x, stderrs['a'])
+axs[2].scatter(x[idx], stderrs['a'][idx], color='r', label='outlier')
+axs[2].set_ylabel('err a change')
+
+axs[3].plot(x, stderrs['b'])
+axs[3].scatter(x[idx], stderrs['b'][idx], color='r', label='outlier')
+axs[3].set_ylabel('err b change')
+
+axs[3].set_xlabel('x')
diff --git a/examples/example_diffev.py b/examples/example_diffev.py
new file mode 100644
index 0000000..31a3800
--- /dev/null
+++ b/examples/example_diffev.py
@@ -0,0 +1,58 @@
+"""
+Fit Using differential_evolution Algorithm
+==========================================
+
+This example compares the ``leastsq`` and ``differential_evolution`` algorithms
+on a fairly simple problem.
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+
+def resid(params, x, ydata):
+ decay = params['decay'].value
+ offset = params['offset'].value
+ omega = params['omega'].value
+ amp = params['amp'].value
+
+ y_model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
+ return y_model - ydata
+
+
+###############################################################################
+# Generate synthetic data and set-up Parameters with initial values/boundaries:
+decay = 5
+offset = 1.0
+amp = 2.0
+omega = 4.0
+
+np.random.seed(2)
+x = np.linspace(0, 10, 101)
+y = offset + amp*np.sin(omega*x) * np.exp(-x/decay)
+yn = y + np.random.normal(size=y.size, scale=0.450)
+
+params = lmfit.Parameters()
+params.add('offset', 2.0, min=0, max=10.0)
+params.add('omega', 3.3, min=0, max=10.0)
+params.add('amp', 2.5, min=0, max=10.0)
+params.add('decay', 1.0, min=0, max=10.0)
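+
+###############################################################################
+# Note that ``differential_evolution`` requires finite ``min`` and ``max``
+# bounds for every varying parameter, which is why all four Parameters above
+# are bounded.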
+
+###############################################################################
+# Perform the fits and show fitting results and plot:
+o1 = lmfit.minimize(resid, params, args=(x, yn), method='leastsq')
+print("# Fit using leastsq:")
+lmfit.report_fit(o1)
+
+###############################################################################
+o2 = lmfit.minimize(resid, params, args=(x, yn), method='differential_evolution')
+print("\n\n# Fit using differential_evolution:")
+lmfit.report_fit(o2)
+
+###############################################################################
+plt.plot(x, yn, 'o', label='data')
+plt.plot(x, yn+o1.residual, '-', label='leastsq')
+plt.plot(x, yn+o2.residual, '--', label='diffev')
+plt.legend()
diff --git a/examples/example_emcee_Model_interface.py b/examples/example_emcee_Model_interface.py
new file mode 100644
index 0000000..60ff11f
--- /dev/null
+++ b/examples/example_emcee_Model_interface.py
@@ -0,0 +1,100 @@
+"""
+Emcee and the Model Interface
+=============================
+
+"""
+import corner
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+
+###############################################################################
+# Set up a double-exponential function and create a Model:
+def double_exp(x, a1, t1, a2, t2):
+ return a1*np.exp(-x/t1) + a2*np.exp(-(x-0.1) / t2)
+
+
+model = lmfit.Model(double_exp)
+
+###############################################################################
+# Generate some fake data from the model with added noise:
+truths = (3.0, 2.0, -5.0, 10.0)
+x = np.linspace(1, 10, 250)
+np.random.seed(0)
+y = double_exp(x, *truths)+0.1*np.random.randn(x.size)
+
+###############################################################################
+# Create model parameters and give them initial values:
+p = model.make_params(a1=4, t1=3, a2=4, t2=3)
+
+###############################################################################
+# Fit the model using a traditional minimizer, and show the output:
+result = model.fit(data=y, params=p, x=x, method='Nelder', nan_policy='omit')
+
+lmfit.report_fit(result)
+result.plot()
+
+###############################################################################
+# Calculate parameter covariance using ``emcee``:
+#
+# - start the walkers out at the best-fit values
+# - set ``is_weighted`` to ``False`` to estimate the noise weights
+# - set some sensible priors on the uncertainty to keep the MCMC in check
+
+emcee_kws = dict(steps=5000, burn=500, thin=20, is_weighted=False,
+ progress=False)
+emcee_params = result.params.copy()
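+# '__lnsigma' is the special parameter name lmfit uses for the unknown
+# log-noise scale when ``is_weighted=False``: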
+emcee_params.add('__lnsigma', value=np.log(0.1), min=np.log(0.001), max=np.log(2.0))
+
+###############################################################################
+# run the MCMC algorithm and show the results:
+result_emcee = model.fit(data=y, x=x, params=emcee_params, method='emcee',
+ nan_policy='omit', fit_kws=emcee_kws)
+
+###############################################################################
+lmfit.report_fit(result_emcee)
+
+###############################################################################
+result_emcee.plot_fit()
+plt.plot(x, model.eval(params=result.params, x=x), '--', label='Nelder')
+plt.legend()
+
+###############################################################################
+# Check the acceptance fraction to see whether ``emcee`` performed well:
+plt.plot(result_emcee.acceptance_fraction, 'o')
+plt.xlabel('walker')
+plt.ylabel('acceptance fraction')
+
+###############################################################################
+# Try to compute the autocorrelation time:
+if hasattr(result_emcee, "acor"):
+ print("Autocorrelation time for the parameters:")
+ print("----------------------------------------")
+ for i, p in enumerate(result.params):
+ print(f'{p} = {result_emcee.acor[i]:.3f}')
+
+###############################################################################
+# Plot the parameter covariances returned by ``emcee`` using ``corner``:
+emcee_corner = corner.corner(result_emcee.flatchain, labels=result_emcee.var_names,
+ truths=list(result_emcee.params.valuesdict().values()))
+
+###############################################################################
+print("\nmedian of posterior probability distribution")
+print('--------------------------------------------')
+lmfit.report_fit(result_emcee.params)
+
+###############################################################################
+# Find the maximum likelihood solution:
+highest_prob = np.argmax(result_emcee.lnprob)
+hp_loc = np.unravel_index(highest_prob, result_emcee.lnprob.shape)
+mle_soln = result_emcee.chain[hp_loc]
+print("\nMaximum Likelihood Estimation (MLE):")
+print('----------------------------------')
+for ix, param in enumerate(emcee_params):
+ print(f"{param}: {mle_soln[ix]:.3f}")
+
+quantiles = np.percentile(result_emcee.flatchain['t1'], [2.28, 15.9, 50, 84.2, 97.7])
+print(f"\n\n1 sigma spread = {0.5 * (quantiles[3] - quantiles[1]):.3f}")
+print(f"2 sigma spread = {0.5 * (quantiles[4] - quantiles[0]):.3f}")
diff --git a/examples/example_expression_model.py b/examples/example_expression_model.py
new file mode 100644
index 0000000..834e6d0
--- /dev/null
+++ b/examples/example_expression_model.py
@@ -0,0 +1,36 @@
+"""
+Using an ExpressionModel
+========================
+
+ExpressionModels allow a model to be built from a user-supplied expression.
+See: https://lmfit.github.io/lmfit-py/builtin_models.html#user-defined-models
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit.models import ExpressionModel
+
+###############################################################################
+# Generate synthetic data for the user-supplied model:
+x = np.linspace(-10, 10, 201)
+amp, cen, wid = 3.4, 1.8, 0.5
+
+y = amp * np.exp(-(x-cen)**2 / (2*wid**2)) / (np.sqrt(2*np.pi)*wid)
+np.random.seed(2021)
+y = y + np.random.normal(size=x.size, scale=0.01)
+
+###############################################################################
+# Define the ``ExpressionModel`` and perform the fit:
+gmod = ExpressionModel("amp * exp(-(x-cen)**2 /(2*wid**2))/(sqrt(2*pi)*wid)")
+result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+
+###############################################################################
+# this results in the following output:
+print(result.fit_report())
+
+###############################################################################
+plt.plot(x, y, 'o')
+plt.plot(x, result.init_fit, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.legend()
diff --git a/examples/example_fit_multi_datasets.py b/examples/example_fit_multi_datasets.py
new file mode 100644
index 0000000..beab3dd
--- /dev/null
+++ b/examples/example_fit_multi_datasets.py
@@ -0,0 +1,82 @@
+"""
+Fit Multiple Data Sets
+======================
+
+Fitting multiple (simulated) Gaussian data sets simultaneously.
+
+All minimizers require the residual array to be one-dimensional. Therefore, in
+the ``objective`` function we need to ``flatten`` the array before returning it.
+
+TODO: this could/should be using the Model interface / built-in models!
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit import Parameters, minimize, report_fit
+
+
+def gauss(x, amp, cen, sigma):
+ """Gaussian lineshape."""
+ return amp * np.exp(-(x-cen)**2 / (2.*sigma**2))
+
+
+def gauss_dataset(params, i, x):
+ """Calculate Gaussian lineshape from parameters for data set."""
+ amp = params[f'amp_{i+1}']
+ cen = params[f'cen_{i+1}']
+ sig = params[f'sig_{i+1}']
+ return gauss(x, amp, cen, sig)
+
+
+def objective(params, x, data):
+ """Calculate total residual for fits of Gaussians to several data sets."""
+ ndata, _ = data.shape
+    resid = np.zeros_like(data)
+
+ # make residual per data set
+ for i in range(ndata):
+ resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
+
+ # now flatten this to a 1D array, as minimize() needs
+ return resid.flatten()
+
+
+###############################################################################
+# Create five simulated Gaussian data sets
+np.random.seed(2021)
+x = np.linspace(-1, 2, 151)
+data = []
+for _ in np.arange(5):
+ amp = 0.60 + 9.50*np.random.rand()
+ cen = -0.20 + 1.20*np.random.rand()
+ sig = 0.25 + 0.03*np.random.rand()
+ dat = gauss(x, amp, cen, sig) + np.random.normal(size=x.size, scale=0.1)
+ data.append(dat)
+data = np.array(data)
+
+###############################################################################
+# Create five sets of fitting parameters, one per data set
+fit_params = Parameters()
+for iy, y in enumerate(data):
+ fit_params.add(f'amp_{iy+1}', value=0.5, min=0.0, max=200)
+ fit_params.add(f'cen_{iy+1}', value=0.4, min=-2.0, max=2.0)
+ fit_params.add(f'sig_{iy+1}', value=0.3, min=0.01, max=3.0)
+
+###############################################################################
+# Constrain the values of sigma to be the same for all peaks by assigning
+# sig_2, ..., sig_5 to be equal to sig_1.
+for iy in (2, 3, 4, 5):
+ fit_params[f'sig_{iy}'].expr = 'sig_1'
+
+###############################################################################
+# Run the global fit and show the fitting result
+out = minimize(objective, fit_params, args=(x, data))
+report_fit(out.params)
+
+###############################################################################
+# Plot the data sets and fits
+plt.figure()
+for i in range(5):
+ y_fit = gauss_dataset(out.params, i, x)
+ plt.plot(x, data[i, :], 'o', x, y_fit, '-')
diff --git a/examples/example_fit_with_algebraic_constraint.py b/examples/example_fit_with_algebraic_constraint.py
new file mode 100644
index 0000000..df37b0e
--- /dev/null
+++ b/examples/example_fit_with_algebraic_constraint.py
@@ -0,0 +1,44 @@
+"""
+Fit with Algebraic Constraint
+=============================
+
+"""
+###############################################################################
+# Example on how to use algebraic constraints using the ``expr`` attribute.
+import matplotlib.pyplot as plt
+from numpy import linspace, random
+
+from lmfit.lineshapes import gaussian, lorentzian
+from lmfit.models import GaussianModel, LinearModel, LorentzianModel
+
+random.seed(0)
+x = linspace(0.0, 20.0, 601)
+
+data = (gaussian(x, amplitude=21, center=8.1, sigma=1.2) +
+ lorentzian(x, amplitude=10, center=9.6, sigma=2.4) +
+ 0.01 + x*0.05 + random.normal(scale=0.23, size=x.size))
+
+
+model = GaussianModel(prefix='g_') + LorentzianModel(prefix='l_') + LinearModel(prefix='line_')
+
+params = model.make_params(g_amplitude=10, g_center=9, g_sigma=1,
+ line_slope=0, line_intercept=0)
+
+params.add(name='total_amplitude', value=20)
+params.set(l_amplitude=dict(expr='total_amplitude - g_amplitude'))
+params.set(l_center=dict(expr='1.5+g_center'))
+params.set(l_sigma=dict(expr='2*g_sigma'))
+
+
+data_uncertainty = 0.021 # estimate of data error (for all data points)
+
+init = model.eval(params, x=x)
+result = model.fit(data, params, x=x, weights=1.0/data_uncertainty)
+
+print(result.fit_report())
+
+plt.plot(x, data, '+')
+plt.plot(x, init, '--', label='initial fit')
+plt.plot(x, result.best_fit, '-', label='best fit')
+plt.legend()
+plt.show()
diff --git a/examples/example_fit_with_bounds.py b/examples/example_fit_with_bounds.py
new file mode 100644
index 0000000..96453c7
--- /dev/null
+++ b/examples/example_fit_with_bounds.py
@@ -0,0 +1,60 @@
+"""
+Fit Using Bounds
+================
+
+A major advantage of using lmfit is that one can specify boundaries on fitting
+parameters, even if the underlying algorithm in SciPy does not support this.
+For more information on how this is implemented, please refer to:
+https://lmfit.github.io/lmfit-py/bounds.html
+
+The example below shows how to set boundaries using the ``min`` and ``max``
+attributes to fitting parameters.
+
+"""
+import matplotlib.pyplot as plt
+from numpy import exp, linspace, pi, random, sign, sin
+
+from lmfit import create_params, minimize
+from lmfit.printfuncs import report_fit
+
+###############################################################################
+# create the 'true' Parameter values and residual function:
+p_true = create_params(amp=14.0, period=5.4321, shift=0.12345, decay=0.010)
+
+
+def residual(pars, x, data=None):
+ argu = (x * pars['decay'])**2
+ shift = pars['shift']
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+ model = pars['amp'] * sin(shift + x/pars['period']) * exp(-argu)
+ if data is None:
+ return model
+ return model - data
+
+
+###############################################################################
+# Generate synthetic data and initialize fitting Parameters:
+random.seed(0)
+x = linspace(0, 250, 1500)
+noise = random.normal(scale=2.8, size=x.size)
+data = residual(p_true, x) + noise
+
+fit_params = create_params(amp=dict(value=13, max=20, min=0),
+ period=dict(value=2, max=10),
+ shift=dict(value=0, max=pi/2., min=-pi/2.),
+ decay=dict(value=0.02, max=0.1, min=0))
+
+###############################################################################
+# Perform the fit and show the results:
+out = minimize(residual, fit_params, args=(x,), kws={'data': data})
+fit = residual(out.params, x)
+
+###############################################################################
+report_fit(out, modelpars=p_true, correl_mode='table')
+
+###############################################################################
+plt.plot(x, data, 'o', label='data')
+plt.plot(x, fit, label='best fit')
+plt.legend()
+plt.show()
diff --git a/examples/example_fit_with_derivfunc.py b/examples/example_fit_with_derivfunc.py
new file mode 100644
index 0000000..1647705
--- /dev/null
+++ b/examples/example_fit_with_derivfunc.py
@@ -0,0 +1,76 @@
+"""
+Fit Specifying a Function to Compute the Jacobian
+=================================================
+
+Specifying an analytical function to calculate the Jacobian can speed up the
+fitting procedure.
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit import Minimizer, Parameters
+
+
+def func(pars, x, data=None):
+ a, b, c = pars['a'], pars['b'], pars['c']
+ model = a * np.exp(-b*x) + c
+ if data is None:
+ return model
+ return model - data
+
+
+def dfunc(pars, x, data=None):
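+    """Return the analytical Jacobian of ``func``.
+
+    One row per parameter (d/da, d/db, d/dc); ``col_deriv=1`` in the
+    ``leastsq`` call below tells SciPy the derivatives are laid out this way.
+    """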
+ a, b = pars['a'], pars['b']
+ v = np.exp(-b*x)
+ return np.array([v, -a*x*v, np.ones(len(x))])
+
+
+def f(var, x):
+ return var[0] * np.exp(-var[1]*x) + var[2]
+
+
+params = Parameters()
+params.add('a', value=10)
+params.add('b', value=10)
+params.add('c', value=10)
+
+a, b, c = 2.5, 1.3, 0.8
+x = np.linspace(0, 4, 50)
+y = f([a, b, c], x)
+np.random.seed(2021)
+data = y + 0.15*np.random.normal(size=x.size)
+
+###############################################################################
+# Fit without analytic derivative:
+min1 = Minimizer(func, params, fcn_args=(x,), fcn_kws={'data': data})
+out1 = min1.leastsq()
+fit1 = func(out1.params, x)
+
+###############################################################################
+# Fit with analytic derivative:
+min2 = Minimizer(func, params, fcn_args=(x,), fcn_kws={'data': data})
+out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
+fit2 = func(out2.params, x)
+
+###############################################################################
+# Compare the fits to the exponential decay model ``a*exp(-b*x) + c`` with
+# and without analytical derivatives:
+print(f'"true" parameters are: a = {a:.3f}, b = {b:.3f}, c = {c:.3f}\n\n'
+ '|=========================================\n'
+ '| Statistic/Parameter | Without | With |\n'
+ '|-----------------------------------------\n'
+ f'| N Function Calls | {out1.nfev:d} | {out2.nfev:d} |\n'
+ f'| Chi-square | {out1.chisqr:.4f} | {out2.chisqr:.4f} |\n'
+ f"| a | {out1.params['a'].value:.4f} | {out2.params['a'].value:.4f} |\n"
+ f"| b | {out1.params['b'].value:.4f} | {out2.params['b'].value:.4f} |\n"
+ f"| c | {out1.params['c'].value:.4f} | {out2.params['c'].value:.4f} |\n"
+ '------------------------------------------')
+
+###############################################################################
+# and the best-fit to the synthetic data (with added noise) is the same for
+# both methods:
+plt.plot(x, data, 'o', label='data')
+plt.plot(x, fit1, label='without analytical derivative')
+plt.plot(x, fit2, '--', label='with analytical derivative')
+plt.legend()
diff --git a/examples/example_fit_with_inequality.py b/examples/example_fit_with_inequality.py
new file mode 100644
index 0000000..c4661c4
--- /dev/null
+++ b/examples/example_fit_with_inequality.py
@@ -0,0 +1,57 @@
+"""
+Fit Using Inequality Constraint
+===============================
+
+Sometimes specifying boundaries using ``min`` and ``max`` is not sufficient,
+and more complicated (inequality) constraints are needed. In the example below
+the center of the Lorentzian peak is constrained to be between 0 and 5 units
+away from the center of the Gaussian peak.
+
+See also: https://lmfit.github.io/lmfit-py/constraints.html#using-inequality-constraints
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+from lmfit import Minimizer, create_params, report_fit
+from lmfit.lineshapes import gaussian, lorentzian
+
+
+def residual(pars, x, data):
+ model = (gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g']) +
+ lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l']))
+ return model - data
+
+
+###############################################################################
+# Generate the simulated data using a Gaussian and Lorentzian lineshape:
+np.random.seed(0)
+x = np.linspace(0, 20.0, 601)
+
+data = (gaussian(x, 21, 6.1, 1.2) + lorentzian(x, 10, 9.6, 1.3) +
+ np.random.normal(scale=0.1, size=x.size))
+
+###############################################################################
+# Create the fitting parameters and set an inequality constraint for ``cen_l``.
+# First, we add a new fitting parameter ``peak_split``, which can take values
+# between 0 and 5. Afterwards, we constrain the value for ``cen_l`` using the
+# expression to be ``'peak_split+cen_g'``:
+pfit = create_params(amp_g=10, cen_g=5, wid_g=1, amp_l=10,
+ peak_split=dict(value=2.5, min=0, max=5),
+ cen_l=dict(expr='peak_split+cen_g'),
+ wid_l=dict(expr='wid_g'))
+
+mini = Minimizer(residual, pfit, fcn_args=(x, data))
+out = mini.leastsq()
+best_fit = data + out.residual
+
+###############################################################################
+# Performing a fit, here using the ``leastsq`` algorithm, gives the following
+# fitting results:
+report_fit(out.params)
+
+###############################################################################
+# and figure:
+plt.plot(x, data, 'o')
+plt.plot(x, best_fit, '--', label='best fit')
+plt.legend()
+plt.show()
diff --git a/examples/example_reduce_fcn.py b/examples/example_reduce_fcn.py
new file mode 100644
index 0000000..c867738
--- /dev/null
+++ b/examples/example_reduce_fcn.py
@@ -0,0 +1,68 @@
+"""
+Fit Specifying Different Reduce Function
+========================================
+
+The ``reduce_fcn`` specifies how to convert a residual array to a scalar value
+for the scalar minimizers. The default value is None (i.e., "sum of squares of
+residual") - alternatives are: ``negentropy``, ``neglogcauchy``, or a
+user-specified ``callable``. For more information please refer to:
+https://lmfit.github.io/lmfit-py/fitting.html#using-the-minimizer-class
+
+Here, we use as an example the Cauchy log-likelihood (``neglogcauchy``; the
+Cauchy distribution equals Student's t with one degree of freedom) for robust
+fitting of data with outliers.
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+
+def resid(params, x, ydata):
+ decay = params['decay'].value
+ offset = params['offset'].value
+ omega = params['omega'].value
+ amp = params['amp'].value
+
+ y_model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
+ return y_model - ydata
+
+
+###############################################################################
+# Generate synthetic data with noise/outliers and initialize fitting Parameters:
+decay = 5
+offset = 1.0
+amp = 2.0
+omega = 4.0
+
+np.random.seed(2)
+x = np.linspace(0, 10, 101)
+y = offset + amp * np.sin(omega*x) * np.exp(-x/decay)
+yn = y + np.random.normal(size=y.size, scale=0.250)
+
+outliers = np.random.randint(int(len(x)/3.0), len(x), int(len(x)/12))
+yn[outliers] += 5*np.random.random(len(outliers))
+
+params = lmfit.create_params(offset=2.0, omega=3.3, amp=2.5,
+ decay=dict(value=1, min=0))
+
+###############################################################################
+# Perform fits using the ``L-BFGS-B`` method with different ``reduce_fcn``:
+method = 'L-BFGS-B'
+o1 = lmfit.minimize(resid, params, args=(x, yn), method=method)
+print("# Fit using sum of squares:\n")
+lmfit.report_fit(o1)
+
+###############################################################################
+o2 = lmfit.minimize(resid, params, args=(x, yn), method=method,
+ reduce_fcn='neglogcauchy')
+print("\n\n# Robust Fit, using log-likelihood with Cauchy PDF:\n")
+lmfit.report_fit(o2)
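+
+###############################################################################
+# A user-specified ``reduce_fcn`` should accept the residual array and return
+# a scalar. As a sketch (``huber_loss`` below is an illustration, not part of
+# lmfit), a Huber-style robust loss can be passed in the same way:
+def huber_loss(residuals, delta=1.0):
+    """Sum of Huber losses: quadratic near zero, linear for large residuals."""
+    r = np.abs(residuals)
+    return np.sum(np.where(r <= delta, 0.5*r**2, delta*(r - 0.5*delta)))
+
+
+o3 = lmfit.minimize(resid, params, args=(x, yn), method=method,
+                    reduce_fcn=huber_loss)
+print("\n\n# Robust Fit, using a user-specified Huber loss:\n")
+lmfit.report_fit(o3)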
+
+###############################################################################
+plt.plot(x, y, 'o', label='true function')
+plt.plot(x, yn, '--*', label='with noise+outliers')
+plt.plot(x, yn+o1.residual, '-', label='sum of squares fit')
+plt.plot(x, yn+o2.residual, '-', label='robust fit')
+plt.legend()
+plt.show()
diff --git a/examples/example_sympy.py b/examples/example_sympy.py
new file mode 100644
index 0000000..1039073
--- /dev/null
+++ b/examples/example_sympy.py
@@ -0,0 +1,73 @@
+"""
+Building a lmfit model with SymPy
+=================================
+
+SymPy is a Python library for symbolic mathematics. It can be very useful to
+build a model with SymPy and then apply that model to the data with lmfit.
+This example shows how to do that. Please note that this example requires
+both the sympy and matplotlib packages.
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+import sympy
+from sympy.parsing import sympy_parser
+
+import lmfit
+
+###############################################################################
+# Instead of creating the SymPy symbols explicitly and building an expression
+# with them, we will use the SymPy parser.
+
+gauss_peak1 = sympy_parser.parse_expr('A1*exp(-(x-xc1)**2/(2*sigma1**2))')
+gauss_peak2 = sympy_parser.parse_expr('A2*exp(-(x-xc2)**2/(2*sigma2**2))')
+exp_back = sympy_parser.parse_expr('B*exp(-x/xw)')
+
+model_list = sympy.Array((gauss_peak1, gauss_peak2, exp_back))
+model = sum(model_list)
+print(model)
+
+###############################################################################
+# We are using SymPy's lambdify function to make a function from the model
+# expressions. We then use these functions to generate some fake data.
+
+model_list_func = sympy.lambdify(list(model_list.free_symbols), model_list)
+model_func = sympy.lambdify(list(model.free_symbols), model)
+
+###############################################################################
+# Generate synthetic data with noise and plot the data.
+np.random.seed(1)
+x = np.linspace(0, 10, 40)
+param_values = dict(x=x, A1=2, sigma1=1, sigma2=1, A2=3, xc1=2, xc2=5, xw=4, B=5)
+y = model_func(**param_values)
+yi = model_list_func(**param_values)
+yn = y + np.random.randn(y.size)*0.4
+
+plt.plot(x, yn, 'o')
+plt.plot(x, y)
+for c in yi:
+ plt.plot(x, c, color='0.7')
+
+###############################################################################
+# Next, we will just create a lmfit model from the function and fit the data.
+lm_mod = lmfit.Model(model_func, independent_vars=['x'])
+res = lm_mod.fit(data=yn, **param_values)
+
+###############################################################################
+res.plot_fit()
+plt.plot(x, y, label='true')
+plt.legend()
+
+###############################################################################
+# The nice thing about using SymPy is that we can easily modify our fit
+# function. Let's assume we know that the widths of both Gaussians are
+# identical. Similarly, we assume that the amplitude ratio of the two
+# Gaussians is fixed at 3:2 for some reason. Both constraints can be
+# expressed by simply substituting the variables.
+model2 = model.subs('sigma2', 'sigma1').subs('A2', '3/2*A1')
+model2_func = sympy.lambdify(list(model2.free_symbols), model2)
+lm_mod = lmfit.Model(model2_func, independent_vars=['x'])
+param2_values = dict(x=x, A1=2, sigma1=1, xc1=2, xc2=5, xw=4, B=5)
+res2 = lm_mod.fit(data=yn, **param2_values)
+res2.plot_fit()
+plt.plot(x, y, label='true')
+plt.legend()
diff --git a/examples/example_two_dimensional_peak.py b/examples/example_two_dimensional_peak.py
new file mode 100644
index 0000000..8b732fc
--- /dev/null
+++ b/examples/example_two_dimensional_peak.py
@@ -0,0 +1,165 @@
+"""
+Fit Two Dimensional Peaks
+=========================
+
+This example illustrates how to handle two-dimensional data with lmfit.
+
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.interpolate import griddata
+
+import lmfit
+from lmfit.lineshapes import gaussian2d, lorentzian
+
+###############################################################################
+# Two-dimensional Gaussian
+# ------------------------
+# We start by considering a simple two-dimensional Gaussian function, which
+# depends on coordinates `(x, y)`. The most general case of experimental
+# data will be irregularly sampled and noisy. Let's simulate some:
+npoints = 10000
+np.random.seed(2021)
+x = np.random.rand(npoints)*10 - 4
+y = np.random.rand(npoints)*5 - 3
+z = gaussian2d(x, y, amplitude=30, centerx=2, centery=-.5, sigmax=.6, sigmay=.8)
+z += 2*(np.random.rand(*z.shape)-.5)
+error = np.sqrt(z+1)
+###############################################################################
+# To plot this, we can interpolate the data onto a grid.
+X, Y = np.meshgrid(np.linspace(x.min(), x.max(), 100),
+ np.linspace(y.min(), y.max(), 100))
+Z = griddata((x, y), z, (X, Y), method='linear', fill_value=0)
+
+fig, ax = plt.subplots()
+art = ax.pcolor(X, Y, Z, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+ax.set_xlabel('x')
+ax.set_ylabel('y')
+plt.show()
+###############################################################################
+# In this case, we can use a built-in model to fit
+model = lmfit.models.Gaussian2dModel()
+params = model.guess(z, x, y)
+result = model.fit(z, x=x, y=y, params=params, weights=1/error)
+lmfit.report_fit(result)
+###############################################################################
+# To check the fit, we can evaluate the function on the same grid we used
+# before and make plots of the data, the fit and the difference between the two.
+fig, axs = plt.subplots(2, 2, figsize=(10, 10))
+
+vmax = np.nanpercentile(Z, 99.9)
+
+ax = axs[0, 0]
+art = ax.pcolor(X, Y, Z, vmin=0, vmax=vmax, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+ax.set_title('Data')
+
+ax = axs[0, 1]
+fit = model.func(X, Y, **result.best_values)
+art = ax.pcolor(X, Y, fit, vmin=0, vmax=vmax, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+ax.set_title('Fit')
+
+ax = axs[1, 0]
+fit = model.func(X, Y, **result.best_values)
+art = ax.pcolor(X, Y, Z-fit, vmin=0, vmax=10, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+ax.set_title('Data - Fit')
+
+for ax in axs.ravel():
+ ax.set_xlabel('x')
+ ax.set_ylabel('y')
+axs[1, 1].remove()
+plt.show()
+
+
+###############################################################################
+# Two-dimensional off-axis Lorentzian
+# -----------------------------------
+# We now go on to show a harder example, in which the peak has a Lorentzian
+# profile and an off-axis anisotropic shape. This can be handled by applying a
+# suitable coordinate transform and then using the `lorentzian` function that
+# lmfit provides in the lineshapes module.
+def lorentzian2d(x, y, amplitude=1., centerx=0., centery=0., sigmax=1., sigmay=1.,
+ rotation=0):
+ """Return a two dimensional lorentzian.
+
+ The maximum of the peak occurs at ``centerx`` and ``centery``
+ with widths ``sigmax`` and ``sigmay`` in the x and y directions
+ respectively. The peak can be rotated by choosing the value of ``rotation``
+ in radians.
+ """
+ xp = (x - centerx)*np.cos(rotation) - (y - centery)*np.sin(rotation)
+ yp = (x - centerx)*np.sin(rotation) + (y - centery)*np.cos(rotation)
+ R = (xp/sigmax)**2 + (yp/sigmay)**2
+
+ return 2*amplitude*lorentzian(R)/(np.pi*sigmax*sigmay)
+
+
+###############################################################################
+# Data can be simulated and plotted in the same way as we did before.
+npoints = 10000
+x = np.random.rand(npoints)*10 - 4
+y = np.random.rand(npoints)*5 - 3
+z = lorentzian2d(x, y, amplitude=30, centerx=2, centery=-.5, sigmax=.6,
+ sigmay=1.2, rotation=30*np.pi/180)
+z += 2*(np.random.rand(*z.shape)-.5)
+error = np.sqrt(z+1)
+
+X, Y = np.meshgrid(np.linspace(x.min(), x.max(), 100),
+ np.linspace(y.min(), y.max(), 100))
+Z = griddata((x, y), z, (X, Y), method='linear', fill_value=0)
+
+fig, ax = plt.subplots()
+ax.set_xlabel('x')
+ax.set_ylabel('y')
+art = ax.pcolor(X, Y, Z, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+plt.show()
+
+###############################################################################
+# To fit, create a model from the function. Don't forget to tell lmfit that both
+# `x` and `y` are independent variables. Keep in mind that lmfit will take the
+# function keywords as default initial guesses in this case and that it will not
+# know that certain parameters only make physical sense over restricted ranges.
+# For example, peak widths should be positive and the rotation can be
+# restricted to a quarter circle.
+model = lmfit.Model(lorentzian2d, independent_vars=['x', 'y'])
+params = model.make_params(amplitude=10, centerx=x[np.argmax(z)],
+ centery=y[np.argmax(z)])
+params['rotation'].set(value=.1, min=0, max=np.pi/2)
+params['sigmax'].set(value=1, min=0)
+params['sigmay'].set(value=2, min=0)
+
+result = model.fit(z, x=x, y=y, params=params, weights=1/error)
+lmfit.report_fit(result)
+
+###############################################################################
+# The process of making plots to check it worked is the same as before.
+fig, axs = plt.subplots(2, 2, figsize=(10, 10))
+
+vmax = np.nanpercentile(Z, 99.9)
+
+ax = axs[0, 0]
+art = ax.pcolor(X, Y, Z, vmin=0, vmax=vmax, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+ax.set_title('Data')
+
+ax = axs[0, 1]
+fit = model.func(X, Y, **result.best_values)
+art = ax.pcolor(X, Y, fit, vmin=0, vmax=vmax, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+ax.set_title('Fit')
+
+ax = axs[1, 0]
+fit = model.func(X, Y, **result.best_values)
+art = ax.pcolor(X, Y, Z-fit, vmin=0, vmax=10, shading='auto')
+plt.colorbar(art, ax=ax, label='z')
+ax.set_title('Data - Fit')
+
+for ax in axs.ravel():
+ ax.set_xlabel('x')
+ ax.set_ylabel('y')
+axs[1, 1].remove()
+plt.show()
diff --git a/examples/example_use_pandas.py b/examples/example_use_pandas.py
new file mode 100644
index 0000000..d908014
--- /dev/null
+++ b/examples/example_use_pandas.py
@@ -0,0 +1,28 @@
+"""
+Fit with Data in a pandas DataFrame
+===================================
+
+Simple example demonstrating how to read in the data using ``pandas`` and
+supply the elements of the ``DataFrame`` to lmfit.
+
+"""
+import pandas as pd
+
+from lmfit.models import LorentzianModel
+
+###############################################################################
+# read the data into a pandas DataFrame, and use the ``x`` and ``y`` columns:
+dframe = pd.read_csv('peak.csv')
+
+model = LorentzianModel()
+params = model.guess(dframe['y'], x=dframe['x'])
+
+result = model.fit(dframe['y'], params, x=dframe['x'])
+
+###############################################################################
+# and gives the fitting results:
+print(result.fit_report())
+
+###############################################################################
+# and plot below:
+result.plot_fit()
diff --git a/examples/lmfit_emcee_model_selection.py b/examples/lmfit_emcee_model_selection.py
new file mode 100644
index 0000000..f81111e
--- /dev/null
+++ b/examples/lmfit_emcee_model_selection.py
@@ -0,0 +1,197 @@
+"""
+Model Selection using lmfit and emcee
+=====================================
+
+FIXME: this is a useful example; however, it doesn't run correctly anymore as
+the PTSampler was removed in emcee v3...
+
+"""
+###############################################################################
+# `lmfit.emcee` can be used to obtain the posterior probability distribution
+# of parameters, given a set of experimental data. This notebook shows how it
+# can be used for Bayesian model selection.
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import lmfit
+
+###############################################################################
+# Define a Gaussian lineshape and generate some data:
+
+
+def gauss(x, a_max, loc, sd):
+ return a_max * np.exp(-((x - loc) / sd)**2)
+
+
+x = np.linspace(3, 7, 250)
+np.random.seed(0)
+y = 4 + 10 * x + gauss(x, 200, 5, 0.5) + gauss(x, 60, 5.8, 0.2)
+dy = np.sqrt(y)
+y += dy * np.random.randn(y.size)
+
+
+###############################################################################
+# Plot the data:
+plt.errorbar(x, y, yerr=dy)
+
+###############################################################################
+# Define the normalised residual for the data:
+
+
+def residual(p, just_generative=False):
+ v = p.valuesdict()
+ generative = v['a'] + v['b'] * x
+ M = 0
+ while f'a_max{M}' in v:
+ generative += gauss(x, v[f'a_max{M}'], v[f'loc{M}'], v[f'sd{M}'])
+ M += 1
+
+ if just_generative:
+ return generative
+ return (generative - y) / dy
+
+
+###############################################################################
+# Create a Parameter set for the initial guesses:
+def initial_peak_params(M):
+ p = lmfit.Parameters()
+
+ # a and b give a linear background
+ a = np.mean(y)
+ b = 1
+
+ # a_max, loc and sd are the amplitude, location and SD of each Gaussian
+ # component
+ a_max = np.max(y)
+ loc = np.mean(x)
+ sd = (np.max(x) - np.min(x)) * 0.5
+
+ p.add_many(('a', a, True, 0, 10), ('b', b, True, 1, 15))
+
+ for i in range(M):
+ p.add_many((f'a_max{i}', 0.5 * a_max, True, 10, a_max),
+ (f'loc{i}', loc, True, np.min(x), np.max(x)),
+ (f'sd{i}', sd, True, 0.1, np.max(x) - np.min(x)))
+ return p
+
+
+###############################################################################
+# Solving with `minimize` gives the Maximum Likelihood solution.
+p1 = initial_peak_params(1)
+mi1 = lmfit.minimize(residual, p1, method='differential_evolution')
+
+lmfit.printfuncs.report_fit(mi1.params, min_correl=0.5)
+
+###############################################################################
+# From inspection of the data above we can tell that there is going to be more
+# than 1 Gaussian component, but how many are there? A Bayesian approach can
+# be used for this model selection problem. We can do this with `lmfit.emcee`,
+# which uses the `emcee` package to do a Markov Chain Monte Carlo sampling of
+# the posterior probability distribution. `lmfit.emcee` requires a function
+# that returns the log-posterior probability. The log-posterior probability is
+# a sum of the log-prior probability and log-likelihood functions.
+#
+# The log-prior probability encodes information about what you already believe
+# about the system. `lmfit.emcee` assumes that this log-prior probability is
+# zero if all the parameters are within their bounds and `-np.inf` if any of
+# the parameters are outside their bounds. As such it's a uniform prior.
+#
+# The log-likelihood function is given below. To use non-uniform priors you
+# should include those terms in `lnprob`. This is the log-likelihood
+# probability used for the sampling.
+
+
+def lnprob(p):
+ resid = residual(p, just_generative=True)
+ return -0.5 * np.sum(((resid - y) / dy)**2 + np.log(2 * np.pi * dy**2))
+
+
+###############################################################################
+# To start with we have to create the minimizers and *burn* them in. We create
+# 4 different minimizers representing 0, 1, 2 or 3 Gaussian contributions. To
+# do the model selection we have to integrate over the log-posterior
+# distribution to see which has the higher probability. This is done using the
+# `thermodynamic_integration_log_evidence` method of the `sampler` attribute
+# contained in the `lmfit.Minimizer` object.
+
+# Work out the log-evidence for different numbers of peaks:
+total_steps = 310
+burn = 300
+thin = 10
+ntemps = 15
+workers = 1 # the multiprocessing does not work with sphinx-gallery
+log_evidence = []
+res = []
+
+# set up the Minimizers
+for i in range(4):
+ p0 = initial_peak_params(i)
+ # you can't use lnprob as a userfcn with minimize because it needs to be
+ # maximised
+ mini = lmfit.Minimizer(residual, p0)
+ out = mini.minimize(method='differential_evolution')
+ res.append(out)
+
+mini = []
+# burn in the samplers
+for i in range(4):
+ # do the sampling
+ mini.append(lmfit.Minimizer(lnprob, res[i].params))
+ out = mini[i].emcee(steps=total_steps, ntemps=ntemps, workers=workers,
+ reuse_sampler=False, float_behavior='posterior',
+ progress=False)
+ # get the evidence
+ print(i, total_steps, mini[i].sampler.thermodynamic_integration_log_evidence())
+ log_evidence.append(mini[i].sampler.thermodynamic_integration_log_evidence()[0])
+
+###############################################################################
+# Once we've burned in the samplers we have to do a collection run. We thin
+# out the MCMC chain to reduce autocorrelation between successive samples.
+for j in range(6):
+ total_steps += 100
+ for i in range(4):
+ # do the sampling
+ res = mini[i].emcee(burn=burn, steps=100, thin=thin, ntemps=ntemps,
+ workers=workers, reuse_sampler=True, progress=False)
+ # get the evidence
+ print(i, total_steps, mini[i].sampler.thermodynamic_integration_log_evidence())
+ log_evidence.append(mini[i].sampler.thermodynamic_integration_log_evidence()[0])
+
+
+plt.plot(log_evidence[-4:])
+plt.ylabel('Log-evidence')
+plt.xlabel('number of peaks')
+
+###############################################################################
+# The Bayes factor is related to the exponential of the difference between the
+# log-evidence values. Thus, 0 peaks is not very likely compared to 1 peak.
+# But 1 peak is not as good as 2 peaks. 3 peaks is not that much better than 2
+# peaks.
+r01 = np.exp(log_evidence[-4] - log_evidence[-3])
+r12 = np.exp(log_evidence[-3] - log_evidence[-2])
+r23 = np.exp(log_evidence[-2] - log_evidence[-1])
+
+print(r01, r12, r23)
+
+###############################################################################
+# These numbers tell us that zero peaks is vanishingly unlikely compared to
+# one peak, two peaks is 7e49 times more likely than one peak, and three
+# peaks is only 1.1 times more likely than two peaks. With this data one
+# would say that two peaks is
+# sufficient. Caution has to be taken with these values. The log-priors for
+# this sampling are uniform but improper, i.e. they are not normalised properly.
+# Internally the lnprior probability is calculated as 0 if all parameters are
+# within their bounds and `-np.inf` if any parameter is outside the bounds.
+# The `lnprob` function defined above is the log-likelihood alone. Remember,
+# that the log-posterior probability is equal to the sum of the log-prior and
+# log-likelihood probabilities. Extra terms can be added to the lnprob function
+# to calculate the normalised log-probability. These terms would look something
+# like:
+#
+# .. math::
+#
+# \log (\prod_i \frac{1}{max_i - min_i})
+#
+# where :math:`max_i` and :math:`min_i` are the upper and lower bounds for the
+# parameter, and the prior is a uniform distribution. Other types of prior are
+# possible. For example, you might expect the prior to be Gaussian.
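+
+###############################################################################
+# As a minimal sketch (not used in the fit above), a normalised log-posterior
+# could add that constant prior term to the log-likelihood returned by
+# ``lnprob``; the name ``lnprob_normalised`` is ours, not part of lmfit:
+def lnprob_normalised(params):
+    # constant log-prior of independent uniform priors:
+    # sum_i -log(max_i - min_i) over the varying parameters
+    lnprior = -np.sum([np.log(par.max - par.min)
+                       for par in params.values() if par.vary])
+    return lnprior + lnprob(params)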
diff --git a/examples/model1d_gauss.dat b/examples/model1d_gauss.dat
new file mode 100644
index 0000000..c7d4bd3
--- /dev/null
+++ b/examples/model1d_gauss.dat
@@ -0,0 +1,103 @@
+#---------------------------------
+# col1 col2
+ 0.000000 -0.305196
+ 0.100000 0.004932
+ 0.200000 0.192535
+ 0.300000 0.100639
+ 0.400000 0.244992
+ 0.500000 -0.001095
+ 0.600000 -0.017190
+ 0.700000 -0.138330
+ 0.800000 -0.065546
+ 0.900000 0.150089
+ 1.000000 0.021981
+ 1.100000 0.231610
+ 1.200000 0.186122
+ 1.300000 0.224188
+ 1.400000 0.355904
+ 1.500000 -0.069747
+ 1.600000 0.062342
+ 1.700000 -0.025591
+ 1.800000 0.052080
+ 1.900000 -0.329106
+ 2.000000 -0.012132
+ 2.100000 0.205438
+ 2.200000 0.118093
+ 2.300000 0.018204
+ 2.400000 -0.113374
+ 2.500000 -0.086265
+ 2.600000 -0.074747
+ 2.700000 0.179214
+ 2.800000 0.168398
+ 2.900000 0.067954
+ 3.000000 0.076506
+ 3.100000 0.433768
+ 3.200000 0.019097
+ 3.300000 0.239973
+ 3.400000 0.006607
+ 3.500000 -0.121174
+ 3.600000 0.162577
+ 3.700000 0.042030
+ 3.800000 0.288718
+ 3.900000 0.137440
+ 4.000000 0.593153
+ 4.100000 0.480413
+ 4.200000 0.901715
+ 4.300000 0.868281
+ 4.400000 1.301646
+ 4.500000 1.093022
+ 4.600000 1.531770
+ 4.700000 1.772498
+ 4.800000 2.346719
+ 4.900000 2.716594
+ 5.000000 3.333042
+ 5.100000 3.688503
+ 5.200000 3.821775
+ 5.300000 4.583784
+ 5.400000 4.805664
+ 5.500000 5.125762
+ 5.600000 4.964982
+ 5.700000 4.988856
+ 5.800000 4.854896
+ 5.900000 4.738134
+ 6.000000 4.815129
+ 6.100000 4.070525
+ 6.200000 3.983041
+ 6.300000 3.107054
+ 6.400000 2.841105
+ 6.500000 2.610117
+ 6.600000 2.146078
+ 6.700000 1.683386
+ 6.800000 1.317547
+ 6.900000 0.789538
+ 7.000000 0.585832
+ 7.100000 0.494665
+ 7.200000 0.447038
+ 7.300000 0.441926
+ 7.400000 0.393547
+ 7.500000 -0.033900
+ 7.600000 0.042947
+ 7.700000 -0.116248
+ 7.800000 0.061516
+ 7.900000 0.183615
+ 8.000000 -0.127174
+ 8.100000 0.368512
+ 8.200000 0.194381
+ 8.300000 0.301574
+ 8.400000 0.045097
+ 8.500000 0.110543
+ 8.600000 0.263164
+ 8.700000 0.190722
+ 8.800000 0.425007
+ 8.900000 0.253164
+ 9.000000 0.201519
+ 9.100000 0.132292
+ 9.200000 0.304519
+ 9.300000 0.129096
+ 9.400000 0.269171
+ 9.500000 0.189405
+ 9.600000 0.243728
+ 9.700000 0.411963
+ 9.800000 0.080682
+ 9.900000 0.332672
+ 10.000000 -0.067100
diff --git a/examples/peak.csv b/examples/peak.csv
new file mode 100644
index 0000000..72cd2a8
--- /dev/null
+++ b/examples/peak.csv
@@ -0,0 +1,102 @@
+x,y
+0.000000, 0.021654
+0.200000, 0.385367
+0.400000, 0.193304
+0.600000, 0.103481
+0.800000, 0.404041
+1.000000, 0.212585
+1.200000, 0.253212
+1.400000, -0.037306
+1.600000, 0.271415
+1.800000, 0.025614
+2.000000, 0.066419
+2.200000, -0.034347
+2.400000, 0.153702
+2.600000, 0.161341
+2.800000, -0.097676
+3.000000, -0.061880
+3.200000, 0.085341
+3.400000, 0.083674
+3.600000, 0.190944
+3.800000, 0.222168
+4.000000, 0.214417
+4.200000, 0.341221
+4.400000, 0.634501
+4.600000, 0.302566
+4.800000, 0.101096
+5.000000, -0.106441
+5.200000, 0.567396
+5.400000, 0.531899
+5.600000, 0.459800
+5.800000, 0.646655
+6.000000, 0.662228
+6.200000, 0.820844
+6.400000, 0.947696
+6.600000, 1.541353
+6.800000, 1.763981
+7.000000, 1.846081
+7.200000, 2.986333
+7.400000, 3.182907
+7.600000, 3.786487
+7.800000, 4.822287
+8.000000, 5.739122
+8.200000, 6.744448
+8.400000, 7.295213
+8.600000, 8.737766
+8.800000, 9.693782
+9.000000, 9.894218
+9.200000, 10.193956
+9.400000, 10.091519
+9.600000, 9.652392
+9.800000, 8.670938
+10.000000, 8.004205
+10.200000, 6.773599
+10.400000, 6.076502
+10.600000, 5.127315
+10.800000, 4.303762
+11.000000, 3.426006
+11.200000, 2.416431
+11.400000, 2.311363
+11.600000, 1.748020
+11.800000, 1.135594
+12.000000, 0.888514
+12.200000, 1.030794
+12.400000, 0.543024
+12.600000, 0.767751
+12.800000, 0.657551
+13.000000, 0.495730
+13.200000, 0.447520
+13.400000, 0.173839
+13.600000, 0.256758
+13.800000, 0.596106
+14.000000, 0.065328
+14.200000, 0.197267
+14.400000, 0.260038
+14.600000, 0.460880
+14.800000, 0.335248
+15.000000, 0.295977
+15.200000, -0.010228
+15.400000, 0.138670
+15.600000, 0.192113
+15.800000, 0.304371
+16.000000, 0.442517
+16.200000, 0.164944
+16.400000, 0.001907
+16.600000, 0.207504
+16.800000, 0.012640
+17.000000, 0.090878
+17.200000, -0.222967
+17.400000, 0.391717
+17.600000, 0.180295
+17.800000, 0.206875
+18.000000, 0.240595
+18.200000, -0.037437
+18.400000, 0.139918
+18.600000, 0.012560
+18.800000, -0.053009
+19.000000, 0.226069
+19.200000, 0.076879
+19.400000, 0.078599
+19.600000, 0.016125
+19.800000, -0.071217
+20.000000, -0.091474
diff --git a/examples/sinedata.dat b/examples/sinedata.dat
new file mode 100644
index 0000000..880d1a0
--- /dev/null
+++ b/examples/sinedata.dat
@@ -0,0 +1,103 @@
+#---------------------------------
+# col1 col2
+ 0.000000 1.546927
+ 1.000000 1.736431
+ 2.000000 2.215431
+ 3.000000 1.784280
+ 4.000000 0.790258
+ 5.000000 0.031843
+ 6.000000 -0.923626
+ 7.000000 -1.751375
+ 8.000000 -2.237504
+ 9.000000 -2.309521
+ 10.000000 -1.808337
+ 11.000000 -0.522004
+ 12.000000 0.193358
+ 13.000000 1.049232
+ 14.000000 2.073874
+ 15.000000 1.975083
+ 16.000000 1.483449
+ 17.000000 1.020072
+ 18.000000 -0.346997
+ 19.000000 -1.718224
+ 20.000000 -1.922197
+ 21.000000 -2.296117
+ 22.000000 -2.064431
+ 23.000000 -0.998217
+ 24.000000 -0.153316
+ 25.000000 1.137056
+ 26.000000 2.110909
+ 27.000000 1.984092
+ 28.000000 2.539031
+ 29.000000 1.459715
+ 30.000000 0.457624
+ 31.000000 -0.802046
+ 32.000000 -1.857762
+ 33.000000 -2.102987
+ 34.000000 -2.338332
+ 35.000000 -1.541560
+ 36.000000 -0.940634
+ 37.000000 1.131820
+ 38.000000 1.487181
+ 39.000000 1.992744
+ 40.000000 2.323562
+ 41.000000 1.477698
+ 42.000000 0.676721
+ 43.000000 -0.474352
+ 44.000000 -1.107867
+ 45.000000 -2.337848
+ 46.000000 -2.019775
+ 47.000000 -2.739905
+ 48.000000 -1.644391
+ 49.000000 0.011781
+ 50.000000 1.034463
+ 51.000000 2.037978
+ 52.000000 2.400420
+ 53.000000 2.344507
+ 54.000000 1.591388
+ 55.000000 0.365871
+ 56.000000 -0.908018
+ 57.000000 -1.543188
+ 58.000000 -2.557156
+ 59.000000 -3.041230
+ 60.000000 -1.713676
+ 61.000000 -0.481723
+ 62.000000 0.549215
+ 63.000000 0.727514
+ 64.000000 1.955187
+ 65.000000 2.162520
+ 66.000000 2.074485
+ 67.000000 1.319350
+ 68.000000 -0.375902
+ 69.000000 -1.156888
+ 70.000000 -2.238698
+ 71.000000 -2.406541
+ 72.000000 -1.861680
+ 73.000000 -1.474703
+ 74.000000 -0.415272
+ 75.000000 1.015717
+ 76.000000 1.598828
+ 77.000000 2.273267
+ 78.000000 2.209011
+ 79.000000 1.758354
+ 80.000000 0.553469
+ 81.000000 -1.257431
+ 82.000000 -1.044855
+ 83.000000 -2.258815
+ 84.000000 -2.004194
+ 85.000000 -1.722751
+ 86.000000 -1.206638
+ 87.000000 0.246201
+ 88.000000 1.377268
+ 89.000000 2.057458
+ 90.000000 2.271500
+ 91.000000 1.919600
+ 92.000000 1.346293
+ 93.000000 -0.184235
+ 94.000000 -1.427797
+ 95.000000 -2.123603
+ 96.000000 -2.248833
+ 97.000000 -2.243055
+ 98.000000 -1.140870
+ 99.000000 -0.627367
+ 100.000000 1.603912
diff --git a/examples/test_peak.dat b/examples/test_peak.dat
new file mode 100644
index 0000000..7222b79
--- /dev/null
+++ b/examples/test_peak.dat
@@ -0,0 +1,404 @@
+# test peak data
+#---------------------------------
+# t y
+ 0.000000 0.021654
+ 0.050000 0.019221
+ 0.100000 -0.146881
+ 0.150000 0.109422
+ 0.200000 0.385367
+ 0.250000 0.426230
+ 0.300000 0.019241
+ 0.350000 0.075568
+ 0.400000 0.193304
+ 0.450000 0.237610
+ 0.500000 -0.107071
+ 0.550000 0.207026
+ 0.600000 0.103481
+ 0.650000 0.175033
+ 0.700000 0.022074
+ 0.750000 0.070510
+ 0.800000 0.404041
+ 0.850000 0.126622
+ 0.900000 -0.138651
+ 0.950000 0.149783
+ 1.000000 0.212585
+ 1.050000 0.133744
+ 1.100000 0.190065
+ 1.150000 -0.254227
+ 1.200000 0.253212
+ 1.250000 0.059663
+ 1.300000 0.187533
+ 1.350000 0.253744
+ 1.400000 -0.037306
+ 1.450000 0.080513
+ 1.500000 0.012607
+ 1.550000 0.224475
+ 1.600000 0.271415
+ 1.650000 0.118073
+ 1.700000 -0.077723
+ 1.750000 0.164330
+ 1.800000 0.025614
+ 1.850000 -0.034864
+ 1.900000 0.068968
+ 1.950000 -0.103238
+ 2.000000 0.066419
+ 2.050000 0.271850
+ 2.100000 0.139049
+ 2.150000 0.162034
+ 2.200000 -0.034347
+ 2.250000 0.135812
+ 2.300000 0.067858
+ 2.350000 -0.161792
+ 2.400000 0.153702
+ 2.450000 0.071054
+ 2.500000 -0.049010
+ 2.550000 0.203306
+ 2.600000 0.161341
+ 2.650000 0.199279
+ 2.700000 0.252416
+ 2.750000 0.355513
+ 2.800000 -0.097676
+ 2.850000 0.254533
+ 2.900000 0.217187
+ 2.950000 0.154375
+ 3.000000 -0.061880
+ 3.050000 0.128343
+ 3.100000 0.205941
+ 3.150000 0.349665
+ 3.200000 0.085341
+ 3.250000 0.125593
+ 3.300000 0.254381
+ 3.350000 0.006456
+ 3.400000 0.083674
+ 3.450000 0.126626
+ 3.500000 0.132028
+ 3.550000 0.367231
+ 3.600000 0.190944
+ 3.650000 -0.004054
+ 3.700000 0.072112
+ 3.750000 0.383266
+ 3.800000 0.222168
+ 3.850000 0.098595
+ 3.900000 0.324558
+ 3.950000 0.125419
+ 4.000000 0.214417
+ 4.050000 0.287499
+ 4.100000 0.230579
+ 4.150000 0.141035
+ 4.200000 0.341221
+ 4.250000 0.162993
+ 4.300000 0.174737
+ 4.350000 0.483097
+ 4.400000 0.634501
+ 4.450000 0.152268
+ 4.500000 0.440815
+ 4.550000 0.125279
+ 4.600000 0.302566
+ 4.650000 0.612674
+ 4.700000 -0.023226
+ 4.750000 0.481199
+ 4.800000 0.101096
+ 4.850000 0.572197
+ 4.900000 0.394625
+ 4.950000 0.461077
+ 5.000000 -0.106441
+ 5.050000 0.635505
+ 5.100000 0.440675
+ 5.150000 0.335979
+ 5.200000 0.567396
+ 5.250000 0.588661
+ 5.300000 0.101309
+ 5.350000 0.370770
+ 5.400000 0.531899
+ 5.450000 0.347064
+ 5.500000 0.387862
+ 5.550000 0.415243
+ 5.600000 0.459800
+ 5.650000 0.559310
+ 5.700000 0.527272
+ 5.750000 0.659222
+ 5.800000 0.646655
+ 5.850000 0.872127
+ 5.900000 0.506336
+ 5.950000 0.832841
+ 6.000000 0.662228
+ 6.050000 0.666240
+ 6.100000 0.745486
+ 6.150000 0.773303
+ 6.200000 0.820844
+ 6.250000 0.949833
+ 6.300000 0.999748
+ 6.350000 1.194918
+ 6.400000 0.947696
+ 6.450000 1.034669
+ 6.500000 1.004666
+ 6.550000 1.155702
+ 6.600000 1.541353
+ 6.650000 1.342422
+ 6.700000 1.477986
+ 6.750000 1.375675
+ 6.800000 1.763981
+ 6.850000 1.638405
+ 6.900000 1.652637
+ 6.950000 2.125423
+ 7.000000 1.846081
+ 7.050000 2.008594
+ 7.100000 1.967327
+ 7.150000 2.420829
+ 7.200000 2.986333
+ 7.250000 2.816069
+ 7.300000 2.779284
+ 7.350000 2.452606
+ 7.400000 3.182907
+ 7.450000 3.345209
+ 7.500000 3.210506
+ 7.550000 3.630722
+ 7.600000 3.786487
+ 7.650000 4.288308
+ 7.700000 4.107791
+ 7.750000 4.223391
+ 7.800000 4.822287
+ 7.850000 4.852727
+ 7.900000 5.153562
+ 7.950000 5.540655
+ 8.000000 5.739122
+ 8.050000 5.965430
+ 8.100000 5.893505
+ 8.150000 6.520379
+ 8.200000 6.744448
+ 8.250000 6.982811
+ 8.300000 6.871811
+ 8.350000 7.381590
+ 8.400000 7.295213
+ 8.450000 7.770220
+ 8.500000 7.855105
+ 8.550000 8.178695
+ 8.600000 8.737766
+ 8.650000 8.659328
+ 8.700000 8.761986
+ 8.750000 9.325407
+ 8.800000 9.693782
+ 8.850000 9.493158
+ 8.900000 9.840173
+ 8.950000 9.591383
+ 9.000000 9.894218
+ 9.050000 9.781619
+ 9.100000 9.787061
+ 9.150000 9.944484
+ 9.200000 10.193956
+ 9.250000 10.452393
+ 9.300000 10.198352
+ 9.350000 10.220196
+ 9.400000 10.091519
+ 9.450000 9.803956
+ 9.500000 9.976457
+ 9.550000 9.644976
+ 9.600000 9.652392
+ 9.650000 9.364996
+ 9.700000 9.141562
+ 9.750000 9.123553
+ 9.800000 8.670938
+ 9.850000 8.830762
+ 9.900000 8.612662
+ 9.950000 8.200565
+ 10.000000 8.004205
+ 10.050000 7.786050
+ 10.100000 7.729310
+ 10.150000 7.287126
+ 10.200000 6.773599
+ 10.250000 6.820778
+ 10.300000 6.790992
+ 10.350000 6.324548
+ 10.400000 6.076502
+ 10.450000 5.768973
+ 10.500000 5.787036
+ 10.550000 5.553690
+ 10.600000 5.127315
+ 10.650000 4.902255
+ 10.700000 4.929891
+ 10.750000 4.171166
+ 10.800000 4.303762
+ 10.850000 3.767545
+ 10.900000 3.791083
+ 10.950000 3.814857
+ 11.000000 3.426006
+ 11.050000 3.078426
+ 11.100000 2.789747
+ 11.150000 2.620130
+ 11.200000 2.416431
+ 11.250000 2.430768
+ 11.300000 2.268585
+ 11.350000 2.235498
+ 11.400000 2.311363
+ 11.450000 2.005221
+ 11.500000 1.970229
+ 11.550000 1.907982
+ 11.600000 1.748020
+ 11.650000 1.481710
+ 11.700000 1.519127
+ 11.750000 1.777618
+ 11.800000 1.135594
+ 11.850000 1.345861
+ 11.900000 1.046777
+ 11.950000 1.040376
+ 12.000000 0.888514
+ 12.050000 0.994942
+ 12.100000 1.002009
+ 12.150000 1.235839
+ 12.200000 1.030794
+ 12.250000 0.894109
+ 12.300000 0.839384
+ 12.350000 0.564763
+ 12.400000 0.543024
+ 12.450000 1.067728
+ 12.500000 0.569039
+ 12.550000 0.546196
+ 12.600000 0.767751
+ 12.650000 0.372794
+ 12.700000 0.506039
+ 12.750000 0.094006
+ 12.800000 0.657551
+ 12.850000 0.689847
+ 12.900000 0.235074
+ 12.950000 0.511880
+ 13.000000 0.495730
+ 13.050000 0.720208
+ 13.100000 0.458972
+ 13.150000 0.515104
+ 13.200000 0.447520
+ 13.250000 0.309378
+ 13.300000 0.336000
+ 13.350000 0.403743
+ 13.400000 0.173839
+ 13.450000 0.542466
+ 13.500000 0.435708
+ 13.550000 0.502801
+ 13.600000 0.256758
+ 13.650000 0.269744
+ 13.700000 0.204110
+ 13.750000 0.219654
+ 13.800000 0.596106
+ 13.850000 0.272604
+ 13.900000 0.228125
+ 13.950000 0.308160
+ 14.000000 0.065328
+ 14.050000 0.491292
+ 14.100000 0.494818
+ 14.150000 0.321783
+ 14.200000 0.197267
+ 14.250000 0.602161
+ 14.300000 0.155016
+ 14.350000 0.333368
+ 14.400000 0.260038
+ 14.450000 0.149090
+ 14.500000 0.164818
+ 14.550000 0.032011
+ 14.600000 0.460880
+ 14.650000 0.275423
+ 14.700000 0.343308
+ 14.750000 0.348898
+ 14.800000 0.335248
+ 14.850000 0.223771
+ 14.900000 0.056021
+ 14.950000 0.146267
+ 15.000000 0.295977
+ 15.050000 0.029256
+ 15.100000 0.188720
+ 15.150000 0.185713
+ 15.200000 -0.010228
+ 15.250000 -0.075438
+ 15.300000 -0.049977
+ 15.350000 0.156545
+ 15.400000 0.138670
+ 15.450000 0.430603
+ 15.500000 0.107233
+ 15.550000 0.268609
+ 15.600000 0.192113
+ 15.650000 -0.089082
+ 15.700000 0.076649
+ 15.750000 0.494606
+ 15.800000 0.304371
+ 15.850000 0.311904
+ 15.900000 0.146849
+ 15.950000 -0.035298
+ 16.000000 0.442517
+ 16.050000 0.129210
+ 16.100000 0.202598
+ 16.150000 -0.038198
+ 16.200000 0.164944
+ 16.250000 0.089727
+ 16.300000 -0.029338
+ 16.350000 0.321681
+ 16.400000 0.001907
+ 16.450000 0.357234
+ 16.500000 0.706248
+ 16.550000 0.189379
+ 16.600000 0.207504
+ 16.650000 0.252780
+ 16.700000 0.337652
+ 16.750000 0.164710
+ 16.800000 0.012640
+ 16.850000 -0.200321
+ 16.900000 0.063620
+ 16.950000 0.014513
+ 17.000000 0.090878
+ 17.050000 0.261647
+ 17.100000 0.140731
+ 17.150000 0.351465
+ 17.200000 -0.222967
+ 17.250000 0.192524
+ 17.300000 -0.083316
+ 17.350000 0.139459
+ 17.400000 0.391717
+ 17.450000 -0.091359
+ 17.500000 -0.118886
+ 17.550000 -0.054844
+ 17.600000 0.180295
+ 17.650000 0.065399
+ 17.700000 0.319015
+ 17.750000 0.166328
+ 17.800000 0.206875
+ 17.850000 0.108605
+ 17.900000 0.085493
+ 17.950000 0.270683
+ 18.000000 0.240595
+ 18.050000 0.299822
+ 18.100000 -0.040008
+ 18.150000 0.306279
+ 18.200000 -0.037437
+ 18.250000 0.006128
+ 18.300000 0.224231
+ 18.350000 0.054691
+ 18.400000 0.139918
+ 18.450000 -0.079608
+ 18.500000 -0.215388
+ 18.550000 -0.063221
+ 18.600000 0.012560
+ 18.650000 -0.138384
+ 18.700000 0.326622
+ 18.750000 0.130812
+ 18.800000 -0.053009
+ 18.850000 -0.028960
+ 18.900000 0.053191
+ 18.950000 0.239460
+ 19.000000 0.226069
+ 19.050000 -0.016509
+ 19.100000 0.155364
+ 19.150000 0.186324
+ 19.200000 0.076879
+ 19.250000 0.184640
+ 19.300000 0.194979
+ 19.350000 0.153825
+ 19.400000 0.078599
+ 19.450000 0.082126
+ 19.500000 0.069517
+ 19.550000 0.169040
+ 19.600000 0.016125
+ 19.650000 -0.145533
+ 19.700000 -0.314756
+ 19.750000 0.409688
+ 19.800000 -0.071217
+ 19.850000 -0.318566
+ 19.900000 0.159099
+ 19.950000 -0.014190
+ 20.000000 -0.091474
diff --git a/examples/test_splinepeak.dat b/examples/test_splinepeak.dat
new file mode 100644
index 0000000..48d1fff
--- /dev/null
+++ b/examples/test_splinepeak.dat
@@ -0,0 +1,504 @@
+# test data for spline + peak
+#---------------------------------
+# x y
+ 0.000 2.96659626314
+ 0.050 3.00493991014
+ 0.100 3.06123652247
+ 0.150 2.52252640704
+ 0.200 3.87613932061
+ 0.250 4.00216534666
+ 0.300 3.50572316777
+ 0.350 3.43342382697
+ 0.400 3.73272247102
+ 0.450 3.59902815254
+ 0.500 3.98899222373
+ 0.550 3.44264010167
+ 0.600 3.23902639219
+ 0.650 3.74484184457
+ 0.700 3.61591299694
+ 0.750 3.25767615382
+ 0.800 3.47569848627
+ 0.850 3.55767377006
+ 0.900 3.78963224708
+ 0.950 3.41651644442
+ 1.000 3.84201421917
+ 1.050 3.94339562855
+ 1.100 3.46389760925
+ 1.150 3.57238104938
+ 1.200 3.78684815453
+ 1.250 3.61931156950
+ 1.300 2.88997092351
+ 1.350 3.59552177983
+ 1.400 3.78583510228
+ 1.450 3.11958901418
+ 1.500 3.92621017016
+ 1.550 3.77612346521
+ 1.600 3.11182878182
+ 1.650 3.20534315458
+ 1.700 3.76012162528
+ 1.750 3.24997196038
+ 1.800 3.90360364053
+ 1.850 3.28102384929
+ 1.900 4.20259080948
+ 1.950 3.14915680186
+ 2.000 3.45753793299
+ 2.050 3.40378870206
+ 2.100 3.90539898346
+ 2.150 3.27699485343
+ 2.200 4.08801741844
+ 2.250 3.40091449389
+ 2.300 4.43561592129
+ 2.350 3.53720202703
+ 2.400 3.51340902548
+ 2.450 4.27899843739
+ 2.500 4.38777379643
+ 2.550 4.35138356487
+ 2.600 3.45138414045
+ 2.650 3.89606317148
+ 2.700 3.38078269422
+ 2.750 3.72757358484
+ 2.800 3.95344629899
+ 2.850 3.64031251260
+ 2.900 3.92633692392
+ 2.950 4.54903499496
+ 3.000 3.83500263553
+ 3.050 3.86787436371
+ 3.100 4.20560369029
+ 3.150 3.79452687040
+ 3.200 4.42391607348
+ 3.250 3.25791105383
+ 3.300 4.54395118612
+ 3.350 3.67373233023
+ 3.400 4.06495292232
+ 3.450 4.06471624329
+ 3.500 3.97053389404
+ 3.550 4.22092346672
+ 3.600 4.16692173295
+ 3.650 3.89074827421
+ 3.700 3.54698063028
+ 3.750 4.24222168533
+ 3.800 3.40519864458
+ 3.850 3.37131572450
+ 3.900 3.56771484953
+ 3.950 4.11322749571
+ 4.000 4.20772923214
+ 4.050 4.01716397254
+ 4.100 4.47782680015
+ 4.150 4.41509687464
+ 4.200 4.47090595323
+ 4.250 3.86313946103
+ 4.300 4.10546063443
+ 4.350 4.27900062014
+ 4.400 3.72291118630
+ 4.450 4.07444282978
+ 4.500 4.49688013792
+ 4.550 4.32224000187
+ 4.600 4.01213249893
+ 4.650 4.34527050803
+ 4.700 4.31885480690
+ 4.750 4.65528015751
+ 4.800 4.12477942263
+ 4.850 4.26700226032
+ 4.900 4.73315243845
+ 4.950 3.57777878796
+ 5.000 4.61624329502
+ 5.050 4.26130401694
+ 5.100 4.41044807874
+ 5.150 4.20565975364
+ 5.200 4.09490101037
+ 5.250 4.50943737680
+ 5.300 4.17516575948
+ 5.350 4.34499306419
+ 5.400 4.22560906354
+ 5.450 4.16916811170
+ 5.500 4.22005179025
+ 5.550 3.89140676445
+ 5.600 4.49734760696
+ 5.650 4.79573697402
+ 5.700 4.32159418798
+ 5.750 4.48087045377
+ 5.800 4.71087348808
+ 5.850 4.46962833927
+ 5.900 4.38868250013
+ 5.950 3.67409708909
+ 6.000 4.11391097790
+ 6.050 4.46659297510
+ 6.100 4.59009406285
+ 6.150 4.58796249430
+ 6.200 4.50791819418
+ 6.250 4.34311032610
+ 6.300 4.30969225891
+ 6.350 4.08985177839
+ 6.400 4.14675849182
+ 6.450 4.96318969822
+ 6.500 4.55756202498
+ 6.550 4.78740417795
+ 6.600 4.62448849670
+ 6.650 4.68109171567
+ 6.700 3.94105744714
+ 6.750 4.02574773487
+ 6.800 4.52507717940
+ 6.850 4.21922040040
+ 6.900 4.69394237402
+ 6.950 3.76093339774
+ 7.000 4.53351841031
+ 7.050 4.69681713690
+ 7.100 5.00416599327
+ 7.150 4.51989607590
+ 7.200 4.72095593713
+ 7.250 4.59173268396
+ 7.300 4.06150079948
+ 7.350 4.77379936788
+ 7.400 4.35851553657
+ 7.450 4.53657427644
+ 7.500 3.92579351613
+ 7.550 4.71415366995
+ 7.600 4.25663227483
+ 7.650 4.54277178651
+ 7.700 4.60273770589
+ 7.750 4.18383910866
+ 7.800 4.40643674830
+ 7.850 4.05832509380
+ 7.900 4.41109717440
+ 7.950 4.17361477709
+ 8.000 4.49864472516
+ 8.050 4.14943454404
+ 8.100 4.77850525292
+ 8.150 3.74482589060
+ 8.200 3.98576039677
+ 8.250 4.64204576206
+ 8.300 4.02517889099
+ 8.350 4.30844807599
+ 8.400 4.52955833524
+ 8.450 4.73266004495
+ 8.500 4.61393789944
+ 8.550 4.99148544903
+ 8.600 3.67392469964
+ 8.650 4.44633683024
+ 8.700 4.56004129726
+ 8.750 4.02613947662
+ 8.800 4.21234308684
+ 8.850 4.11412083941
+ 8.900 4.26464490846
+ 8.950 4.44003840779
+ 9.000 4.17024949358
+ 9.050 4.34281951934
+ 9.100 4.67817992527
+ 9.150 4.21599805736
+ 9.200 4.54681771875
+ 9.250 4.94611036787
+ 9.300 3.82217290114
+ 9.350 4.19721177412
+ 9.400 4.59405568559
+ 9.450 4.50726307379
+ 9.500 5.04938393753
+ 9.550 4.16524366757
+ 9.600 3.90645337751
+ 9.650 4.77177694766
+ 9.700 4.73709711425
+ 9.750 4.51350541878
+ 9.800 4.44276376029
+ 9.850 5.02961839971
+ 9.900 3.95135573308
+ 9.950 3.60165390921
+ 10.00 4.28961288113
+ 10.05 3.53050359494
+ 10.10 4.52907680271
+ 10.15 4.29287529925
+ 10.20 3.98331562037
+ 10.25 4.25374899010
+ 10.30 4.32529280063
+ 10.35 4.32232682041
+ 10.40 4.10806219744
+ 10.45 4.27337485226
+ 10.50 4.82499805164
+ 10.55 4.08836674752
+ 10.60 3.78034279797
+ 10.65 4.13907067891
+ 10.70 4.24076127260
+ 10.75 3.98116602080
+ 10.80 4.18492061335
+ 10.85 4.12632626001
+ 10.90 4.05426403615
+ 10.95 4.62204244130
+ 11.00 4.10339432701
+ 11.05 3.65529839865
+ 11.10 3.62558839284
+ 11.15 3.73918961761
+ 11.20 3.68158174982
+ 11.25 4.19252058029
+ 11.30 4.23233845989
+ 11.35 3.97458702150
+ 11.40 4.00994330502
+ 11.45 3.94082278387
+ 11.50 3.98429819877
+ 11.55 3.53044767475
+ 11.60 3.68498046799
+ 11.65 3.89628464108
+ 11.70 3.40746069225
+ 11.75 3.84699025044
+ 11.80 3.96691104951
+ 11.85 3.38337975679
+ 11.90 3.24154766087
+ 11.95 3.32962768199
+ 12.00 3.84556015350
+ 12.05 3.88669262940
+ 12.10 3.51917557918
+ 12.15 3.85087376839
+ 12.20 3.64596702791
+ 12.25 4.00322112622
+ 12.30 3.91683279654
+ 12.35 3.77869224060
+ 12.40 3.49129211425
+ 12.45 3.18950893506
+ 12.50 3.66108203033
+ 12.55 3.74916470116
+ 12.60 3.72885486815
+ 12.65 3.27357287085
+ 12.70 4.02808369797
+ 12.75 2.90391966989
+ 12.80 3.47147897927
+ 12.85 3.53376937142
+ 12.90 3.67614003452
+ 12.95 3.86581047355
+ 13.00 4.03268817472
+ 13.05 3.79538384868
+ 13.10 3.49862517721
+ 13.15 3.58068341046
+ 13.20 3.72218732962
+ 13.25 3.76383752518
+ 13.30 3.70826100345
+ 13.35 3.15528401222
+ 13.40 3.78343107537
+ 13.45 3.88942581080
+ 13.50 3.30649700834
+ 13.55 3.61346986635
+ 13.60 3.20095422077
+ 13.65 3.63449770192
+ 13.70 4.37128278661
+ 13.75 3.09475840948
+ 13.80 3.64559069249
+ 13.85 3.70920856273
+ 13.90 3.71343881083
+ 13.95 2.94883314831
+ 14.00 3.62391537217
+ 14.05 2.96134101806
+ 14.10 3.50318527074
+ 14.15 3.69812270471
+ 14.20 3.21170636813
+ 14.25 3.75489895972
+ 14.30 3.83295556149
+ 14.35 3.35411441152
+ 14.40 3.65377126898
+ 14.45 4.20179018192
+ 14.50 3.42700220111
+ 14.55 2.96089725781
+ 14.60 3.63963100704
+ 14.65 3.45143192242
+ 14.70 3.47312870863
+ 14.75 3.68702071489
+ 14.80 3.74068343607
+ 14.85 3.80376538961
+ 14.90 4.16365943041
+ 14.95 4.33154401598
+ 15.00 4.40066122642
+ 15.05 4.11934259358
+ 15.10 4.95355878648
+ 15.15 4.52072404063
+ 15.20 4.84582662392
+ 15.25 4.75949753235
+ 15.30 5.28780550686
+ 15.35 5.99012095403
+ 15.40 5.73524180180
+ 15.45 5.93297578991
+ 15.50 6.26632055195
+ 15.55 6.74191937829
+ 15.60 6.49209695411
+ 15.65 6.87162095663
+ 15.70 6.89944165666
+ 15.75 7.16294722525
+ 15.80 7.78296801835
+ 15.85 8.03245785545
+ 15.90 9.04872095901
+ 15.95 9.47017598572
+ 16.00 8.51494345125
+ 16.05 8.62786233062
+ 16.10 8.81794996882
+ 16.15 9.47806204831
+ 16.20 9.34318913312
+ 16.25 10.1293952874
+ 16.30 9.94059006806
+ 16.35 9.46882942068
+ 16.40 9.46750041992
+ 16.45 10.0778227245
+ 16.50 9.60290613840
+ 16.55 9.68542564727
+ 16.60 10.0580512248
+ 16.65 9.54613864279
+ 16.70 9.08379114524
+ 16.75 8.85952483325
+ 16.80 8.77578733506
+ 16.85 9.43018023157
+ 16.90 8.35733674481
+ 16.95 8.65147020522
+ 17.00 8.17052247013
+ 17.05 7.71834063615
+ 17.10 7.64063615191
+ 17.15 7.10331868336
+ 17.20 6.88917292959
+ 17.25 6.49250021047
+ 17.30 6.83830350059
+ 17.35 5.86882143800
+ 17.40 5.99296213029
+ 17.45 4.81643628678
+ 17.50 5.07760708618
+ 17.55 5.21167983715
+ 17.60 4.61888070923
+ 17.65 4.61020867356
+ 17.70 4.73219860417
+ 17.75 3.76703344159
+ 17.80 3.68532849169
+ 17.85 3.67919307630
+ 17.90 3.62080268098
+ 17.95 3.63788880924
+ 18.00 2.89503192888
+ 18.05 3.50520587540
+ 18.10 2.84731747670
+ 18.15 4.27734825573
+ 18.20 3.54931968124
+ 18.25 3.28803468974
+ 18.30 3.17606045637
+ 18.35 3.10697518720
+ 18.40 3.48227906531
+ 18.45 3.21774996564
+ 18.50 3.58005048248
+ 18.55 3.27289036433
+ 18.60 2.88835892253
+ 18.65 2.85267546528
+ 18.70 2.92606772123
+ 18.75 2.46493125067
+ 18.80 3.03768504662
+ 18.85 2.57488214976
+ 18.90 3.29665698032
+ 18.95 2.88975339506
+ 19.00 2.70839853213
+ 19.05 3.00727227103
+ 19.10 3.29396028863
+ 19.15 2.69856536118
+ 19.20 2.86223090733
+ 19.25 3.27945065920
+ 19.30 3.05698092032
+ 19.35 2.77889552950
+ 19.40 2.62209068612
+ 19.45 2.50767338025
+ 19.50 2.28450752744
+ 19.55 2.28284494498
+ 19.60 3.05169783614
+ 19.65 3.07706070518
+ 19.70 3.07713755192
+ 19.75 3.08181857658
+ 19.80 2.93746839618
+ 19.85 2.93867692906
+ 19.90 2.89353641833
+ 19.95 2.84229568098
+ 20.00 3.14929449847
+ 20.05 2.92304225785
+ 20.10 2.51691328490
+ 20.15 3.66548182308
+ 20.20 2.72562129136
+ 20.25 2.92151941234
+ 20.30 2.61179520432
+ 20.35 3.39507949935
+ 20.40 2.86993930461
+ 20.45 2.44626599977
+ 20.50 3.03537741348
+ 20.55 2.25431030457
+ 20.60 2.74131220408
+ 20.65 2.49859125103
+ 20.70 3.46647322323
+ 20.75 2.79881233180
+ 20.80 3.14435082063
+ 20.85 3.24721502910
+ 20.90 2.12722356266
+ 20.95 2.23445294938
+ 21.00 3.22000367656
+ 21.05 2.79143633941
+ 21.10 2.55544938709
+ 21.15 3.09754430899
+ 21.20 2.93071189433
+ 21.25 3.15992128024
+ 21.30 2.60689765856
+ 21.35 3.38911899380
+ 21.40 3.17429295653
+ 21.45 3.28631473944
+ 21.50 2.98695670582
+ 21.55 2.98607712077
+ 21.60 3.09986464921
+ 21.65 2.82480782016
+ 21.70 2.89843603470
+ 21.75 3.20075854402
+ 21.80 3.20954084532
+ 21.85 3.53507050197
+ 21.90 2.75657608164
+ 21.95 2.79758925161
+ 22.00 2.87013825451
+ 22.05 2.79369963948
+ 22.10 2.83919143936
+ 22.15 3.00376872657
+ 22.20 3.03732823723
+ 22.25 3.38912091248
+ 22.30 3.18724881855
+ 22.35 2.84844803639
+ 22.40 3.47473510025
+ 22.45 3.02060426317
+ 22.50 3.63189699986
+ 22.55 2.86578549981
+ 22.60 3.04485365263
+ 22.65 3.27402675418
+ 22.70 2.55563996962
+ 22.75 3.38776152248
+ 22.80 3.75540487159
+ 22.85 3.31664199957
+ 22.90 3.56987968004
+ 22.95 3.50800782027
+ 23.00 3.37994390077
+ 23.05 3.27316188615
+ 23.10 3.41300112675
+ 23.15 3.15286512579
+ 23.20 3.53386979609
+ 23.25 3.06783652450
+ 23.30 3.63534152792
+ 23.35 3.08136446967
+ 23.40 3.43013868764
+ 23.45 3.13085944234
+ 23.50 2.52523438290
+ 23.55 3.44669475069
+ 23.60 3.22765951756
+ 23.65 3.50896545125
+ 23.70 3.86470748598
+ 23.75 2.96775610362
+ 23.80 2.97248103417
+ 23.85 3.11419495505
+ 23.90 3.69486046720
+ 23.95 3.81093313552
+ 24.00 4.04869270397
+ 24.05 3.39149397968
+ 24.10 3.46063743255
+ 24.15 3.28606281454
+ 24.20 3.71658861188
+ 24.25 3.78921391295
+ 24.30 3.52836967541
+ 24.35 3.75802856378
+ 24.40 3.32036173561
+ 24.45 3.49811083392
+ 24.50 3.74123977672
+ 24.55 3.13512327376
+ 24.60 3.32137818900
+ 24.65 3.77393820599
+ 24.70 3.69256530273
+ 24.75 4.02144329738
+ 24.80 3.51055117275
+ 24.85 4.14278693963
+ 24.90 3.97479741857
+ 24.95 3.21220712914
+ 25.00 3.90788342338
diff --git a/lmfit.egg-info/PKG-INFO b/lmfit.egg-info/PKG-INFO
new file mode 100644
index 0000000..019a3da
--- /dev/null
+++ b/lmfit.egg-info/PKG-INFO
@@ -0,0 +1,180 @@
+Metadata-Version: 2.1
+Name: lmfit
+Version: 1.2.1
+Summary: Least-Squares Minimization with Bounds and Constraints
+Home-page: https://lmfit.github.io//lmfit-py/
+Author: LMFit Development Team
+Author-email: matt.newville@gmail.com
+License: BSD 3-Clause
+Project-URL: Source, https://github.com/lmfit/lmfit-py
+Project-URL: Changelog, https://lmfit.github.io/lmfit-py/whatsnew.html
+Project-URL: Documentation, https://lmfit.github.io/lmfit-py/
+Project-URL: Tracker, https://github.com/lmfit/lmfit-py/issues
+Keywords: curve-fitting,least-squares minimization
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Topic :: Scientific/Engineering
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+Provides-Extra: dev
+Provides-Extra: doc
+Provides-Extra: test
+Provides-Extra: all
+License-File: LICENSE
+License-File: AUTHORS.txt
+
+LMfit-py
+========
+
+.. image:: https://dev.azure.com/lmfit/lmfit-py/_apis/build/status/lmfit.lmfit-py?branchName=master
+ :target: https://dev.azure.com/lmfit/lmfit-py/_build/latest?definitionId=1&branchName=master
+
+.. image:: https://codecov.io/gh/lmfit/lmfit-py/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/lmfit/lmfit-py
+
+.. image:: https://img.shields.io/pypi/v/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/pypi/dm/lmfit.svg
+ :target: https://pypi.org/project/lmfit
+
+.. image:: https://img.shields.io/badge/docs-read-brightgreen
+ :target: https://lmfit.github.io/lmfit-py/
+
+.. image:: https://zenodo.org/badge/4185/lmfit/lmfit-py.svg
+ :target: https://doi.org/10.5281/zenodo.598352
+
+.. _LMfit google mailing list: https://groups.google.com/group/lmfit-py
+.. _Github Discussions: https://github.com/lmfit/lmfit-py/discussions
+.. _Github Issues: https://github.com/lmfit/lmfit-py/issues
+
+
+..
+ Note: the Zenodo target should be
+ https://zenodo.org/badge/latestdoi/4185/lmfit/lmfit-py
+ but see https://github.com/lmfit/lmfit-py/discussions/862
+
+
+Overview
+---------
+
+The lmfit Python library provides tools for non-linear least-squares
+minimization and curve fitting. The goal is to make these optimization
+algorithms more flexible, more comprehensible, and easier to use well, with the
+key feature of casting variables in minimization and fitting routines as named
+parameters that can have many attributes beside just a current value.
+
+LMfit is a pure Python package, built on top of Scipy and Numpy, and so is
+easy to install with ``pip install lmfit``.
+
+For questions, comments, and suggestions, please use the `LMfit google mailing
+list`_ or `Github discussions`_. For software issues and bugs, use `Github
+Issues`_, but please read `Contributing.md <.github/CONTRIBUTING.md>`_ before
+creating an Issue.
+
+
+Parameters and Minimization
+------------------------------
+
+LMfit provides optimization routines similar to (and based on) those from
+``scipy.optimize``, but with a simple, flexible approach to parameterizing a
+model for fitting to data using named parameters. These named Parameters can be
+held fixed or freely adjusted in the fit, or held between lower and upper
+bounds. Parameters can also be constrained as a simple mathematical expression
+of other Parameters.
+
+A Parameters object (which acts like a Python dictionary) contains named
+parameters, and can be built with::
+
+ import lmfit
+ fit_params = lmfit.Parameters()
+ fit_params['amp'] = lmfit.Parameter(value=1.2)
+ fit_params['cen'] = lmfit.Parameter(value=40.0, vary=False)
+ fit_params['wid'] = lmfit.Parameter(value=4, min=0)
+ fit_params['fwhm'] = lmfit.Parameter(expr='wid*2.355')
+
+or using the equivalent::
+
+ fit_params = lmfit.create_params(amp=1.2,
+ cen={'value':40, 'vary':False},
+ wid={'value': 4, 'min':0},
+ fwhm={'expr': 'wid*2.355'})
+
+
+
+In the general minimization case (see below for Curve-fitting), the user will
+also write an objective function to be minimized (in the least-squares sense)
+with its first argument being this Parameters object, and additional positional
+and keyword arguments as desired::
+
+ def myfunc(params, x, data, someflag=True):
+ amp = params['amp'].value
+ cen = params['cen'].value
+ wid = params['wid'].value
+ ...
+ return residual_array
+
+For each call of this function, the values for the ``params`` may have changed,
+subject to the bounds and constraint settings for each Parameter. The function
+should return the residual (i.e., ``data-model``) array to be minimized.
+
+The advantage here is that the function to be minimized does not have to be
+changed if different bounds or constraints are placed on the fitting
+Parameters. The fitting model (as described in ``myfunc``) is instead written
+in terms of the physical parameters of the system, and remains independent of
+what is actually varied in the fit. In addition, the choice of which
+parameters are adjusted and which are fixed happens at run-time, so the user
+can easily change what is varied and what constraints are placed on the
+parameters during real-time data analysis.
+
+To perform the fit, the user calls::
+
+ result = lmfit.minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
+
+After the fit, a ``MinimizerResult`` object is returned that holds the results
+of the fit (e.g., fitting statistics and optimized parameters). The dictionary
+``result.params`` contains the best-fit values, estimated standard deviations,
+and correlations with other variables in the fit.
+
+By default, the underlying fit algorithm is the Levenberg-Marquardt algorithm
+with numerically-calculated derivatives from MINPACK's lmdif function, as used
+by ``scipy.optimize.leastsq``. Most other solvers that are present in ``scipy``
+(e.g., Nelder-Mead, differential_evolution, basin-hopping, and more) are also
+supported.
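+
+For example, switching to the Nelder-Mead algorithm requires only changing the
+``method`` argument (a sketch, reusing ``myfunc`` and ``fit_params`` from
+above)::
+
+    result = lmfit.minimize(myfunc, fit_params, args=(x, data), method='nelder')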
+
+
+Curve-Fitting with lmfit.Model
+----------------------------------
+
+One of the most common uses of least-squares minimization is curve fitting,
+where one minimizes ``data-model`` or ``(data-model)*weights``. Using
+``lmfit.minimize`` as above, the objective function would take ``data`` and
+``weights``, calculate the model, and then return the ``(data-model)*weights``
+array.
+
+To simplify this, and make curve-fitting more flexible, lmfit provides a Model
+class that wraps a *model function* that represents the model (without the data
+or weights). Parameters are then automatically found from the named arguments
+of the model function. In addition, simple model functions can be readily
+combined and reused, and several common model functions are included in lmfit.
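+
+A minimal sketch, assuming NumPy arrays ``x`` and ``y`` hold the data to be
+fit::
+
+    import numpy as np
+    import lmfit
+
+    def gaussian(x, amp, cen, wid):
+        """Gaussian lineshape; parameter names are taken from this signature."""
+        return amp * np.exp(-(x - cen)**2 / (2 * wid**2))
+
+    model = lmfit.Model(gaussian)
+    result = model.fit(y, x=x, amp=5, cen=5, wid=1)
+    print(result.fit_report())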
+
+Exploration of Confidence Intervals
+-------------------------------------
+
+Lmfit always tries to estimate uncertainties in fitting parameters and
+correlations between them. It does this even for those methods where the
+corresponding ``scipy.optimize`` routines do not estimate uncertainties. Lmfit
+also provides methods to explicitly explore and evaluate the confidence
+intervals in fit results.
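+
+A brief sketch, assuming ``mini`` is a ``Minimizer`` and ``result`` the object
+returned by its fit::
+
+    ci = lmfit.conf_interval(mini, result)
+    lmfit.report_ci(ci)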
diff --git a/lmfit.egg-info/SOURCES.txt b/lmfit.egg-info/SOURCES.txt
new file mode 100644
index 0000000..e41f42e
--- /dev/null
+++ b/lmfit.egg-info/SOURCES.txt
@@ -0,0 +1,186 @@
+.codecov.yml
+.gitattributes
+.gitignore
+.pre-commit-config.yaml
+AUTHORS.txt
+LICENSE
+README.rst
+azure-pipelines.yml
+publish_docs.sh
+pyproject.toml
+setup.cfg
+setup.py
+.github/CONTRIBUTING.md
+.github/ISSUE_TEMPLATE.md
+.github/PULL_REQUEST_TEMPLATE.md
+.github/dependabot.yml
+NIST_STRD/Bennett5.dat
+NIST_STRD/BoxBOD.dat
+NIST_STRD/Chwirut1.dat
+NIST_STRD/Chwirut2.dat
+NIST_STRD/DanWood.dat
+NIST_STRD/ENSO.dat
+NIST_STRD/Eckerle4.dat
+NIST_STRD/Gauss1.dat
+NIST_STRD/Gauss2.dat
+NIST_STRD/Gauss3.dat
+NIST_STRD/Hahn1.dat
+NIST_STRD/Kirby2.dat
+NIST_STRD/Lanczos1.dat
+NIST_STRD/Lanczos2.dat
+NIST_STRD/Lanczos3.dat
+NIST_STRD/MGH09.dat
+NIST_STRD/MGH10.dat
+NIST_STRD/MGH17.dat
+NIST_STRD/Misra1a.dat
+NIST_STRD/Misra1b.dat
+NIST_STRD/Misra1c.dat
+NIST_STRD/Misra1d.dat
+NIST_STRD/Models
+NIST_STRD/Nelson.dat
+NIST_STRD/Rat42.dat
+NIST_STRD/Rat43.dat
+NIST_STRD/Roszman1.dat
+NIST_STRD/Thurber.dat
+asv_benchmarking/README.md
+asv_benchmarking/asv.conf.json
+asv_benchmarking/run_benchmark_code.py
+asv_benchmarking/benchmarks/__init__.py
+asv_benchmarking/benchmarks/benchmarks.py
+doc/Makefile
+doc/bounds.rst
+doc/builtin_models.rst
+doc/conf.py
+doc/confidence.rst
+doc/constraints.rst
+doc/contents.rst
+doc/doc_examples_to_gallery.py
+doc/faq.rst
+doc/fitting.rst
+doc/index.rst
+doc/installation.rst
+doc/intro.rst
+doc/make.bat
+doc/model.rst
+doc/parameters.rst
+doc/support.rst
+doc/whatsnew.rst
+doc/_static/empty
+doc/_templates/indexsidebar.html
+doc/sphinx/ext_imgmath.py
+doc/sphinx/ext_mathjax.py
+doc/sphinx/theme/sphinx13/basic_layout.html
+doc/sphinx/theme/sphinx13/layout.html
+doc/sphinx/theme/sphinx13/theme.conf
+doc/sphinx/theme/sphinx13/static/bodybg.png
+doc/sphinx/theme/sphinx13/static/footerbg.png
+doc/sphinx/theme/sphinx13/static/headerbg.png
+doc/sphinx/theme/sphinx13/static/listitem.png
+doc/sphinx/theme/sphinx13/static/lmfitheader.png
+doc/sphinx/theme/sphinx13/static/relbg.png
+doc/sphinx/theme/sphinx13/static/sphinx13.css
+examples/NIST_Gauss2.dat
+examples/README.txt
+examples/doc_builtinmodels_nistgauss.py
+examples/doc_builtinmodels_nistgauss2.py
+examples/doc_builtinmodels_peakmodels.py
+examples/doc_builtinmodels_splinemodel.py
+examples/doc_builtinmodels_stepmodel.py
+examples/doc_confidence_advanced.py
+examples/doc_confidence_basic.py
+examples/doc_confidence_chi2_maps.py
+examples/doc_fitting_emcee.py
+examples/doc_fitting_withreport.py
+examples/doc_model_composite.py
+examples/doc_model_gaussian.py
+examples/doc_model_loadmodel.py
+examples/doc_model_loadmodelresult.py
+examples/doc_model_loadmodelresult2.py
+examples/doc_model_savemodel.py
+examples/doc_model_savemodelresult.py
+examples/doc_model_savemodelresult2.py
+examples/doc_model_two_components.py
+examples/doc_model_uncertainty.py
+examples/doc_model_uncertainty2.py
+examples/doc_model_with_iter_callback.py
+examples/doc_model_with_nan_policy.py
+examples/doc_parameters_basic.py
+examples/doc_parameters_valuesdict.py
+examples/example_Model_interface.py
+examples/example_brute.py
+examples/example_complex_resonator_model.py
+examples/example_confidence_interval.py
+examples/example_detect_outliers.py
+examples/example_diffev.py
+examples/example_emcee_Model_interface.py
+examples/example_expression_model.py
+examples/example_fit_multi_datasets.py
+examples/example_fit_with_algebraic_constraint.py
+examples/example_fit_with_bounds.py
+examples/example_fit_with_derivfunc.py
+examples/example_fit_with_inequality.py
+examples/example_reduce_fcn.py
+examples/example_sympy.py
+examples/example_two_dimensional_peak.py
+examples/example_use_pandas.py
+examples/lmfit_emcee_model_selection.py
+examples/model1d_gauss.dat
+examples/peak.csv
+examples/sinedata.dat
+examples/test_peak.dat
+examples/test_splinepeak.dat
+lmfit/__init__.py
+lmfit/_ampgo.py
+lmfit/conf_emcee.py
+lmfit/confidence.py
+lmfit/jsonutils.py
+lmfit/lineshapes.py
+lmfit/minimizer.py
+lmfit/model.py
+lmfit/models.py
+lmfit/parameter.py
+lmfit/printfuncs.py
+lmfit/version.py
+lmfit.egg-info/PKG-INFO
+lmfit.egg-info/SOURCES.txt
+lmfit.egg-info/dependency_links.txt
+lmfit.egg-info/requires.txt
+lmfit.egg-info/top_level.txt
+tests/NISTModels.py
+tests/__init__.py
+tests/conftest.py
+tests/gauss_modelresult_lmfit100.sav
+tests/test_1variable.py
+tests/test_NIST_Strd.py
+tests/test_algebraic_constraint.py
+tests/test_ampgo.py
+tests/test_basicfit.py
+tests/test_basinhopping.py
+tests/test_bounded_jacobian.py
+tests/test_bounds.py
+tests/test_brute.py
+tests/test_builtin_models.py
+tests/test_confidence.py
+tests/test_covariance_matrix.py
+tests/test_custom_independentvar.py
+tests/test_default_kws.py
+tests/test_dual_annealing.py
+tests/test_itercb.py
+tests/test_jsonutils.py
+tests/test_least_squares.py
+tests/test_lineshapes.py
+tests/test_manypeaks_speed.py
+tests/test_max_nfev.py
+tests/test_minimizer.py
+tests/test_model.py
+tests/test_model_saveload.py
+tests/test_model_uncertainties.py
+tests/test_models.py
+tests/test_multidatasets.py
+tests/test_nose.py
+tests/test_pandas.py
+tests/test_parameter.py
+tests/test_parameters.py
+tests/test_printfuncs.py
+tests/test_shgo.py
+tests/test_stepmodel.py
\ No newline at end of file
diff --git a/lmfit.egg-info/dependency_links.txt b/lmfit.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lmfit.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/lmfit.egg-info/requires.txt b/lmfit.egg-info/requires.txt
new file mode 100644
index 0000000..f1aa316
--- /dev/null
+++ b/lmfit.egg-info/requires.txt
@@ -0,0 +1,62 @@
+asteval>=0.9.28
+numpy>=1.19
+scipy>=1.6
+uncertainties>=3.1.4
+
+[all]
+build
+check-wheel-contents
+pre-commit
+twine
+coverage
+flaky
+pytest
+pytest-cov
+cairosvg
+corner
+dill
+emcee>=3.0.0
+ipykernel
+jupyter_sphinx>=0.2.4
+matplotlib
+numdifftools
+pandas
+Pillow
+Sphinx
+sphinx-gallery>=0.10
+sphinxcontrib-svg2pdfconverter
+sympy
+
+[all:platform_system == "Windows"]
+pycairo
+
+[dev]
+build
+check-wheel-contents
+pre-commit
+twine
+
+[doc]
+cairosvg
+corner
+dill
+emcee>=3.0.0
+ipykernel
+jupyter_sphinx>=0.2.4
+matplotlib
+numdifftools
+pandas
+Pillow
+Sphinx
+sphinx-gallery>=0.10
+sphinxcontrib-svg2pdfconverter
+sympy
+
+[doc:platform_system == "Windows"]
+pycairo
+
+[test]
+coverage
+flaky
+pytest
+pytest-cov
diff --git a/lmfit.egg-info/top_level.txt b/lmfit.egg-info/top_level.txt
new file mode 100644
index 0000000..536bcc8
--- /dev/null
+++ b/lmfit.egg-info/top_level.txt
@@ -0,0 +1 @@
+lmfit
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
new file mode 100644
index 0000000..2e31e81
--- /dev/null
+++ b/lmfit/__init__.py
@@ -0,0 +1,45 @@
+"""
+LMFIT: Non-Linear Least-Squares Minimization and Curve-Fitting for Python.
+
+Lmfit provides a high-level interface to non-linear optimization and
+curve-fitting problems for Python. It builds on the Levenberg-Marquardt
+algorithm of `scipy.optimize.leastsq`, but also supports most of the other
+optimization methods present in `scipy.optimize`. It has a number of
+useful enhancements, including:
+
+ * Using Parameter objects instead of plain floats as variables. A
+    Parameter has a value that can be varied in the fit or held fixed, and
+    can have upper and/or lower bounds. It can even have a value that is
+ constrained by an algebraic expression of other Parameter values.
+
+ * Ease of changing fitting algorithms. Once a fitting model is set
+ up, one can change the fitting algorithm without changing the
+ objective function.
+
+ * Improved estimation of confidence intervals. While
+ `scipy.optimize.leastsq` will automatically calculate uncertainties
+ and correlations from the covariance matrix, lmfit also has functions
+ to explicitly explore parameter space to determine confidence levels
+ even for the most difficult cases.
+
+ * Improved curve-fitting with the Model class. This extends the
+ capabilities of `scipy.optimize.curve_fit`, allowing you to turn a
+ function that models your data into a Python class that helps you
+ parametrize and fit data with that model.
+
+ * Many built-in models for common lineshapes are included and ready
+ to use.
+
+Copyright (c) 2022 Lmfit Developers ; BSD-3 license ; see LICENSE
+
+"""
+from asteval import Interpreter
+
+from .confidence import conf_interval, conf_interval2d
+from .minimizer import Minimizer, MinimizerException, minimize
+from .parameter import Parameter, Parameters, create_params
+from .printfuncs import ci_report, fit_report, report_ci, report_fit
+from .model import Model, CompositeModel
+from . import lineshapes, models
+
+from lmfit.version import version as __version__
diff --git a/lmfit/_ampgo.py b/lmfit/_ampgo.py
new file mode 100644
index 0000000..607c439
--- /dev/null
+++ b/lmfit/_ampgo.py
@@ -0,0 +1,298 @@
+"""Adaptive Memory Programming for Global Optimization (AMPGO).
+
+added to lmfit by Renee Otten (2018)
+
+based on the Python implementation of Andrea Gavana
+(see: http://infinity77.net/global_optimization/)
+
+Implementation details can be found in this paper:
+ http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf
+
+"""
+
+import numpy as np
+from scipy.optimize import minimize
+
+SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP']
+
+
+def ampgo(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None,
+ maxfunevals=None, totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02,
+ eps2=0.1, tabulistsize=5, tabustrategy='farthest', disp=False):
+ """Find the global minimum of a multivariate function using AMPGO.
+
+    AMPGO stands for "Adaptive Memory Programming for Global Optimization".
+
+ Parameters
+ ----------
+ objfun : callable
+ Objective function to be minimized. The function must have the
+ signature::
+
+ objfun(params, *args, **kws)
+
+ x0 : numpy.ndarray
+ Initial guesses for parameter values.
+ args : tuple, optional
+ Additional arguments passed to `objfun`.
+ local : str, optional
+ Name of the local minimization method. Valid options are:
+
+ - `'L-BFGS-B'` (default)
+ - `'Nelder-Mead'`
+ - `'Powell'`
+ - `'TNC'`
+ - `'SLSQP'`
+
+ local_opts : dict, optional
+ Options to pass to the local minimizer.
+ bounds : sequence, optional
+ List of tuples specifying the lower and upper bound for each
+ independent variable ``[(xl0, xu0), (xl1, xu1), ...]``.
+ maxfunevals : int, optional
+ Maximum number of function evaluations. If None, the optimization
+ will stop after `totaliter` number of iterations.
+ totaliter : int, optional
+ Maximum number of global iterations.
+ maxiter : int, optional
+ Maximum number of 'Tabu Tunneling' iterations during each global
+ iteration.
+    glbtol : float, optional
+        Tolerance for deciding whether or not to accept a solution after a
+        tunneling phase.
+ eps1 : float, optional
+ Constant used to define an aspiration value for the objective
+ function during the Tunneling phase.
+ eps2 : float, optional
+ Perturbation factor used to move away from the latest local
+ minimum at the start of a Tunneling phase.
+ tabulistsize : int, optional
+ Size of the (circular) tabu search list.
+ tabustrategy : {'farthest', 'oldest'}, optional
+ Strategy to use when the size of the tabu list exceeds
+ `tabulistsize`. It can be 'oldest' to drop the oldest point from
+ the tabu list or 'farthest' (default) to drop the element farthest
+ from the last local minimum found.
+ disp : bool, optional
+ Set to True to print convergence messages.
+
+ Returns
+ -------
+ tuple
+ A tuple of 5 elements, in the following order:
+ 1. **best_x** (array_like): the estimated position of the global
+ minimum.
+ 2. **best_f** (float): the value of `objfun` at the minimum.
+ 3. **evaluations** (int): the number of function evaluations.
+        4. **msg** (str): a message describing the cause of the
+ termination.
+ 5. **tunnel_info** (tuple): a tuple containing the total number
+ of Tunneling phases performed and the successful ones.
+
+ Notes
+ -----
+ The detailed implementation of AMPGO is described in the paper
+ "Adaptive Memory Programming for Constrained Global Optimization"
+ located here:
+
+ http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf
+
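+    Examples
+    --------
+    A sketch, minimizing a simple quadratic bowl::
+
+        best_x, best_f, nfev, msg, tunnel_info = ampgo(
+            lambda x: (x**2).sum(), x0=[1.0, 2.0],
+            bounds=[(-5, 5), (-5, 5)])
+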
+ """
+ if local not in SCIPY_LOCAL_SOLVERS:
+ raise Exception(f'Invalid local solver selected: {local}')
+
+ x0 = np.atleast_1d(x0)
+ n = len(x0)
+
+ if bounds is None:
+ bounds = [(None, None)] * n
+ if len(bounds) != n:
+ raise ValueError('length of x0 != length of bounds')
+
+ bounds = [b if b is not None else (None, None) for b in bounds]
+ _bounds = [(-np.inf if lb is None else lb, np.inf if ub is None else ub)
+ for lb, ub in bounds]
+ low, up = zip(*_bounds)
+
+ if maxfunevals is None:
+ maxfunevals = np.inf
+
+ if tabulistsize < 1:
+ raise Exception(f'Invalid tabulistsize specified: {tabulistsize}. '
+ 'It should be an integer greater than zero.')
+ if tabustrategy not in ['oldest', 'farthest']:
+ raise Exception(f'Invalid tabustrategy specified: {tabustrategy}. '
+ 'It must be one of "oldest" or "farthest".')
+
+ tabulist = []
+ best_f = np.inf
+ best_x = x0
+
+ global_iter = 0
+ all_tunnel = success_tunnel = 0
+ evaluations = 0
+
+ local_tol = min(1e-8, glbtol)
+
+ while 1:
+
+ # minimization to find local minimum, either from initial values or
+ # after a successful tunneling loop
+ if disp:
+ print('\n{0}\nStarting MINIMIZATION Phase {1:d}\n{0}'
+ .format('='*72, global_iter+1))
+
+ options = {'maxiter': max(1, maxfunevals), 'disp': disp}
+ if local_opts is not None:
+ options.update(local_opts)
+ res = minimize(objfun, x0, args=args, method=local, bounds=bounds,
+ tol=local_tol, options=options)
+ xf, yf, num_fun = res['x'], res['fun'], res['nfev']
+ if isinstance(yf, np.ndarray):
+ yf = yf[0]
+
+ maxfunevals -= num_fun
+ evaluations += num_fun
+
+ if yf < best_f:
+ best_f = yf
+ best_x = xf
+
+ if disp:
+ print(f'\n\n ==> Reached local minimum: {yf:.5g}\n')
+
+ if maxfunevals <= 0:
+ if disp:
+ print('='*72)
+ return (best_x, best_f, evaluations,
+ 'Maximum number of function evaluations exceeded',
+ (all_tunnel, success_tunnel))
+
+ # if needed, drop a value from the tabu tunneling list and add the
+ # current solution
+ tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
+ tabulist.append(xf)
+
+ i = improve = 0
+
+ while i < maxiter and improve == 0:
+
+ if disp:
+ print('{0}\nStarting TUNNELING Phase ({1:d}-{2:d})\n{0}'
+ .format('='*72, global_iter+1, i+1))
+
+ all_tunnel += 1
+
+ # generate a new starting point away from the current solution
+ r = np.random.uniform(-1.0, 1.0, size=(n, ))
+ beta = eps2*np.linalg.norm(xf) / np.linalg.norm(r)
+
+ if np.abs(beta) < 1e-8:
+ beta = eps2
+
+ x0 = xf + beta*r
+
+ # make sure that the new starting point is within bounds
+ x0 = np.where(x0 < low, low, x0)
+ x0 = np.where(x0 > up, up, x0)
+
+ # aspired value of the objective function for the tunneling loop
+ aspiration = best_f - eps1*(1.0 + np.abs(best_f))
+
+ tunnel_args = tuple([objfun, aspiration, tabulist] + list(args))
+
+ options = {'maxiter': max(1, maxfunevals), 'disp': disp}
+ if local_opts is not None:
+ options.update(local_opts)
+
+ res = minimize(tunnel, x0, args=tunnel_args, method=local,
+ bounds=bounds, tol=local_tol, options=options)
+ xf, yf, num_fun = res['x'], res['fun'], res['nfev']
+ if isinstance(yf, np.ndarray):
+ yf = yf[0]
+
+ maxfunevals -= num_fun
+ evaluations += num_fun
+
+ yf = inverse_tunnel(xf, yf, aspiration, tabulist)
+
+ if yf <= best_f + glbtol:
+ oldf = best_f
+ best_f = yf
+ best_x = xf
+ improve = 1
+ success_tunnel += 1
+
+ if disp:
+ print('\n\n ==> Successful tunnelling phase. Reached new '
+ f'local minimum: {yf:.5g} < {oldf:.5g}\n')
+
+ i += 1
+
+ if maxfunevals <= 0:
+ return (best_x, best_f, evaluations,
+ 'Maximum number of function evaluations exceeded',
+ (all_tunnel, success_tunnel))
+
+ tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
+ tabulist.append(xf)
+
+ if disp:
+ print('='*72)
+
+ global_iter += 1
+ x0 = xf.copy()
+
+ if global_iter >= totaliter:
+ return (best_x, best_f, evaluations,
+ 'Maximum number of global iterations exceeded',
+ (all_tunnel, success_tunnel))
+
+
+def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy):
+ """Drop a point from the tabu search list."""
+ if len(tabulist) < tabulistsize:
+ return tabulist
+
+ if tabustrategy == 'oldest':
+ tabulist.pop(0)
+ else:
+ distance = np.sqrt(np.sum((tabulist - xf)**2, axis=1))
+ index = np.argmax(distance)
+ tabulist.pop(index)
+
+ return tabulist
+
+
+def tunnel(x0, *args):
+ """Tunneling objective function.
+
+ This function has a global minimum of zero at any feasible point where
+ ``f(x) = aspiration``, and minimizing this expression tends to move
+ away from all points in `tabulist`.
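+
+    Concretely, the value computed is
+    ``(f(x) - aspiration)**2 / prod_i ||x - tabu_i||``, so each point in
+    `tabulist` acts as a repeller.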
+
+ """
+ objfun, aspiration, tabulist, *fun_args = args
+
+ numerator = (objfun(x0, *fun_args) - aspiration)**2
+ denominator = 1.0
+
+ for tabu in tabulist:
+ denominator = denominator * np.sqrt(np.sum((x0 - tabu)**2))
+
+ ytf = numerator/denominator
+
+ return ytf
+
+
+def inverse_tunnel(xtf, ytf, aspiration, tabulist):
+ """Calculate the function value after a tunneling phase step."""
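+    # tunnel() returns T(x) = (f(x) - aspiration)**2 / prod_i ||x - tabu_i||;
+    # invert it here: f(x) = aspiration + sqrt(T(x) * prod_i ||x - tabu_i||)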
+ denominator = 1.0
+
+ for tabu in tabulist:
+ denominator = denominator * np.sqrt(np.sum((xtf - tabu)**2))
+
+ numerator = ytf*denominator
+ yf = aspiration + np.sqrt(numerator)
+
+ return yf
diff --git a/lmfit/conf_emcee.py b/lmfit/conf_emcee.py
new file mode 100644
index 0000000..ff83fa0
--- /dev/null
+++ b/lmfit/conf_emcee.py
@@ -0,0 +1,500 @@
+#!/usr/bin/env python
+"""Wrapper for emcee."""
+
+import numpy as np
+from .minimizer import AbortFitException, _nan_policy
+
+# check for emcee
+try:
+ import emcee
+ from emcee.autocorr import AutocorrError
+ HAS_EMCEE = int(emcee.__version__[0]) >= 3
+except ImportError:
+ HAS_EMCEE = False
+
+# check for pandas
+try:
+ import pandas as pd
+ from pandas import isnull
+ HAS_PANDAS = True
+except ImportError:
+ HAS_PANDAS = False
+ isnull = np.isnan
+
+def _lnprob(minimizer, theta, userfcn, params, var_names, bounds, userargs=(),
+ userkws=None, float_behavior='posterior', is_weighted=True,
+ nan_policy='raise'):
+ """Calculate the log-posterior probability.
+
+ See the `Minimizer.emcee` method for more details.
+
+ Parameters
+ ----------
+ minimizer : minimizer
+ Minimizer instance
+ theta : sequence
+ Float parameter values (only those being varied).
+ userfcn : callable
+ User objective function.
+ params : Parameters
+ The entire set of Parameters.
+ var_names : list
+ The names of the parameters that are varying.
+ bounds : numpy.ndarray
+ Lower and upper bounds of parameters, with shape
+ ``(nvarys, 2)``.
+ userargs : tuple, optional
+ Extra positional arguments required for user objective function.
+ userkws : dict, optional
+ Extra keyword arguments required for user objective function.
+ float_behavior : {'posterior', 'chi2'}, optional
+ Specifies meaning of objective when it returns a float. Use
+ `'posterior'` if objective function returns a log-posterior
+ probability (default) or `'chi2'` if it returns a chi2 value.
+ is_weighted : bool, optional
+ If `userfcn` returns a vector of residuals then `is_weighted`
+ (default is True) specifies if the residuals have been weighted
+ by data uncertainties.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ Specifies action if `userfcn` returns NaN values. Use `'raise'`
+ (default) to raise a `ValueError`, `'propagate'` to use values
+ as-is, or `'omit'` to filter out the non-finite values.
+
+ Returns
+ -------
+ lnprob : float
+ Log posterior probability.
+
+ """
+ # the comparison has to be done on theta and bounds. DO NOT inject theta
+ # values into Parameters, then compare Parameters values to the bounds.
+ # Parameters values are clipped to stay within bounds.
+ if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]):
+ return -np.inf
+ for name, val in zip(var_names, theta):
+ params[name].value = val
+ userkwargs = {}
+ if userkws is not None:
+ userkwargs = userkws
+ # update the constraints
+ params.update_constraints()
+ # now calculate the log-likelihood
+ out = userfcn(params, *userargs, **userkwargs)
+ minimizer.result.nfev += 1
+ if callable(minimizer.iter_cb):
+ abort = minimizer.iter_cb(params, minimizer.result.nfev, out,
+ *userargs, **userkwargs)
+ minimizer._abort = minimizer._abort or abort
+ if minimizer._abort:
+ minimizer.result.residual = out
+ minimizer._lastpos = theta
+ raise AbortFitException("fit aborted by user.")
+ else:
+ out = _nan_policy(np.asarray(out).ravel(),
+ nan_policy=minimizer.nan_policy)
+ lnprob = np.asarray(out).ravel()
+ if len(lnprob) == 0:
+ lnprob = np.array([-1.e100])
+ if lnprob.size > 1:
+ # objective function returns a vector of residuals
+ if '__lnsigma' in params and not is_weighted:
+ # marginalise over a constant data uncertainty
+ __lnsigma = params['__lnsigma'].value
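+            # c is the per-point Gaussian normalisation term
+            # log(2*pi*sigma**2), with sigma = exp(__lnsigma)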
+ c = np.log(2 * np.pi) + 2 * __lnsigma
+ lnprob = -0.5 * np.sum((lnprob / np.exp(__lnsigma)) ** 2 + c)
+ else:
+ lnprob = -0.5 * (lnprob * lnprob).sum()
+ else:
+ # objective function returns a single value.
+ # use float_behaviour to figure out if the value is posterior or chi2
+ if float_behavior == 'posterior':
+ pass
+ elif float_behavior == 'chi2':
+ lnprob *= -0.5
+ else:
+            raise ValueError("float_behavior must be either 'posterior' or "
+                             f"'chi2', not '{float_behavior}'")
+ return lnprob
+
+def emcee(minimizer, params=None, steps=1000, nwalkers=100, burn=0, thin=1,
+ ntemps=1, pos=None, reuse_sampler=False, workers=1,
+ float_behavior='posterior', is_weighted=True, seed=None,
+ progress=True, run_mcmc_kwargs={}):
+ r"""Bayesian sampling of the posterior distribution.
+
+ The method uses the ``emcee`` Markov Chain Monte Carlo package and
+ assumes that the prior is Uniform. You need to have ``emcee``
+ version 3 or newer installed to use this method.
+
+ Parameters
+ ----------
+ minimizer : minimizer
+ Minimizer instance
+ params : Parameters, optional
+ Parameters to use as starting point. If this is not specified
+ then the Parameters used to initialize the Minimizer object
+ are used.
+ steps : int, optional
+        How many samples to draw from the posterior distribution for
+        each of the walkers (default is 1000).
+ nwalkers : int, optional
+        Should be set so :math:`nwalkers >> nvarys`, where ``nvarys``
+        is the number of parameters being varied during the fit.
+ 'Walkers are the members of the ensemble. They are almost like
+ separate Metropolis-Hastings chains but, of course, the proposal
+ distribution for a given walker depends on the positions of all
+ the other walkers in the ensemble.' - from the `emcee` webpage.
+ burn : int, optional
+ Discard this many samples from the start of the sampling regime.
+ thin : int, optional
+ Only accept 1 in every `thin` samples.
+ ntemps : int, deprecated
+ ntemps has no effect.
+ pos : numpy.ndarray, optional
+ Specify the initial positions for the sampler, an ndarray of
+ shape ``(nwalkers, nvarys)``. You can also initialise using a
+ previous chain of the same `nwalkers` and ``nvarys``. Note that
+ ``nvarys`` may be one larger than you expect it to be if your
+ ``userfcn`` returns an array and ``is_weighted=False``.
+ reuse_sampler : bool, optional
+ Set to True if you have already run `emcee` with the
+ `Minimizer` instance and want to continue to draw from its
+ ``sampler`` (and so retain the chain history). If False, a
+ new sampler is created. The keywords `nwalkers`, `pos`, and
+ `params` will be ignored when this is set, as they will be set
+ by the existing sampler.
+ **Important**: the Parameters used to create the sampler must
+ not change in-between calls to `emcee`. Alteration of Parameters
+ would include changed ``min``, ``max``, ``vary`` and ``expr``
+ attributes. This may happen, for example, if you use an altered
+ Parameters object and call the `minimize` method in-between
+ calls to `emcee`.
+ workers : Pool-like or int, optional
+ For parallelization of sampling. It can be any Pool-like object
+ with a map method that follows the same calling sequence as the
+ built-in `map` function. If int is given as the argument, then
+ a multiprocessing-based pool is spawned internally with the
+ corresponding number of parallel processes. 'mpi4py'-based
+ parallelization and 'joblib'-based parallelization pools can
+ also be used here. **Note**: because of multiprocessing
+ overhead it may only be worth parallelising if the objective
+ function is expensive to calculate, or if there are a large
+ number of objective evaluations per step
+ (``nwalkers * nvarys``).
+ float_behavior : str, optional
+ Meaning of float (scalar) output of objective function. Use
+ `'posterior'` if it returns a log-posterior probability or
+ `'chi2'` if it returns :math:`\chi^2`. See Notes for further
+ details.
+ is_weighted : bool, optional
+ Has your objective function been weighted by measurement
+ uncertainties? If ``is_weighted=True`` then your objective
+ function is assumed to return residuals that have been divided
+ by the true measurement uncertainty ``(data - model) / sigma``.
+ If ``is_weighted=False`` then the objective function is
+ assumed to return unweighted residuals, ``data - model``. In
+ this case `emcee` will employ a positive measurement
+ uncertainty during the sampling. This measurement uncertainty
+ will be present in the output params and output chain with the
+ name ``__lnsigma``. A side effect of this is that you cannot
+ use this parameter name yourself.
+ **Important**: this parameter only has any effect if your
+ objective function returns an array. If your objective function
+ returns a float, then this parameter is ignored. See Notes for
+ more details.
+ seed : int or numpy.random.RandomState, optional
+ If `seed` is an ``int``, a new `numpy.random.RandomState`
+ instance is used, seeded with `seed`.
+ If `seed` is already a `numpy.random.RandomState` instance,
+ then that `numpy.random.RandomState` instance is used. Specify
+ `seed` for repeatable minimizations.
+ progress : bool, optional
+ Print a progress bar to the console while running.
+ run_mcmc_kwargs : dict, optional
+ Additional (optional) keyword arguments that are passed to
+ ``emcee.EnsembleSampler.run_mcmc``.
+
+ Returns
+ -------
+ MinimizerResult
+ MinimizerResult object containing updated params, statistics,
+ etc. The updated params represent the median of the samples,
+ while the uncertainties are half the difference of the 15.87
+ and 84.13 percentiles. The `MinimizerResult` contains a few
+        additional attributes: `chain` contains the samples and has
+        shape ``((steps - burn) // thin, nwalkers, nvarys)``.
+        `flatchain` is a `pandas.DataFrame` of the flattened chain,
+        which can be accessed with `result.flatchain[parname]`.
+        `lnprob` contains the log probability for each sample in
+        `chain`. The sample with the highest probability corresponds
+        to the maximum likelihood estimate. `acor` is an array
+        containing the auto-correlation time for each parameter, if
+        the auto-correlation time can be computed from the chain.
+        Finally, `acceptance_fraction` is an array of the fraction of
+        steps accepted for each walker.
+
+ Notes
+ -----
+ This method samples the posterior distribution of the parameters
+ using Markov Chain Monte Carlo. It calculates the log-posterior
+ probability of the model parameters, `F`, given the data, `D`,
+ :math:`\ln p(F_{true} | D)`. This 'posterior probability' is
+ given by:
+
+ .. math::
+
+ \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
+
+ where :math:`\ln p(D | F_{true})` is the 'log-likelihood' and
+    :math:`\ln p(F_{true})` is the 'log-prior'. The default log-prior
+    encodes the prior knowledge that the parameters must lie within
+    their bounds: the log-prior probability is ``-numpy.inf``
+    (impossible) if any of the parameters is outside its limits, and
+    zero if all the parameters are inside their bounds (a uniform
+    prior). The log-likelihood function is [1]_:
+
+ .. math::
+
+ \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{(g_n(F_{true}) - D_n)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
+
+ The first term represents the residual (:math:`g` being the
+ generative model, :math:`D_n` the data and :math:`s_n` the
+ measurement uncertainty). This gives :math:`\chi^2` when summed
+ over all data points. The objective function may also return the
+ log-posterior probability, :math:`\ln p(F_{true} | D)`. Since the
+ default log-prior term is zero, the objective function can also
+ just return the log-likelihood, unless you wish to create a
+ non-uniform prior.
+
+ If the objective function returns a float value, this is assumed
+ by default to be the log-posterior probability, (`float_behavior`
+ default is 'posterior'). If your objective function returns
+ :math:`\chi^2`, then you should use ``float_behavior='chi2'``
+ instead.
+
+ By default objective functions may return an ndarray of (possibly
+ weighted) residuals. In this case, use `is_weighted` to select
+ whether these are correctly weighted by measurement uncertainty.
+ Note that this ignores the second term above, so that to calculate
+ a correct log-posterior probability value your objective function
+ should return a float value. With ``is_weighted=False`` the data
+ uncertainty, `s_n`, will be treated as a nuisance parameter to be
+    marginalized out. This assumes a single strictly positive
+    uncertainty, shared by every data point (homoscedasticity),
+ :math:`s_n = \exp(\rm{\_\_lnsigma})`. ``__lnsigma`` will be
+ present in `MinimizerResult.params`, as well as `Minimizer.chain`
+ and ``nvarys`` will be increased by one.
+
+ References
+ ----------
+ .. [1] https://emcee.readthedocs.io
+
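+    Examples
+    --------
+    A minimal sketch; ``residual`` and ``params`` are illustrative
+    names for a user-supplied objective function and its Parameters:
+
+    >>> mini = Minimizer(residual, params)
+    >>> out = emcee(mini, steps=2000, burn=300, thin=20)
+    >>> stats = out.flatchain.describe()  # summary of posterior samples
+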
+ """
+ if not HAS_EMCEE:
+ raise NotImplementedError('emcee version 3 is required.')
+
+ if ntemps > 1:
+ msg = ("'ntemps' has no effect anymore, since the PTSampler was "
+ "removed from emcee version 3.")
+ raise DeprecationWarning(msg)
+
+ tparams = params
+    # if you're reusing the sampler then nwalkers has to be
+    # determined from the previous sampling
+    if reuse_sampler:
+        if not hasattr(minimizer, 'sampler') or not hasattr(minimizer, '_lastpos'):
+            raise ValueError("You wanted to use an existing sampler, but "
+                             "it hasn't been created yet")
+        if len(minimizer._lastpos.shape) == 2:
+            nwalkers = minimizer._lastpos.shape[0]
+        elif len(minimizer._lastpos.shape) == 3:
+            nwalkers = minimizer._lastpos.shape[1]
+        tparams = None
+
+    result = minimizer.prepare_fit(params=tparams)
+ params = result.params
+
+ # check if the userfcn returns a vector of residuals
+    out = minimizer.userfcn(params, *minimizer.userargs, **minimizer.userkws)
+ out = np.asarray(out).ravel()
+ if out.size > 1 and is_weighted is False and '__lnsigma' not in params:
+ # __lnsigma should already be in params if is_weighted was
+ # previously set to True.
+ params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf,
+ vary=True)
+ # have to re-prepare the fit
+        result = minimizer.prepare_fit(params)
+ params = result.params
+
+ result.method = 'emcee'
+
+ # Removing internal parameter scaling. We could possibly keep it,
+ # but I don't know how this affects the emcee sampling.
+ bounds = []
+ var_arr = np.zeros(len(result.var_names))
+ i = 0
+ for par in params:
+ param = params[par]
+ if param.expr is not None:
+ param.vary = False
+ if param.vary:
+ var_arr[i] = param.value
+ i += 1
+ else:
+ # don't want to append bounds if they're not being varied.
+ continue
+ param.from_internal = lambda val: val
+ lb, ub = param.min, param.max
+        if lb is None or np.isnan(lb):
+            lb = -np.inf
+        if ub is None or np.isnan(ub):
+            ub = np.inf
+ bounds.append((lb, ub))
+ bounds = np.array(bounds)
+
+    minimizer.nvarys = len(result.var_names)
+
+ # set up multiprocessing options for the samplers
+ auto_pool = None
+ sampler_kwargs = {}
+ if isinstance(workers, int) and workers > 1 and HAS_DILL:
+ auto_pool = multiprocessing.Pool(workers)
+ sampler_kwargs['pool'] = auto_pool
+ elif hasattr(workers, 'map'):
+ sampler_kwargs['pool'] = workers
+
+ # function arguments for the log-probability functions
+ # these values are sent to the log-probability functions by the sampler.
+    lnprob_args = (minimizer.userfcn, params, result.var_names, bounds)
+    lnprob_kwargs = {'is_weighted': is_weighted,
+                     'float_behavior': float_behavior,
+                     'userargs': minimizer.userargs,
+                     'userkws': minimizer.userkws,
+                     'nan_policy': minimizer.nan_policy}
+
+ sampler_kwargs['args'] = lnprob_args
+ sampler_kwargs['kwargs'] = lnprob_kwargs
+
+ # set up the random number generator
+ rng = _make_random_gen(seed)
+
+ # now initialise the samplers
+    if reuse_sampler:
+        if auto_pool is not None:
+            minimizer.sampler.pool = auto_pool
+
+        p0 = minimizer._lastpos
+        if p0.shape[-1] != minimizer.nvarys:
+            raise ValueError("You cannot reuse the sampler if the number "
+                             "of varying parameters has changed")
+
+    else:
+        p0 = 1 + rng.randn(nwalkers, minimizer.nvarys) * 1.e-4
+        p0 *= var_arr
+        sampler_kwargs.setdefault('pool', auto_pool)
+
+        def _wrapped_lnprob(theta, *args, **kws):
+            # bind the Minimizer instance to the module-level log-probability
+            # function (assuming a `_lnprob(minimizer, theta, ...)` signature)
+            return _lnprob(minimizer, theta, *args, **kws)
+
+        minimizer.sampler = emcee.EnsembleSampler(nwalkers, minimizer.nvarys,
+                                                  _wrapped_lnprob,
+                                                  **sampler_kwargs)
+
+ # user supplies an initialisation position for the chain
+ # If you try to run the sampler with p0 of a wrong size then you'll get
+ # a ValueError. Note, you can't initialise with a position if you are
+ # reusing the sampler.
+ if pos is not None and not reuse_sampler:
+ tpos = np.asfarray(pos)
+ if p0.shape == tpos.shape:
+ pass
+ # trying to initialise with a previous chain
+ elif tpos.shape[-1] == self.nvarys:
+ tpos = tpos[-1]
+ else:
+ raise ValueError('pos should have shape (nwalkers, nvarys)')
+ p0 = tpos
+
+ # if you specified a seed then you also need to seed the sampler
+ if seed is not None:
+        minimizer.sampler.random_state = rng.get_state()
+
+    if run_mcmc_kwargs is None:
+        run_mcmc_kwargs = {}
+    elif not isinstance(run_mcmc_kwargs, dict):
+        raise ValueError('run_mcmc_kwargs should be a dict of keyword arguments')
+
+ # now do a production run, sampling all the time
+ try:
+        output = minimizer.sampler.run_mcmc(p0, steps, progress=progress,
+                                            **run_mcmc_kwargs)
+        minimizer._lastpos = output.coords
+    except AbortFitException:
+        result.aborted = True
+        result.message = "Fit aborted by user callback. Could not estimate error-bars."
+        result.success = False
+        result.nfev = minimizer.result.nfev
+
+    # discard the burn samples and thin
+    chain = minimizer.sampler.get_chain(thin=thin, discard=burn)
+    lnprobability = minimizer.sampler.get_log_prob(thin=thin, discard=burn)
+    flatchain = chain.reshape((-1, minimizer.nvarys))
+ if not result.aborted:
+ quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
+
+ for i, var_name in enumerate(result.var_names):
+ std_l, median, std_u = quantiles[:, i]
+ params[var_name].value = median
+ params[var_name].stderr = 0.5 * (std_u - std_l)
+ params[var_name].correl = {}
+
+ params.update_constraints()
+
+ # work out correlation coefficients
+ corrcoefs = np.corrcoef(flatchain.T)
+
+ for i, var_name in enumerate(result.var_names):
+ for j, var_name2 in enumerate(result.var_names):
+ if i != j:
+ result.params[var_name].correl[var_name2] = corrcoefs[i, j]
+
+ result.chain = np.copy(chain)
+ result.lnprob = np.copy(lnprobability)
+ result.errorbars = True
+ result.nvarys = len(result.var_names)
+ result.nfev = nwalkers*steps
+
+ try:
+        result.acor = minimizer.sampler.get_autocorr_time()
+ except AutocorrError as e:
+ print(str(e))
+    result.acceptance_fraction = minimizer.sampler.acceptance_fraction
+
+ # Calculate the residual with the "best fit" parameters
+    out = minimizer.userfcn(params, *minimizer.userargs, **minimizer.userkws)
+    result.residual = _nan_policy(out, nan_policy=minimizer.nan_policy,
+                                  handle_inf=False)
+
+ # If uncertainty was automatically estimated, weight the residual properly
+ if not is_weighted and result.residual.size > 1 and '__lnsigma' in params:
+ result.residual /= np.exp(params['__lnsigma'].value)
+
+ # Calculate statistics for the two standard cases:
+ if isinstance(result.residual, np.ndarray) or (float_behavior == 'chi2'):
+ result._calculate_statistics()
+
+ # Handle special case unique to emcee:
+ # This should eventually be moved into result._calculate_statistics.
+ elif float_behavior == 'posterior':
+ result.ndata = 1
+ result.nfree = 1
+
+ # assuming prior prob = 1, this is true
+ _neg2_log_likel = -2*result.residual
+
+ # assumes that residual is properly weighted, avoid overflowing np.exp()
+ result.chisqr = np.exp(min(650, _neg2_log_likel))
+
+ result.redchi = result.chisqr / result.nfree
+ result.aic = _neg2_log_likel + 2 * result.nvarys
+ result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys
+
+ if auto_pool is not None:
+ auto_pool.terminate()
+
+ return result
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
new file mode 100644
index 0000000..4396fc1
--- /dev/null
+++ b/lmfit/confidence.py
@@ -0,0 +1,458 @@
+"""Contains functions to calculate confidence intervals."""
+
+from warnings import warn
+
+import numpy as np
+from scipy.optimize import root_scalar
+from scipy.special import erf
+from scipy.stats import f
+
+from .minimizer import MinimizerException
+
+CONF_ERR_GEN = 'Cannot determine Confidence Intervals'
+CONF_ERR_STDERR = f'{CONF_ERR_GEN} without sensible uncertainty estimates'
+CONF_ERR_NVARS = f'{CONF_ERR_GEN} with < 2 variables'
+
+
+def f_compare(best_fit, new_fit):
+ """Return the probability calculated using the F-test.
+
+ The null model (i.e., best-fit solution) is compared to an alternate
+ model where one or more parameters are fixed.
+
+ Parameters
+ ----------
+ best_fit : MinimizerResult
+ The result from the best-fit.
+ new_fit : MinimizerResult
+ The result from fit with the fixed parameter(s).
+
+ Returns
+ -------
+ float
+ Value of the calculated probability.
+
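+    Examples
+    --------
+    A sketch with made-up statistics; any objects exposing ``nfree``,
+    ``nvarys``, and ``chisqr`` attributes will do:
+
+    >>> from lmfit.minimizer import MinimizerResult
+    >>> best = MinimizerResult(nfree=18, nvarys=2, chisqr=10.0)
+    >>> new = MinimizerResult(nvarys=1, chisqr=12.0)
+    >>> prob = f_compare(best, new)  # probability from the F-test
+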
+ """
+ nfree = best_fit.nfree
+ nfix = best_fit.nvarys - new_fit.nvarys
+ dchi = new_fit.chisqr / best_fit.chisqr - 1.0
+ return f.cdf(dchi * nfree / nfix, nfix, nfree)
+
+
+def copy_vals(params):
+ """Save values/stderrs of parameters in a temporary dictionary."""
+ tmp_params = {}
+ for para_key in params:
+ tmp_params[para_key] = (params[para_key].value,
+ params[para_key].stderr)
+ return tmp_params
+
+
+def restore_vals(tmp_params, params):
+ """Restore values/stderrs of parameters from a temporary dictionary."""
+ for para_key in params:
+ params[para_key].value, params[para_key].stderr = tmp_params[para_key]
+
+
+def conf_interval(minimizer, result, p_names=None, sigmas=None, trace=False,
+ maxiter=200, verbose=False, prob_func=None):
+ """Calculate the confidence interval (CI) for parameters.
+
+ The parameter for which the CI is calculated will be varied, while the
+ remaining parameters are re-optimized to minimize the chi-square. The
+ resulting chi-square is used to calculate the probability with a given
+    statistic (e.g., F-test). This function uses a 1-D root finder from
+    SciPy to find the parameter values at which the requested confidence
+    level is reached.
+
+ Parameters
+ ----------
+ minimizer : Minimizer
+ The minimizer to use, holding objective function.
+ result : MinimizerResult
+ The result of running minimize().
+ p_names : list, optional
+ Names of the parameters for which the CI is calculated. If None
+ (default), the CI is calculated for every parameter.
+ sigmas : list, optional
+ The sigma-levels to find (default is [1, 2, 3]). See Notes below.
+ trace : bool, optional
+ Defaults to False; if True, each result of a probability
+ calculation is saved along with the parameter. This can be used to
+ plot so-called "profile traces".
+ maxiter : int, optional
+        Maximum number of iterations when searching for an upper limit
+        (default is 200).
+ verbose : bool, optional
+ Print extra debugging information (default is False).
+ prob_func : None or callable, optional
+ Function to calculate the probability from the optimized chi-square.
+ Default is None and uses the built-in function `f_compare`
+ (i.e., F-test).
+
+ Returns
+ -------
+ output : dict
+ A dictionary containing a list of ``(sigma, vals)``-tuples for
+ each parameter.
+    trace_dict : dict, optional
+        Only returned if ``trace=True``. A dictionary whose keys are the
+        names of the fixed parameters; each value is again a dict, keyed
+        by parameter name plus the additional key 'prob', containing
+        arrays of the corresponding values.
+
+ See Also
+ --------
+ conf_interval2d
+
+ Notes
+ -----
+ The values for `sigma` are taken as the number of standard deviations
+ for a normal distribution and converted to probabilities. That is, the
+ default ``sigma=[1, 2, 3]`` will use probabilities of 0.6827, 0.9545,
+ and 0.9973. If any of the sigma values is less than 1, that will be
+ interpreted as a probability. That is, a value of 1 and 0.6827 will
+ give the same results, within precision.
+
+ Examples
+ --------
+    >>> from lmfit import Minimizer, conf_interval
+    >>> from lmfit.printfuncs import report_ci, report_fit
+    >>> mini = Minimizer(some_func, params)
+    >>> result = mini.leastsq()
+    >>> report_fit(result)
+    ... #report
+    >>> ci = conf_interval(mini, result)
+    >>> report_ci(ci)
+    ... #report
+
+ Now with quantiles for the sigmas and using the trace.
+
+    >>> ci, trace = conf_interval(mini, result, sigmas=[0.5, 1, 2, 3],
+    ...                           trace=True)
+ >>> fixed = trace['para1']['para1']
+ >>> free = trace['para1']['not_para1']
+ >>> prob = trace['para1']['prob']
+
+ This makes it possible to plot the dependence between free and fixed
+ parameters.
+
+ """
+ if sigmas is None:
+ sigmas = [1, 2, 3]
+
+ ci = ConfidenceInterval(minimizer, result, p_names, prob_func, sigmas,
+ trace, verbose, maxiter)
+ output = ci.calc_all_ci()
+ if trace:
+ return output, ci.trace_dict
+ return output
+
+
+def map_trace_to_names(trace, params):
+ """Map trace to parameter names."""
+ out = {}
+ allnames = list(params.keys()) + ['prob']
+ for name in trace.keys():
+ tmp_dict = {}
+ tmp = np.array(trace[name])
+ for para_name, values in zip(allnames, tmp.T):
+ tmp_dict[para_name] = values
+ out[name] = tmp_dict
+ return out
+
+
+class ConfidenceInterval:
+ """Class used to calculate the confidence interval."""
+
+ def __init__(self, minimizer, result, p_names=None, prob_func=None,
+ sigmas=None, trace=False, verbose=False, maxiter=50):
+ self.verbose = verbose
+ self.minimizer = minimizer
+ self.result = result
+ self.params = result.params.copy()
+ self.org = copy_vals(self.params)
+ self.best_chi = result.chisqr
+
+ if p_names is None:
+ p_names = [i for i in self.params if self.params[i].vary]
+
+ self.p_names = p_names
+ self.fit_params = [self.params[p] for p in self.p_names]
+
+ # check that there are at least 2 true variables!
+ # check that all stderrs are sensible (including not None or NaN)
+
+ for par in self.fit_params:
+            if par.vary and (par.stderr is None or np.isnan(par.stderr)):
+ raise MinimizerException(CONF_ERR_STDERR)
+ nvars = len([p for p in self.params.values() if p.vary])
+ if nvars < 2:
+ raise MinimizerException(CONF_ERR_NVARS)
+
+ if prob_func is None:
+ self.prob_func = f_compare
+ else:
+ self.prob_func = prob_func
+ if trace:
+ self.trace_dict = {i: [] for i in self.p_names}
+
+ self.trace = trace
+ self.maxiter = maxiter
+ self.min_rel_change = 1e-5
+
+ if sigmas is None:
+ sigmas = [1, 2, 3]
+ self.sigmas = list(sigmas)
+ self.sigmas.sort()
+ self.probs = []
+ for sigma in self.sigmas:
+ if sigma < 1:
+ prob = sigma
+ else:
+ prob = erf(sigma/np.sqrt(2))
+ self.probs.append(prob)
+
+ def calc_all_ci(self):
+ """Calculate all confidence intervals."""
+ out = {}
+
+ for p in self.p_names:
+ out[p] = (self.calc_ci(p, -1)[::-1] +
+ [(0., self.params[p].value)] +
+ self.calc_ci(p, 1))
+ if self.trace:
+ self.trace_dict = map_trace_to_names(self.trace_dict, self.params)
+
+ return out
+
+ def calc_ci(self, para, direction):
+ """Calculate the CI for a single parameter in a single direction.
+
+ Direction is either positive or negative 1.
+
+ """
+ if isinstance(para, str):
+ para = self.params[para]
+
+ # function used to calculate the probability
+ cache = {}
+
+ def calc_prob(val, target_prob):
+ if val not in cache:
+ cache[val] = self.calc_prob(para, val, 0)
+ return cache[val] - target_prob
+
+ if self.trace:
+ x = [i.value for i in self.params.values()]
+ self.trace_dict[para.name].append(x + [0])
+
+ para.vary = False
+ limit, max_prob = self.find_limit(para, direction)
+ a_limit = float(para.value)
+ ret = []
+ orig_warn_settings = np.geterr()
+ np.seterr(all='ignore')
+ for prob in self.probs:
+ if prob > max_prob:
+ ret.append((prob, direction*np.inf))
+ continue
+
+            sol = root_scalar(calc_prob, method='toms748',
+                              bracket=sorted([limit, a_limit]),
+                              rtol=.5e-4, args=(prob,))
+ if sol.converged:
+ val = sol.root
+ else:
+ val = np.nan
+ break
+ a_limit = val
+ ret.append((prob, val))
+
+ para.vary = True
+ self.reset_vals()
+ np.seterr(**orig_warn_settings)
+ return ret
+
+ def reset_vals(self):
+ """Reset parameter values to best-fit values."""
+ restore_vals(self.org, self.params)
+
+ def find_limit(self, para, direction):
+ """Find a value for given parameter so that prob(val) > sigmas."""
+ if self.verbose:
+ print(f'Calculating CI for {para.name}')
+ self.reset_vals()
+
+ # determine starting step
+ if para.stderr > 0 and para.stderr < abs(para.value):
+ step = para.stderr
+ else:
+ step = max(abs(para.value) * 0.2, 0.001)
+ para.vary = False
+ start_val = para.value
+
+ old_prob = 0
+ limit = start_val
+ i = 0
+ bound_reached = False
+ max_prob = max(self.probs)
+
+ while old_prob < max_prob:
+ i += 1
+ limit += step * direction
+ if limit > para.max:
+ limit = para.max
+ bound_reached = True
+ elif limit < para.min:
+ limit = para.min
+ bound_reached = True
+
+ new_prob = self.calc_prob(para, limit)
+ rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1e-12)
+ old_prob = new_prob
+ if self.verbose:
+ print(f'P({para.name}={limit}) = {new_prob}, '
+ f'max. prob={max_prob}')
+
+ # check for convergence
+ if bound_reached and new_prob < max(self.probs):
+ errmsg = (f'Bound reached with prob({para.name}={limit}) '
+ f'= {new_prob} < max(sigmas)')
+ warn(errmsg)
+ break
+
+ if i > self.maxiter:
+ errmsg = (f'maxiter={self.maxiter} reached and prob('
+ f'{para.name}={limit}) = {new_prob} < max(sigmas)')
+ warn(errmsg)
+ break
+
+ if rel_change < self.min_rel_change:
+ errmsg = (f'rel_change={rel_change} < {self.min_rel_change} '
+ f'at iteration {i} and prob({para.name}={limit}) = '
+ f'{new_prob} < max(sigmas)')
+ warn(errmsg)
+ break
+
+ self.reset_vals()
+
+ return limit, new_prob
+
+ def calc_prob(self, para, val, offset=0., restore=False):
+ """Calculate the probability for given value."""
+ if restore:
+ restore_vals(self.org, self.params)
+ para.value = val
+ save_para = self.params[para.name]
+ self.params[para.name] = para
+ self.minimizer.prepare_fit(self.params)
+ out = self.minimizer.leastsq()
+ prob = self.prob_func(self.result, out)
+
+ if self.trace:
+ x = [i.value for i in out.params.values()]
+ self.trace_dict[para.name].append(x + [prob])
+ self.params[para.name] = save_para
+ return prob - offset
+
+
+def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
+ limits=None, prob_func=None, nsigma=5, chi2_out=False):
+ r"""Calculate confidence regions for two fixed parameters.
+
+ The method itself is explained in `conf_interval`: here we are fixing
+ two parameters.
+
+ Parameters
+ ----------
+ minimizer : Minimizer
+ The minimizer to use, holding objective function.
+ result : MinimizerResult
+ The result of running minimize().
+ x_name : str
+ The name of the parameter which will be the x direction.
+ y_name : str
+ The name of the parameter which will be the y direction.
+ nx : int, optional
+ Number of points in the x direction (default is 10).
+ ny : int, optional
+ Number of points in the y direction (default is 10).
+ limits : tuple, optional
+ Should have the form ``((x_upper, x_lower), (y_upper, y_lower))``.
+ If not given, the default is nsigma*stderr in each direction.
+ prob_func : None or callable, deprecated
+ Starting with version 1.2, this argument is unused and has no effect.
+ nsigma : float or int, optional
+ Multiplier of stderr for limits (default is 5).
+    chi2_out : bool, optional
+        Whether to return the chi-square at each coordinate instead of
+        the probability (default is False).
+
+ Returns
+ -------
+    x : numpy.ndarray
+        1-D array of x-coordinates, with length `nx`.
+    y : numpy.ndarray
+        1-D array of y-coordinates, with length `ny`.
+ grid : numpy.ndarray
+ 2-D array (with shape ``(nx, ny)``) containing the calculated
+ probabilities or chi-square.
+
+ See Also
+ --------
+ conf_interval
+
+ Examples
+ --------
+ >>> mini = Minimizer(some_func, params)
+ >>> result = mini.leastsq()
+ >>> x, y, gr = conf_interval2d(mini, result, 'para1','para2')
+ >>> plt.contour(x,y,gr)
+
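+    To map the raw chi-square values instead of probabilities, pass
+    ``chi2_out=True`` (a sketch with the same illustrative names):
+
+    >>> x, y, chi2 = conf_interval2d(mini, result, 'para1', 'para2',
+    ...                              chi2_out=True)
+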
+ """
+ if prob_func is not None:
+ msg = "'prob_func' has no effect and will be removed in version 1.4."
+ raise DeprecationWarning(msg)
+
+ params = result.params
+
+ best_chisqr = result.chisqr
+ redchi = result.redchi
+ org = copy_vals(result.params)
+
+ x = params[x_name]
+ y = params[y_name]
+
+    if limits is None:
+        (x_upper, x_lower) = (x.value + nsigma * x.stderr,
+                              x.value - nsigma * x.stderr)
+        (y_upper, y_lower) = (y.value + nsigma * y.stderr,
+                              y.value - nsigma * y.stderr)
+    elif len(limits) == 2:
+        (x_upper, x_lower) = limits[0]
+        (y_upper, y_lower) = limits[1]
+    else:
+        raise ValueError("limits should have the form "
+                         "((x_upper, x_lower), (y_upper, y_lower))")
+
+ x_points = np.linspace(x_lower, x_upper, nx)
+ y_points = np.linspace(y_lower, y_upper, ny)
+ grid = np.dstack(np.meshgrid(x_points, y_points))
+
+ x.vary, y.vary = False, False
+
+    def calc_chisqr(vals):
+        """Calculate chi-square for one (x, y) pair of parameter values."""
+        save_x = x.value
+        save_y = y.value
+        result.params[x.name].value = vals[0]
+        result.params[y.name].value = vals[1]
+        minimizer.prepare_fit(params=result.params)
+        out = minimizer.leastsq()
+        result.params[x.name].value = save_x
+        result.params[y.name].value = save_y
+        return out.chisqr
+
+ # grid of chi-square
+ out_mat = np.apply_along_axis(calc_chisqr, -1, grid)
+
+ # compute grid of sigma values from chi-square
+ if not chi2_out:
+ chisqr0 = out_mat.min()
+ chisqr0 = min(best_chisqr, chisqr0)
+ out_mat = np.sqrt((out_mat-chisqr0)/redchi)
+
+ x.vary, y.vary = True, True
+ restore_vals(org, result.params)
+ result.chisqr = best_chisqr
+ return x_points, y_points, out_mat
diff --git a/lmfit/jsonutils.py b/lmfit/jsonutils.py
new file mode 100644
index 0000000..7e91afa
--- /dev/null
+++ b/lmfit/jsonutils.py
@@ -0,0 +1,150 @@
+"""JSON utilities."""
+
+from base64 import b64decode, b64encode
+import sys
+import warnings
+
+import numpy as np
+
+try:
+ import dill
+ HAS_DILL = True
+except ImportError:
+ HAS_DILL = False
+
+try:
+ from pandas import DataFrame, Series, read_json
+except ImportError:
+ DataFrame = Series = type(NotImplemented)
+ read_json = None
+
+
+pyvers = f'{sys.version_info.major}.{sys.version_info.minor}'
+
+
+def find_importer(obj):
+ """Find importer of an object."""
+ oname = obj.__name__
+ for modname, module in sys.modules.items():
+ if modname.startswith('__main__'):
+ continue
+ t = getattr(module, oname, None)
+ if t is obj:
+ return modname
+ return None
+
+
+def import_from(modulepath, objectname):
+ """Wrapper for __import__ for nested objects."""
+ path = modulepath.split('.')
+ top = path.pop(0)
+ parent = __import__(top)
+ while len(path) > 0:
+ parent = getattr(parent, path.pop(0))
+ return getattr(parent, objectname)
+
+
+def encode4js(obj):
+ """Prepare an object for JSON encoding.
+
+ It has special handling for many Python types, including:
+ - pandas DataFrames and Series
+ - NumPy ndarrays
+ - complex numbers
+
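+    Examples
+    --------
+    A round-trip sketch through JSON, using ``decode4js`` from this
+    module (the integer dtype is platform dependent, so only the shape
+    is checked):
+
+    >>> import json
+    >>> import numpy as np
+    >>> enc = encode4js(np.arange(4).reshape(2, 2))
+    >>> arr = decode4js(json.loads(json.dumps(enc)))
+    >>> arr.shape
+    (2, 2)
+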
+ """
+ if isinstance(obj, DataFrame):
+ return dict(__class__='PDataFrame', value=obj.to_json())
+ if isinstance(obj, Series):
+ return dict(__class__='PSeries', value=obj.to_json())
+ if isinstance(obj, np.ndarray):
+ if 'complex' in obj.dtype.name:
+ val = [(obj.real).tolist(), (obj.imag).tolist()]
+ elif obj.dtype.name == 'object':
+ val = [encode4js(item) for item in obj]
+ else:
+ val = obj.flatten().tolist()
+ return dict(__class__='NDArray', __shape__=obj.shape,
+ __dtype__=obj.dtype.name, value=val)
+ if isinstance(obj, float):
+ return float(obj)
+ if isinstance(obj, int):
+ return int(obj)
+    if isinstance(obj, str):
+        return obj
+ if isinstance(obj, complex):
+ return dict(__class__='Complex', value=(obj.real, obj.imag))
+ if isinstance(obj, (tuple, list)):
+ ctype = 'List'
+ if isinstance(obj, tuple):
+ ctype = 'Tuple'
+ val = [encode4js(item) for item in obj]
+ return dict(__class__=ctype, value=val)
+ if isinstance(obj, dict):
+ out = dict(__class__='Dict')
+ for key, val in obj.items():
+ out[encode4js(key)] = encode4js(val)
+ return out
+ if callable(obj):
+ value = str(b64encode(dill.dumps(obj)), 'utf-8') if HAS_DILL else None
+ return dict(__class__='Callable', __name__=obj.__name__,
+ pyversion=pyvers, value=value,
+ importer=find_importer(obj))
+ return obj
+
+
+def decode4js(obj):
+ """Return decoded Python object from encoded object."""
+ if not isinstance(obj, dict):
+ return obj
+ out = obj
+ classname = obj.pop('__class__', None)
+ if classname is None:
+ return obj
+
+ if classname == 'Complex':
+ out = obj['value'][0] + 1j*obj['value'][1]
+ elif classname in ('List', 'Tuple'):
+ out = []
+ for item in obj['value']:
+ out.append(decode4js(item))
+ if classname == 'Tuple':
+ out = tuple(out)
+ elif classname == 'NDArray':
+ if obj['__dtype__'].startswith('complex'):
+ re = np.fromiter(obj['value'][0], dtype='double')
+ im = np.fromiter(obj['value'][1], dtype='double')
+ out = re + 1j*im
+ elif obj['__dtype__'].startswith('object'):
+ val = [decode4js(v) for v in obj['value']]
+ out = np.array(val, dtype=obj['__dtype__'])
+ else:
+ out = np.fromiter(obj['value'], dtype=obj['__dtype__'])
+ out.shape = obj['__shape__']
+ elif classname == 'PDataFrame' and read_json is not None:
+ out = read_json(obj['value'])
+ elif classname == 'PSeries' and read_json is not None:
+ out = read_json(obj['value'], typ='series')
+ elif classname == 'Callable':
+ out = obj['__name__']
+ try:
+ out = import_from(obj['importer'], out)
+ unpacked = True
+ except (ImportError, AttributeError):
+ unpacked = False
+ if not unpacked and HAS_DILL:
+ try:
+ out = dill.loads(b64decode(obj['value']))
+ except RuntimeError:
+ msg = "Could not unpack dill-encoded callable `{0}`, saved with Python version {1}"
+ warnings.warn(msg.format(obj['__name__'],
+ obj['pyversion']))
+
+ elif classname in ('Dict', 'dict'):
+ out = {}
+ for key, val in obj.items():
+ out[key] = decode4js(val)
+ return out
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
new file mode 100644
index 0000000..7ecdc42
--- /dev/null
+++ b/lmfit/lineshapes.py
@@ -0,0 +1,519 @@
+"""Basic model lineshapes and distribution functions."""
+
+from numpy import (arctan, copysign, cos, exp, isclose, isnan, log, log1p,
+ maximum, minimum, pi, real, sin, sqrt, where)
+from scipy.special import betaln as betalnfcn
+from scipy.special import erf, erfc
+from scipy.special import gamma as gamfcn
+from scipy.special import loggamma as loggammafcn
+from scipy.special import wofz
+
+log2 = log(2)
+s2pi = sqrt(2*pi)
+s2 = sqrt(2.0)
+# tiny had been numpy.finfo(numpy.float64).eps ~= 2.2e-16.
+# here, we explicitly set it to 1.e-15 == numpy.finfo(numpy.float64).resolution
+tiny = 1.0e-15
+
+functions = ('gaussian', 'gaussian2d', 'lorentzian', 'voigt', 'pvoigt',
+ 'moffat', 'pearson4', 'pearson7', 'breit_wigner', 'damped_oscillator',
+ 'dho', 'logistic', 'lognormal', 'students_t', 'expgaussian',
+ 'doniach', 'skewed_gaussian', 'skewed_voigt',
+ 'thermal_distribution', 'step', 'rectangle', 'exponential',
+ 'powerlaw', 'linear', 'parabolic', 'sine', 'expsine',
+ 'split_lorentzian')
+
+
+def not_zero(value):
+ """Return value with a minimal absolute size of tiny, preserving the sign.
+
+ This is a helper function to prevent ZeroDivisionError's.
+
+ Parameters
+ ----------
+ value : scalar
+ Value to be ensured not to be zero.
+
+ Returns
+ -------
+ scalar
+ Value ensured not to be zero.
+
+ """
+ return float(copysign(max(tiny, abs(value)), value))
+
+
+def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
+ """Return a 1-dimensional Gaussian function.
+
+ gaussian(x, amplitude, center, sigma) =
+ (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 / (2*sigma**2))
+
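+    Examples
+    --------
+    The peak height is ``amplitude/(sigma*s2pi)``, so with the default
+    ``amplitude=1.0`` and ``sigma=1.0`` (a quick numerical check):
+
+    >>> float(round(gaussian(0.0), 4))
+    0.3989
+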
+ """
+ return ((amplitude/(max(tiny, s2pi*sigma)))
+ * exp(-(1.0*x-center)**2 / max(tiny, (2*sigma**2))))
+
+
+def gaussian2d(x, y=0.0, amplitude=1.0, centerx=0.0, centery=0.0, sigmax=1.0,
+ sigmay=1.0):
+ """Return a 2-dimensional Gaussian function.
+
+ gaussian2d(x, y, amplitude, centerx, centery, sigmax, sigmay) =
+ amplitude/(2*pi*sigmax*sigmay) * exp(-(x-centerx)**2/(2*sigmax**2)
+ -(y-centery)**2/(2*sigmay**2))
+
+ """
+ z = amplitude*(gaussian(x, amplitude=1, center=centerx, sigma=sigmax) *
+ gaussian(y, amplitude=1, center=centery, sigma=sigmay))
+ return z
+
+
+def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0):
+ """Return a 1-dimensional Lorentzian function.
+
+ lorentzian(x, amplitude, center, sigma) =
+ (amplitude/(1 + ((1.0*x-center)/sigma)**2)) / (pi*sigma)
+
+ """
+ return ((amplitude/(1 + ((1.0*x-center)/max(tiny, sigma))**2))
+ / max(tiny, (pi*sigma)))
+
+
+def split_lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0, sigma_r=1.0):
+ """Return a 1-dimensional piecewise Lorentzian function.
+
+ Split means that width of the function is different between
+ left and right slope of the function. The peak height is calculated
+ from the condition that the integral from ``-numpy.inf`` to
+ ``numpy.inf`` is equal to `amplitude`.
+
+    split_lorentzian(x, amplitude, center, sigma, sigma_r) =
+        [2*amplitude / (pi*(sigma + sigma_r))] *
+        { sigma**2 * (x<center) / [sigma**2 + (x - center)**2]
+          + sigma_r**2 * (x>=center) / [sigma_r**2 + (x - center)**2] }
+
+ """
+ s = max(tiny, sigma)
+ r = max(tiny, sigma_r)
+ ss = s*s
+ rr = r*r
+ xc2 = (x-center)**2
+ amp = 2*amplitude/(pi*(s+r))
+ return amp*(ss*(x < center)/(ss+xc2) + rr*(x >= center)/(rr+xc2))
+
+
+def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None):
+ """Return a 1-dimensional Voigt function.
+
+ voigt(x, amplitude, center, sigma, gamma) =
+ amplitude*wofz(z).real / (sigma*s2pi)
+
+ For more information, see: https://en.wikipedia.org/wiki/Voigt_profile
+
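+    Examples
+    --------
+    When `gamma` is None (the default), it takes the value of `sigma`
+    (a small sanity check):
+
+    >>> bool(voigt(0.0, sigma=0.5) == voigt(0.0, sigma=0.5, gamma=0.5))
+    True
+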
+ """
+ if gamma is None:
+ gamma = sigma
+ z = (x-center + 1j*gamma) / max(tiny, (sigma*s2))
+ return amplitude*wofz(z).real / max(tiny, (sigma*s2pi))
+
+
+def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
+ """Return a 1-dimensional pseudo-Voigt function.
+
+ pvoigt(x, amplitude, center, sigma, fraction) =
+ amplitude*(1-fraction)*gaussian(x, center, sigma_g) +
+ amplitude*fraction*lorentzian(x, center, sigma)
+
+ where `sigma_g` (the sigma for the Gaussian component) is
+
+ ``sigma_g = sigma / sqrt(2*log(2)) ~= sigma / 1.17741``
+
+ so that the Gaussian and Lorentzian components have the same FWHM of
+ ``2.0*sigma``.
+
+ """
+ sigma_g = sigma / sqrt(2*log2)
+ return ((1-fraction)*gaussian(x, amplitude, center, sigma_g) +
+ fraction*lorentzian(x, amplitude, center, sigma))
+
+
+def moffat(x, amplitude=1, center=0., sigma=1, beta=1.):
+ """Return a 1-dimensional Moffat function.
+
+ moffat(x, amplitude, center, sigma, beta) =
+ amplitude / (((x - center)/sigma)**2 + 1)**beta
+
+ """
+ return amplitude / (((x - center)/max(tiny, sigma))**2 + 1)**beta
+
+
+def pearson4(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0, skew=0.0):
+ """Return a Pearson4 lineshape.
+
+ Using the Wikipedia definition:
+
+    pearson4(x, amplitude, center, sigma, expon, skew) =
+        amplitude*|gamma(expon + I*skew/2) / gamma(expon)|**2 /
+        (sigma*beta(expon-0.5, 0.5)) * (1+arg**2)**(-expon) *
+        exp(-skew * arctan(arg))
+
+    where ``arg = (x-center)/sigma``, `gamma` is the gamma function and
+    `beta` is the beta function.
+
+ For more information, see: https://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_IV_distribution
+
+ """
+ expon = max(tiny, expon)
+ sigma = max(tiny, sigma)
+ arg = (x - center) / sigma
+ logprefactor = 2 * (real(loggammafcn(expon + skew * 0.5j)) - loggammafcn(expon)) - betalnfcn(expon - 0.5, 0.5)
+ return (amplitude / sigma) * exp(logprefactor - expon * log1p(arg * arg) - skew * arctan(arg))
+
+
+def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
+ """Return a Pearson7 lineshape.
+
+ Using the Wikipedia definition:
+
+ pearson7(x, center, sigma, expon) =
+ amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5))
+
+ where ``arg = (x-center)/sigma`` and `beta` is the beta function.
+
+ """
+ expon = max(tiny, expon)
+ arg = (x-center)/max(tiny, sigma)
+ scale = amplitude * gamfcn(expon)/(gamfcn(0.5)*gamfcn(expon-0.5))
+ return scale*(1+arg**2)**(-expon)/max(tiny, sigma)
+
+
+def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0):
+ """Return a Breit-Wigner-Fano lineshape.
+
+ breit_wigner(x, amplitude, center, sigma, q) =
+ amplitude*(q*sigma/2 + x - center)**2 /
+ ( (sigma/2)**2 + (x - center)**2 )
+
+ """
+ gam = sigma/2.0
+ return amplitude*(q*gam + x - center)**2 / (gam*gam + (x-center)**2)
+
+
+def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1):
+ """Return the amplitude for a damped harmonic oscillator.
+
+ damped_oscillator(x, amplitude, center, sigma) =
+ amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
+
+ """
+ center = max(tiny, abs(center))
+ return amplitude/sqrt((1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2)
+
+
+def dho(x, amplitude=1., center=1., sigma=1., gamma=1.0):
+ """Return a Damped Harmonic Oscillator.
+
+ Similar to the version from PAN:
+
+    dho(x, amplitude, center, sigma, gamma) =
+        amplitude*sigma/pi * (lm - lp) / (1.0 - exp(-x/gamma))
+
+ where
+ ``lm(x, center, sigma) = 1.0 / ((x-center)**2 + sigma**2)``
+ ``lp(x, center, sigma) = 1.0 / ((x+center)**2 + sigma**2)``
+
+ """
+ factor = amplitude * sigma / pi
+ bose = (1.0 - exp(-x/max(tiny, gamma)))
+ if isinstance(bose, (int, float)):
+ bose = not_zero(bose)
+ else:
+ bose[where(isnan(bose))] = tiny
+ bose[where(abs(bose) <= tiny)] = tiny
+
+ lm = 1.0/((x-center)**2 + sigma**2)
+ lp = 1.0/((x+center)**2 + sigma**2)
+ return factor * where(isclose(x, 0.0),
+ 4*gamma*center/(center**2+sigma**2)**2,
+ (lm - lp)/bose)
+
+
+def logistic(x, amplitude=1., center=0., sigma=1.):
+ """Return a Logistic lineshape (yet another sigmoidal curve).
+
+ logistic(x, amplitude, center, sigma) =
+ amplitude*(1. - 1. / (1 + exp((x-center)/sigma)))
+
+ """
+ return amplitude*(1. - 1./(1. + exp((x-center)/max(tiny, sigma))))
+
+
+def lognormal(x, amplitude=1.0, center=0., sigma=1):
+ """Return a log-normal function.
+
+ lognormal(x, amplitude, center, sigma) =
+ (amplitude/(x*sigma*s2pi)) * exp(-(ln(x) - center)**2/ (2* sigma**2))
+
+ """
+    if isinstance(x, (int, float)):
+        x = max(tiny, x)
+    else:
+        # clip small values without modifying the caller's array in-place
+        x = where(x <= tiny, tiny, x)
+ return ((amplitude/(x*max(tiny, sigma*s2pi))) *
+ exp(-(log(x)-center)**2 / max(tiny, (2*sigma**2))))
+
+
+def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
+ """Return Student's t-distribution.
+
+ students_t(x, amplitude, center, sigma) =
+
+ gamma((sigma+1)/2)
+ ---------------------------- * (1 + (x-center)**2/sigma)^(-(sigma+1)/2)
+ sqrt(sigma*pi)gamma(sigma/2)
+
+ """
+ s1 = (sigma+1)/2.0
+ denom = max(tiny, (sqrt(sigma*pi)*gamfcn(max(tiny, sigma/2))))
+ return amplitude*(1 + (x-center)**2/max(tiny, sigma))**(-s1) * gamfcn(s1) / denom
+
+
+def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
+ """Return an exponentially modified Gaussian.
+
+ expgaussian(x, amplitude, center, sigma, gamma) =
+ amplitude * (gamma/2) *
+ exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
+ erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
+
+ """
+ gss = gamma*sigma*sigma
+ arg1 = gamma*(center + gss/2.0 - x)
+ arg2 = (center + gss - x)/max(tiny, (s2*sigma))
+ return amplitude*(gamma/2) * exp(arg1) * erfc(arg2)
+
+
+def doniach(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
+ """Return a Doniach Sunjic asymmetric lineshape.
+
+    doniach(x, amplitude, center, sigma, gamma) =
+        amplitude * cos(pi*gamma/2 + (1-gamma)*arctan((x-center)/sigma)) /
+        (sigma**2 + (x-center)**2)**[(1-gamma)/2]
+
+ For example used in photo-emission; see
+ http://www.casaxps.com/help_manual/line_shapes.htm for more information.
+
+ """
+ arg = (x-center)/max(tiny, sigma)
+ gm1 = (1.0 - gamma)
+ scale = amplitude/max(tiny, (sigma**gm1))
+ return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
+
+
+def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
+ """Return a Gaussian lineshape, skewed with error function.
+
+ Equal to: gaussian(x, center, sigma)*(1+erf(beta*(x-center)))
+
+ where ``beta = gamma/(sigma*sqrt(2))``
+
+ with ``gamma < 0``: tail to low value of centroid
+ ``gamma > 0``: tail to high value of centroid
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Skew_normal_distribution
+
+ """
+ asym = 1 + erf(gamma*(x-center)/max(tiny, (s2*sigma)))
+ return asym * gaussian(x, amplitude, center, sigma)
+
+
+def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
+ """Return a Voigt lineshape, skewed with error function.
+
+ Equal to: voigt(x, center, sigma, gamma)*(1+erf(beta*(x-center)))
+
+ where ``beta = skew/(sigma*sqrt(2))``
+
+ with ``skew < 0``: tail to low value of centroid
+ ``skew > 0``: tail to high value of centroid
+
+ Useful, for example, for ad-hoc Compton scatter profile. For more
+ information, see: https://en.wikipedia.org/wiki/Skew_normal_distribution
+
+ """
+ beta = skew/max(tiny, (s2*sigma))
+ asym = 1 + erf(beta*(x-center))
+ return asym * voigt(x, amplitude, center, sigma, gamma=gamma)
+
+
+def sine(x, amplitude=1.0, frequency=1.0, shift=0.0):
+ """Return a sinusoidal function.
+
+ sine(x, amplitude, frequency, shift) =
+ amplitude * sin(x*frequency + shift)
+
+ """
+ return amplitude*sin(x*frequency + shift)
+
+
+def expsine(x, amplitude=1.0, frequency=1.0, shift=0.0, decay=0.0):
+ """Return an exponentially decaying sinusoidal function.
+
+ expsine(x, amplitude, frequency, shift, decay) =
+ amplitude * sin(x*frequency + shift) * exp(-x*decay)
+
+ """
+ return amplitude*sin(x*frequency + shift) * exp(-x*decay)
+
+
+def thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0, form='bose'):
+ """Return a thermal distribution function.
+
+ The variable `form` defines the kind of distribution:
+
+ - ``form='bose'`` (default) is the Bose-Einstein distribution:
+
+ thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0) =
+ 1/(amplitude*exp((x - center)/kt) - 1)
+
+ - ``form='maxwell'`` is the Maxwell-Boltzmann distribution:
+ thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0) =
+ 1/(amplitude*exp((x - center)/kt))
+
+ - ``form='fermi'`` is the Fermi-Dirac distribution:
+ thermal_distribution(x, amplitude=1.0, center=0.0, kt=1.0) =
+ 1/(amplitude*exp((x - center)/kt) + 1)
+
+ Notes
+ -----
+ - `kt` should be defined in the same units as `x`. The Boltzmann
+ constant is ``kB = 8.617e-5 eV/K``.
+ - set ``kt<0`` to implement the energy loss convention common in
+ scattering research.
+
+ For more information, see:
+ http://hyperphysics.phy-astr.gsu.edu/hbase/quantum/disfcn.html
+
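+    Examples
+    --------
+    For the Fermi-Dirac form, the occupancy at ``x == center`` is 1/2
+    (a small sanity check):
+
+    >>> float(thermal_distribution(0.0, form='fermi'))
+    0.5
+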
+ """
+ form = form.lower()
+ if form.startswith('bose'):
+ offset = -1
+ elif form.startswith('maxwell'):
+ offset = 0
+ elif form.startswith('fermi'):
+ offset = 1
+ else:
+ msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
+ "of 'maxwell', 'fermi', or 'bose'.")
+ raise ValueError(msg)
+
+ return real(1/(amplitude*exp((x - center)/not_zero(kt)) + offset + tiny*1j))
+
+
+def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
+ """Return a step function.
+
+ Starts at 0.0, ends at `amplitude`, with half-max at `center`, and
+ rising with `form`:
+
+ - `'linear'` (default) = amplitude * min(1, max(0, arg + 0.5))
+ - `'atan'`, `'arctan'` = amplitude * (0.5 + atan(arg)/pi)
+ - `'erf'` = amplitude * (1 + erf(arg))/2.0
+ - `'logistic'` = amplitude * [1 - 1/(1 + exp(arg))]
+
+ where ``arg = (x - center)/sigma``.
+
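+    Examples
+    --------
+    Every form passes through half of `amplitude` at ``x == center``
+    (a small sanity check):
+
+    >>> float(step(1.0, amplitude=2.0, center=1.0, form='erf'))
+    1.0
+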
+ """
+ out = (x - center)/max(tiny, sigma)
+
+ if form == 'erf':
+ out = 0.5*(1 + erf(out))
+ elif form == 'logistic':
+ out = 1. - 1./(1. + exp(out))
+ elif form in ('atan', 'arctan'):
+ out = 0.5 + arctan(out)/pi
+ elif form == 'linear':
+ out = minimum(1, maximum(0, out + 0.5))
+ else:
+ msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
+ "of 'erf', 'logistic', 'atan', 'arctan', or 'linear'.")
+ raise ValueError(msg)
+
+ return amplitude*out
+
+
+def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
+ center2=1.0, sigma2=1.0, form='linear'):
+ """Return a rectangle function: step up, step down.
+
+ Starts at 0.0, rises to `amplitude` (at `center1` with width `sigma1`),
+ then drops to 0.0 (at `center2` with width `sigma2`) with `form`:
+
+    - `'linear'` (default) = ramp_up + ramp_down
+    - `'atan'`, `'arctan'` = amplitude*(atan(arg1) + atan(arg2))/pi
+    - `'erf'` = amplitude*(erf(arg1) + erf(arg2))/2.
+    - `'logistic'` = amplitude*[1 - 1/(1 + exp(arg1)) - 1/(1+exp(arg2))]
+
+ where ``arg1 = (x - center1)/sigma1`` and
+ ``arg2 = -(x - center2)/sigma2``.
+
+ See Also
+ --------
+ step
+
+ """
+ arg1 = (x - center1)/max(tiny, sigma1)
+ arg2 = (center2 - x)/max(tiny, sigma2)
+
+ if form == 'erf':
+ out = 0.5*(erf(arg1) + erf(arg2))
+ elif form == 'logistic':
+ out = 1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2))
+ elif form in ('atan', 'arctan'):
+ out = (arctan(arg1) + arctan(arg2))/pi
+ elif form == 'linear':
+ out = 0.5*(minimum(1, maximum(-1, arg1)) + minimum(1, maximum(-1, arg2)))
+ else:
+ msg = (f"Invalid value ('{form}') for argument 'form'; should be one "
+ "of 'erf', 'logistic', 'atan', 'arctan', or 'linear'.")
+ raise ValueError(msg)
+
+ return amplitude*out
+
+
+def exponential(x, amplitude=1, decay=1):
+ """Return an exponential function.
+
+ exponential(x, amplitude, decay) = amplitude * exp(-x/decay)
+
+ """
+ decay = not_zero(decay)
+ return amplitude * exp(-x/decay)
+
+
+def powerlaw(x, amplitude=1, exponent=1.0):
+ """Return the powerlaw function.
+
+ powerlaw(x, amplitude, exponent) = amplitude * x**exponent
+
+ """
+ return amplitude * x**exponent
+
+
+def linear(x, slope=1.0, intercept=0.0):
+ """Return a linear function.
+
+ linear(x, slope, intercept) = slope * x + intercept
+
+ """
+ return slope * x + intercept
+
+
+def parabolic(x, a=0.0, b=0.0, c=0.0):
+ """Return a parabolic function.
+
+ parabolic(x, a, b, c) = a * x**2 + b * x + c
+
+ """
+ return a * x**2 + b * x + c
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
new file mode 100644
index 0000000..be6e849
--- /dev/null
+++ b/lmfit/minimizer.py
@@ -0,0 +1,2610 @@
+"""Implementation of the Minimizer class and fitting algorithms.
+
+Simple minimizer is a wrapper around scipy.optimize.leastsq, allowing a
+user to build a fitting model as a function of general purpose fit
+Parameters that can be fixed or varied, bounded, and written as a simple
+expression of other fit Parameters.
+
+The user sets up a model in terms of an instance of Parameters and writes
+a function-to-be-minimized (residual function) in terms of these
+Parameters.
+
+Original copyright:
+ Copyright (c) 2011 Matthew Newville, The University of Chicago
+
+See LICENSE for more complete authorship information and license.
+
+"""
+from collections import namedtuple
+from copy import deepcopy
+import inspect
+import multiprocessing
+import numbers
+import warnings
+
+import numpy as np
+from scipy import __version__ as scipy_version
+from scipy.linalg import LinAlgError, inv
+from scipy.optimize import basinhopping as scipy_basinhopping
+from scipy.optimize import brute as scipy_brute
+from scipy.optimize import differential_evolution
+from scipy.optimize import dual_annealing as scipy_dual_annealing
+from scipy.optimize import least_squares
+from scipy.optimize import leastsq as scipy_leastsq
+from scipy.optimize import minimize as scipy_minimize
+from scipy.optimize import shgo as scipy_shgo
+from scipy.sparse import issparse
+from scipy.sparse.linalg import LinearOperator
+from scipy.stats import cauchy as cauchy_dist
+from scipy.stats import norm as norm_dist
+import uncertainties
+
+from ._ampgo import ampgo
+from .parameter import Parameter, Parameters
+from .printfuncs import fitreport_html_table
+
+# check for EMCEE
+try:
+ import emcee
+ from emcee.autocorr import AutocorrError
+ HAS_EMCEE = int(emcee.__version__[0]) >= 3
+except ImportError:
+ HAS_EMCEE = False
+
+# check for pandas
+try:
+ import pandas as pd
+ from pandas import isnull
+ HAS_PANDAS = True
+except ImportError:
+ HAS_PANDAS = False
+ isnull = np.isnan
+
+# check for numdifftools
+try:
+ import numdifftools as ndt
+ HAS_NUMDIFFTOOLS = True
+except ImportError:
+ HAS_NUMDIFFTOOLS = False
+
+# check for dill
+try:
+ import dill # noqa: F401
+ HAS_DILL = True
+except ImportError:
+ HAS_DILL = False
+
+
+# define the namedtuple here so pickle will work with the MinimizerResult
+Candidate = namedtuple('Candidate', ['params', 'score'])
+
+maxeval_warning = "ignoring `{}` argument to `{}()`. Use `max_nfev` instead."
+
+
+def thisfuncname():
+ """Return the name of calling function."""
+ try:
+ return inspect.stack()[1].function
+ except AttributeError:
+ return inspect.stack()[1][3]
+
+
+def asteval_with_uncertainties(*vals, **kwargs):
+ """Calculate object value, given values for variables.
+
+ This is used by the uncertainties package to calculate the
+ uncertainty in an object even with a complicated expression.
+
+ """
+ _obj = kwargs.get('_obj', None)
+ _pars = kwargs.get('_pars', None)
+ _names = kwargs.get('_names', None)
+    _asteval = getattr(_pars, '_asteval', None)
+ if (_obj is None or _pars is None or _names is None or
+ _asteval is None or _obj._expr_ast is None):
+ return 0
+ for val, name in zip(vals, _names):
+ _asteval.symtable[name] = val
+
+ # re-evaluate all constraint parameters to
+ # force the propagation of uncertainties
+ [p._getval() for p in _pars.values()]
+ return _asteval.eval(_obj._expr_ast)
+
+
+wrap_ueval = uncertainties.wrap(asteval_with_uncertainties)
+
+
+def eval_stderr(obj, uvars, _names, _pars):
+ """Evaluate uncertainty and set ``.stderr`` for a parameter `obj`.
+
+ Given the uncertain values `uvars` (list of `uncertainties.ufloats`),
+ a list of parameter names that matches `uvars`, and a dictionary of
+ parameter objects, keyed by name.
+
+ This uses the uncertainties package wrapped function to evaluate the
+ uncertainty for an arbitrary expression (in ``obj._expr_ast``) of
+ parameters.
+
+ """
+ if not isinstance(obj, Parameter) or getattr(obj, '_expr_ast', None) is None:
+ return
+ uval = wrap_ueval(*uvars, _obj=obj, _names=_names, _pars=_pars)
+ try:
+ obj.stderr = uval.std_dev
+ except Exception:
+ obj.stderr = 0
+
+
+class MinimizerException(Exception):
+ """General Purpose Exception."""
+
+ def __init__(self, msg):
+ Exception.__init__(self)
+ self.msg = msg
+
+ def __str__(self):
+ """string"""
+ return f"{self.msg}"
+
+
+class AbortFitException(MinimizerException):
+ """Raised when a fit is aborted by the user."""
+
+
+SCALAR_METHODS = {'nelder': 'Nelder-Mead',
+ 'powell': 'Powell',
+ 'cg': 'CG',
+ 'bfgs': 'BFGS',
+ 'newton': 'Newton-CG',
+ 'lbfgsb': 'L-BFGS-B',
+ 'l-bfgsb': 'L-BFGS-B',
+ 'tnc': 'TNC',
+ 'cobyla': 'COBYLA',
+ 'slsqp': 'SLSQP',
+ 'dogleg': 'dogleg',
+ 'trust-ncg': 'trust-ncg',
+ 'differential_evolution': 'differential_evolution',
+ 'trust-constr': 'trust-constr',
+ 'trust-exact': 'trust-exact',
+ 'trust-krylov': 'trust-krylov'}
+
+
+def reduce_chisquare(r):
+ """Reduce residual array to scalar (chi-square).
+
+ Calculate the chi-square value from residual array `r` as
+ ``(r*r).sum()``.
+
+ Parameters
+ ----------
+ r : numpy.ndarray
+ Residual array.
+
+ Returns
+ -------
+ float
+ Chi-square calculated from the residual array.
+
+ """
+ return (r*r).sum()
+
+
+def reduce_negentropy(r):
+ """Reduce residual array to scalar (negentropy).
+
+ Reduce residual array `r` to scalar using negative entropy and the
+ normal (Gaussian) probability distribution of `r` as pdf:
+
+ ``(norm.pdf(r)*norm.logpdf(r)).sum()``
+
+    since ``pdf(r) = exp(-r*r/2)/sqrt(2*pi)``, this is
+    ``-((r*r/2 + log(sqrt(2*pi))) * exp(-r*r/2)/sqrt(2*pi)).sum()``.
+
+ Parameters
+ ----------
+ r : numpy.ndarray
+ Residual array.
+
+ Returns
+ -------
+ float
+ Negative entropy value calculated from the residual array.
+
+ """
+ return (norm_dist.pdf(r)*norm_dist.logpdf(r)).sum()
+
+
+def reduce_cauchylogpdf(r):
+ """Reduce residual array to scalar (cauchylogpdf).
+
+ Reduce residual array `r` to scalar using negative log-likelihood and
+ a Cauchy (Lorentzian) distribution of `r`:
+
+ ``-scipy.stats.cauchy.logpdf(r)``
+
+ where the Cauchy pdf = ``1/(pi*(1+r*r))``.
+
+ This gives better suppression of outliers compared to the default
+ sum-of-squares.
+
+ Parameters
+ ----------
+ r : numpy.ndarray
+ Residual array.
+
+ Returns
+ -------
+ float
+        Summed negative log-likelihood (Cauchy) calculated from the
+        residual array.
+
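+    Examples
+    --------
+    Compared with plain chi-square, a single large outlier contributes
+    far less to this loss (a sketch; the numbers are illustrative):
+
+    >>> import numpy as np
+    >>> r = np.array([0.1, -0.2, 25.0])
+    >>> bool(reduce_cauchylogpdf(r) < reduce_chisquare(r))
+    True
+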
+ """
+ return -cauchy_dist.logpdf(r).sum()
+
+
+class MinimizerResult:
+ r"""The results of a minimization.
+
+ Minimization results include data such as status and error messages,
+ fit statistics, and the updated (i.e., best-fit) parameters themselves
+ in the :attr:`params` attribute.
+
+ The list of (possible) `MinimizerResult` attributes is given below:
+
+ Attributes
+ ----------
+ params : Parameters
+ The best-fit parameters resulting from the fit.
+ status : int
+ Termination status of the optimizer. Its value depends on the
+ underlying solver. Refer to `message` for details.
+ var_names : list
+ Ordered list of variable parameter names used in optimization, and
+ useful for understanding the values in :attr:`init_vals` and
+ :attr:`covar`.
+ covar : numpy.ndarray
+ Covariance matrix from minimization, with rows and columns
+ corresponding to :attr:`var_names`.
+ init_vals : list
+ List of initial values for variable parameters using
+ :attr:`var_names`.
+ init_values : dict
+ Dictionary of initial values for variable parameters.
+ nfev : int
+ Number of function evaluations.
+ success : bool
+ True if the fit succeeded, otherwise False.
+ errorbars : bool
+ True if uncertainties were estimated, otherwise False.
+ message : str
+ Message about fit success.
+ call_kws : dict
+ Keyword arguments sent to underlying solver.
+ ier : int
+ Integer error value from :scipydoc:`optimize.leastsq` (`'leastsq'`
+ method only).
+ lmdif_message : str
+ Message from :scipydoc:`optimize.leastsq` (`'leastsq'` method only).
+ nvarys : int
+ Number of variables in fit: :math:`N_{\rm varys}`.
+ ndata : int
+ Number of data points: :math:`N`.
+ nfree : int
+ Degrees of freedom in fit: :math:`N - N_{\rm varys}`.
+ residual : numpy.ndarray
+ Residual array :math:`{\rm Resid_i}`. Return value of the objective
+ function when using the best-fit values of the parameters.
+ chisqr : float
+ Chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`.
+ redchi : float
+ Reduced chi-square:
+ :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}`.
+ aic : float
+ Akaike Information Criterion statistic:
+ :math:`N \ln(\chi^2/N) + 2 N_{\rm varys}`.
+ bic : float
+ Bayesian Information Criterion statistic:
+ :math:`N \ln(\chi^2/N) + \ln(N) N_{\rm varys}`.
+ flatchain : pandas.DataFrame
+ A flatchain view of the sampling chain from the `emcee` method.
+
+ Methods
+ -------
+ show_candidates
+ :meth:`pretty_print` representation of candidates from the `brute`
+ fitting method.
+
+ """
+
+ def __init__(self, **kws):
+ for key, val in kws.items():
+ setattr(self, key, val)
+
+ @property
+ def flatchain(self):
+ """Show flatchain view of the sampling chain from `emcee` method."""
+ if not hasattr(self, 'chain'):
+ return None
+
+ if not HAS_PANDAS:
+ raise NotImplementedError('Please install Pandas to see the '
+ 'flattened chain')
+ if len(self.chain.shape) == 4:
+ return pd.DataFrame(self.chain[0, ...].reshape((-1, self.nvarys)),
+ columns=self.var_names)
+ elif len(self.chain.shape) == 3:
+ return pd.DataFrame(self.chain.reshape((-1, self.nvarys)),
+ columns=self.var_names)
+
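+ # Editorial sketch (hypothetical names ``mini`` and ``'amplitude'``):
+ # after an `emcee` run, per-parameter samples come from the flattened
+ # chain:
+ #
+ #     >>> res = mini.emcee(steps=500, burn=100)
+ #     >>> res.flatchain['amplitude'].median()
+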
+ def show_candidates(self, candidate_nmb='all'):
+ """Show pretty_print() representation of candidates.
+
+ Showing all stored candidates (default) or the specified
+ candidate-# obtained from the `brute` fitting method.
+
+ Parameters
+ ----------
+ candidate_nmb : int or 'all', optional
+ The candidate-number to show using the :meth:`pretty_print`
+ method (default is 'all').
+
+ """
+ if hasattr(self, 'candidates'):
+ if candidate_nmb == 'all':
+ for i, candidate in enumerate(self.candidates):
+ print(f"\nCandidate #{i + 1}, chisqr = {candidate.score:.3f}")
+ candidate.params.pretty_print()
+ elif (candidate_nmb < 1 or candidate_nmb > len(self.candidates)):
+ raise ValueError(f"'candidate_nmb' should be between 1 and {len(self.candidates)}.")
+ else:
+ candidate = self.candidates[candidate_nmb-1]
+ print(f"\nCandidate #{candidate_nmb}, chisqr = {candidate.score:.3f}")
+ candidate.params.pretty_print()
+
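+ # Editorial sketch: after ``result = mini.minimize(method='brute')``,
+ # ``result.show_candidates(1)`` prints the single best grid point and
+ # ``result.show_candidates()`` prints all stored candidates.
+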
+ def _calculate_statistics(self):
+ """Calculate the fitting statistics."""
+ self.nvarys = len(self.init_vals)
+ if not hasattr(self, 'residual'):
+ self.residual = -np.inf
+ if isinstance(self.residual, np.ndarray):
+ self.chisqr = (self.residual**2).sum()
+ self.ndata = len(self.residual)
+ self.nfree = self.ndata - self.nvarys
+ else:
+ self.chisqr = self.residual
+ self.ndata = 1
+ self.nfree = 1
+ self.redchi = self.chisqr / max(1, self.nfree)
+ # this is -2*loglikelihood
+ self.chisqr = max(self.chisqr, 1.e-250*self.ndata)
+ _neg2_log_likel = self.ndata * np.log(self.chisqr / self.ndata)
+ self.aic = _neg2_log_likel + 2 * self.nvarys
+ self.bic = _neg2_log_likel + np.log(self.ndata) * self.nvarys
+
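+ # Worked example of the statistics above (editorial): with N = 100 data
+ # points, nvarys = 3 and chisqr = 120.0,
+ #
+ #     redchi = 120.0 / (100 - 3)              ~ 1.237
+ #     aic    = 100*log(120/100) + 2*3         ~ 24.23
+ #     bic    = 100*log(120/100) + log(100)*3  ~ 32.05
+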
+ def _repr_html_(self, show_correl=True, min_correl=0.1):
+ """Return a HTML representation of parameters data."""
+ return fitreport_html_table(self, show_correl=show_correl,
+ min_correl=min_correl)
+
+
+class Minimizer:
+ """A general minimizer for curve fitting and optimization."""
+
+ _err_nonparam = ("params must be a minimizer.Parameters() instance or"
+ " list of Parameters()")
+ _err_max_evals = ("Too many function calls (max set to {})! Use:"
+ " minimize(func, params, ..., max_nfev=NNN)"
+ " to increase this maximum.")
+
+ def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
+ iter_cb=None, scale_covar=True, nan_policy='raise',
+ reduce_fcn=None, calc_covar=True, max_nfev=None, **kws):
+ """
+ Parameters
+ ----------
+ userfcn : callable
+ Objective function that returns the residual (difference
+ between model and data) to be minimized in a least-squares
+ sense. This function must have the signature::
+
+ userfcn(params, *fcn_args, **fcn_kws)
+
+ params : Parameters
+ Contains the Parameters for the model.
+ fcn_args : tuple, optional
+ Positional arguments to pass to `userfcn`.
+ fcn_kws : dict, optional
+ Keyword arguments to pass to `userfcn`.
+ iter_cb : callable, optional
+ Function to be called at each fit iteration. This function
+ should have the signature::
+
+ iter_cb(params, iter, resid, *fcn_args, **fcn_kws)
+
+ where `params` will have the current parameter values, `iter`
+ the iteration number, `resid` the current residual array, and
+ `*fcn_args` and `**fcn_kws` are passed to the objective
+ function.
+ scale_covar : bool, optional
+ Whether to automatically scale the covariance matrix (default
+ is True).
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ Specifies action if `userfcn` (or a Jacobian) returns NaN
+ values. One of:
+
+ - `'raise'` : a `ValueError` is raised (default)
+ - `'propagate'` : the values returned from `userfcn` are un-altered
+ - `'omit'` : non-finite values are filtered
+
+ reduce_fcn : str or callable, optional
+ Function to convert a residual array to a scalar value for the
+ scalar minimizers. Optional values are (where `r` is the
+ residual array):
+
+ - None : sum-of-squares of residual (default)
+
+ = (r*r).sum()
+
+ - `'negentropy'` : neg entropy, using normal distribution
+
+ = (rho*log(rho)).sum(), where rho = exp(-r*r/2)/sqrt(2*pi)
+
+ - `'neglogcauchy'` : neg log likelihood, using Cauchy distribution
+
+ = -log(1/(pi*(1+r*r))).sum()
+
+ - callable : must take one argument (`r`) and return a float.
+
+ calc_covar : bool, optional
+ Whether to calculate the covariance matrix (default is True)
+ for solvers other than ``'leastsq'`` and ``'least_squares'``.
+ Requires the ``numdifftools`` package to be installed.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
+ **kws : dict, optional
+ Options to pass to the minimizer being used.
+
+ Notes
+ -----
+ The objective function should return the value to be minimized.
+ For the Levenberg-Marquardt algorithm from :meth:`leastsq` or
+ :meth:`least_squares`, this returned value must be an array, with
+ a length greater than or equal to the number of fitting variables
+ in the model. For the other methods, the return value can either be
+ a scalar or an array. If an array is returned, the sum-of-squares
+ of the array will be sent to the underlying fitting method,
+ effectively doing a least-squares optimization of the return
+ values. If the objective function returns non-finite values then a
+ `ValueError` will be raised because the underlying solvers cannot
+ deal with them.
+
+ A common use for the `fcn_args` and `fcn_kws` would be to pass in
+ other data needed to calculate the residual, including such things
+ as the data array, dependent variable, uncertainties in the data,
+ and other data structures for the model calculation.
+
+ """
+ self.userfcn = userfcn
+ self.userargs = fcn_args
+ if self.userargs is None:
+ self.userargs = []
+
+ self.userkws = fcn_kws
+ if self.userkws is None:
+ self.userkws = {}
+ for maxnfev_alias in ('maxfev', 'maxiter'):
+ if maxnfev_alias in kws:
+ warnings.warn(maxeval_warning.format(maxnfev_alias, 'Minimizer'),
+ RuntimeWarning)
+ kws.pop(maxnfev_alias)
+
+ self.kws = kws
+ self.iter_cb = iter_cb
+ self.calc_covar = calc_covar
+ self.scale_covar = scale_covar
+ self.max_nfev = max_nfev
+ self.nfev = 0
+ self.nfree = 0
+ self.ndata = 0
+ self.ier = 0
+ self._abort = False
+ self.success = True
+ self.errorbars = False
+ self.message = None
+ self.lmdif_message = None
+ self.chisqr = None
+ self.redchi = None
+ self.covar = None
+ self.residual = None
+ self.reduce_fcn = reduce_fcn
+ self.params = params
+ self.col_deriv = False
+ self.jacfcn = None
+ self.nan_policy = nan_policy
+
+ def set_max_nfev(self, max_nfev=None, default_value=100000):
+ """Set maximum number of function evaluations.
+
+ If `max_nfev` is None, use the provided `default_value`.
+
+ >>> self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
+
+ """
+ if max_nfev is not None:
+ self.max_nfev = max_nfev
+ if self.max_nfev in (None, np.inf):
+ self.max_nfev = default_value
+
+ @property
+ def values(self):
+ """Return Parameter values in a simple dictionary."""
+ return {name: p.value for name, p in self.result.params.items()}
+
+ def __residual(self, fvars, apply_bounds_transformation=True):
+ """Residual function used for least-squares fit.
+
+ With the new, candidate values of `fvars` (the fitting variables),
+ this evaluates all parameters, including setting bounds and
+ evaluating constraints, and then passes those to the user-supplied
+ function to calculate the residual.
+
+ Parameters
+ ----------
+ fvars : numpy.ndarray
+ Array of new parameter values suggested by the minimizer.
+ apply_bounds_transformation : bool, optional
+ Whether to apply lmfit's parameter transformation to constrain
+ parameters (default is True). This is needed for solvers
+ without built-in support for bounds.
+
+ Returns
+ -------
+ numpy.ndarray
+ The evaluated function values for given `fvars`.
+
+ """
+ params = self.result.params
+
+ if fvars.shape == ():
+ fvars = fvars.reshape((1,))
+
+ for name, val in zip(self.result.var_names, fvars):
+ if apply_bounds_transformation:
+ params[name].value = params[name].from_internal(val)
+ else:
+ params[name].value = val
+ params.update_constraints()
+
+ if self.max_nfev is None:
+ self.max_nfev = 200000*(len(fvars)+1)
+
+ self.result.nfev += 1
+ self.result.last_internal_values = fvars
+ if self.result.nfev > self.max_nfev:
+ self.result.aborted = True
+ m = f"number of function evaluations > {self.max_nfev}"
+ self.result.message = f"Fit aborted: {m}"
+ self.result.success = False
+ raise AbortFitException(f"fit aborted: too many function evaluations {self.max_nfev}")
+
+ out = self.userfcn(params, *self.userargs, **self.userkws)
+
+ if callable(self.iter_cb):
+ abort = self.iter_cb(params, self.result.nfev, out,
+ *self.userargs, **self.userkws)
+ self._abort = self._abort or abort
+
+ if self._abort:
+ self.result.residual = out
+ self.result.aborted = True
+ self.result.message = "Fit aborted by user callback. Could not estimate error-bars."
+ self.result.success = False
+ raise AbortFitException("fit aborted by user.")
+ else:
+ return _nan_policy(np.asfarray(out).ravel(),
+ nan_policy=self.nan_policy)
+
+ def __jacobian(self, fvars):
+ """Return analytical jacobian to be used with Levenberg-Marquardt.
+
+ modified 02-01-2012 by Glenn Jones, Aberystwyth University
+ modified 06-29-2015 by M Newville to apply gradient scaling for
+ bounded variables (thanks to JJ Helmus, N Mayorov)
+
+ """
+ pars = self.result.params
+ grad_scale = np.ones_like(fvars)
+ for ivar, name in enumerate(self.result.var_names):
+ val = fvars[ivar]
+ pars[name].value = pars[name].from_internal(val)
+ grad_scale[ivar] = pars[name].scale_gradient(val)
+
+ pars.update_constraints()
+
+ # compute the jacobian for "internal" unbounded variables,
+ # then rescale for bounded "external" variables.
+ jac = self.jacfcn(pars, *self.userargs, **self.userkws)
+ jac = _nan_policy(jac, nan_policy=self.nan_policy)
+
+ if self.col_deriv:
+ jac = (jac.transpose()*grad_scale).transpose()
+ else:
+ jac *= grad_scale
+ return jac
+
+ def penalty(self, fvars):
+ """Penalty function for scalar minimizers.
+
+ Parameters
+ ----------
+ fvars : numpy.ndarray
+ Array of values for the variable parameters.
+
+ Returns
+ -------
+ r : float
+ The evaluated user-supplied objective function.
+
+ If the objective function is an array of size greater than 1,
+ use the scalar returned by `self.reduce_fcn`. This defaults to
+ sum-of-squares, but can be replaced by other options.
+
+ """
+ if self.result.method in ['brute', 'shgo', 'dual_annealing']:
+ apply_bounds_transformation = False
+ else:
+ apply_bounds_transformation = True
+
+ r = self.__residual(fvars, apply_bounds_transformation)
+ if isinstance(r, np.ndarray) and r.size > 1:
+ r = self.reduce_fcn(r)
+ if isinstance(r, np.ndarray) and r.size > 1:
+ r = r.sum()
+ return r
+
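+ # Editorial sketch (hypothetical ``residual`` and ``params``): choosing a
+ # robust reduction for a scalar method:
+ #
+ #     >>> mini = Minimizer(residual, params, reduce_fcn='neglogcauchy')
+ #     >>> out = mini.scalar_minimize(method='Nelder-Mead')
+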
+ def prepare_fit(self, params=None):
+ """Prepare parameters for fitting.
+
+ Prepares and initializes model and Parameters for subsequent
+ fitting. This routine prepares the conversion of
+ :class:`Parameters` into fit variables, organizes parameter bounds,
+ and parses, "compiles" and checks constrain expressions. The method
+ also creates and returns a new instance of a
+ :class:`MinimizerResult` object that contains the copy of the
+ Parameters that will actually be varied in the fit.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Contains the Parameters for the model; if None, then the
+ Parameters used to initialize the Minimizer object are used.
+
+ Returns
+ -------
+ MinimizerResult
+
+ Notes
+ -----
+ This method is called directly by the fitting methods, and it is
+ generally not necessary to call this function explicitly.
+
+
+ .. versionchanged:: 0.9.0
+ Return value changed to :class:`MinimizerResult`.
+
+ """
+ self._abort = False
+
+ self.result = MinimizerResult()
+ result = self.result
+ if params is not None:
+ self.params = params
+ if isinstance(self.params, Parameters):
+ result.params = deepcopy(self.params)
+ elif isinstance(self.params, (list, tuple)):
+ result.params = Parameters()
+ for par in self.params:
+ if not isinstance(par, Parameter):
+ raise MinimizerException(self._err_nonparam)
+ else:
+ result.params[par.name] = par
+ elif self.params is None:
+ raise MinimizerException(self._err_nonparam)
+
+ # determine which parameters are actually variables
+ # and which are defined expressions.
+ result.var_names = [] # note that this *does* belong to self...
+ result.init_vals = []
+ result._init_vals_internal = []
+ result.params.update_constraints()
+ result.nfev = 0
+ result.call_kws = {}
+ result.errorbars = False
+ result.aborted = False
+ result.success = True
+ result.covar = None
+
+ for name, par in self.result.params.items():
+ par.stderr = None
+ par.correl = None
+ if par.expr is not None:
+ par.vary = False
+ if par.vary:
+ result.var_names.append(name)
+ result._init_vals_internal.append(par.setup_bounds())
+ result.init_vals.append(par.value)
+
+ par.init_value = par.value
+ if par.name is None:
+ par.name = name
+ result.nvarys = len(result.var_names)
+ result.init_values = {n: v for n, v in zip(result.var_names,
+ result.init_vals)}
+
+ # set up reduce function for scalar minimizers
+ # 1. user supplied callable
+ # 2. string starting with 'neglogc' or 'negent'
+ # 3. sum-of-squares
+ if not callable(self.reduce_fcn):
+ if isinstance(self.reduce_fcn, str):
+ if self.reduce_fcn.lower().startswith('neglogc'):
+ self.reduce_fcn = reduce_cauchylogpdf
+ elif self.reduce_fcn.lower().startswith('negent'):
+ self.reduce_fcn = reduce_negentropy
+ if self.reduce_fcn is None:
+ self.reduce_fcn = reduce_chisquare
+ return result
+
+ def unprepare_fit(self):
+ """Clean fit state.
+
+ This method removes AST compilations of constraint expressions.
+ Subsequent fits will need to call prepare_fit().
+
+
+ """
+
+ def _calculate_covariance_matrix(self, fvars):
+ """Calculate the covariance matrix.
+
+ The ``numdifftools`` package is used to estimate the Hessian
+ matrix, and the covariance matrix is calculated as:
+
+ .. math::
+
+ cov_x = inverse(Hessian) * 2.0
+
+ Parameters
+ ----------
+ fvars : numpy.ndarray
+ Array of the optimal internal, freely variable parameters.
+
+ Returns
+ -------
+ cov_x : numpy.ndarray or None
+ Covariance matrix if successful, otherwise None.
+
+ """
+ warnings.filterwarnings(action="ignore", module="scipy",
+ message="^internal gelsd")
+
+ nfev = deepcopy(self.result.nfev)
+ best_vals = self.result.params.valuesdict()
+
+ try:
+ Hfun = ndt.Hessian(self.penalty, step=1.e-4)
+ hessian_ndt = Hfun(fvars)
+ cov_x = inv(hessian_ndt) * 2.0
+
+ if cov_x.diagonal().min() < 0:
+ # we know the calculated covariance is incorrect, so we set the covariance to None
+ cov_x = None
+ except (LinAlgError, ValueError):
+ cov_x = None
+ finally:
+ self.result.nfev = nfev
+
+ # restore original values
+ for name in self.result.var_names:
+ self.result.params[name].value = best_vals[name]
+ return cov_x
+
+ def _int2ext_cov_x(self, cov_int, fvars):
+ """Transform covariance matrix to external parameter space.
+
+ It makes use of the gradient scaling according to the MINUIT
+ recipe:
+
+ cov_ext = np.dot(grad.T, grad) * cov_int
+
+ Parameters
+ ----------
+ cov_int : numpy.ndarray
+ Covariance matrix in the internal parameter space.
+ fvars : numpy.ndarray
+ Array of the optimal internal, freely variable, parameter
+ values.
+
+ Returns
+ -------
+ cov_ext : numpy.ndarray
+ Covariance matrix, transformed to external parameter space.
+
+ """
+ g = [self.result.params[name].scale_gradient(fvars[i]) for i, name in
+ enumerate(self.result.var_names)]
+ grad2d = np.atleast_2d(g)
+ grad = np.dot(grad2d.T, grad2d)
+
+ cov_ext = cov_int * grad
+ return cov_ext
+
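+ # Numeric sketch of the scaling above (editorial): ``np.dot(grad2d.T,
+ # grad2d)`` is the outer product of the gradient scales, applied
+ # element-wise to the internal covariance:
+ #
+ #     >>> import numpy as np
+ #     >>> g = np.array([2.0, 0.5])
+ #     >>> cov_int = np.array([[1.0, 0.1], [0.1, 4.0]])
+ #     >>> cov_int * np.outer(g, g)
+ #     array([[4. , 0.1],
+ #            [0.1, 1. ]])
+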
+ def _calculate_uncertainties_correlations(self):
+ """Calculate parameter uncertainties and correlations."""
+ self.result.errorbars = True
+
+ if self.scale_covar:
+ self.result.covar *= self.result.redchi
+
+ vbest = np.atleast_1d([self.result.params[name].value for name in
+ self.result.var_names])
+
+ has_expr = False
+ for par in self.result.params.values():
+ par.stderr, par.correl = 0, None
+ has_expr = has_expr or par.expr is not None
+
+ for ivar, name in enumerate(self.result.var_names):
+ par = self.result.params[name]
+ par.stderr = np.sqrt(self.result.covar[ivar, ivar])
+ par.correl = {}
+ try:
+ self.result.errorbars = self.result.errorbars and (par.stderr > 0.0)
+ for jvar, varn2 in enumerate(self.result.var_names):
+ if jvar != ivar:
+ par.correl[varn2] = (self.result.covar[ivar, jvar] /
+ (par.stderr * np.sqrt(self.result.covar[jvar, jvar])))
+ except ZeroDivisionError:
+ self.result.errorbars = False
+
+ if has_expr:
+ try:
+ uvars = uncertainties.correlated_values(vbest, self.result.covar)
+ except (LinAlgError, ValueError):
+ uvars = None
+
+ # for uncertainties on constrained parameters, use the calculated
+ # "correlated_values", evaluate the uncertainties on the constrained
+ # parameters and reset the Parameters to best-fit value
+ if uvars is not None:
+ for par in self.result.params.values():
+ eval_stderr(par, uvars, self.result.var_names, self.result.params)
+ # restore nominal values
+ for v, name in zip(uvars, self.result.var_names):
+ self.result.params[name].value = v.nominal_value
+
+ def scalar_minimize(self, method='Nelder-Mead', params=None, max_nfev=None,
+ **kws):
+ """Scalar minimization using :scipydoc:`optimize.minimize`.
+
+ Perform fit with any of the scalar minimization algorithms
+ supported by :scipydoc:`optimize.minimize`. Default argument
+ values are:
+
+ +-------------------------+---------------+-----------------------+
+ | :meth:`scalar_minimize` | Default Value | Description |
+ | arg | | |
+ +=========================+===============+=======================+
+ | `method` | 'Nelder-Mead' | fitting method |
+ +-------------------------+---------------+-----------------------+
+ | `tol` | 1.e-7 | fitting and parameter |
+ | | | tolerance |
+ +-------------------------+---------------+-----------------------+
+ | `hess` | None | Hessian of objective |
+ | | | function |
+ +-------------------------+---------------+-----------------------+
+
+
+ Parameters
+ ----------
+ method : str, optional
+ Name of the fitting method to use. One of:
+
+ - `'Nelder-Mead'` (default)
+ - `'L-BFGS-B'`
+ - `'Powell'`
+ - `'CG'`
+ - `'Newton-CG'`
+ - `'COBYLA'`
+ - `'BFGS'`
+ - `'TNC'`
+ - `'trust-ncg'`
+ - `'trust-exact'`
+ - `'trust-krylov'`
+ - `'trust-constr'`
+ - `'dogleg'`
+ - `'SLSQP'`
+ - `'differential_evolution'`
+
+ params : Parameters, optional
+ Parameters to use as starting point.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations. Defaults to
+ ``2000*(nvars+1)``, where ``nvars`` is the number of variable
+ parameters.
+ **kws : dict, optional
+ Minimizer options to pass to :scipydoc:`optimize.minimize`.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the optimized parameters and several
+ goodness-of-fit statistics.
+
+
+ .. versionchanged:: 0.9.0
+ Return value changed to :class:`MinimizerResult`.
+
+
+ Notes
+ -----
+ If the objective function returns a NumPy array instead of the
+ expected scalar, the sum-of-squares of the array will be used.
+
+ Note that bounds and constraints can be set on Parameters for any
+ of these methods, so are not supported separately for those
+ designed to use bounds. However, if you use the
+ ``differential_evolution`` method you must specify finite
+ ``(min, max)`` for each varying Parameter.
+
+ """
+ result = self.prepare_fit(params=params)
+ result.method = method
+ variables = result._init_vals_internal
+ params = result.params
+
+ self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
+ fmin_kws = dict(method=method, options={'maxiter': 2*self.max_nfev})
+ if method == 'L-BFGS-B':
+ fmin_kws['options']['maxfun'] = 2*self.max_nfev
+
+ # fmin_kws = dict(method=method, options={'maxfun': 2*self.max_nfev})
+ fmin_kws.update(self.kws)
+
+ if 'maxiter' in kws:
+ warnings.warn(maxeval_warning.format('maxiter', thisfuncname()),
+ RuntimeWarning)
+ kws.pop('maxiter')
+ fmin_kws.update(kws)
+
+ # hess supported only in some methods
+ if 'hess' in fmin_kws and method not in ('Newton-CG', 'dogleg',
+ 'trust-constr', 'trust-ncg',
+ 'trust-krylov', 'trust-exact'):
+ fmin_kws.pop('hess')
+
+ # Accept Jacobians given as Dfun argument
+ if 'jac' not in fmin_kws and fmin_kws.get('Dfun', None) is not None:
+ fmin_kws['jac'] = fmin_kws.pop('Dfun')
+
+ # Wrap Jacobian function to deal with bounds
+ if 'jac' in fmin_kws:
+ self.jacfcn = fmin_kws.pop('jac')
+ fmin_kws['jac'] = self.__jacobian
+
+ # Ignore jac argument for methods that do not support it
+ if 'jac' in fmin_kws and method not in ('CG', 'BFGS', 'Newton-CG',
+ 'L-BFGS-B', 'TNC', 'SLSQP',
+ 'dogleg', 'trust-ncg',
+ 'trust-krylov', 'trust-exact'):
+ self.jacfcn = None
+ fmin_kws.pop('jac')
+
+ # workers / updating keywords only supported in differential_evolution
+ for kwd in ('workers', 'updating'):
+ if kwd in fmin_kws and method != 'differential_evolution':
+ fmin_kws.pop(kwd)
+
+ if method == 'differential_evolution':
+ for par in params.values():
+ if (par.vary and
+ not (np.isfinite(par.min) and np.isfinite(par.max))):
+ raise ValueError('differential_evolution requires finite '
+ 'bounds for all varying parameters')
+
+ _bounds = [(-np.pi / 2., np.pi / 2.)] * len(variables)
+ kwargs = dict(args=(), strategy='best1bin', maxiter=self.max_nfev,
+ popsize=15, tol=0.01, mutation=(0.5, 1),
+ recombination=0.7, seed=None, callback=None,
+ disp=False, polish=True, init='latinhypercube',
+ atol=0, updating='immediate', workers=1)
+
+ for k, v in fmin_kws.items():
+ if k in kwargs:
+ kwargs[k] = v
+
+ fmin_kws = kwargs
+ result.call_kws = fmin_kws
+ try:
+ ret = differential_evolution(self.penalty, _bounds, **fmin_kws)
+ except AbortFitException:
+ pass
+
+ else:
+ result.call_kws = fmin_kws
+ try:
+ ret = scipy_minimize(self.penalty, variables, **fmin_kws)
+ except AbortFitException:
+ pass
+
+ if not result.aborted:
+ if isinstance(ret, dict):
+ for attr, value in ret.items():
+ setattr(result, attr, value)
+ else:
+ for attr in dir(ret):
+ if not attr.startswith('_'):
+ setattr(result, attr, getattr(ret, attr))
+
+ result.x = np.atleast_1d(result.x)
+ result.residual = self.__residual(result.x)
+ result.nfev -= 1
+ else:
+ result.x = result.last_internal_values
+ self.result.nfev -= 2
+ self._abort = False
+ result.residual = self.__residual(result.x)
+ result.nfev += 1
+
+ result._calculate_statistics()
+
+ # calculate the cov_x and estimate uncertainties/correlations
+ if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
+ len(result.residual) > len(result.var_names)):
+ _covar_ndt = self._calculate_covariance_matrix(result.x)
+ if _covar_ndt is not None:
+ result.covar = self._int2ext_cov_x(_covar_ndt, result.x)
+ self._calculate_uncertainties_correlations()
+
+ return result
+
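+ # Editorial sketch (hypothetical parameter name ``amp``): the
+ # ``differential_evolution`` method needs finite bounds on every varying
+ # parameter:
+ #
+ #     >>> params['amp'].set(min=0, max=10)
+ #     >>> out = mini.scalar_minimize(method='differential_evolution')
+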
+ def _lnprob(self, theta, userfcn, params, var_names, bounds, userargs=(),
+ userkws=None, float_behavior='posterior', is_weighted=True,
+ nan_policy='raise'):
+ """Calculate the log-posterior probability.
+
+ See the `Minimizer.emcee` method for more details.
+
+ Parameters
+ ----------
+ theta : sequence
+ Float parameter values (only those being varied).
+ userfcn : callable
+ User objective function.
+ params : Parameters
+ The entire set of Parameters.
+ var_names : list
+ The names of the parameters that are varying.
+ bounds : numpy.ndarray
+ Lower and upper bounds of parameters, with shape
+ ``(nvarys, 2)``.
+ userargs : tuple, optional
+ Extra positional arguments required for user objective function.
+ userkws : dict, optional
+ Extra keyword arguments required for user objective function.
+ float_behavior : {'posterior', 'chi2'}, optional
+ Specifies meaning of objective when it returns a float. Use
+ `'posterior'` if objective function returns a log-posterior
+ probability (default) or `'chi2'` if it returns a chi2 value.
+ is_weighted : bool, optional
+ If `userfcn` returns a vector of residuals then `is_weighted`
+ (default is True) specifies if the residuals have been weighted
+ by data uncertainties.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ Specifies action if `userfcn` returns NaN values. Use `'raise'`
+ (default) to raise a `ValueError`, `'propagate'` to use values
+ as-is, or `'omit'` to filter out the non-finite values.
+
+ Returns
+ -------
+ lnprob : float
+ Log posterior probability.
+
+ """
+ # the comparison has to be done on theta and bounds. DO NOT inject theta
+ # values into Parameters, then compare Parameters values to the bounds.
+ # Parameters values are clipped to stay within bounds.
+ if np.any(theta > bounds[:, 1]) or np.any(theta < bounds[:, 0]):
+ return -np.inf
+ for name, val in zip(var_names, theta):
+ params[name].value = val
+ userkwargs = {}
+ if userkws is not None:
+ userkwargs = userkws
+ # update the constraints
+ params.update_constraints()
+ # now calculate the log-likelihood
+ out = userfcn(params, *userargs, **userkwargs)
+ self.result.nfev += 1
+ if callable(self.iter_cb):
+ abort = self.iter_cb(params, self.result.nfev, out,
+ *userargs, **userkwargs)
+ self._abort = self._abort or abort
+ if self._abort:
+ self.result.residual = out
+ self._lastpos = theta
+ raise AbortFitException("fit aborted by user.")
+ else:
+ out = _nan_policy(np.asarray(out).ravel(),
+ nan_policy=self.nan_policy)
+ lnprob = np.asarray(out).ravel()
+ if len(lnprob) == 0:
+ lnprob = np.array([-1.e100])
+ if lnprob.size > 1:
+ # objective function returns a vector of residuals
+ if '__lnsigma' in params and not is_weighted:
+ # marginalise over a constant data uncertainty
+ __lnsigma = params['__lnsigma'].value
+ c = np.log(2 * np.pi) + 2 * __lnsigma
+ lnprob = -0.5 * np.sum((lnprob / np.exp(__lnsigma)) ** 2 + c)
+ else:
+ lnprob = -0.5 * (lnprob * lnprob).sum()
+ else:
+ # objective function returns a single value.
+ # use float_behaviour to figure out if the value is posterior or chi2
+ if float_behavior == 'posterior':
+ pass
+ elif float_behavior == 'chi2':
+ lnprob *= -0.5
+ else:
+ raise ValueError("float_behaviour must be either 'posterior' "
+ "or 'chi2' " + float_behavior)
+ return lnprob
+
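+ # Note on the weighting above (editorial): with ``__lnsigma = s``, each
+ # residual r_i contributes (r_i/exp(s))**2 + log(2*pi) + 2*s, which is
+ # the Gaussian -2*ln(likelihood) with a single data uncertainty exp(s)
+ # treated as a nuisance parameter.
+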
+ def emcee(self, params=None, steps=1000, nwalkers=100, burn=0, thin=1,
+ ntemps=1, pos=None, reuse_sampler=False, workers=1,
+ float_behavior='posterior', is_weighted=True, seed=None,
+ progress=True, run_mcmc_kwargs={}):
+ r"""Bayesian sampling of the posterior distribution.
+
+ The method uses the ``emcee`` Markov Chain Monte Carlo package and
+ assumes that the prior is Uniform. You need to have ``emcee``
+ version 3 or newer installed to use this method.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters to use as starting point. If this is not specified
+ then the Parameters used to initialize the Minimizer object
+ are used.
+ steps : int, optional
+ The number of samples to draw from the posterior distribution
+ for each of the walkers.
+ nwalkers : int, optional
+ Should be set so :math:`nwalkers >> nvarys`, where ``nvarys``
+ are the number of parameters being varied during the fit.
+ 'Walkers are the members of the ensemble. They are almost like
+ separate Metropolis-Hastings chains but, of course, the proposal
+ distribution for a given walker depends on the positions of all
+ the other walkers in the ensemble.' - from the `emcee` webpage.
+ burn : int, optional
+ Discard this many samples from the start of the sampling regime.
+ thin : int, optional
+ Only accept 1 in every `thin` samples.
+ ntemps : int, deprecated
+ ntemps has no effect.
+ pos : numpy.ndarray, optional
+ Specify the initial positions for the sampler, an ndarray of
+ shape ``(nwalkers, nvarys)``. You can also initialise using a
+ previous chain of the same `nwalkers` and ``nvarys``. Note that
+ ``nvarys`` may be one larger than you expect it to be if your
+ ``userfcn`` returns an array and ``is_weighted=False``.
+ reuse_sampler : bool, optional
+ Set to True if you have already run `emcee` with the
+ `Minimizer` instance and want to continue to draw from its
+ ``sampler`` (and so retain the chain history). If False, a
+ new sampler is created. The keywords `nwalkers`, `pos`, and
+ `params` will be ignored when this is set, as they will be set
+ by the existing sampler.
+ **Important**: the Parameters used to create the sampler must
+ not change in-between calls to `emcee`. Alteration of Parameters
+ would include changed ``min``, ``max``, ``vary`` and ``expr``
+ attributes. This may happen, for example, if you use an altered
+ Parameters object and call the `minimize` method in-between
+ calls to `emcee`.
+ workers : Pool-like or int, optional
+ For parallelization of sampling. It can be any Pool-like object
+ with a map method that follows the same calling sequence as the
+ built-in `map` function. If int is given as the argument, then
+ a multiprocessing-based pool is spawned internally with the
+ corresponding number of parallel processes. 'mpi4py'-based
+ parallelization and 'joblib'-based parallelization pools can
+ also be used here. **Note**: because of multiprocessing
+ overhead it may only be worth parallelising if the objective
+ function is expensive to calculate, or if there are a large
+ number of objective evaluations per step
+ (``nwalkers * nvarys``).
+ float_behavior : str, optional
+ Meaning of float (scalar) output of objective function. Use
+ `'posterior'` if it returns a log-posterior probability or
+ `'chi2'` if it returns :math:`\chi^2`. See Notes for further
+ details.
+ is_weighted : bool, optional
+ Has your objective function been weighted by measurement
+ uncertainties? If ``is_weighted=True`` then your objective
+ function is assumed to return residuals that have been divided
+ by the true measurement uncertainty ``(data - model) / sigma``.
+ If ``is_weighted=False`` then the objective function is
+ assumed to return unweighted residuals, ``data - model``. In
+ this case `emcee` will employ a positive measurement
+ uncertainty during the sampling. This measurement uncertainty
+ will be present in the output params and output chain with the
+ name ``__lnsigma``. A side effect of this is that you cannot
+ use this parameter name yourself.
+ **Important**: this parameter only has any effect if your
+ objective function returns an array. If your objective function
+ returns a float, then this parameter is ignored. See Notes for
+ more details.
+ seed : int or numpy.random.RandomState, optional
+ If `seed` is an ``int``, a new `numpy.random.RandomState`
+ instance is used, seeded with `seed`.
+ If `seed` is already a `numpy.random.RandomState` instance,
+ then that `numpy.random.RandomState` instance is used. Specify
+ `seed` for repeatable minimizations.
+ progress : bool, optional
+ Print a progress bar to the console while running.
+ run_mcmc_kwargs : dict, optional
+ Additional (optional) keyword arguments that are passed to
+ ``emcee.EnsembleSampler.run_mcmc``.
+
+ Returns
+ -------
+ MinimizerResult
+ MinimizerResult object containing updated params, statistics,
+ etc. The updated params represent the median of the samples,
+ while the uncertainties are half the difference of the 15.87
+ and 84.13 percentiles. The `MinimizerResult` contains a few
+ additional attributes: `chain` contains the samples and has
+ shape ``((steps - burn) // thin, nwalkers, nvarys)``.
+ `flatchain` is a `pandas.DataFrame` of the flattened chain,
+ that can be accessed with `result.flatchain[parname]`.
+ `lnprob` contains the log probability for each sample in
+ `chain`. The sample with the highest probability corresponds
+ to the maximum likelihood estimate. `acor` is an array
+ containing the auto-correlation time for each parameter if the
+ auto-correlation time can be computed from the chain. Finally,
+ `acceptance_fraction` is an array of the fraction of steps
+ accepted for each walker.
+
+ Notes
+ -----
+ This method samples the posterior distribution of the parameters
+ using Markov Chain Monte Carlo. It calculates the log-posterior
+ probability of the model parameters, `F`, given the data, `D`,
+ :math:`\ln p(F_{true} | D)`. This 'posterior probability' is
+ given by:
+
+ .. math::
+
+ \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
+
+ where :math:`\ln p(D | F_{true})` is the 'log-likelihood' and
+ :math:`\ln p(F_{true})` is the 'log-prior'. The default log-prior
+ encodes what is known about the model beforehand: the log-prior
+ probability is ``-numpy.inf`` (impossible) if any of the parameters
+ is outside its limits, and zero if all the parameters are inside
+ their bounds (uniform prior). The log-likelihood function is [1]_:
+
+ .. math::
+
+ \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{(g_n(F_{true}) - D_n)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
+
+ The first term represents the residual (:math:`g` being the
+ generative model, :math:`D_n` the data and :math:`s_n` the
+ measurement uncertainty). This gives :math:`\chi^2` when summed
+ over all data points. The objective function may also return the
+ log-posterior probability, :math:`\ln p(F_{true} | D)`. Since the
+ default log-prior term is zero, the objective function can also
+ just return the log-likelihood, unless you wish to create a
+ non-uniform prior.
+
+ If the objective function returns a float value, this is assumed
+ by default to be the log-posterior probability, (`float_behavior`
+ default is 'posterior'). If your objective function returns
+ :math:`\chi^2`, then you should use ``float_behavior='chi2'``
+ instead.
+
+ By default objective functions may return an ndarray of (possibly
+ weighted) residuals. In this case, use `is_weighted` to select
+ whether these are correctly weighted by measurement uncertainty.
+ Note that this ignores the second term above, so that to calculate
+ a correct log-posterior probability value your objective function
+ should return a float value. With ``is_weighted=False`` the data
+ uncertainty, `s_n`, will be treated as a nuisance parameter to be
+ marginalized out. This uses a single, strictly positive
+ uncertainty for every data point (homoscedasticity),
+ :math:`s_n = \exp(\rm{\_\_lnsigma})`. ``__lnsigma`` will be
+ present in `MinimizerResult.params`, as well as `Minimizer.chain`
+ and ``nvarys`` will be increased by one.
+
+ References
+ ----------
+ .. [1] https://emcee.readthedocs.io
+
+ """
+ if not HAS_EMCEE:
+ raise NotImplementedError('emcee version 3 is required.')
+
+ if ntemps > 1:
+ msg = ("'ntemps' has no effect anymore, since the PTSampler was "
+ "removed from emcee version 3.")
+ raise DeprecationWarning(msg)
+
+ tparams = params
+ # if you're reusing the sampler then nwalkers has to be
+ # determined from the previous sampling
+ if reuse_sampler:
+ if not hasattr(self, 'sampler') or not hasattr(self, '_lastpos'):
+ raise ValueError("You wanted to use an existing sampler, but "
+ "it hasn't been created yet")
+ if len(self._lastpos.shape) == 2:
+ nwalkers = self._lastpos.shape[0]
+ elif len(self._lastpos.shape) == 3:
+ nwalkers = self._lastpos.shape[1]
+ tparams = None
+
+ result = self.prepare_fit(params=tparams)
+ params = result.params
+
+ # check if the userfcn returns a vector of residuals
+ out = self.userfcn(params, *self.userargs, **self.userkws)
+ out = np.asarray(out).ravel()
+ if out.size > 1 and is_weighted is False and '__lnsigma' not in params:
+ # __lnsigma should already be in params if is_weighted was
+ # previously set to True.
+ params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf,
+ vary=True)
+ # have to re-prepare the fit
+ result = self.prepare_fit(params)
+ params = result.params
+
+ result.method = 'emcee'
+
+ # Removing internal parameter scaling. We could possibly keep it,
+ # but I don't know how this affects the emcee sampling.
+ bounds = []
+ var_arr = np.zeros(len(result.var_names))
+ i = 0
+ for par in params:
+ param = params[par]
+ if param.expr is not None:
+ param.vary = False
+ if param.vary:
+ var_arr[i] = param.value
+ i += 1
+ else:
+ # don't want to append bounds if they're not being varied.
+ continue
+ param.from_internal = lambda val: val
+ lb, ub = param.min, param.max
+ if lb is None or np.isnan(lb):
+ lb = -np.inf
+ if ub is None or np.isnan(ub):
+ ub = np.inf
+ bounds.append((lb, ub))
+ bounds = np.array(bounds)
+
+ self.nvarys = len(result.var_names)
+
+ # set up multiprocessing options for the samplers
+ auto_pool = None
+ sampler_kwargs = {}
+ if isinstance(workers, int) and workers > 1 and HAS_DILL:
+ auto_pool = multiprocessing.Pool(workers)
+ sampler_kwargs['pool'] = auto_pool
+ elif hasattr(workers, 'map'):
+ sampler_kwargs['pool'] = workers
+
+ # function arguments for the log-probability functions
+ # these values are sent to the log-probability functions by the sampler.
+ lnprob_args = (self.userfcn, params, result.var_names, bounds)
+ lnprob_kwargs = {'is_weighted': is_weighted,
+ 'float_behavior': float_behavior,
+ 'userargs': self.userargs,
+ 'userkws': self.userkws,
+ 'nan_policy': self.nan_policy}
+
+ sampler_kwargs['args'] = lnprob_args
+ sampler_kwargs['kwargs'] = lnprob_kwargs
+
+ # set up the random number generator
+ rng = _make_random_gen(seed)
+
+ # now initialise the samplers
+ if reuse_sampler:
+ if auto_pool is not None:
+ self.sampler.pool = auto_pool
+
+ p0 = self._lastpos
+ if p0.shape[-1] != self.nvarys:
+ raise ValueError("You cannot reuse the sampler if the number "
+ "of varying parameters has changed")
+
+ else:
+ p0 = 1 + rng.randn(nwalkers, self.nvarys) * 1.e-4
+ p0 *= var_arr
+ sampler_kwargs.setdefault('pool', auto_pool)
+ self.sampler = emcee.EnsembleSampler(nwalkers, self.nvarys,
+ self._lnprob, **sampler_kwargs)
+
+ # user supplies an initialisation position for the chain
+ # If you try to run the sampler with p0 of a wrong size then you'll get
+ # a ValueError. Note, you can't initialise with a position if you are
+ # reusing the sampler.
+ if pos is not None and not reuse_sampler:
+ tpos = np.asfarray(pos)
+ if p0.shape == tpos.shape:
+ pass
+ # trying to initialise with a previous chain
+ elif tpos.shape[-1] == self.nvarys:
+ tpos = tpos[-1]
+ else:
+ raise ValueError('pos should have shape (nwalkers, nvarys)')
+ p0 = tpos
+
+ # if you specified a seed then you also need to seed the sampler
+ if seed is not None:
+ self.sampler.random_state = rng.get_state()
+
+ if not isinstance(run_mcmc_kwargs, dict):
+ raise ValueError('run_mcmc_kwargs should be a dict of keyword arguments')
+
+ # now do a production run, sampling all the time
+ try:
+ output = self.sampler.run_mcmc(p0, steps, progress=progress, **run_mcmc_kwargs)
+ self._lastpos = output.coords
+ except AbortFitException:
+ result.aborted = True
+ result.message = "Fit aborted by user callback. Could not estimate error-bars."
+ result.success = False
+ result.nfev = self.result.nfev
+
+ # discard the burn samples and thin
+ chain = self.sampler.get_chain(thin=thin, discard=burn)[..., :, :]
+ lnprobability = self.sampler.get_log_prob(thin=thin, discard=burn)[..., :]
+ flatchain = chain.reshape((-1, self.nvarys))
+ if not result.aborted:
+ quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
+
+ for i, var_name in enumerate(result.var_names):
+ std_l, median, std_u = quantiles[:, i]
+ params[var_name].value = median
+ params[var_name].stderr = 0.5 * (std_u - std_l)
+ params[var_name].correl = {}
+
+ params.update_constraints()
+
+ # work out correlation coefficients
+ corrcoefs = np.corrcoef(flatchain.T)
+
+ for i, var_name in enumerate(result.var_names):
+ for j, var_name2 in enumerate(result.var_names):
+ if i != j:
+ result.params[var_name].correl[var_name2] = corrcoefs[i, j]
+
+ result.chain = np.copy(chain)
+ result.lnprob = np.copy(lnprobability)
+ result.errorbars = True
+ result.nvarys = len(result.var_names)
+ result.nfev = nwalkers*steps
+
+ try:
+ result.acor = self.sampler.get_autocorr_time()
+ except AutocorrError as e:
+ print(str(e))
+ result.acceptance_fraction = self.sampler.acceptance_fraction
+
+ # Calculate the residual with the "best fit" parameters
+ out = self.userfcn(params, *self.userargs, **self.userkws)
+ result.residual = _nan_policy(out, nan_policy=self.nan_policy,
+ handle_inf=False)
+
+ # If uncertainty was automatically estimated, weight the residual properly
+ if not is_weighted and result.residual.size > 1 and '__lnsigma' in params:
+ result.residual /= np.exp(params['__lnsigma'].value)
+
+ # Calculate statistics for the two standard cases:
+ if isinstance(result.residual, np.ndarray) or (float_behavior == 'chi2'):
+ result._calculate_statistics()
+
+ # Handle special case unique to emcee:
+ # This should eventually be moved into result._calculate_statistics.
+ elif float_behavior == 'posterior':
+ result.ndata = 1
+ result.nfree = 1
+
+ # assuming prior prob = 1, this is true
+ _neg2_log_likel = -2*result.residual
+
+ # assumes that residual is properly weighted, avoid overflowing np.exp()
+ result.chisqr = np.exp(min(650, _neg2_log_likel))
+
+ result.redchi = result.chisqr / result.nfree
+ result.aic = _neg2_log_likel + 2 * result.nvarys
+ result.bic = _neg2_log_likel + np.log(result.ndata) * result.nvarys
+
+ if auto_pool is not None:
+ auto_pool.terminate()
+
+ return result
+
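+ # Editorial usage sketch (hypothetical ``residual`` and ``params``):
+ #
+ #     >>> mini = Minimizer(residual, params)
+ #     >>> res = mini.emcee(steps=1000, burn=300, thin=20,
+ #     ...                  is_weighted=False, progress=False)
+ #     >>> res.flatchain.describe()   # includes the __lnsigma column
+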
+ def least_squares(self, params=None, max_nfev=None, **kws):
+ """Least-squares minimization using :scipydoc:`optimize.least_squares`.
+
+ This method wraps :scipydoc:`optimize.least_squares`, which has
+ built-in support for bounds and robust loss functions. By default
+ it uses the Trust Region Reflective algorithm with a linear loss
+ function (i.e., the standard least-squares problem).
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters to use as starting point.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations. Defaults to
+ ``2000*(nvars+1)``, where ``nvars`` is the number of variable
+ parameters.
+ **kws : dict, optional
+ Minimizer options to pass to :scipydoc:`optimize.least_squares`.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the optimized parameters and several
+ goodness-of-fit statistics.
+
+
+ .. versionchanged:: 0.9.0
+ Return value changed to :class:`MinimizerResult`.
+
+ """
+ result = self.prepare_fit(params)
+ result.method = 'least_squares'
+
+ replace_none = lambda x, sign: sign*np.inf if x is None else x
+ self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
+
+ start_vals, lower_bounds, upper_bounds = [], [], []
+ for vname in result.var_names:
+ par = self.params[vname]
+ start_vals.append(par.value)
+ lower_bounds.append(replace_none(par.min, -1))
+ upper_bounds.append(replace_none(par.max, 1))
+
+ least_squares_kws = dict(jac='2-point', method='trf', ftol=1e-08,
+ xtol=1e-08, gtol=1e-08, x_scale=1.0,
+ loss='linear', f_scale=1.0, diff_step=None,
+ tr_solver=None, tr_options={},
+ jac_sparsity=None, max_nfev=2*self.max_nfev,
+ verbose=0, kwargs={})
+
+ least_squares_kws.update(self.kws)
+ least_squares_kws.update(kws)
+
+ least_squares_kws['kwargs'].update({'apply_bounds_transformation': False})
+ result.call_kws = least_squares_kws
+
+ try:
+ ret = least_squares(self.__residual, start_vals,
+ bounds=(lower_bounds, upper_bounds),
+ **least_squares_kws)
+ result.residual = ret.fun
+ except AbortFitException:
+ pass
+
+ # Note: scipy.optimize.least_squares stores the residual of the
+ # "last evaluation", which is not necessarily the "best result";
+ # re-evaluate at the best solution here for consistency
+ if not result.aborted:
+ result.nfev -= 1
+ result.residual = self.__residual(ret.x, False)
+ result._calculate_statistics()
+
+ if not result.aborted:
+ for attr in ret:
+ outattr = attr
+ if attr == 'nfev':
+ outattr = 'least_squares_nfev'
+ setattr(result, outattr, ret[attr])
+
+ result.x = np.atleast_1d(result.x)
+
+ # calculate the cov_x and estimate uncertainties/correlations
+ try:
+ if issparse(ret.jac):
+ hess = (ret.jac.T * ret.jac).toarray()
+ elif isinstance(ret.jac, LinearOperator):
+ identity = np.eye(ret.jac.shape[1], dtype=ret.jac.dtype)
+ hess = (ret.jac.T * ret.jac) * identity
+ else:
+ hess = np.matmul(ret.jac.T, ret.jac)
+ result.covar = np.linalg.inv(hess)
+ self._calculate_uncertainties_correlations()
+ except LinAlgError:
+ pass
+
+ return result
+
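+ # Editorial sketch: keyword arguments pass straight through to
+ # scipy.optimize.least_squares, so a robust loss is one keyword away:
+ #
+ #     >>> out = mini.least_squares(loss='soft_l1', f_scale=0.5)
+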
+ def leastsq(self, params=None, max_nfev=None, **kws):
+ """Use Levenberg-Marquardt minimization to perform a fit.
+
+ It assumes that the input Parameters have been initialized, and a
+ function to minimize has been properly set up. When possible, this
+ calculates the estimated uncertainties and variable correlations
+ from the covariance matrix.
+
+ This method calls :scipydoc:`optimize.leastsq` and, by default,
+ numerical derivatives are used.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters to use as starting point.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations. Defaults to
+ ``2000*(nvars+1)``, where ``nvars`` is the number of variable
+ parameters.
+ **kws : dict, optional
+ Minimizer options to pass to :scipydoc:`optimize.leastsq`.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the optimized parameters and several
+ goodness-of-fit statistics.
+
+
+ .. versionchanged:: 0.9.0
+ Return value changed to :class:`MinimizerResult`.
+
+ """
+ result = self.prepare_fit(params=params)
+ result.method = 'leastsq'
+ result.nfev -= 2 # correct for "pre-fit" initialization/checks
+ variables = result._init_vals_internal
+
+ # Note: we set max number of function evaluations here, and send twice
+ # that value to the solver so it essentially never stops on its own
+ self.set_max_nfev(max_nfev, 2000*(result.nvarys+1))
+
+ lskws = dict(Dfun=None, full_output=1, col_deriv=0, ftol=1.5e-8,
+ xtol=1.5e-8, gtol=0.0, maxfev=2*self.max_nfev,
+ epsfcn=1.e-10, factor=100, diag=None)
+
+ if 'maxfev' in kws:
+ warnings.warn(maxeval_warning.format('maxfev', thisfuncname()),
+ RuntimeWarning)
+ kws.pop('maxfev')
+
+ lskws.update(self.kws)
+ lskws.update(kws)
+ self.col_deriv = False
+
+ if lskws['Dfun'] is not None:
+ self.jacfcn = lskws['Dfun']
+ self.col_deriv = lskws['col_deriv']
+ lskws['Dfun'] = self.__jacobian
+
+ # suppress runtime warnings during fit and error analysis
+ orig_warn_settings = np.geterr()
+ np.seterr(all='ignore')
+ result.call_kws = lskws
+ try:
+ lsout = scipy_leastsq(self.__residual, variables, **lskws)
+ except AbortFitException:
+ pass
+
+ if not result.aborted:
+ _best, _cov, _infodict, errmsg, ier = lsout
+ else:
+ _best = result.last_internal_values
+ _cov = None
+ ier = -1
+ errmsg = 'Fit aborted.'
+
+ result.nfev -= 1
+ if result.nfev >= self.max_nfev:
+ result.nfev = self.max_nfev - 1
+ self.result.nfev = result.nfev
+ try:
+ result.residual = self.__residual(_best)
+ result._calculate_statistics()
+ except AbortFitException:
+ pass
+
+ result.ier = ier
+ result.lmdif_message = errmsg
+ result.success = ier in [1, 2, 3, 4]
+ if ier in {1, 2, 3}:
+ result.message = 'Fit succeeded.'
+ elif ier == 0:
+ result.message = ('Invalid Input Parameters. I.e. more variables '
+ 'than data points given, tolerance < 0.0, or '
+ 'no data provided.')
+ elif ier == 4:
+ result.message = 'One or more variables did not affect the fit.'
+ elif ier == 5:
+ result.message = self._err_max_evals.format(lskws['maxfev'])
+ else:
+ result.message = 'Tolerance seems to be too small.'
+
+ # self.errorbars = error bars were successfully estimated
+ result.errorbars = (_cov is not None)
+ if result.errorbars:
+ # transform the covariance matrix to "external" parameter space
+ result.covar = self._int2ext_cov_x(_cov, _best)
+ # calculate parameter uncertainties and correlations
+ self._calculate_uncertainties_correlations()
+ else:
+ result.message = f'{result.message} Could not estimate error-bars.'
+
+ np.seterr(**orig_warn_settings)
+
+ return result
+
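+ # Editorial sketch (hypothetical ``jacobian`` function): supplying an
+ # analytic Jacobian to the Levenberg-Marquardt solver:
+ #
+ #     >>> out = mini.leastsq(Dfun=jacobian, col_deriv=True)
+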
+ def basinhopping(self, params=None, max_nfev=None, **kws):
+ """Use the `basinhopping` algorithm to find the global minimum.
+
+ This method calls :scipydoc:`optimize.basinhopping` using the
+ default arguments. The default minimizer is ``BFGS``, but since
+ lmfit supports parameter bounds for all minimizers, the user can
+ choose any of the solvers present in :scipydoc:`optimize.minimize`.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Contains the Parameters for the model. If None, then the
+ Parameters used to initialize the Minimizer object are used.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations. Defaults to
+ ``200000*(nvarys+1)``.
+ **kws : dict, optional
+ Minimizer options to pass to :scipydoc:`optimize.basinhopping`.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the optimization results from the
+ basinhopping algorithm.
+
+
+ .. versionadded:: 0.9.10
+
+ """
+ result = self.prepare_fit(params=params)
+ result.method = 'basinhopping'
+ self.set_max_nfev(max_nfev, 200000*(result.nvarys+1))
+ basinhopping_kws = dict(niter=100, T=1.0, stepsize=0.5,
+ minimizer_kwargs=None, take_step=None,
+ accept_test=None, callback=None, interval=50,
+ disp=False, niter_success=None, seed=None)
+
+ # FIXME: update when SciPy requirement is >= 1.8
+ if int(scipy_version.split('.')[1]) >= 8:
+ basinhopping_kws.update({'target_accept_rate': 0.5,
+ 'stepwise_factor': 0.9})
+
+ basinhopping_kws.update(self.kws)
+ basinhopping_kws.update(kws)
+
+ x0 = result._init_vals_internal
+ result.call_kws = basinhopping_kws
+ try:
+ ret = scipy_basinhopping(self.penalty, x0, **basinhopping_kws)
+ except AbortFitException:
+ pass
+
+ if not result.aborted:
+ result.message = ret.message
+ result.residual = self.__residual(ret.x)
+ result.nfev -= 1
+
+ result._calculate_statistics()
+
+ # calculate the cov_x and estimate uncertainties/correlations
+ if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
+ len(result.residual) > len(result.var_names)):
+ _covar_ndt = self._calculate_covariance_matrix(ret.x)
+ if _covar_ndt is not None:
+ result.covar = self._int2ext_cov_x(_covar_ndt, ret.x)
+ self._calculate_uncertainties_correlations()
+
+ return result
+
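+ # Editorial sketch: keyword arguments pass through to
+ # scipy.optimize.basinhopping:
+ #
+ #     >>> out = mini.basinhopping(niter=200, T=0.5, stepsize=0.25)
+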
+ def brute(self, params=None, Ns=20, keep=50, workers=1, max_nfev=None):
+ """Use the `brute` method to find the global minimum of a function.
+
+ The following parameters are passed to :scipydoc:`optimize.brute`
+ and cannot be changed:
+
+ +-------------------+-------+------------------------------------+
+ | :meth:`brute` arg | Value | Description |
+ +===================+=======+====================================+
+ | `full_output` | 1 | Return the evaluation grid and the |
+ | | | objective function's values on it. |
+ +-------------------+-------+------------------------------------+
+ | `finish` | None | No "polishing" function is to be |
+ | | | used after the grid search. |
+ +-------------------+-------+------------------------------------+
+ | `disp` | False | Do not print convergence messages |
+ | | | (when finish is not None). |
+ +-------------------+-------+------------------------------------+
+
+ It assumes that the input Parameters have been initialized, and a
+ function to minimize has been properly set up.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Contains the Parameters for the model. If None, then the
+ Parameters used to initialize the Minimizer object are used.
+ Ns : int, optional
+ Number of grid points along the axes, if not otherwise
+ specified (see Notes).
+ keep : int, optional
+ Number of best candidates from the brute force method that are
+ stored in the :attr:`candidates` attribute. If `'all'`, then
+ all grid points from :scipydoc:`optimize.brute` are stored as
+ candidates.
+ workers : int or map-like callable, optional
+ For parallel evaluation of the grid (see :scipydoc:`optimize.brute`
+ for more details).
+ max_nfev : int or None, optional
+ Maximum number of function evaluations. Defaults to
+ ``200000*(nvarys+1)``.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the parameters from the brute force method.
+ The return values (``x0``, ``fval``, ``grid``, ``Jout``) from
+ :scipydoc:`optimize.brute` are stored as ``brute_<parname>``
+ attributes. The `MinimizerResult` also contains the
+ :attr:`candidates` attribute and :meth:`show_candidates`
+ method. The :attr:`candidates` attribute contains the
+ parameters and chisqr from the brute force method as a
+ namedtuple, ``('Candidate', ['params', 'score'])`` sorted on
+ the (lowest) chisqr value. To access the values for a
+ particular candidate one can use ``result.candidates[#].params``
+ or ``result.candidates[#].score``, where a lower # represents a
+ better candidate. The :meth:`show_candidates` method uses the
+ :meth:`pretty_print` method to show a specific candidate-# or
+ all candidates when no number is specified.
+
+
+ .. versionadded:: 0.9.6
+
+
+ Notes
+ -----
+ The :meth:`brute` method evaluates the function at each point of a
+ multidimensional grid of points. The grid points are generated from
+ the parameter ranges using `Ns` and (optional) `brute_step`.
+ The implementation in :scipydoc:`optimize.brute` requires finite
+ bounds and the ``range`` is specified as a two-tuple ``(min, max)``
+ or slice-object ``(min, max, brute_step)``. A slice-object is used
+ directly, whereas a two-tuple is converted to a slice object that
+ interpolates `Ns` points from ``min`` to ``max``, inclusive.
+
+ In addition, the :meth:`brute` method in lmfit handles three
+ other scenarios, given below with their respective slice-objects:
+
+ - lower bound (:attr:`min`) and :attr:`brute_step` are specified:
+ ``range = (min, min + Ns * brute_step, brute_step)``.
+ - upper bound (:attr:`max`) and :attr:`brute_step` are specified:
+ ``range = (max - Ns * brute_step, max, brute_step)``.
+ - numerical value (:attr:`value`) and :attr:`brute_step` are specified:
+ ``range = (value - (Ns//2) * brute_step, value +
+ (Ns//2) * brute_step, brute_step)`` (see the worked example below).
+
+ """
+ result = self.prepare_fit(params=params)
+ result.method = 'brute'
+ self.set_max_nfev(max_nfev, 200000*(result.nvarys+1))
+
+ brute_kws = dict(full_output=1, finish=None, disp=False, Ns=Ns,
+ workers=workers)
+
+ varying = np.asarray([par.vary for par in self.params.values()])
+ replace_none = lambda x, sign: sign*np.inf if x is None else x
+ lower_bounds = np.asarray([replace_none(i.min, -1) for i in
+ self.params.values()])[varying]
+ upper_bounds = np.asarray([replace_none(i.max, 1) for i in
+ self.params.values()])[varying]
+ value = np.asarray([i.value for i in self.params.values()])[varying]
+ stepsize = np.asarray([i.brute_step for i in self.params.values()])[varying]
+
+ ranges = []
+ for i, step in enumerate(stepsize):
+ if np.all(np.isfinite([lower_bounds[i], upper_bounds[i]])):
+ # lower AND upper bounds are specified (brute_step optional)
+ par_range = ((lower_bounds[i], upper_bounds[i], step)
+ if step else (lower_bounds[i], upper_bounds[i]))
+ elif np.isfinite(lower_bounds[i]) and step:
+ # lower bound AND brute_step are specified
+ par_range = (lower_bounds[i], lower_bounds[i] + Ns*step, step)
+ elif np.isfinite(upper_bounds[i]) and step:
+ # upper bound AND brute_step are specified
+ par_range = (upper_bounds[i] - Ns*step, upper_bounds[i], step)
+ elif np.isfinite(value[i]) and step:
+ # no bounds, but an initial value is specified
+ par_range = (value[i] - (Ns//2)*step, value[i] + (Ns//2)*step,
+ step)
+ else:
+ raise ValueError('Not enough information provided for the brute '
+ 'force method. Please specify bounds or at '
+ 'least an initial value and brute_step for '
+ 'parameter "{}".'.format(result.var_names[i]))
+ ranges.append(par_range)
+ result.call_kws = brute_kws
+ try:
+ ret = scipy_brute(self.penalty, tuple(ranges), **brute_kws)
+ except AbortFitException:
+ pass
+
+ if not result.aborted:
+ result.brute_x0 = ret[0]
+ result.brute_fval = ret[1]
+ result.brute_grid = ret[2]
+ result.brute_Jout = ret[3]
+
+ # sort the results of brute and populate .candidates attribute
+ grid_score = ret[3].ravel() # chisqr
+ grid_points = [par.ravel() for par in ret[2]]
+
+ if len(result.var_names) == 1:
+ grid_result = np.array([res for res in zip(zip(grid_points), grid_score)],
+ dtype=[('par', 'O'), ('score', 'float')])
+ else:
+ grid_result = np.array([res for res in zip(zip(*grid_points), grid_score)],
+ dtype=[('par', 'O'), ('score', 'float')])
+ grid_result_sorted = grid_result[grid_result.argsort(order='score')]
+
+ result.candidates = []
+
+ if keep == 'all':
+ keep_candidates = len(grid_result_sorted)
+ else:
+ keep_candidates = min(len(grid_result_sorted), keep)
+
+ for data in grid_result_sorted[:keep_candidates]:
+ pars = deepcopy(self.params)
+ for i, par in enumerate(result.var_names):
+ pars[par].value = data[0][i]
+ result.candidates.append(Candidate(params=pars, score=data[1]))
+
+ result.params = result.candidates[0].params
+ result.residual = self.__residual(result.brute_x0,
+ apply_bounds_transformation=False)
+ result.nfev = len(result.brute_Jout.ravel())
+
+ result._calculate_statistics()
+
+ return result
+
+ def ampgo(self, params=None, max_nfev=None, **kws):
+ """Find the global minimum of a multivariate function using AMPGO.
+
+ AMPGO stands for 'Adaptive Memory Programming for Global
+ Optimization' and is an efficient algorithm to find the global
+ minimum.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Contains the Parameters for the model. If None, then the
+ Parameters used to initialize the Minimizer object are used.
+ max_nfev : int, optional
+ Maximum number of total function evaluations. If None
+ (default), the optimization will stop after `totaliter` number
+ of iterations (see below).
+ **kws : dict, optional
+ Minimizer options to pass to the ampgo algorithm; the options
+ are listed below::
+
+ local: str, optional
+ Name of the local minimization method. Valid options
+ are:
+ - `'L-BFGS-B'` (default)
+ - `'Nelder-Mead'`
+ - `'Powell'`
+ - `'TNC'`
+ - `'SLSQP'`
+ local_opts: dict, optional
+ Options to pass to the local minimizer (default is
+ None).
+ maxfunevals: int, optional
+ Maximum number of function evaluations. If None
+ (default), the optimization will stop after
+ `totaliter` number of iterations (deprecated: use
+ `max_nfev` instead).
+ totaliter: int, optional
+ Maximum number of global iterations (default is 20).
+ maxiter: int, optional
+ Maximum number of `Tabu Tunneling` iterations during
+ each global iteration (default is 5).
+ glbtol: float, optional
+ Tolerance for deciding whether or not to accept a solution
+ after a tunneling phase (default is 1e-5).
+ eps1: float, optional
+ Constant used to define an aspiration value for the
+ objective function during the Tunneling phase (default
+ is 0.02).
+ eps2: float, optional
+ Perturbation factor used to move away from the latest
+ local minimum at the start of a Tunneling phase
+ (default is 0.1).
+ tabulistsize: int, optional
+ Size of the (circular) tabu search list (default is 5).
+ tabustrategy: {'farthest', 'oldest'}, optional
+ Strategy to use when the size of the tabu list exceeds
+ `tabulistsize`. It can be `'oldest'` to drop the oldest
+ point from the tabu list or `'farthest'` (default) to
+ drop the element farthest from the last local minimum
+ found.
+ disp: bool, optional
+ Set to True to print convergence messages (default is
+ False).
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the parameters from the ampgo method, with
+ fit parameters, statistics and such. The return values
+ (``x0``, ``fval``, ``eval``, ``msg``, ``tunnel``) are stored
+ as ``ampgo_<parname>`` attributes.
+
+
+ .. versionadded:: 0.9.10
+
+
+ Notes
+ -----
+ The Python implementation was written by Andrea Gavana in 2014
+ (http://infinity77.net/global_optimization/index.html).
+
+ The details of the AMPGO algorithm are described in the paper
+ "Adaptive Memory Programming for Constrained Global Optimization"
+ located here:
+
+ http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf
+
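+ Examples
+ --------
+ A minimal sketch (assuming an objective function ``residual`` and a
+ ``Parameters`` object ``params``):
+
+ >>> fitter = Minimizer(residual, params)
+ >>> result = fitter.ampgo(local='Nelder-Mead', totaliter=10)
+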
+ """
+ result = self.prepare_fit(params=params)
+ self.set_max_nfev(max_nfev, 200000*(result.nvarys+1))
+
+ ampgo_kws = dict(local='L-BFGS-B', local_opts=None, maxfunevals=None,
+ totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02,
+ eps2=0.1, tabulistsize=5, tabustrategy='farthest',
+ disp=False)
+ ampgo_kws.update(self.kws)
+ ampgo_kws.update(kws)
+
+ values = result._init_vals_internal
+ result.method = f"ampgo, with {ampgo_kws['local']} as local solver"
+ result.call_kws = ampgo_kws
+ try:
+ ret = ampgo(self.penalty, values, **ampgo_kws)
+ except AbortFitException:
+ pass
+
+ if not result.aborted:
+ result.ampgo_x0 = ret[0]
+ result.ampgo_fval = ret[1]
+ result.ampgo_eval = ret[2]
+ result.ampgo_msg = ret[3]
+ result.ampgo_tunnel = ret[4]
+
+ for i, par in enumerate(result.var_names):
+ result.params[par].value = result.ampgo_x0[i]
+
+ result.residual = self.__residual(result.ampgo_x0)
+ result.nfev -= 1
+
+ result._calculate_statistics()
+
+ # calculate the cov_x and estimate uncertainties/correlations
+ if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
+ len(result.residual) > len(result.var_names)):
+ _covar_ndt = self._calculate_covariance_matrix(result.ampgo_x0)
+ if _covar_ndt is not None:
+ result.covar = self._int2ext_cov_x(_covar_ndt, result.ampgo_x0)
+ self._calculate_uncertainties_correlations()
+
+ return result
+
+ def shgo(self, params=None, max_nfev=None, **kws):
+ """Use the `SHGO` algorithm to find the global minimum.
+
+ SHGO stands for "simplicial homology global optimization" and
+ calls :scipydoc:`optimize.shgo` using its default arguments.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Contains the Parameters for the model. If None, then the
+ Parameters used to initialize the Minimizer object are used.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations. Defaults to
+ ``200000*(nvars+1)``, where ``nvars`` is the number of variable
+ parameters.
+ **kws : dict, optional
+ Minimizer options to pass to the SHGO algorithm.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the parameters from the SHGO method.
+ The return values specific to :scipydoc:`optimize.shgo`
+ (``x``, ``xl``, ``fun``, ``funl``, ``nfev``, ``nit``,
+ ``nlfev``, ``nlhev``, and ``nljev``) are stored as
+ ``shgo_<parname>`` attributes.
+
+
+ .. versionadded:: 0.9.14
+
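+ Examples
+ --------
+ A minimal sketch (assuming an objective function ``residual`` and a
+ ``Parameters`` object ``params`` with finite bounds on all varying
+ parameters):
+
+ >>> fitter = Minimizer(residual, params)
+ >>> result = fitter.shgo(sampling_method='sobol')
+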
+ """
+ result = self.prepare_fit(params=params)
+ result.method = 'shgo'
+
+ self.set_max_nfev(max_nfev, 200000*(result.nvarys+1))
+
+ shgo_kws = dict(constraints=None, n=100, iters=1, callback=None,
+ minimizer_kwargs=None, options=None,
+ sampling_method='simplicial')
+
+ # FIXME: update when SciPy requirement is >= 1.7
+ if int(scipy_version.split('.')[1]) >= 7:
+ shgo_kws['n'] = None
+
+ shgo_kws.update(self.kws)
+ shgo_kws.update(kws)
+
+ varying = np.asarray([par.vary for par in self.params.values()])
+ bounds = np.asarray([(par.min, par.max) for par in
+ self.params.values()])[varying]
+ result.call_kws = shgo_kws
+ try:
+ ret = scipy_shgo(self.penalty, bounds, **shgo_kws)
+ except AbortFitException:
+ pass
+
+ if not result.aborted:
+ for attr, value in ret.items():
+ if attr in ['success', 'message']:
+ setattr(result, attr, value)
+ else:
+ setattr(result, f'shgo_{attr}', value)
+
+ result.residual = self.__residual(result.shgo_x, False)
+ result.nfev -= 1
+
+ result._calculate_statistics()
+
+ # calculate the cov_x and estimate uncertainties/correlations
+ if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
+ len(result.residual) > len(result.var_names)):
+ result.covar = self._calculate_covariance_matrix(result.shgo_x)
+ if result.covar is not None:
+ self._calculate_uncertainties_correlations()
+
+ return result
+
+ def dual_annealing(self, params=None, max_nfev=None, **kws):
+ """Use the `dual_annealing` algorithm to find the global minimum.
+
+ This method calls :scipydoc:`optimize.dual_annealing` using its
+ default arguments.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Contains the Parameters for the model. If None, then the
+ Parameters used to initialize the Minimizer object are used.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations. Defaults to
+ ``200000*(nvars+1)``, where ``nvars`` is the number of variables.
+ **kws : dict, optional
+ Minimizer options to pass to the dual_annealing algorithm.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the parameters from the dual_annealing
+ method. The return values specific to
+ :scipydoc:`optimize.dual_annealing` (``x``, ``fun``, ``nfev``,
+ ``nhev``, ``njev``, and ``nit``) are stored as
+ ``da_<parname>`` attributes.
+
+
+ .. versionadded:: 0.9.14
+
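+ Examples
+ --------
+ A minimal sketch (assuming an objective function ``residual`` and a
+ ``Parameters`` object ``params``; note that finite bounds are
+ required for all varying parameters):
+
+ >>> fitter = Minimizer(residual, params)
+ >>> result = fitter.dual_annealing(seed=7)
+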
+ """
+ result = self.prepare_fit(params=params)
+ result.method = 'dual_annealing'
+ self.set_max_nfev(max_nfev, 200000*(result.nvarys+1))
+
+ da_kws = dict(maxiter=1000, local_search_options={},
+ initial_temp=5230.0, restart_temp_ratio=2e-05,
+ visit=2.62, accept=-5.0, maxfun=2*self.max_nfev,
+ seed=None, no_local_search=False, callback=None, x0=None)
+
+ da_kws.update(self.kws)
+ da_kws.update(kws)
+
+ # FIXME: update when SciPy requirement is >= 1.8
+ # ``local_search_options`` deprecated in favor of ``minimizer_kwargs``
+ if int(scipy_version.split('.')[1]) >= 8:
+ da_kws.update({'minimizer_kwargs': da_kws.pop('local_search_options')})
+
+ varying = np.asarray([par.vary for par in self.params.values()])
+ bounds = np.asarray([(par.min, par.max) for par in
+ self.params.values()])[varying]
+
+ if not np.all(np.isfinite(bounds)):
+ raise ValueError('dual_annealing requires finite bounds for all'
+ ' varying parameters')
+ result.call_kws = da_kws
+ try:
+ ret = scipy_dual_annealing(self.penalty, bounds, **da_kws)
+ except AbortFitException:
+ pass
+
+ if not result.aborted:
+ for attr, value in ret.items():
+ if attr in ['success', 'message']:
+ setattr(result, attr, value)
+ else:
+ setattr(result, f'da_{attr}', value)
+
+ result.residual = self.__residual(result.da_x, False)
+ result.nfev -= 1
+
+ result._calculate_statistics()
+
+ # calculate the cov_x and estimate uncertainties/correlations
+ if (not result.aborted and self.calc_covar and HAS_NUMDIFFTOOLS and
+ len(result.residual) > len(result.var_names)):
+ result.covar = self._calculate_covariance_matrix(result.da_x)
+ if result.covar is not None:
+ self._calculate_uncertainties_correlations()
+
+ return result
+
+ def minimize(self, method='leastsq', params=None, **kws):
+ """Perform the minimization.
+
+ Parameters
+ ----------
+ method : str, optional
+ Name of the fitting method to use. Valid values are:
+
+ - `'leastsq'`: Levenberg-Marquardt (default)
+ - `'least_squares'`: Least-Squares minimization, using Trust
+ Region Reflective method
+ - `'differential_evolution'`: differential evolution
+ - `'brute'`: brute force method
+ - `'basinhopping'`: basinhopping
+ - `'ampgo'`: Adaptive Memory Programming for Global
+ Optimization
+ - `'nelder'`: Nelder-Mead
+ - `'lbfgsb'`: L-BFGS-B
+ - `'powell'`: Powell
+ - `'cg'`: Conjugate-Gradient
+ - `'newton'`: Newton-CG
+ - `'cobyla'`: Cobyla
+ - `'bfgs'`: BFGS
+ - `'tnc'`: Truncated Newton
+ - `'trust-ncg'`: Newton-CG trust-region
+ - `'trust-exact'`: nearly exact trust-region
+ - `'trust-krylov'`: Newton GLTR trust-region
+ - `'trust-constr'`: trust-region for constrained optimization
+ - `'dogleg'`: Dog-leg trust-region
+ - `'slsqp'`: Sequential Least Squares Programming
+ - `'emcee'`: Maximum likelihood via Markov Chain Monte Carlo
+ - `'shgo'`: Simplicial Homology Global Optimization
+ - `'dual_annealing'`: Dual Annealing optimization
+
+ In most cases, these methods wrap and use the method with the
+ same name from `scipy.optimize`, or use
+ `scipy.optimize.minimize` with the same `method` argument.
+ Thus `'leastsq'` will use `scipy.optimize.leastsq`, while
+ `'powell'` will use `scipy.optimize.minimize(...,
+ method='powell')`.
+
+ For more details on the fitting methods please refer to the
+ `SciPy documentation
+ <https://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
+
+ params : Parameters, optional
+ Parameters of the model to use as starting values.
+ **kws : optional
+ Additional arguments are passed to the underlying minimization
+ method.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the optimized parameters and several
+ goodness-of-fit statistics.
+
+
+ .. versionchanged:: 0.9.0
+ Return value changed to :class:`MinimizerResult`.
+
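+ Examples
+ --------
+ A minimal sketch (assuming an objective function ``residual`` that
+ returns the residual array and a ``Parameters`` object ``params``):
+
+ >>> fitter = Minimizer(residual, params)
+ >>> result = fitter.minimize(method='nelder')
+ >>> result.params.pretty_print()
+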
+ """
+ kwargs = {'params': params}
+ kwargs.update(self.kws)
+ for maxnfev_alias in ('maxfev', 'maxiter'):
+ if maxnfev_alias in kws:
+ warnings.warn(maxeval_warning.format(maxnfev_alias, thisfuncname()),
+ RuntimeWarning)
+ kws.pop(maxnfev_alias)
+
+ kwargs.update(kws)
+
+ user_method = method.lower()
+ if user_method.startswith('leasts'):
+ function = self.leastsq
+ elif user_method.startswith('least_s'):
+ function = self.least_squares
+ elif user_method == 'brute':
+ function = self.brute
+ elif user_method == 'basinhopping':
+ function = self.basinhopping
+ elif user_method == 'ampgo':
+ function = self.ampgo
+ elif user_method == 'emcee':
+ function = self.emcee
+ elif user_method == 'shgo':
+ function = self.shgo
+ elif user_method == 'dual_annealing':
+ function = self.dual_annealing
+ else:
+ function = self.scalar_minimize
+ for key, val in SCALAR_METHODS.items():
+ if (key.lower().startswith(user_method) or
+ val.lower().startswith(user_method)):
+ kwargs['method'] = val
+ return function(**kwargs)
+
+
+def _make_random_gen(seed):
+ """Turn seed into a numpy.random.RandomState instance.
+
+ If `seed` is None, return the RandomState singleton used by numpy.random.
+ If `seed` is an int, return a new RandomState instance seeded with seed.
+ If `seed` is already a RandomState instance, return it.
+ Otherwise raise a `ValueError`.
+
+ """
+ if seed is None or seed is np.random:
+ return np.random.mtrand._rand
+ if isinstance(seed, (numbers.Integral, np.integer)):
+ return np.random.RandomState(seed)
+ if isinstance(seed, np.random.RandomState):
+ return seed
+ raise ValueError(f'{seed!r} cannot be used to seed a numpy.random.RandomState'
+ ' instance')
+
+
+def _nan_policy(arr, nan_policy='raise', handle_inf=True):
+ """Specify behaviour when array contains ``numpy.nan`` or ``numpy.inf``.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input array to consider.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ One of:
+
+ - `'raise'` : raise a `ValueError` if `arr` contains NaN (default)
+ - `'propagate'` : propagate NaN
+ - `'omit'` : filter NaN from input array
+
+ handle_inf : bool, optional
+ Whether to apply the `nan_policy` to +/- infinity (default is
+ True).
+
+ Returns
+ -------
+ array_like
+ Result of `arr` after applying the `nan_policy`.
+
+ Notes
+ -----
+ This function is copied, then modified, from
+ scipy/stats/stats.py/_contains_nan
+
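+ Examples
+ --------
+ A small illustration of the `'omit'` policy (with the default
+ ``handle_inf=True``, infinities are filtered as well):
+
+ >>> import numpy as np
+ >>> _nan_policy(np.array([1.0, np.nan, np.inf, 2.0]), nan_policy='omit')
+ array([1., 2.])
+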
+ """
+ if nan_policy not in ('propagate', 'omit', 'raise'):
+ raise ValueError("nan_policy must be 'propagate', 'omit', or 'raise'.")
+
+ if handle_inf:
+ handler_func = lambda x: ~np.isfinite(x)
+ else:
+ handler_func = isnull
+
+ if nan_policy == 'omit':
+ # mask locates any values to remove
+ mask = ~handler_func(arr)
+ if not np.all(mask): # there are some NaNs/infs/missing values
+ return arr[mask]
+
+ if nan_policy == 'raise':
+ try:
+ # Check handler_func(np.sum(arr)) rather than, e.g.,
+ # np.isnan(arr).any() to avoid creating a huge boolean array in memory
+ with np.errstate(invalid='ignore'):
+ contains_nan = handler_func(np.sum(arr))
+ except TypeError:
+ # If the check cannot be properly performed we fallback to omitting
+ # nan values and raising a warning. This can happen when attempting to
+ # sum things that are not numbers (e.g. as in the function `mode`).
+ contains_nan = False
+ warnings.warn("The input array could not be checked for NaNs. "
+ "NaNs will be ignored.", RuntimeWarning)
+
+ if contains_nan:
+ msg = ('NaN values detected in your input data or the output of '
+ 'your objective/model function - fitting algorithms cannot '
+ 'handle this! Please read https://lmfit.github.io/lmfit-py/faq.html#i-get-errors-from-nan-in-my-fit-what-can-i-do '
+ 'for more information.')
+ raise ValueError(msg)
+ return arr
+
+
+def minimize(fcn, params, method='leastsq', args=None, kws=None, iter_cb=None,
+ scale_covar=True, nan_policy='raise', reduce_fcn=None,
+ calc_covar=True, max_nfev=None, **fit_kws):
+ """Perform the minimization of the objective function.
+
+ The minimize function takes an objective function to be minimized,
+ a dictionary (:class:`~lmfit.parameter.Parameters`) containing the
+ model parameters, and several optional arguments including the
+ fitting method.
+
+ Parameters
+ ----------
+ fcn : callable
+ Objective function to be minimized. When method is `'leastsq'` or
+ `'least_squares'`, the objective function should return an array
+ of residuals (difference between model and data) to be minimized
+ in a least-squares sense. With the scalar methods the objective
+ function can either return the residuals array or a single scalar
+ value. The function must have the signature::
+
+ fcn(params, *args, **kws)
+
+ params : Parameters
+ Contains the Parameters for the model.
+ method : str, optional
+ Name of the fitting method to use. Valid values are:
+
+ - `'leastsq'`: Levenberg-Marquardt (default)
+ - `'least_squares'`: Least-Squares minimization, using Trust Region Reflective method
+ - `'differential_evolution'`: differential evolution
+ - `'brute'`: brute force method
+ - `'basinhopping'`: basinhopping
+ - `'ampgo'`: Adaptive Memory Programming for Global Optimization
+ - `'nelder'`: Nelder-Mead
+ - `'lbfgsb'`: L-BFGS-B
+ - `'powell'`: Powell
+ - `'cg'`: Conjugate-Gradient
+ - `'newton'`: Newton-CG
+ - `'cobyla'`: Cobyla
+ - `'bfgs'`: BFGS
+ - `'tnc'`: Truncated Newton
+ - `'trust-ncg'`: Newton-CG trust-region
+ - `'trust-exact'`: nearly exact trust-region
+ - `'trust-krylov'`: Newton GLTR trust-region
+ - `'trust-constr'`: trust-region for constrained optimization
+ - `'dogleg'`: Dog-leg trust-region
+ - `'slsqp'`: Sequential Least Squares Programming
+ - `'emcee'`: Maximum likelihood via Markov Chain Monte Carlo
+ - `'shgo'`: Simplicial Homology Global Optimization
+ - `'dual_annealing'`: Dual Annealing optimization
+
+ In most cases, these methods wrap and use the method of the same
+ name from `scipy.optimize`, or use `scipy.optimize.minimize` with
+ the same `method` argument. Thus `'leastsq'` will use
+ `scipy.optimize.leastsq`, while `'powell'` will use
+ `scipy.optimize.minimize(..., method='powell')`.
+
+ For more details on the fitting methods please refer to the
+ `SciPy docs <https://docs.scipy.org/doc/scipy/reference/optimize.html>`__.
+
+ args : tuple, optional
+ Positional arguments to pass to `fcn`.
+ kws : dict, optional
+ Keyword arguments to pass to `fcn`.
+ iter_cb : callable, optional
+ Function to be called at each fit iteration. This function should
+ have the signature::
+
+ iter_cb(params, iter, resid, *args, **kws),
+
+ where `params` will have the current parameter values, `iter` the
+ iteration number, `resid` the current residual array, and `*args`
+ and `**kws` as passed to the objective function.
+ scale_covar : bool, optional
+ Whether to automatically scale the covariance matrix (default is
+ True).
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ Specifies action if `fcn` (or a Jacobian) returns NaN values. One
+ of:
+
+ - `'raise'` : a `ValueError` is raised
+ - `'propagate'` : the values returned from `userfcn` are unaltered
+ - `'omit'` : non-finite values are filtered
+
+ reduce_fcn : str or callable, optional
+ Function to convert a residual array to a scalar value for the
+ scalar minimizers. See Notes in `Minimizer`.
+ calc_covar : bool, optional
+ Whether to calculate the covariance matrix (default is True) for
+ solvers other than `'leastsq'` and `'least_squares'`. Requires the
+ `numdifftools` package to be installed.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
+ **fit_kws : dict, optional
+ Options to pass to the minimizer being used.
+
+ Returns
+ -------
+ MinimizerResult
+ Object containing the optimized parameters and several
+ goodness-of-fit statistics.
+
+
+ .. versionchanged:: 0.9.0
+ Return value changed to :class:`MinimizerResult`.
+
+
+ Notes
+ -----
+ The objective function should return the value to be minimized. For
+ the Levenberg-Marquardt algorithm from leastsq(), this returned value
+ must be an array, with a length greater than or equal to the number of
+ fitting variables in the model. For the other methods, the return
+ value can either be a scalar or an array. If an array is returned, the
+ sum-of-squares of the array will be sent to the underlying fitting
+ method, effectively doing a least-squares optimization of the return
+ values.
+
+ A common use for `args` and `kws` would be to pass in other data needed
+ to calculate the residual, including such things as the data array,
+ dependent variable, uncertainties in the data, and other data structures
+ for the model calculation.
+
+ On output, `params` will be unchanged. The best-fit values and, where
+ appropriate, estimated uncertainties and correlations, will all be
+ contained in the returned :class:`MinimizerResult`. See
+ :ref:`fit-results-label` for further details.
+
+ This function is simply a wrapper around :class:`Minimizer` and is
+ equivalent to::
+
+ fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
+ iter_cb=iter_cb, scale_covar=scale_covar,
+ nan_policy=nan_policy, reduce_fcn=reduce_fcn,
+ calc_covar=calc_covar, **fit_kws)
+ fitter.minimize(method=method)
+
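+ Examples
+ --------
+ A minimal sketch, assuming 1-D data arrays ``x`` and ``y`` (the
+ objective function and parameter names here are illustrative only):
+
+ >>> import numpy as np
+ >>> from lmfit import Parameters, minimize
+ >>> def residual(params, x, data):
+ ...     amp = params['amp'].value
+ ...     decay = params['decay'].value
+ ...     return data - amp * np.exp(-x / decay)
+ >>> params = Parameters()
+ >>> params.add('amp', value=10)
+ >>> params.add('decay', value=1, min=0)
+ >>> result = minimize(residual, params, args=(x, y))
+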
+ """
+ fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
+ iter_cb=iter_cb, scale_covar=scale_covar,
+ nan_policy=nan_policy, reduce_fcn=reduce_fcn,
+ calc_covar=calc_covar, max_nfev=max_nfev, **fit_kws)
+ return fitter.minimize(method=method)
diff --git a/lmfit/model.py b/lmfit/model.py
new file mode 100644
index 0000000..26b0b64
--- /dev/null
+++ b/lmfit/model.py
@@ -0,0 +1,2297 @@
+"""Implementation of the Model interface."""
+
+from copy import deepcopy
+from functools import wraps
+import inspect
+import json
+import operator
+import warnings
+
+from asteval import valid_symbol_name
+import numpy as np
+from scipy.special import erf
+from scipy.stats import t
+
+import lmfit
+
+from . import Minimizer, Parameter, Parameters, lineshapes
+from .confidence import conf_interval
+from .jsonutils import HAS_DILL, decode4js, encode4js
+from .minimizer import MinimizerResult
+from .printfuncs import ci_report, fit_report, fitreport_html_table
+
+tiny = 1.e-15
+
+ # Use pandas.isnull for aligning missing data if pandas is available,
+ # otherwise use numpy.isnan.
+try:
+ from pandas import Series, isnull
+except ImportError:
+ isnull = np.isnan
+ Series = type(NotImplemented)
+
+
+def _align(var, mask, data):
+ """Align missing data, if pandas is available."""
+ if isinstance(data, Series) and isinstance(var, Series):
+ return var.reindex_like(data).dropna()
+ elif mask is not None:
+ return var[mask]
+ return var
+
+
+try:
+ import matplotlib # noqa: F401
+ _HAS_MATPLOTLIB = True
+except Exception:
+ _HAS_MATPLOTLIB = False
+
+
+def _ensureMatplotlib(function):
+ if _HAS_MATPLOTLIB:
+ @wraps(function)
+ def wrapper(*args, **kws):
+ return function(*args, **kws)
+ return wrapper
+
+ def no_op(*args, **kwargs):
+ print('matplotlib module is required for plotting the results')
+ return no_op
+
+
+def get_reducer(option):
+ """Factory function to build a parser for complex numbers.
+
+ Parameters
+ ----------
+ option : {'real', 'imag', 'abs', 'angle'}
+ Implements the NumPy function with the same name.
+
+ Returns
+ -------
+ callable
+ See docstring for `reducer` below.
+
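+ Examples
+ --------
+ A small illustration:
+
+ >>> import numpy as np
+ >>> reduce_abs = get_reducer('abs')
+ >>> reduce_abs(np.array([3 + 4j, 1 + 0j]))
+ array([5., 1.])
+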
+ """
+ if option not in ['real', 'imag', 'abs', 'angle']:
+ raise ValueError(f"Invalid option ('{option}') for function 'propagate_err'.")
+
+ def reducer(array):
+ """Convert a complex array to a real array.
+
+ Several conversion methods are available and it does nothing to a
+ purely real array.
+
+ Parameters
+ ----------
+ array : array-like
+ Input array. If complex, will be converted to real array via
+ one of the following NumPy functions: :numpydoc:`real`,
+ :numpydoc:`imag`, :numpydoc:`abs`, or :numpydoc:`angle`.
+
+ Returns
+ -------
+ numpy.array
+ Returned array will be purely real.
+
+ """
+ if any(np.iscomplex(array)):
+ parsed_array = getattr(np, option)(array)
+ else:
+ parsed_array = array
+
+ return parsed_array
+ return reducer
+
+
+def propagate_err(z, dz, option):
+ """Perform error propagation on a vector of complex uncertainties.
+
+ Required to get values for magnitude (abs) and phase (angle)
+ uncertainty.
+
+ Parameters
+ ----------
+ z : array-like
+ Array of complex or real numbers.
+ dz : array-like
+ Array of uncertainties corresponding to `z`. Must satisfy
+ ``numpy.shape(dz) == numpy.shape(z)``.
+ option : {'real', 'imag', 'abs', 'angle'}
+ How to convert the array `z` to an array with real numbers.
+
+ Returns
+ -------
+ numpy.array
+ Returned array will be purely real.
+
+ Notes
+ -----
+ Uncertainties are ``1/weights``. If the weights provided are real,
+ they are assumed to apply equally to the real and imaginary parts. If
+ the weights are complex, the real part of the weights is applied to
+ the real part of the residual and the imaginary part is treated
+ correspondingly.
+
+ In the case where ``option='angle'`` and ``numpy.abs(z) == 0`` for any
+ value of `z`, the phase angle uncertainty becomes the entire circle
+ and so a value of :math:`\pi` is returned.
+
+ In the case where ``option='abs'`` and ``numpy.abs(z) == 0`` for any
+ value of `z`, the magnitude uncertainty is approximated by
+ ``numpy.abs(dz)`` for that value.
+
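+ Examples
+ --------
+ A small illustration for magnitude uncertainties (the values are
+ arbitrary):
+
+ >>> import numpy as np
+ >>> z = np.array([3 + 4j])
+ >>> dz = np.array([0.1 + 0.2j])
+ >>> err = propagate_err(z, dz, 'abs')  # sqrt((im*d_im)**2 + (re*d_re)**2)/abs(z)
+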
+ """
+ if option not in ['real', 'imag', 'abs', 'angle']:
+ raise ValueError(f"Invalid option ('{option}') for function 'propagate_err'.")
+
+ if z.shape != dz.shape:
+ raise ValueError(f"shape of z: {z.shape} != shape of dz: {dz.shape}")
+
+ # Check the main vector for complex. Do nothing if real.
+ if any(np.iscomplex(z)):
+ # if uncertainties are real, apply them equally to
+ # real and imaginary parts
+ if all(np.isreal(dz)):
+ dz = dz+1j*dz
+
+ if option == 'real':
+ err = np.real(dz)
+ elif option == 'imag':
+ err = np.imag(dz)
+ elif option in ['abs', 'angle']:
+ rz = np.real(z)
+ iz = np.imag(z)
+
+ rdz = np.real(dz)
+ idz = np.imag(dz)
+
+ # Don't spit out warnings for divide by zero. Will fix these later.
+ with np.errstate(divide='ignore', invalid='ignore'):
+
+ if option == 'abs':
+ # Standard error propagation for abs = sqrt(re**2 + im**2)
+ err = np.true_divide(np.sqrt((iz*idz)**2+(rz*rdz)**2),
+ np.abs(z))
+
+ # For abs = 0, error is +/- abs(rdz + j idz)
+ err[err == np.inf] = np.abs(dz)[err == np.inf]
+
+ if option == 'angle':
+ # Standard error propagation for angle = arctan(im/re)
+ err = np.true_divide(np.sqrt((rz*idz)**2+(iz*rdz)**2),
+ np.abs(z)**2)
+
+ # For abs = 0, error is +/- pi (i.e. the whole circle)
+ err[err == np.inf] = np.pi
+ else:
+ err = dz
+
+ return err
+
+
+class Model:
+ """Create a model from a user-supplied model function."""
+
+ _forbidden_args = ('data', 'weights', 'params')
+ _invalid_ivar = "Invalid independent variable name ('%s') for function %s"
+ _invalid_par = "Invalid parameter name ('%s') for function %s"
+ _invalid_hint = "unknown parameter hint '%s' for param '%s'"
+ _hint_names = ('value', 'vary', 'min', 'max', 'expr')
+ valid_forms = ()
+
+ def __init__(self, func, independent_vars=None, param_names=None,
+ nan_policy='raise', prefix='', name=None, **kws):
+ """
+ The model function will normally take an independent variable
+ (generally, the first argument) and a series of arguments that are
+ meant to be parameters for the model. It will return an array of
+ values that model the data, as in a curve-fitting problem.
+
+ Parameters
+ ----------
+ func : callable
+ Function to be wrapped.
+ independent_vars : :obj:`list` of :obj:`str`, optional
+ Arguments to `func` that are independent variables (default is
+ None).
+ param_names : :obj:`list` of :obj:`str`, optional
+ Names of arguments to `func` that are to be made into
+ parameters (default is None).
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ How to handle NaN and missing values in data. See Notes below.
+ prefix : str, optional
+ Prefix used for the model.
+ name : str, optional
+ Name for the model. When None (default) the name is the same
+ as the model function (`func`).
+ **kws : dict, optional
+ Additional keyword arguments to pass to model function.
+
+ Notes
+ -----
+ 1. Parameter names are inferred from the function arguments, and a
+ residual function is automatically constructed.
+
+ 2. The model function must return an array that will be the same
+ size as the data being modeled.
+
+ 3. `nan_policy` sets what to do when a NaN or missing value is
+ seen in the data. Should be one of:
+
+ - `'raise'` : raise a `ValueError` (default)
+ - `'propagate'` : do nothing
+ - `'omit'` : drop missing data
+
+ Examples
+ --------
+ The model function will normally take an independent variable
+ (generally, the first argument) and a series of arguments that are
+ meant to be parameters for the model. Thus, a simple peak using a
+ Gaussian defined as:
+
+ >>> import numpy as np
+ >>> def gaussian(x, amp, cen, wid):
+ ... return amp * np.exp(-(x-cen)**2 / wid)
+
+ can be turned into a Model with:
+
+ >>> gmodel = Model(gaussian)
+
+ this will automatically discover the names of the independent
+ variables and parameters:
+
+ >>> print(gmodel.param_names, gmodel.independent_vars)
+ ['amp', 'cen', 'wid'], ['x']
+
+ """
+ self.func = func
+ if not isinstance(prefix, str):
+ prefix = ''
+ if len(prefix) > 0 and not valid_symbol_name(prefix):
+ raise ValueError(f"'{prefix}' is not a valid Model prefix")
+ self._prefix = prefix
+
+ self._param_root_names = param_names # will not include prefixes
+ self.independent_vars = independent_vars
+ self._func_allargs = []
+ self._func_haskeywords = False
+ self.nan_policy = nan_policy
+
+ self.opts = kws
+ # the following has been changed from OrderedSet for the time being
+ self.param_hints = {}
+ self._param_names = []
+ self._parse_params()
+ if self.independent_vars is None:
+ self.independent_vars = []
+ if name is None and hasattr(self.func, '__name__'):
+ name = self.func.__name__
+ self._name = name
+
+ def _reprstring(self, long=False):
+ out = self._name
+ opts = []
+ if len(self._prefix) > 0:
+ opts.append(f"prefix='{self._prefix}'")
+ if long:
+ for k, v in self.opts.items():
+ opts.append(f"{k}='{v}'")
+ if len(opts) > 0:
+ out = f"{out}, {', '.join(opts)}"
+ return f"Model({out})"
+
+ def _get_state(self):
+ """Save a Model for serialization.
+
+ Note: like the standard-ish '__getstate__' method but not really
+ useful with Pickle.
+
+ """
+ funcdef = None
+ if HAS_DILL:
+ funcdef = self.func
+ if self.func.__name__ == '_eval':
+ funcdef = self.expr
+ state = (self.func.__name__, funcdef, self._name, self._prefix,
+ self.independent_vars, self._param_root_names,
+ self.param_hints, self.nan_policy, self.opts)
+ return (state, None, None)
+
+ def _set_state(self, state, funcdefs=None):
+ """Restore Model from serialization.
+
+ Note: like the standard-ish '__setstate__' method but not really
+ useful with Pickle.
+
+ Parameters
+ ----------
+ state
+ Serialized state from `_get_state`.
+ funcdefs : dict, optional
+ Dictionary of function definitions to use to construct Model.
+
+ """
+ return _buildmodel(state, funcdefs=funcdefs)
+
+ def dumps(self, **kws):
+ """Dump serialization of Model as a JSON string.
+
+ Parameters
+ ----------
+ **kws : optional
+ Keyword arguments that are passed to `json.dumps`.
+
+ Returns
+ -------
+ str
+ JSON string representation of Model.
+
+ See Also
+ --------
+ loads, json.dumps
+
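+ Examples
+ --------
+ A minimal sketch (assuming ``mymodel`` is an existing `Model`
+ instance):
+
+ >>> json_string = mymodel.dumps()
+ >>> same_model = mymodel.loads(json_string)
+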
+ """
+ return json.dumps(encode4js(self._get_state()), **kws)
+
+ def dump(self, fp, **kws):
+ """Dump serialization of Model to a file.
+
+ Parameters
+ ----------
+ fp : file-like object
+ An open and `.write()`-supporting file-like object.
+ **kws : optional
+ Keyword arguments that are passed to `json.dumps`.
+
+ Returns
+ -------
+ int
+ Return value from `fp.write()`: the number of characters
+ written.
+
+ See Also
+ --------
+ dumps, load, json.dump
+
+ """
+ return fp.write(self.dumps(**kws))
+
+ def loads(self, s, funcdefs=None, **kws):
+ """Load Model from a JSON string.
+
+ Parameters
+ ----------
+ s : str
+ Input JSON string containing serialized Model.
+ funcdefs : dict, optional
+ Dictionary of function definitions to use to construct Model.
+ **kws : optional
+ Keyword arguments that are passed to `json.loads`.
+
+ Returns
+ -------
+ Model
+ Model created from JSON string.
+
+ See Also
+ --------
+ dump, dumps, load, json.loads
+
+ """
+ tmp = decode4js(json.loads(s, **kws))
+ return self._set_state(tmp, funcdefs=funcdefs)
+
+ def load(self, fp, funcdefs=None, **kws):
+ """Load JSON representation of Model from a file-like object.
+
+ Parameters
+ ----------
+ fp : file-like object
+ An open and `.read()`-supporting file-like object.
+ funcdefs : dict, optional
+ Dictionary of function definitions to use to construct Model.
+ **kws : optional
+ Keyword arguments that are passed to `loads`.
+
+ Returns
+ -------
+ Model
+ Model created from `fp`.
+
+ See Also
+ --------
+ dump, loads, json.load
+
+ """
+ return self.loads(fp.read(), funcdefs=funcdefs, **kws)
+
+ @property
+ def name(self):
+ """Return Model name."""
+ return self._reprstring(long=False)
+
+ @name.setter
+ def name(self, value):
+ self._name = value
+
+ @property
+ def prefix(self):
+ """Return Model prefix."""
+ return self._prefix
+
+ @prefix.setter
+ def prefix(self, value):
+ """Change Model prefix."""
+ self._prefix = value
+ self._set_paramhints_prefix()
+ self._param_names = []
+ self._parse_params()
+
+ def _set_paramhints_prefix(self):
+ """Reset parameter hints for prefix: intended to be overwritten."""
+
+ @property
+ def param_names(self):
+ """Return the parameter names of the Model."""
+ return self._param_names
+
+ def __repr__(self):
+ """Return representation of Model."""
+ return f"<lmfit.Model: {self.name}>"
+
+ def copy(self, **kwargs):
+ """DOES NOT WORK."""
+ raise NotImplementedError("Model.copy does not work. Make a new Model")
+
+ def _parse_params(self):
+ """Build parameters from function arguments."""
+ if self.func is None:
+ return
+ kw_args = {}
+ keywords_ = None
+ # need to fetch the following from the function signature:
+ # pos_args: list of positional argument names
+ # kw_args: dict of keyword arguments with default values
+ # keywords_: name of **kws argument or None
+ # 1. limited support for asteval functions as the model functions:
+ if hasattr(self.func, 'argnames') and hasattr(self.func, 'kwargs'):
+ pos_args = self.func.argnames[:]
+ for name, defval in self.func.kwargs:
+ kw_args[name] = defval
+ # 2. modern, best-practice approach: use inspect.signature
+ else:
+ pos_args = []
+ sig = inspect.signature(self.func)
+ for fnam, fpar in sig.parameters.items():
+ if fpar.kind == fpar.VAR_KEYWORD:
+ keywords_ = fnam
+ elif fpar.kind == fpar.POSITIONAL_OR_KEYWORD:
+ if fpar.default == fpar.empty:
+ pos_args.append(fnam)
+ else:
+ kw_args[fnam] = fpar.default
+ elif fpar.kind == fpar.VAR_POSITIONAL:
+ raise ValueError(f"varargs '*{fnam}' is not supported")
+ # inspection done
+
+ self._func_haskeywords = keywords_ is not None
+ self._func_allargs = pos_args + list(kw_args.keys())
+ allargs = self._func_allargs
+
+ if len(allargs) == 0 and keywords_ is not None:
+ return
+
+ # default independent_var = 1st argument
+ if self.independent_vars is None:
+ self.independent_vars = [pos_args[0]]
+
+ # default param names: all positional args
+ # except independent variables
+ self.def_vals = {}
+ might_be_param = []
+ if self._param_root_names is None:
+ self._param_root_names = pos_args[:]
+ for key, val in kw_args.items():
+ if (not isinstance(val, bool) and
+ isinstance(val, (float, int))):
+ self._param_root_names.append(key)
+ self.def_vals[key] = val
+ elif val is None:
+ might_be_param.append(key)
+ for p in self.independent_vars:
+ if p in self._param_root_names:
+ self._param_root_names.remove(p)
+
+ new_opts = {}
+ for opt, val in self.opts.items():
+ if ((opt in self._param_root_names or opt in might_be_param)
+ and isinstance(val, Parameter)):
+ self.set_param_hint(opt, value=val.value,
+ min=val.min, max=val.max, expr=val.expr)
+ elif opt in self._func_allargs:
+ new_opts[opt] = val
+ self.opts = new_opts
+
+ if self._prefix is None:
+ self._prefix = ''
+ names = [f"{self._prefix}{pname}" for pname in self._param_root_names]
+ # check variable names for validity
+ # The implicit magic in fit() requires us to disallow some
+ fname = self.func.__name__
+ for arg in self.independent_vars:
+ if arg not in allargs or arg in self._forbidden_args:
+ raise ValueError(self._invalid_ivar % (arg, fname))
+ for arg in names:
+ if (self._strip_prefix(arg) not in allargs or
+ arg in self._forbidden_args):
+ raise ValueError(self._invalid_par % (arg, fname))
+ # the following has been changed from OrderedSet for the time being.
+ self._param_names = names[:]
+
+ def set_param_hint(self, name, **kwargs):
+ """Set *hints* to use when creating parameters with `make_params()`.
+
+ The given hint can include optional bounds and constraints
+ ``(value, vary, min, max, expr)``, which will be used by
+ `Model.make_params()` when building default parameters.
+
+ While this can be used to set initial values, `Model.make_params` or
+ the function `create_params` should be preferred for creating
+ parameters with initial values.
+
+ The intended use here is to control how a Model should create
+ parameters, such as setting bounds that are required by the mathematics
+ of the model (for example, that a peak width cannot be negative), or to
+ define common constrained parameters.
+
+ Parameters
+ ----------
+ name : str
+ Parameter name, can include the model's `prefix` or not.
+ **kwargs : optional
+ Arbitrary keyword arguments, needs to be a Parameter attribute.
+ Can be any of the following:
+
+ - value : float, optional
+ Numerical Parameter value.
+ - vary : bool, optional
+ Whether the Parameter is varied during a fit (default is
+ True).
+ - min : float, optional
+ Lower bound for value (default is ``-numpy.inf``, no lower
+ bound).
+ - max : float, optional
+ Upper bound for value (default is ``numpy.inf``, no upper
+ bound).
+ - expr : str, optional
+ Mathematical expression used to constrain the value during
+ the fit.
+
+ Examples
+ --------
+ >>> model = GaussianModel()
+ >>> model.set_param_hint('sigma', min=0)
+
+ """
+ npref = len(self._prefix)
+ if npref > 0 and name.startswith(self._prefix):
+ name = name[npref:]
+
+ if name not in self.param_hints:
+ self.param_hints[name] = {}
+
+ for key, val in kwargs.items():
+ if key in self._hint_names:
+ self.param_hints[name][key] = val
+ else:
+ warnings.warn(self._invalid_hint % (key, name))
+
+ def print_param_hints(self, colwidth=8):
+ """Print a nicely aligned text-table of parameter hints.
+
+ Parameters
+ ----------
+ colwidth : int, optional
+ Width of each column, except for first and last columns.
+
+ """
+ name_len = max(len(s) for s in self.param_hints)
+ print('{:{name_len}} {:>{n}} {:>{n}} {:>{n}} {:>{n}} {:{n}}'
+ .format('Name', 'Value', 'Min', 'Max', 'Vary', 'Expr',
+ name_len=name_len, n=colwidth))
+ line = ('{name:<{name_len}} {value:{n}g} {min:{n}g} {max:{n}g} '
+ '{vary!s:>{n}} {expr}')
+ for name, values in sorted(self.param_hints.items()):
+ pvalues = dict(name=name, value=np.nan, min=-np.inf, max=np.inf,
+ vary=True, expr='')
+ pvalues.update(**values)
+ print(line.format(name_len=name_len, n=colwidth, **pvalues))
+
+ def make_params(self, verbose=False, **kwargs):
+ """Create a Parameters object for a Model.
+
+ Parameters
+ ----------
+ verbose : bool, optional
+ Whether to print out messages (default is False).
+ **kwargs : optional
+ Parameter names and initial values or dictionaries of
+ values and attributes.
+
+ Returns
+ -------
+ params : Parameters
+ Parameters object for the Model.
+
+ Notes
+ -----
+ 1. Parameter values can be numbers (floats or ints) to set the parameter
+ value, or dictionaries with any of the following keywords:
+ ``value``, ``vary``, ``min``, ``max``, ``expr``, ``brute_step``,
+ ``is_init_value`` to set those parameter attributes.
+
+ 2. This method will also apply any default values or parameter hints
+ that may have been defined for the model.
+
+ Examples
+ --------
+ >>> gmodel = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+ >>> gmodel.make_params(peak_center=3200, bkg_offset=0, bkg_slope=0,
+ ... peak_amplitude=dict(value=100, min=2),
+ ... peak_sigma=dict(value=25, min=0, max=1000))
+
+ """
+ params = Parameters()
+
+ def setpar(par, val):
+ # val is expected to be float-like or a dict: must have 'value' or 'expr' key
+ if isinstance(val, dict):
+ dval = val
+ else:
+ dval = {'value': float(val)}
+ if len(dval) < 1 or not ('value' in dval or 'expr' in dval):
+ raise TypeError(f'Invalid parameter value for {par}: {val}')
+
+ par.set(**dval)
+
+ # make sure that all named parameters are in params
+ for name in self.param_names:
+ if name in params:
+ par = params[name]
+ else:
+ par = Parameter(name=name)
+ par._delay_asteval = True
+ basename = name[len(self._prefix):]
+ # apply defaults from model function definition
+ if basename in self.def_vals:
+ par.value = self.def_vals[basename]
+ if par.value in (None, -np.inf, np.inf, np.nan):
+ for key, val in self.def_vals.items():
+ if key in name.lower():
+ par.value = val
+ # apply defaults from parameter hints
+ if basename in self.param_hints:
+ hint = self.param_hints[basename]
+ for item in self._hint_names:
+ if item in hint:
+ setattr(par, item, hint[item])
+ # apply values passed in through kw args
+ if basename in kwargs:
+ setpar(par, kwargs[basename])
+ if name in kwargs:
+ setpar(par, kwargs[name])
+ params.add(par)
+ if verbose:
+ print(f' - Adding parameter "{name}"')
+
+ # next build parameters defined in param_hints
+ # note that composites may define their own additional
+ # convenience parameters here
+ for basename, hint in self.param_hints.items():
+ name = f"{self._prefix}{basename}"
+ if name in params:
+ par = params[name]
+ else:
+ par = Parameter(name=name)
+ params.add(par)
+ if verbose:
+ print(f' - Adding parameter for hint "{name}"')
+ par._delay_asteval = True
+ for item in self._hint_names:
+ if item in hint:
+ setattr(par, item, hint[item])
+ if basename in kwargs:
+ setpar(par, kwargs[basename])
+ # Add the new parameter to self._param_names
+ if name not in self._param_names:
+ self._param_names.append(name)
+
+ for p in params.values():
+ p._delay_asteval = False
+ return params
+
+ def guess(self, data, x, **kws):
+ """Guess starting values for the parameters of a Model.
+
+ This is not implemented for all models, but is available for many
+ of the built-in models.
+
+ Parameters
+ ----------
+ data : array_like
+ Array of data (i.e., y-values) to use to guess parameter values.
+ x : array_like
+ Array of values for the independent variable (i.e., x-values).
+ **kws : optional
+ Additional keyword arguments, passed to model function.
+
+ Returns
+ -------
+ Parameters
+ Initial, guessed values for the parameters of a Model.
+
+ Raises
+ ------
+ NotImplementedError
+ If the `guess` method is not implemented for a Model.
+
+ Notes
+ -----
+ Should be implemented for each model subclass to run
+ `self.make_params()`, update starting values and return a
+ Parameters object.
+
+ .. versionchanged:: 1.0.3
+ Argument ``x`` is now explicitly required to estimate starting values.
+
+ """
+ cname = self.__class__.__name__
+ msg = f'guess() not implemented for {cname}'
+ raise NotImplementedError(msg)
+
+ def _residual(self, params, data, weights, **kwargs):
+ """Return the residual.
+
+ Default residual: ``(data-model)*weights``.
+
+ If the model returns complex values, the residual is computed by
+ treating the real and imaginary parts separately. In this case, if
+ the weights provided are real, they are assumed to apply equally
+ to the real and imaginary parts. If the weights are complex, the
+ real part of the weights are applied to the real part of the
+ residual and the imaginary part is treated correspondingly.
+
+ Since the underlying `scipy.optimize` routines expect
+ ``numpy.float64`` arrays, the only complex type supported is
+ ``complex``.
+
+ The "ravels" throughout are necessary to support `pandas.Series`.
+
+ """
+ model = self.eval(params, **kwargs)
+ if self.nan_policy == 'raise' and not np.all(np.isfinite(model)):
+ msg = ('The model function generated NaN values and the fit '
+ 'aborted! Please check your model function and/or set '
+ 'boundaries on parameters where applicable. In cases like '
+ 'this, using "nan_policy=\'omit\'" will probably not work.')
+ raise ValueError(msg)
+
+ diff = model - data
+
+ if diff.dtype == complex:
+ # data/model are complex
+ diff = diff.ravel().view(float)
+ if weights is not None:
+ if weights.dtype == complex:
+ # weights are complex
+ weights = weights.ravel().view(float)
+ else:
+ # real weights but complex data
+ weights = (weights + 1j * weights).ravel().view(float)
+ if weights is not None:
+ diff *= weights
+ return np.asarray(diff).ravel() # for compatibility with pandas.Series
+
+ def _strip_prefix(self, name):
+ npref = len(self._prefix)
+ if npref > 0 and name.startswith(self._prefix):
+ name = name[npref:]
+ return name
+
+ def make_funcargs(self, params=None, kwargs=None, strip=True):
+ """Convert parameter values and keywords to function arguments."""
+ if params is None:
+ params = {}
+ if kwargs is None:
+ kwargs = {}
+ out = {}
+ out.update(self.opts)
+
+ # 0: if a keyword argument is going to overwrite a parameter,
+ # save that value so it can be restored before returning
+ saved_values = {}
+ for name, val in kwargs.items():
+ if name in params:
+ saved_values[name] = params[name].value
+ params[name].value = val
+
+ if len(saved_values) > 0:
+ params.update_constraints()
+
+ # 1. fill in all parameter values
+ for name, par in params.items():
+ if strip:
+ name = self._strip_prefix(name)
+ if name in self._func_allargs or self._func_haskeywords:
+ out[name] = par.value
+
+ # 2. for each function argument, use 'prefix+varname' in params,
+ # avoiding possible name collisions with unprefixed params
+ if len(self._prefix) > 0:
+ for fullname in self._param_names:
+ if fullname in params:
+ name = self._strip_prefix(fullname)
+ if name in self._func_allargs or self._func_haskeywords:
+ out[name] = params[fullname].value
+
+ # 3. kwargs might directly update function arguments
+ for name, val in kwargs.items():
+ if strip:
+ name = self._strip_prefix(name)
+ if name in self._func_allargs or self._func_haskeywords:
+ out[name] = val
+
+ # 4. finally, reset any values that have overwritten parameter values
+ for name, val in saved_values.items():
+ params[name].value = val
+ return out
+
+ def _make_all_args(self, params=None, **kwargs):
+ """Generate **all** function args for all functions."""
+ args = {}
+ for key, val in self.make_funcargs(params, kwargs).items():
+ args[f"{self._prefix}{key}"] = val
+ return args
+
+ def eval(self, params=None, **kwargs):
+ """Evaluate the model with supplied parameters and keyword arguments.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters to use in Model.
+ **kwargs : optional
+ Additional keyword arguments to pass to model function.
+
+ Returns
+ -------
+ numpy.ndarray, float, int or complex
+ Value of model given the parameters and other arguments.
+
+ Notes
+ -----
+ 1. if `params` is None, the values for all parameters are expected
+ to be provided as keyword arguments.
+
+ 2. If `params` is given, and a keyword argument for a parameter value
+ is also given, the keyword argument will be used in place of the
+ value in `params`.
+
+ 3. all non-parameter arguments for the model function, **including
+ all the independent variables** will need to be passed in using
+ keyword arguments.
+
+ 4. The return types are generally `numpy.ndarray`, but may depend on
+ the model function and input independent variables. That is, return
+ values may be Python `float`, `int`, or `complex` values.
+
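+ Examples
+ --------
+ A minimal sketch, using the ``gaussian`` function from the `Model`
+ class docstring:
+
+ >>> import numpy as np
+ >>> gmodel = Model(gaussian)
+ >>> params = gmodel.make_params(amp=5, cen=5, wid=1)
+ >>> y = gmodel.eval(params, x=np.linspace(0, 10, 201))
+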
+ """
+ return self.func(**self.make_funcargs(params, kwargs))
+
+ @property
+ def components(self):
+ """Return components for composite model."""
+ return [self]
+
+ def eval_components(self, params=None, **kwargs):
+ """Evaluate the model with the supplied parameters.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters to use in Model.
+ **kwargs : optional
+ Additional keyword arguments to pass to model function.
+
+ Returns
+ -------
+ dict
+ Keys are prefixes for component model, values are value of
+ each component.
+
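+ Examples
+ --------
+ A minimal sketch for a composite model (assuming ``params`` and the
+ independent variable ``x`` are defined):
+
+ >>> mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+ >>> comps = mod.eval_components(params=params, x=x)
+ >>> peak = comps['peak_']
+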
+ """
+ key = self._prefix
+ if len(key) < 1:
+ key = self._name
+ return {key: self.eval(params=params, **kwargs)}
+
+ def fit(self, data, params=None, weights=None, method='leastsq',
+ iter_cb=None, scale_covar=True, verbose=False, fit_kws=None,
+ nan_policy=None, calc_covar=True, max_nfev=None, **kwargs):
+ """Fit the model to the data using the supplied Parameters.
+
+ Parameters
+ ----------
+ data : array_like
+ Array of data to be fit.
+ params : Parameters, optional
+ Parameters to use in fit (default is None).
+ weights : array_like, optional
+ Weights to use for the calculation of the fit residual [i.e.,
+ `weights*(data-fit)`]. Default is None; must have the same size as
+ `data`.
+ method : str, optional
+ Name of fitting method to use (default is `'leastsq'`).
+ iter_cb : callable, optional
+ Callback function to call at each iteration (default is None).
+ scale_covar : bool, optional
+ Whether to automatically scale the covariance matrix when
+ calculating uncertainties (default is True).
+ verbose : bool, optional
+ Whether to print a message when a new parameter is added
+ because of a hint (default is False).
+ fit_kws : dict, optional
+ Options to pass to the minimizer being used.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ What to do when encountering NaNs when fitting Model.
+ calc_covar : bool, optional
+ Whether to calculate the covariance matrix (default is True)
+ for solvers other than `'leastsq'` and `'least_squares'`.
+ Requires the ``numdifftools`` package to be installed.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
+ **kwargs : optional
+ Arguments to pass to the model function, possibly overriding
+ parameters.
+
+ Returns
+ -------
+ ModelResult
+ Object containing the optimized parameters, goodness-of-fit
+ statistics, and methods for evaluating and plotting the results.
+
+ Notes
+ -----
+ 1. if `params` is None, the values for all parameters are expected
+ to be provided as keyword arguments. Mixing `params` and
+ keyword arguments is deprecated (see `Model.eval`).
+
+ 2. all non-parameter arguments for the model function, **including
+ all the independent variables** will need to be passed in using
+ keyword arguments.
+
+ 3. Parameters are copied on input, so that the original Parameter objects
+ are unchanged, and the updated values are in the returned `ModelResult`.
+
+ Examples
+ --------
+ Take ``t`` to be the independent variable and data to be the curve
+ we will fit. Use keyword arguments to set initial guesses:
+
+ >>> result = my_model.fit(data, tau=5, N=3, t=t)
+
+ Or, for more control, pass a Parameters object.
+
+ >>> result = my_model.fit(data, params, t=t)
+
+ """
+ if params is None:
+ params = self.make_params(verbose=verbose)
+ else:
+ params = deepcopy(params)
+
+ # If any kwargs match parameter names, override params.
+ param_kwargs = set(kwargs.keys()) & set(self.param_names)
+ for name in param_kwargs:
+ p = kwargs[name]
+ if isinstance(p, Parameter):
+ p.name = name # allows N=Parameter(value=5) with implicit name
+ params[name] = deepcopy(p)
+ else:
+ params[name].set(value=p)
+ del kwargs[name]
+
+ # All remaining kwargs should correspond to independent variables.
+ for name in kwargs:
+ if name not in self.independent_vars:
+ warnings.warn(f"The keyword argument {name} does not " +
+ "match any arguments of the model function. " +
+ "It will be ignored.", UserWarning)
+
+ # If any parameter is not initialized raise a more helpful error.
+ missing_param = any(p not in params.keys() for p in self.param_names)
+ blank_param = any((p.value is None and p.expr is None)
+ for p in params.values())
+ if missing_param or blank_param:
+ msg = ('Assign each parameter an initial value by passing '
+ 'Parameters or keyword arguments to fit.\n')
+ missing = [p for p in self.param_names if p not in params.keys()]
+ blank = [name for name, p in params.items()
+ if p.value is None and p.expr is None]
+ msg += f'Missing parameters: {str(missing)}\n'
+ msg += f'Non-initialized parameters: {str(blank)}'
+ raise ValueError(msg)
+
+ # Handle null/missing values.
+ if nan_policy is not None:
+ self.nan_policy = nan_policy
+
+ mask = None
+ if self.nan_policy == 'omit':
+ mask = ~isnull(data)
+ if mask is not None:
+ data = data[mask]
+ if weights is not None:
+ weights = _align(weights, mask, data)
+
+ # If independent_vars and data are alignable (pandas), align them,
+ # and apply the mask from above if there is one.
+ for var in self.independent_vars:
+ if not np.isscalar(kwargs[var]):
+ kwargs[var] = _align(kwargs[var], mask, data)
+
+ def coerce_arraylike(x):
+ """Coerce lists, tuples, and pandas Series to float64 or
+ complex128, leaving ndarrays of other dtypes and other objects
+ unchanged."""
+ if isinstance(x, (list, tuple, Series)):
+ if np.isrealobj(x):
+ return np.asfarray(x)
+ elif np.iscomplexobj(x):
+ return np.asarray(x, dtype='complex128')
+ return x
+
+ # coerce data and independent variable(s) that are 'array-like' (list,
+ # tuples, pandas Series) to float64/complex128. Note: this will not
+ # alter the dtype of data or independent variables that are already
+ # ndarrays but with dtype other than float64/complex128.
+ data = coerce_arraylike(data)
+ for var in self.independent_vars:
+ kwargs[var] = coerce_arraylike(kwargs[var])
+
+ if fit_kws is None:
+ fit_kws = {}
+
+ output = ModelResult(self, params, method=method, iter_cb=iter_cb,
+ scale_covar=scale_covar, fcn_kws=kwargs,
+ nan_policy=self.nan_policy, calc_covar=calc_covar,
+ max_nfev=max_nfev, **fit_kws)
+ output.fit(data=data, weights=weights)
+ output.components = self.components
+ return output
+
+ def __add__(self, other):
+ """+"""
+ return CompositeModel(self, other, operator.add)
+
+ def __sub__(self, other):
+ """-"""
+ return CompositeModel(self, other, operator.sub)
+
+ def __mul__(self, other):
+ """*"""
+ return CompositeModel(self, other, operator.mul)
+
+ def __truediv__(self, other):
+ """/"""
+ return CompositeModel(self, other, operator.truediv)
+
+
+class CompositeModel(Model):
+ """Combine two models (`left` and `right`) with binary operator (`op`).
+
+ Normally, one does not have to explicitly create a `CompositeModel`,
+ but can use normal Python operators ``+``, ``-``, ``*``, and ``/`` to
+ combine components as in::
+
+ >>> mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
+
+ """
+
+ _known_ops = {operator.add: '+', operator.sub: '-',
+ operator.mul: '*', operator.truediv: '/'}
+
+ def __init__(self, left, right, op, **kws):
+ """
+ Parameters
+ ----------
+ left : Model
+ Left-hand model.
+ right : Model
+ Right-hand model.
+ op : callable binary operator
+ Operator to combine `left` and `right` models.
+ **kws : optional
+ Additional keywords are passed to `Model` when creating this
+ new model.
+
+ Notes
+ -----
+ The two models can use different independent variables.
+
+ """
+ if not isinstance(left, Model):
+ raise ValueError(f'CompositeModel: argument {left} is not a Model')
+ if not isinstance(right, Model):
+ raise ValueError(f'CompositeModel: argument {right} is not a Model')
+ if not callable(op):
+ raise ValueError(f'CompositeModel: operator {op} is not callable')
+
+ self.left = left
+ self.right = right
+ self.op = op
+
+ name_collisions = set(left.param_names) & set(right.param_names)
+ if len(name_collisions) > 0:
+ msg = ''
+ for collision in name_collisions:
+ msg += (f"\nTwo models have parameters named '{collision}'; "
+ "use distinct names.")
+ raise NameError(msg)
+
+ # the unique ``independent_vars`` of the left and right model are
+ # combined to ``independent_vars`` of the ``CompositeModel``
+ if 'independent_vars' not in kws:
+ ivars = self.left.independent_vars + self.right.independent_vars
+ kws['independent_vars'] = list(np.unique(ivars))
+ if 'nan_policy' not in kws:
+ kws['nan_policy'] = self.left.nan_policy
+
+ def _tmp(self, *args, **kws):
+ pass
+ Model.__init__(self, _tmp, **kws)
+
+ for side in (left, right):
+ prefix = side.prefix
+ for basename, hint in side.param_hints.items():
+ self.param_hints[f"{prefix}{basename}"] = hint
+
+ def _parse_params(self):
+ self._func_haskeywords = (self.left._func_haskeywords or
+ self.right._func_haskeywords)
+ self._func_allargs = (self.left._func_allargs +
+ self.right._func_allargs)
+ self.def_vals = deepcopy(self.right.def_vals)
+ self.def_vals.update(self.left.def_vals)
+ self.opts = deepcopy(self.right.opts)
+ self.opts.update(self.left.opts)
+
+ def _reprstring(self, long=False):
+ return (f"({self.left._reprstring(long=long)} "
+ f"{self._known_ops.get(self.op, self.op)} "
+ f"{self.right._reprstring(long=long)})")
+
+ def eval(self, params=None, **kwargs):
+ """Evaluate model function for composite model."""
+ return self.op(self.left.eval(params=params, **kwargs),
+ self.right.eval(params=params, **kwargs))
+
+ def eval_components(self, **kwargs):
+ """Return dictionary of name, results for each component."""
+ out = dict(self.left.eval_components(**kwargs))
+ out.update(self.right.eval_components(**kwargs))
+ return out
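+ # Editorial sketch: for a composite of prefixed models, the returned
+ # dict is keyed by each component's prefix (the model and parameter
+ # names here are illustrative).
+ #
+ # >>> mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+ # >>> comps = mod.eval_components(params=params, x=x)
+ # >>> sorted(comps)
+ # ['bkg_', 'peak_']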
+
+ @property
+ def param_names(self):
+ """Return parameter names for composite model."""
+ return self.left.param_names + self.right.param_names
+
+ @property
+ def components(self):
+ """Return components for composite model."""
+ return self.left.components + self.right.components
+
+ def _get_state(self):
+ return (self.left._get_state(),
+ self.right._get_state(), self.op.__name__)
+
+ def _set_state(self, state, funcdefs=None):
+ return _buildmodel(state, funcdefs=funcdefs)
+
+ def _make_all_args(self, params=None, **kwargs):
+ """Generate **all** function arguments for all functions."""
+ out = self.right._make_all_args(params=params, **kwargs)
+ out.update(self.left._make_all_args(params=params, **kwargs))
+ return out
+
+
+def save_model(model, fname):
+ """Save a Model to a file.
+
+ Parameters
+ ----------
+ model : Model
+ Model to be saved.
+ fname : str
+ Name of file for saved Model.
+
+ """
+ with open(fname, 'w') as fout:
+ model.dump(fout)
+
+
+def load_model(fname, funcdefs=None):
+ """Load a saved Model from a file.
+
+ Parameters
+ ----------
+ fname : str
+ Name of file containing saved Model.
+ funcdefs : dict, optional
+ Dictionary of custom function names and definitions.
+
+ Returns
+ -------
+ Model
+ Model object loaded from file.
+
+ """
+ m = Model(lambda x: x)
+ with open(fname) as fh:
+ model = m.load(fh, funcdefs=funcdefs)
+ return model
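+# Editorial sketch of a save/load round trip; the filename is arbitrary:
+#
+# >>> from lmfit.models import GaussianModel
+# >>> save_model(GaussianModel(), 'gauss_model.sav')
+# >>> model = load_model('gauss_model.sav')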
+
+
+def _buildmodel(state, funcdefs=None):
+ """Build Model from saved state.
+
+ Intended for internal use only.
+
+ """
+ if len(state) != 3:
+ raise ValueError("Cannot restore Model")
+ known_funcs = {}
+ for fname in lineshapes.functions:
+ fcn = getattr(lineshapes, fname, None)
+ if callable(fcn):
+ known_funcs[fname] = fcn
+ if funcdefs is not None:
+ known_funcs.update(funcdefs)
+
+ left, right, op = state
+ if op is None and right is None:
+ (fname, fcndef, name, prefix, ivars, pnames,
+ phints, nan_policy, opts) = left
+ if not callable(fcndef) and fname in known_funcs:
+ fcndef = known_funcs[fname]
+
+ if fcndef is None:
+ raise ValueError("Cannot restore Model: model function not found")
+
+ if fname == '_eval' and isinstance(fcndef, str):
+ from .models import ExpressionModel
+ model = ExpressionModel(fcndef, name=name,
+ independent_vars=ivars,
+ param_names=pnames,
+ nan_policy=nan_policy, **opts)
+
+ else:
+ model = Model(fcndef, name=name, prefix=prefix,
+ independent_vars=ivars, param_names=pnames,
+ nan_policy=nan_policy, **opts)
+
+ for name, hint in phints.items():
+ model.set_param_hint(name, **hint)
+ return model
+ else:
+ lmodel = _buildmodel(left, funcdefs=funcdefs)
+ rmodel = _buildmodel(right, funcdefs=funcdefs)
+ return CompositeModel(lmodel, rmodel, getattr(operator, op))
+
+
+def save_modelresult(modelresult, fname):
+ """Save a ModelResult to a file.
+
+ Parameters
+ ----------
+ modelresult : ModelResult
+ ModelResult to be saved.
+ fname : str
+ Name of file for saved ModelResult.
+
+ """
+ with open(fname, 'w') as fout:
+ modelresult.dump(fout)
+
+
+def load_modelresult(fname, funcdefs=None):
+ """Load a saved ModelResult from a file.
+
+ Parameters
+ ----------
+ fname : str
+ Name of file containing saved ModelResult.
+ funcdefs : dict, optional
+ Dictionary of custom function names and definitions.
+
+ Returns
+ -------
+ ModelResult
+ ModelResult object loaded from file.
+
+ """
+ params = Parameters()
+ modres = ModelResult(Model(lambda x: x, None), params)
+ with open(fname) as fh:
+ mresult = modres.load(fh, funcdefs=funcdefs)
+ return mresult
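+# Editorial sketch of a ModelResult round trip (`y`, `params`, `x`, and
+# the filename are placeholders from an earlier fit):
+#
+# >>> from lmfit.models import GaussianModel
+# >>> result = GaussianModel().fit(y, params, x=x)
+# >>> save_modelresult(result, 'gauss_fit.sav')
+# >>> restored = load_modelresult('gauss_fit.sav')
+# >>> print(restored.fit_report())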
+
+
+class ModelResult(Minimizer):
+ """Result from the Model fit.
+
+ This has many attributes and methods for viewing and working with the
+ results of a fit using Model. It inherits from Minimizer, so that it
+ can be used to modify and re-run the fit for the Model.
+
+ """
+
+ def __init__(self, model, params, data=None, weights=None,
+ method='leastsq', fcn_args=None, fcn_kws=None,
+ iter_cb=None, scale_covar=True, nan_policy='raise',
+ calc_covar=True, max_nfev=None, **fit_kws):
+ """
+ Parameters
+ ----------
+ model : Model
+ Model to use.
+ params : Parameters
+ Parameters with initial values for model.
+ data : array_like, optional
+ Data to be modeled.
+ weights : array_like, optional
+ Weights to multiply ``(data-model)`` for fit residual.
+ method : str, optional
+ Name of minimization method to use (default is `'leastsq'`).
+ fcn_args : sequence, optional
+ Positional arguments to send to model function.
+ fcn_kws : dict, optional
+ Keyword arguments to send to model function.
+ iter_cb : callable, optional
+ Function to call on each iteration of fit.
+ scale_covar : bool, optional
+ Whether to scale covariance matrix for uncertainty evaluation.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ What to do when encountering NaNs when fitting Model.
+ calc_covar : bool, optional
+ Whether to calculate the covariance matrix (default is True)
+ for solvers other than `'leastsq'` and `'least_squares'`.
+ Requires the ``numdifftools`` package to be installed.
+ max_nfev : int or None, optional
+ Maximum number of function evaluations (default is None). The
+ default value depends on the fitting method.
+ **fit_kws : optional
+ Keyword arguments to send to minimization routine.
+
+ """
+ self.model = model
+ self.data = data
+ self.weights = weights
+ self.method = method
+ self.ci_out = None
+ self.user_options = None
+ self.init_params = deepcopy(params)
+ Minimizer.__init__(self, model._residual, params,
+ fcn_args=fcn_args, fcn_kws=fcn_kws,
+ iter_cb=iter_cb, nan_policy=nan_policy,
+ scale_covar=scale_covar, calc_covar=calc_covar,
+ max_nfev=max_nfev, **fit_kws)
+
+ def fit(self, data=None, params=None, weights=None, method=None,
+ nan_policy=None, **kwargs):
+ """Re-perform fit for a Model, given data and params.
+
+ Parameters
+ ----------
+ data : array_like, optional
+ Data to be modeled.
+ params : Parameters, optional
+ Parameters with initial values for model.
+ weights : array_like, optional
+ Weights to multiply ``(data-model)`` for fit residual.
+ method : str, optional
+ Name of minimization method to use (default is `'leastsq'`).
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ What to do when encountering NaNs when fitting Model.
+ **kwargs : optional
+ Keyword arguments to send to minimization routine.
+
+ """
+ if data is not None:
+ self.data = data
+ if params is not None:
+ self.init_params = params
+ if weights is not None:
+ self.weights = weights
+ if method is not None:
+ self.method = method
+ if nan_policy is not None:
+ self.nan_policy = nan_policy
+
+ self.ci_out = None
+ self.userargs = (self.data, self.weights)
+ self.userkws.update(kwargs)
+ self.init_fit = self.model.eval(params=self.params, **self.userkws)
+ _ret = self.minimize(method=self.method)
+
+ for attr in dir(_ret):
+ if not attr.startswith('_'):
+ try:
+ setattr(self, attr, getattr(_ret, attr))
+ except AttributeError:
+ pass
+
+ if self.data is not None and len(self.data) > 1:
+ sstot = ((self.data - self.data.mean())**2).sum()
+ if isinstance(self.residual, np.ndarray) and len(self.residual) > 1:
+ self.rsquared = 1.0 - (self.residual**2).sum()/max(tiny, sstot)
+
+ self.init_values = self.model._make_all_args(self.init_params)
+ self.best_values = self.model._make_all_args(_ret.params)
+ self.best_fit = self.model.eval(params=_ret.params, **self.userkws)
+
+ def eval(self, params=None, **kwargs):
+ """Evaluate model function.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters to use.
+ **kwargs : optional
+ Options to send to Model.eval().
+
+ Returns
+ -------
+ numpy.ndarray, float, int, or complex
+ Array or value for the evaluated model.
+
+ """
+ userkws = self.userkws.copy()
+ userkws.update(kwargs)
+ if params is None:
+ params = self.params
+ return self.model.eval(params=params, **userkws)
+
+ def eval_components(self, params=None, **kwargs):
+ """Evaluate each component of a composite model function.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters, defaults to ModelResult.params.
+ **kwargs : optional
+ Keyword arguments to pass to model function.
+
+ Returns
+ -------
+ dict
+ Keys are prefixes of component models, and values are the
+ estimated model value for each component of the model.
+
+ """
+ userkws = self.userkws.copy()
+ userkws.update(kwargs)
+ if params is None:
+ params = self.params
+ return self.model.eval_components(params=params, **userkws)
+
+ def eval_uncertainty(self, params=None, sigma=1, **kwargs):
+ """Evaluate the uncertainty of the *model function*.
+
+ This can be used to give confidence bands for the model from the
+ uncertainties in the best-fit parameters.
+
+ Parameters
+ ----------
+ params : Parameters, optional
+ Parameters, defaults to ModelResult.params.
+ sigma : float, optional
+ Confidence level, i.e. how many sigma (default is 1).
+ **kwargs : optional
+ Values of options, independent variables, etcetera.
+
+ Returns
+ -------
+ numpy.ndarray
+ Uncertainty at each value of the model.
+
+ Notes
+ -----
+ 1. This is based on the excellent and clear example from
+ https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#confidence-and-prediction-intervals,
+ which references the original work of:
+ J. Wolberg, Data Analysis Using the Method of Least Squares, 2006, Springer
+ 2. The value of `sigma` is the number of standard deviations, and
+ is converted to a probability. Values of 1, 2, or 3 give
+ probabilities of 0.6827, 0.9545, and 0.9973, respectively. If
+ the sigma value is < 1, it is interpreted as the probability
+ itself. That is, ``sigma=1`` and ``sigma=0.6827`` will give the
+ same results, within precision errors.
+ 3. Also sets the attributes `dely` (the uncertainty of the model,
+ identical to the array returned by this method) and `dely_comps`,
+ a dictionary of `dely` for each component.
+
+ Examples
+ --------
+
+ >>> out = model.fit(data, params, x=x)
+ >>> dely = out.eval_uncertainty(x=x)
+ >>> plt.plot(x, data)
+ >>> plt.plot(x, out.best_fit)
+ >>> plt.fill_between(x, out.best_fit-dely,
+ ... out.best_fit+dely, color='#888888')
+
+ """
+ userkws = self.userkws.copy()
+ userkws.update(kwargs)
+ if params is None:
+ params = self.params
+
+ nvarys = self.nvarys
+ # ensure fjac and df2 are correct size if independent var updated by kwargs
+ ndata = self.model.eval(params, **userkws).size
+ covar = self.covar
+ if any(p.stderr is None for p in params.values()):
+ return np.zeros(ndata)
+
+ fjac = {'0': np.zeros((nvarys, ndata))} # '0' signifies 'Full' (an invalid prefix)
+ df2 = {'0': np.zeros(ndata)}
+
+ for comp in self.components:
+ label = comp.prefix if len(comp.prefix) > 1 else comp._name
+ fjac[label] = np.zeros((nvarys, ndata))
+ df2[label] = np.zeros(ndata)
+
+ # find derivative by hand!
+ pars = params.copy()
+ for i in range(nvarys):
+ pname = self.var_names[i]
+ val0 = pars[pname].value
+ dval = pars[pname].stderr/3.0
+ pars[pname].value = val0 + dval
+ res1 = {'0': self.model.eval(pars, **userkws)}
+ res1.update(self.model.eval_components(params=pars, **userkws))
+
+ pars[pname].value = val0 - dval
+ res2 = {'0': self.model.eval(pars, **userkws)}
+ res2.update(self.model.eval_components(params=pars, **userkws))
+
+ pars[pname].value = val0
+ for key in fjac:
+ fjac[key][i] = (res1[key] - res2[key]) / (2*dval)
+
+ for i in range(nvarys):
+ for j in range(nvarys):
+ for key in fjac:
+ df2[key] += fjac[key][i] * fjac[key][j] * covar[i, j]
+
+ if sigma < 1.0:
+ prob = sigma
+ else:
+ prob = erf(sigma/np.sqrt(2))
+
+ scale = t.ppf((prob+1)/2.0, self.ndata-nvarys)
+ self.dely = scale * np.sqrt(df2.pop('0'))
+
+ self.dely_comps = {}
+ for key in df2:
+ self.dely_comps[key] = scale * np.sqrt(df2[key])
+ return self.dely
+
+ def conf_interval(self, **kwargs):
+ """Calculate the confidence intervals for the variable parameters.
+
+ Confidence intervals are calculated using the
+ :func:`confidence.conf_interval` function and keyword arguments
+ (`**kwargs`) are passed to that function. The result is stored in
+ the :attr:`ci_out` attribute so that it can be accessed without
+ recalculating them.
+
+ """
+ self.ci_out = conf_interval(self, self, **kwargs)
+ return self.ci_out
+
+ def ci_report(self, with_offset=True, ndigits=5, **kwargs):
+ """Return a formatted text report of the confidence intervals.
+
+ Parameters
+ ----------
+ with_offset : bool, optional
+ Whether to subtract best value from all other values (default
+ is True).
+ ndigits : int, optional
+ Number of significant digits to show (default is 5).
+ **kwargs : optional
+ Keyword arguments that are passed to the `conf_interval`
+ function.
+
+ Returns
+ -------
+ str
+ Text of formatted report on confidence intervals.
+
+ """
+ return ci_report(self.conf_interval(**kwargs),
+ with_offset=with_offset, ndigits=ndigits)
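+ # Editorial sketch: report 1- and 2-sigma confidence intervals for a
+ # completed fit (`result` is assumed from an earlier call to fit()):
+ #
+ # >>> print(result.ci_report(sigmas=[1, 2], ndigits=4))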
+
+ def fit_report(self, modelpars=None, show_correl=True,
+ min_correl=0.1, sort_pars=False, correl_mode='list'):
+ """Return a printable fit report.
+
+ The report contains fit statistics and best-fit values with
+ uncertainties and correlations.
+
+ Parameters
+ ----------
+ modelpars : Parameters, optional
+ Known Model Parameters.
+ show_correl : bool, optional
+ Whether to show list of sorted correlations (default is True).
+ min_correl : float, optional
+ Smallest correlation in absolute value to show (default is 0.1).
+ sort_pars : bool or callable, optional
+ Whether to show parameter names sorted in alphanumerical order
+ (default is False). If False, then the parameters will be
+ listed in the order as they were added to the Parameters
+ dictionary. If callable, then this (one argument) function is
+ used to extract a comparison key from each list element.
+ correl_mode : {'list', 'table'}, optional
+ Mode for how to show correlations. Can be either 'list' (default)
+ to show a sorted (if ``sort_pars`` is True) list of correlation
+ values, or 'table' to show a complete, formatted table of
+ correlations.
+
+ Returns
+ -------
+ str
+ Multi-line text of fit report.
+
+ """
+ report = fit_report(self, modelpars=modelpars, show_correl=show_correl,
+ min_correl=min_correl, sort_pars=sort_pars,
+ correl_mode=correl_mode)
+
+ modname = self.model._reprstring(long=True)
+ return f'[[Model]]\n {modname}\n{report}'
+
+ def _repr_html_(self, show_correl=True, min_correl=0.1):
+ """Return a HTML representation of parameters data."""
+ report = fitreport_html_table(self, show_correl=show_correl,
+ min_correl=min_correl)
+ modname = self.model._reprstring(long=True)
+ return f"<h2> Model</h2> {modname} {report}"
+
+ def summary(self):
+ """Return a dictionary with statistics and attributes of a ModelResult.
+
+ Returns
+ -------
+ dict
+ Dictionary of statistics and many attributes from a ModelResult.
+
+ Notes
+ -----
+ 1. Values for data arrays are not included.
+
+ 2. The result summary dictionary will include the following entries:
+
+ ``model``, ``method``, ``ndata``, ``nvarys``, ``nfree``, ``chisqr``,
+ ``redchi``, ``aic``, ``bic``, ``rsquared``, ``nfev``, ``max_nfev``,
+ ``aborted``, ``errorbars``, ``success``, ``message``,
+ ``lmdif_message``, ``ier``, ``nan_policy``, ``scale_covar``,
+ ``calc_covar``, ``ci_out``, ``col_deriv``, ``flatchain``,
+ ``call_kws``, ``var_names``, ``user_options``, ``kws``,
+ ``init_values``, ``best_values``, and ``params``.
+
+ where 'params' is a list of parameter "states": tuples with entries of
+ ``(name, value, vary, expr, min, max, brute_step, stderr, correl,
+ init_value, user_data)``.
+
+ 3. The result will include only plain Python objects, and so should be
+ easily serializable with JSON or similar tools.
+
+ """
+ summary = {'model': self.model._reprstring(long=True),
+ 'method': self.method}
+
+ for attr in ('ndata', 'nvarys', 'nfree', 'chisqr', 'redchi', 'aic',
+ 'bic', 'rsquared', 'nfev', 'max_nfev', 'aborted',
+ 'errorbars', 'success', 'message', 'lmdif_message', 'ier',
+ 'nan_policy', 'scale_covar', 'calc_covar', 'ci_out',
+ 'col_deriv', 'flatchain', 'call_kws', 'var_names',
+ 'user_options', 'kws', 'init_values', 'best_values'):
+ val = getattr(self, attr, None)
+ if isinstance(val, np.float64):
+ val = float(val)
+ elif isinstance(val, (np.int32, np.int64)):
+ val = int(val)
+ elif isinstance(val, np.bool_):
+ val = bool(val)
+ elif isinstance(val, bytes):
+ val = str(val, encoding='UTF-8')
+ summary[attr] = val
+
+ summary['params'] = [par.__getstate__() for par in self.params.values()]
+ return summary
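+ # Editorial sketch: because summary() returns only plain Python
+ # objects, it can be serialized directly (`result` is assumed from an
+ # earlier fit):
+ #
+ # >>> import json
+ # >>> text = json.dumps(result.summary())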
+
+ def dumps(self, **kws):
+ """Represent ModelResult as a JSON string.
+
+ Parameters
+ ----------
+ **kws : optional
+ Keyword arguments that are passed to `json.dumps`.
+
+ Returns
+ -------
+ str
+ JSON string representation of ModelResult.
+
+ See Also
+ --------
+ loads, json.dumps
+
+ """
+ out = {'__class__': 'lmfit.ModelResult', '__version__': '1',
+ 'model': encode4js(self.model._get_state())}
+ pasteval = self.params._asteval
+ out['params'] = [p.__getstate__() for p in self.params.values()]
+ out['unique_symbols'] = {key: encode4js(pasteval.symtable[key])
+ for key in pasteval.user_defined_symbols()}
+
+ for attr in ('aborted', 'aic', 'best_values', 'bic', 'chisqr',
+ 'ci_out', 'col_deriv', 'covar', 'errorbars', 'flatchain',
+ 'ier', 'init_values', 'lmdif_message', 'message',
+ 'method', 'nan_policy', 'ndata', 'nfev', 'nfree',
+ 'nvarys', 'redchi', 'residual', 'rsquared', 'scale_covar',
+ 'calc_covar', 'success', 'userargs', 'userkws', 'values',
+ 'var_names', 'weights', 'user_options'):
+ try:
+ val = getattr(self, attr)
+ except AttributeError:
+ continue
+ if isinstance(val, np.bool_):
+ val = bool(val)
+
+ out[attr] = encode4js(val)
+
+ val = out.get('message', '')
+ if isinstance(val, bytes):
+ out['message'] = str(val, encoding='ASCII')
+
+ return json.dumps(out, **kws)
+
+ def dump(self, fp, **kws):
+ """Dump serialization of ModelResult to a file.
+
+ Parameters
+ ----------
+ fp : file-like object
+ An open and `.write()`-supporting file-like object.
+ **kws : optional
+ Keyword arguments that are passed to `json.dumps`.
+
+ Returns
+ -------
+ int
+ Return value from `fp.write()`: the number of characters
+ written.
+
+ See Also
+ --------
+ dumps, load, json.dump
+
+ """
+ return fp.write(self.dumps(**kws))
+
+ def loads(self, s, funcdefs=None, **kws):
+ """Load ModelResult from a JSON string.
+
+ Parameters
+ ----------
+ s : str
+ String representation of ModelResult, as from `dumps`.
+ funcdefs : dict, optional
+ Dictionary of custom function names and definitions.
+ **kws : optional
+ Keyword arguments that are passed to `json.loads`.
+
+ Returns
+ -------
+ ModelResult
+ ModelResult instance from JSON string representation.
+
+ See Also
+ --------
+ load, dumps, json.loads
+
+ """
+ modres = json.loads(s, **kws)
+ if 'modelresult' not in modres['__class__'].lower():
+ raise AttributeError('ModelResult.loads() needs saved ModelResult')
+
+ modres = decode4js(modres)
+ if 'model' not in modres or 'params' not in modres:
+ raise AttributeError('ModelResult.loads() needs valid ModelResult')
+
+ # model
+ self.model = _buildmodel(decode4js(modres['model']), funcdefs=funcdefs)
+
+ # params
+ for target in ('params', 'init_params'):
+ state = {'unique_symbols': modres['unique_symbols'], 'params': []}
+ for parstate in modres['params']:
+ _par = Parameter(name='')
+ _par.__setstate__(parstate)
+ state['params'].append(_par)
+ _params = Parameters()
+ _params.__setstate__(state)
+ setattr(self, target, _params)
+
+ for attr in ('aborted', 'aic', 'best_fit', 'best_values', 'bic',
+ 'chisqr', 'ci_out', 'col_deriv', 'covar', 'data',
+ 'errorbars', 'fjac', 'flatchain', 'ier', 'init_fit',
+ 'init_values', 'kws', 'lmdif_message', 'message',
+ 'method', 'nan_policy', 'ndata', 'nfev', 'nfree',
+ 'nvarys', 'redchi', 'residual', 'rsquared', 'scale_covar',
+ 'calc_covar', 'success', 'userargs', 'userkws',
+ 'var_names', 'weights', 'user_options'):
+ setattr(self, attr, decode4js(modres.get(attr, None)))
+
+ self.best_fit = self.model.eval(self.params, **self.userkws)
+ if len(self.userargs) == 2:
+ self.data = self.userargs[0]
+ self.weights = self.userargs[1]
+
+ for parname, val in self.init_values.items():
+ par = self.init_params.get(parname, None)
+ if par is not None:
+ par.correl = par.stderr = None
+ par.value = par.init_value = self.init_values[parname]
+ self.init_fit = self.model.eval(self.init_params, **self.userkws)
+ self.result = MinimizerResult()
+ self.result.params = self.params
+ self.init_vals = list(self.init_values.items())
+ return self
+
+ def load(self, fp, funcdefs=None, **kws):
+ """Load JSON representation of ModelResult from a file-like object.
+
+ Parameters
+ ----------
+ fp : file-like object
+ An open and `.read()`-supporting file-like object.
+ funcdefs : dict, optional
+ Dictionary of function definitions to use to construct Model.
+ **kws : optional
+ Keyword arguments that are passed to `loads`.
+
+ Returns
+ -------
+ ModelResult
+ ModelResult created from `fp`.
+
+ See Also
+ --------
+ dump, loads, json.load
+
+ """
+ return self.loads(fp.read(), funcdefs=funcdefs, **kws)
+
+ @_ensureMatplotlib
+ def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+ xlabel=None, ylabel=None, yerr=None, numpoints=None,
+ data_kws=None, fit_kws=None, init_kws=None, ax_kws=None,
+ show_init=False, parse_complex='abs', title=None):
+ """Plot the fit results using matplotlib, if available.
+
+ The plot will include the data points, the initial fit curve
+ (optional, with ``show_init=True``), and the best-fit curve. If
+ the fit model included weights or if `yerr` is specified,
+ errorbars will also be plotted.
+
+ Parameters
+ ----------
+ ax : matplotlib.axes.Axes, optional
+ The axes to plot on. The default is None, which means use the
+ current pyplot axis or create one if there is none.
+ datafmt : str, optional
+ Matplotlib format string for data points.
+ fitfmt : str, optional
+ Matplotlib format string for fitted curve.
+ initfmt : str, optional
+ Matplotlib format string for initial conditions for the fit.
+ xlabel : str, optional
+ Matplotlib format string for labeling the x-axis.
+ ylabel : str, optional
+ Matplotlib format string for labeling the y-axis.
+ yerr : numpy.ndarray, optional
+ Array of uncertainties for data array.
+ numpoints : int, optional
+ If provided, the final and initial fit curves are evaluated
+ not only at data points, but refined to contain `numpoints`
+ points in total.
+ data_kws : dict, optional
+ Keyword arguments passed to the plot function for data points.
+ fit_kws : dict, optional
+ Keyword arguments passed to the plot function for fitted curve.
+ init_kws : dict, optional
+ Keyword arguments passed to the plot function for the initial
+ conditions of the fit.
+ ax_kws : dict, optional
+ Keyword arguments for a new axis, if a new one is created.
+ show_init : bool, optional
+ Whether to show the initial conditions for the fit (default is
+ False).
+ parse_complex : {'abs', 'real', 'imag', 'angle'}, optional
+ How to reduce complex data for plotting. Options are one of:
+ `'abs'` (default), `'real'`, `'imag'`, or `'angle'`, which
+ correspond to the NumPy functions with the same name.
+ title : str, optional
+ Matplotlib format string for figure title.
+
+ Returns
+ -------
+ matplotlib.axes.Axes
+
+ See Also
+ --------
+ ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
+ ModelResult.plot : Plot the fit results and residuals using matplotlib.
+
+ Notes
+ -----
+ For details about plot format strings and keyword arguments see
+ documentation of `matplotlib.axes.Axes.plot`.
+
+ If `yerr` is specified or if the fit model included weights, then
+ `matplotlib.axes.Axes.errorbar` is used to plot the data. If
+ `yerr` is not specified and the fit includes weights, `yerr` is
+ set to ``1/self.weights``.
+
+ If the model returns complex data, `yerr` is treated the same way
+ that weights are in this case.
+
+ If `ax` is None then `matplotlib.pyplot.axes(**ax_kws)` is called.
+
+ """
+ from matplotlib import pyplot as plt
+ if data_kws is None:
+ data_kws = {}
+ if fit_kws is None:
+ fit_kws = {}
+ if init_kws is None:
+ init_kws = {}
+ if ax_kws is None:
+ ax_kws = {}
+
+ # The function reduce_complex will convert complex vectors into real vectors
+ reduce_complex = get_reducer(parse_complex)
+
+ if len(self.model.independent_vars) == 1:
+ independent_var = self.model.independent_vars[0]
+ else:
+ print('Fit can only be plotted if the model function has one '
+ 'independent variable.')
+ return False
+
+ if not isinstance(ax, plt.Axes):
+ ax = plt.axes(**ax_kws)
+
+ x_array = self.userkws[independent_var]
+
+ # make a dense array for x-axis if data is not dense
+ if numpoints is not None and len(self.data) < numpoints:
+ x_array_dense = np.linspace(min(x_array), max(x_array), numpoints)
+ else:
+ x_array_dense = x_array
+
+ if show_init:
+ y_eval_init = self.model.eval(self.init_params,
+ **{independent_var: x_array_dense})
+ if isinstance(self.model, (lmfit.models.ConstantModel,
+ lmfit.models.ComplexConstantModel)):
+ y_eval_init *= np.ones(x_array_dense.size)
+
+ ax.plot(
+ x_array_dense, reduce_complex(y_eval_init), initfmt,
+ label='initial fit', **init_kws)
+
+ if yerr is None and self.weights is not None:
+ yerr = 1.0/self.weights
+
+ if yerr is not None:
+ ax.errorbar(x_array, reduce_complex(self.data),
+ yerr=propagate_err(self.data, yerr, parse_complex),
+ fmt=datafmt, label='data', **data_kws)
+ else:
+ ax.plot(x_array, reduce_complex(self.data),
+ datafmt, label='data', **data_kws)
+
+ y_eval = self.model.eval(self.params, **{independent_var: x_array_dense})
+ if isinstance(self.model, (lmfit.models.ConstantModel,
+ lmfit.models.ComplexConstantModel)):
+ y_eval *= np.ones(x_array_dense.size)
+
+ ax.plot(x_array_dense, reduce_complex(y_eval), fitfmt, label='best fit',
+ **fit_kws)
+
+ if title:
+ ax.set_title(title)
+ elif ax.get_title() == '':
+ ax.set_title(self.model.name)
+ if xlabel is None:
+ ax.set_xlabel(independent_var)
+ else:
+ ax.set_xlabel(xlabel)
+ if ylabel is None:
+ ax.set_ylabel('y')
+ else:
+ ax.set_ylabel(ylabel)
+ ax.legend()
+ return ax
+
+ @_ensureMatplotlib
+ def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,
+ fit_kws=None, ax_kws=None, parse_complex='abs',
+ title=None):
+ """Plot the fit residuals using matplotlib, if available.
+
+ If `yerr` is supplied or if the model included weights, errorbars
+ will also be plotted.
+
+ Parameters
+ ----------
+ ax : matplotlib.axes.Axes, optional
+ The axes to plot on. The default is None, which means use the
+ current pyplot axis or create one if there is none.
+ datafmt : str, optional
+ Matplotlib format string for data points.
+ yerr : numpy.ndarray, optional
+ Array of uncertainties for data array.
+ data_kws : dict, optional
+ Keyword arguments passed to the plot function for data points.
+ fit_kws : dict, optional
+ Keyword arguments passed to the plot function for fitted curve.
+ ax_kws : dict, optional
+ Keyword arguments for a new axis, if a new one is created.
+ parse_complex : {'abs', 'real', 'imag', 'angle'}, optional
+ How to reduce complex data for plotting. Options are one of:
+ `'abs'` (default), `'real'`, `'imag'`, or `'angle'`, which
+ correspond to the NumPy functions with the same name.
+ title : str, optional
+ Matplotlib format string for figure title.
+
+ Returns
+ -------
+ matplotlib.axes.Axes
+
+ See Also
+ --------
+ ModelResult.plot_fit : Plot the fit results using matplotlib.
+ ModelResult.plot : Plot the fit results and residuals using matplotlib.
+
+ Notes
+ -----
+ For details about plot format strings and keyword arguments see
+ documentation of `matplotlib.axes.Axes.plot`.
+
+ If `yerr` is specified or if the fit model included weights, then
+ `matplotlib.axes.Axes.errorbar` is used to plot the data. If
+ `yerr` is not specified and the fit includes weights, `yerr` is
+ set to ``1/self.weights``.
+
+ If the model returns complex data, `yerr` is treated the same way
+ that weights are in this case.
+
+ If `ax` is None then `matplotlib.pyplot.axes(**ax_kws)` is called.
+
+ """
+ from matplotlib import pyplot as plt
+ if data_kws is None:
+ data_kws = {}
+ if fit_kws is None:
+ fit_kws = {}
+ if ax_kws is None:
+ ax_kws = {}
+
+ # The function reduce_complex will convert complex vectors into real vectors
+ reduce_complex = get_reducer(parse_complex)
+
+ if len(self.model.independent_vars) == 1:
+ independent_var = self.model.independent_vars[0]
+ else:
+ print('Fit can only be plotted if the model function has one '
+ 'independent variable.')
+ return False
+
+ if not isinstance(ax, plt.Axes):
+ ax = plt.axes(**ax_kws)
+
+ x_array = self.userkws[independent_var]
+
+ ax.axhline(0, **fit_kws, color='k')
+
+ y_eval = self.model.eval(self.params, **{independent_var: x_array})
+ if isinstance(self.model, (lmfit.models.ConstantModel,
+ lmfit.models.ComplexConstantModel)):
+ y_eval *= np.ones(x_array.size)
+
+ if yerr is None and self.weights is not None:
+ yerr = 1.0/self.weights
+
+ residuals = reduce_complex(self.eval()) - reduce_complex(self.data)
+ if yerr is not None:
+ ax.errorbar(x_array, residuals,
+ yerr=propagate_err(self.data, yerr, parse_complex),
+ fmt=datafmt, **data_kws)
+ else:
+ ax.plot(x_array, residuals, datafmt, **data_kws)
+
+ if title:
+ ax.set_title(title)
+ elif ax.get_title() == '':
+ ax.set_title(self.model.name)
+ ax.set_ylabel('residuals')
+ return ax
+
+ @_ensureMatplotlib
+ def plot(self, datafmt='o', fitfmt='-', initfmt='--', xlabel=None,
+ ylabel=None, yerr=None, numpoints=None, fig=None, data_kws=None,
+ fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None,
+ fig_kws=None, show_init=False, parse_complex='abs', title=None):
+ """Plot the fit results and residuals using matplotlib.
+
+ The method will produce a matplotlib figure (if package available)
+ with both results of the fit and the residuals plotted. If the fit
+ model included weights, errorbars will also be plotted. To show
+ the initial conditions for the fit, pass the argument
+ ``show_init=True``.
+
+ Parameters
+ ----------
+ datafmt : str, optional
+ Matplotlib format string for data points.
+ fitfmt : str, optional
+ Matplotlib format string for fitted curve.
+ initfmt : str, optional
+ Matplotlib format string for initial conditions for the fit.
+ xlabel : str, optional
+ Matplotlib format string for labeling the x-axis.
+ ylabel : str, optional
+ Matplotlib format string for labeling the y-axis.
+ yerr : numpy.ndarray, optional
+ Array of uncertainties for data array.
+ numpoints : int, optional
+ If provided, the final and initial fit curves are evaluated
+ not only at data points, but refined to contain `numpoints`
+ points in total.
+ fig : matplotlib.figure.Figure, optional
+ The figure to plot on. The default is None, which means use
+ the current pyplot figure or create one if there is none.
+ data_kws : dict, optional
+ Keyword arguments passed to the plot function for data points.
+ fit_kws : dict, optional
+ Keyword arguments passed to the plot function for fitted curve.
+ init_kws : dict, optional
+ Keyword arguments passed to the plot function for the initial
+ conditions of the fit.
+ ax_res_kws : dict, optional
+ Keyword arguments for the axes for the residuals plot.
+ ax_fit_kws : dict, optional
+ Keyword arguments for the axes for the fit plot.
+ fig_kws : dict, optional
+ Keyword arguments for a new figure, if a new one is created.
+ show_init : bool, optional
+ Whether to show the initial conditions for the fit (default is
+ False).
+ parse_complex : {'abs', 'real', 'imag', 'angle'}, optional
+ How to reduce complex data for plotting. Options are one of:
+ `'abs'` (default), `'real'`, `'imag'`, or `'angle'`, which
+ correspond to the NumPy functions with the same name.
+ title : str, optional
+ Matplotlib format string for figure title.
+
+ Returns
+ -------
+ matplotlib.figure.Figure
+
+ See Also
+ --------
+ ModelResult.plot_fit : Plot the fit results using matplotlib.
+ ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
+
+ Notes
+ -----
+ The method combines `ModelResult.plot_fit` and
+ `ModelResult.plot_residuals`.
+
+ If `yerr` is specified or if the fit model included weights, then
+ `matplotlib.axes.Axes.errorbar` is used to plot the data. If
+ `yerr` is not specified and the fit includes weights, `yerr` is
+ set to ``1/self.weights``.
+
+ If the model returns complex data, `yerr` is treated the same way
+ that weights are in this case.
+
+ If `fig` is None then `matplotlib.pyplot.figure(**fig_kws)` is
+ called, otherwise `fig_kws` is ignored.
+
+ """
+ from matplotlib import pyplot as plt
+ if data_kws is None:
+ data_kws = {}
+ if fit_kws is None:
+ fit_kws = {}
+ if init_kws is None:
+ init_kws = {}
+ if ax_res_kws is None:
+ ax_res_kws = {}
+ if ax_fit_kws is None:
+ ax_fit_kws = {}
+
+ # make a square figure with side equal to the default figure's x-size
+ figxsize = plt.rcParams['figure.figsize'][0]
+ fig_kws_ = dict(figsize=(figxsize, figxsize))
+ if fig_kws is not None:
+ fig_kws_.update(fig_kws)
+
+ if len(self.model.independent_vars) != 1:
+ print('Fit can only be plotted if the model function has one '
+ 'independent variable.')
+ return False
+
+ if not isinstance(fig, plt.Figure):
+ fig = plt.figure(**fig_kws_)
+
+ gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 4])
+ ax_res = fig.add_subplot(gs[0], **ax_res_kws)
+ ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
+
+ self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,
+ initfmt=initfmt, xlabel=xlabel, ylabel=ylabel,
+ numpoints=numpoints, data_kws=data_kws,
+ fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws,
+ show_init=show_init, parse_complex=parse_complex,
+ title=title)
+ self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
+ data_kws=data_kws, fit_kws=fit_kws,
+ ax_kws=ax_res_kws, parse_complex=parse_complex,
+ title=title)
+ plt.setp(ax_res.get_xticklabels(), visible=False)
+ ax_fit.set_title('')
+ return fig
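+ # Editorial sketch: one call produces the stacked fit/residuals figure
+ # (`y`, `params`, `x`, and `dy` are placeholders):
+ #
+ # >>> result = GaussianModel().fit(y, params, x=x)
+ # >>> fig = result.plot(show_init=True, yerr=dy)
+ # >>> fig.savefig('fit.png')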
diff --git a/lmfit/models.py b/lmfit/models.py
new file mode 100644
index 0000000..f3744a7
--- /dev/null
+++ b/lmfit/models.py
@@ -0,0 +1,1721 @@
+"""Module containing built-in fitting models."""
+
+import time
+
+from asteval import Interpreter, get_ast_names
+import numpy as np
+from scipy.interpolate import splev, splrep
+
+from . import lineshapes
+from .lineshapes import (breit_wigner, damped_oscillator, dho, doniach,
+ expgaussian, exponential, gaussian, gaussian2d,
+ linear, lognormal, lorentzian, moffat, parabolic,
+ pearson4, pearson7, powerlaw, pvoigt, rectangle, sine,
+ skewed_gaussian, skewed_voigt, split_lorentzian, step,
+ students_t, thermal_distribution, tiny, voigt)
+from .model import Model
+
+tau = 2.0 * np.pi
+
+
+class DimensionalError(Exception):
+ """Raise exception when number of independent variables is not one."""
+
+
+def _validate_1d(independent_vars):
+ if len(independent_vars) != 1:
+ raise DimensionalError(
+ "This model requires exactly one independent variable.")
+
+
+def fwhm_expr(model):
+ """Return constraint expression for fwhm."""
+ fmt = "{factor:.7f}*{prefix:s}sigma"
+ return fmt.format(factor=model.fwhm_factor, prefix=model.prefix)
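+# Editorial illustration: for a GaussianModel with prefix 'g1_', the
+# generated constraint string is built from the model's fwhm_factor
+# (2*sqrt(2*ln 2), formatted to 7 decimal places) and prefix:
+#
+# >>> fwhm_expr(GaussianModel(prefix='g1_'))
+# '2.3548200*g1_sigma'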
+
+
+def height_expr(model):
+ """Return constraint expression for maximum peak height."""
+ fmt = "{factor:.7f}*{prefix:s}amplitude/max({}, {prefix:s}sigma)"
+ return fmt.format(tiny, factor=model.height_factor, prefix=model.prefix)
+
+
+def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
+ """Estimate starting values from 1D peak data and create Parameters."""
+ sort_increasing = np.argsort(x)
+ x = x[sort_increasing]
+ y = y[sort_increasing]
+
+ maxy, miny = max(y), min(y)
+ maxx, minx = max(x), min(x)
+ cen = x[np.argmax(y)]
+ height = (maxy - miny)*3.0
+ sig = (maxx-minx)/6.0
+
+ # the explicit conversion to a NumPy array makes sure that the
+ # positional indexing of x_halfmax below also works if the data is
+ # supplied as a pandas.Series
+ x_halfmax = np.array(x[y > (maxy+miny)/2.0])
+ if negative:
+ height = -(maxy - miny)*3.0
+ x_halfmax = x[y < (maxy+miny)/2.0]
+ if len(x_halfmax) > 2:
+ sig = (x_halfmax[-1] - x_halfmax[0])/2.0
+ cen = x_halfmax.mean()
+ amp = height*sig*ampscale
+ sig = sig*sigscale
+
+ pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
+ pars[f'{model.prefix}sigma'].set(min=0.0)
+ return pars
+
+
+def guess_from_peak2d(model, z, x, y, negative):
+ """Estimate starting values from 2D peak data and create Parameters."""
+ maxx, minx = max(x), min(x)
+ maxy, miny = max(y), min(y)
+ maxz, minz = max(z), min(z)
+
+ centerx = x[np.argmax(z)]
+ centery = y[np.argmax(z)]
+ height = (maxz - minz)
+ sigmax = (maxx-minx)/6.0
+ sigmay = (maxy-miny)/6.0
+
+ if negative:
+ centerx = x[np.argmin(z)]
+ centery = y[np.argmin(z)]
+ height = (minz - maxz)
+
+ amp = height*sigmax*sigmay
+
+ pars = model.make_params(amplitude=amp, centerx=centerx, centery=centery,
+ sigmax=sigmax, sigmay=sigmay)
+ pars[f'{model.prefix}sigmax'].set(min=0.0)
+ pars[f'{model.prefix}sigmay'].set(min=0.0)
+ return pars
+
+
+def update_param_vals(pars, prefix, **kwargs):
+ """Update parameter values with keyword arguments."""
+ for key, val in kwargs.items():
+ pname = f"{prefix}{key}"
+ if pname in pars:
+ pars[pname].value = val
+ pars.update_constraints()
+ return pars
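+# Editorial illustration: keyword overrides are matched to prefixed
+# parameter names, which is how guess(..., center=5.0) reaches the
+# 'g1_center' parameter of a prefixed model:
+#
+# >>> pars = GaussianModel(prefix='g1_').make_params()
+# >>> pars = update_param_vals(pars, 'g1_', center=5.0)
+# >>> pars['g1_center'].value
+# 5.0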
+
+
+COMMON_INIT_DOC = """
+ Parameters
+ ----------
+ independent_vars : :obj:`list` of :obj:`str`, optional
+ Arguments to the model function that are independent variables
+ (default is ['x']).
+ prefix : str, optional
+ String to prepend to parameter names, needed to add two Models
+ that have parameter names in common.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ How to handle NaN and missing values in data. See Notes below.
+ **kwargs : optional
+ Keyword arguments to pass to :class:`Model`.
+
+ Notes
+ -----
+ 1. `nan_policy` sets what to do when a NaN or missing value is seen in
+ the data. Should be one of:
+
+ - `'raise'` : raise a `ValueError` (default)
+ - `'propagate'` : do nothing
+ - `'omit'` : drop missing data
+
+ """
+
+COMMON_GUESS_DOC = """Guess starting values for the parameters of a model.
+
+ Parameters
+ ----------
+ data : array_like
+ Array of data (i.e., y-values) to use to guess parameter values.
+ x : array_like
+ Array of values for the independent variable (i.e., x-values).
+ **kws : optional
+ Additional keyword arguments, passed to model function.
+
+ Returns
+ -------
+ params : Parameters
+ Initial, guessed values for the parameters of a Model.
+
+ .. versionchanged:: 1.0.3
+ Argument ``x`` is now explicitly required to estimate starting values.
+
+ """
+
+
+class ConstantModel(Model):
+ """Constant model, with a single Parameter: `c`.
+
+ Note that this is 'constant' in the sense of having no dependence on
+ the independent variable `x`, not in the sense of being non-varying.
+ To be clear, `c` will be a Parameter that will be varied in the fit
+ (by default, of course).
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+
+ def constant(x, c=0.0):
+ return c * np.ones(np.shape(x))
+ super().__init__(constant, **kwargs)
+
+ def guess(self, data, x=None, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = self.make_params()
+
+ pars[f'{self.prefix}c'].set(value=data.mean())
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class ComplexConstantModel(Model):
+ """Complex constant model, with two Parameters: `re` and `im`.
+
+ Note that `re` and `im` are 'constant' in the sense of having no
+ dependence on the independent variable `x`, not in the sense of being
+ non-varying. To be clear, `re` and `im` will be Parameters that will
+ be varied in the fit (by default, of course).
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ name=None, **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+
+ def constant(x, re=0., im=0.):
+ return (re + 1j*im) * np.ones(np.shape(x))
+ super().__init__(constant, **kwargs)
+
+ def guess(self, data, x=None, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = self.make_params()
+ pars[f'{self.prefix}re'].set(value=data.real.mean())
+ pars[f'{self.prefix}im'].set(value=data.imag.mean())
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class LinearModel(Model):
+ """Linear model, with two Parameters: `intercept` and `slope`.
+
+ Defined as:
+
+ .. math::
+
+ f(x; m, b) = m x + b
+
+ with `slope` for :math:`m` and `intercept` for :math:`b`.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(linear, **kwargs)
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ sval, oval = np.polyfit(x, data, 1)
+ pars = self.make_params(intercept=oval, slope=sval)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
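+# Editorial sketch of the usual guess-then-fit workflow, here with
+# synthetic data:
+#
+# >>> import numpy as np
+# >>> x = np.linspace(0, 10, 51)
+# >>> y = 3.0*x - 1.0 + np.random.normal(scale=0.1, size=x.size)
+# >>> model = LinearModel()
+# >>> result = model.fit(y, model.guess(y, x=x), x=x)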
+
+
+class QuadraticModel(Model):
+ """A quadratic model, with three Parameters: `a`, `b`, and `c`.
+
+ Defined as:
+
+ .. math::
+
+ f(x; a, b, c) = a x^2 + b x + c
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(parabolic, **kwargs)
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ a, b, c = np.polyfit(x, data, 2)
+ pars = self.make_params(a=a, b=b, c=c)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+ParabolicModel = QuadraticModel
+
+
+class PolynomialModel(Model):
+ r"""A polynomial model with up to 7 Parameters, specified by `degree`.
+
+ .. math::
+
+ f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0}^{7} c_i x^i
+
+ with parameters `c0`, `c1`, ..., `c7`. The supplied `degree` will
+ specify how many of these are actual variable parameters. This uses
+ :numpydoc:`polyval` for its calculation of the polynomial.
+
+ """
+
+ MAX_DEGREE = 7
+ DEGREE_ERR = f"degree must be an integer equal to or smaller than {MAX_DEGREE}."
+
+ valid_forms = (0, 1, 2, 3, 4, 5, 6, 7)
+
+ def __init__(self, degree=7, independent_vars=['x'], prefix='',
+ nan_policy='raise', **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ if 'form' in kwargs:
+ degree = int(kwargs.pop('form'))
+ if not isinstance(degree, int) or degree > self.MAX_DEGREE:
+ raise TypeError(self.DEGREE_ERR)
+
+ self.poly_degree = degree
+ pnames = [f'c{i}' for i in range(degree + 1)]
+ kwargs['param_names'] = pnames
+
+ def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
+ return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
+
+ super().__init__(polynomial, **kwargs)
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = self.make_params()
+ out = np.polyfit(x, data, self.poly_degree)
+ for i, coef in enumerate(out[::-1]):
+ pars[f'{self.prefix}c{i}'].set(value=coef)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class SplineModel(Model):
+ r"""A 1-D cubic spline model with a variable number of `knots` and
+ parameters `s0`, `s1`, ..., `sN`, for `N` knots.
+
+ The user must supply a list or ndarray `xknots`: the `x` values for the
+ 'knots' which control the flexibility of the spline function.
+
+ The parameters `s0`, ..., `sN-1` (where `N` is the size of `xknots`) will
+ correspond to the `y` values for the spline knots at the `x=xknots`
+ positions where the highest order derivative will be discontinuous.
+ The resulting curve will not necessarily pass through these knot
+ points, but for finely-spaced knots, the spline parameter values will
+ be very close to the `y` values of the resulting curve.
+
+ The maximum number of knots supported is 300.
+
+ Using the `guess()` method to initialize parameter values is highly
+ recommended.
+
+ Parameters
+ ----------
+ xknots : :obj:`list` of floats or :obj:`ndarray`, required
+ x-values of knots for spline.
+ independent_vars : :obj:`list` of :obj:`str`, optional
+ Arguments to the model function that are independent variables
+ (default is ['x']).
+ prefix : str, optional
+ String to prepend to parameter names, needed to add two Models
+ that have parameter names in common.
+ nan_policy : {'raise', 'propagate', 'omit'}, optional
+ How to handle NaN and missing values in data. See Notes below.
+
+ Notes
+ -----
+ 1. There must be at least 4 knot points, and not more than 300.
+
+ 2. `nan_policy` sets what to do when a NaN or missing value is seen in
+ the data. Should be one of:
+
+ - `'raise'` : raise a `ValueError` (default)
+ - `'propagate'` : do nothing
+ - `'omit'` : drop missing data
+
+ """
+
+ MAX_KNOTS = 300
+ NKNOTS_MAX_ERR = f"SplineModel supports up to {MAX_KNOTS:d} knots"
+ NKNOTS_NDARRAY_ERR = "SplineModel xknots must be 1-D array-like"
+ DIM_ERR = "SplineModel supports only 1-d spline interpolation"
+
+ def __init__(self, xknots, independent_vars=['x'], prefix='',
+ nan_policy='raise', **kwargs):
+ """ """
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+
+ if isinstance(xknots, (list, tuple)):
+ xknots = np.asarray(xknots, dtype=np.float64)
+ try:
+ xknots = xknots.flatten()
+ except Exception:
+ raise TypeError(self.NKNOTS_NDARRAY_ERR)
+
+ if len(xknots) > self.MAX_KNOTS:
+ raise TypeError(self.NKNOTS_MAX_ERR)
+
+ if len(independent_vars) > 1:
+ raise TypeError(self.DIM_ERR)
+
+ self.xknots = xknots
+ self.nknots = len(xknots)
+ self.order = 3 # cubic splines only
+
+ def spline_model(x, s0=1, s1=1, s2=1, s3=1, s4=1, s5=1):
+ "used only for the initial parsing"
+ return x
+
+ super().__init__(spline_model, **kwargs)
+
+ if 'x' not in independent_vars:
+ self.independent_vars.pop('x')
+
+ self._param_root_names = [f's{d}' for d in range(self.nknots)]
+ self._param_names = [f'{prefix}{s}' for s in self._param_root_names]
+
+ self.knots, _c, _k = splrep(self.xknots, np.ones(self.nknots),
+ k=self.order)
+
+ def eval(self, params=None, **kwargs):
+ """note that we override `eval()` here for a variadic function,
+ as we will not know the number of spline parameters until run time
+ """
+ self.make_funcargs(params, kwargs)
+
+ coefs = [params[f'{self.prefix}s{d}'].value for d in range(self.nknots)]
+ coefs.extend([coefs[-1]]*(self.order+1))
+ coefs = np.array(coefs)
+ x = kwargs[self.independent_vars[0]]
+ return splev(x, [self.knots, coefs, self.order])
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = self.make_params()
+
+ for i, xk in enumerate(self.xknots):
+ ix = np.abs(x-xk).argmin()
+ this = data[ix]
+ pone = data[ix+1] if ix < len(x)-2 else this
+ mone = data[ix-1] if ix > 0 else this
+ pars[f'{self.prefix}s{i}'].value = (4.*this + pone + mone)/6.
+
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ guess.__doc__ = COMMON_GUESS_DOC
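+# Editorial sketch: knots should span the data range; guess() then sets
+# the s0..sN-1 values from the data (`x` and `y` are placeholders):
+#
+# >>> knots = np.linspace(x.min(), x.max(), 15)
+# >>> spline = SplineModel(xknots=knots, prefix='bkg_')
+# >>> params = spline.guess(y, x)
+# >>> result = spline.fit(y, params, x=x)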
+
+
+class SineModel(Model):
+ r"""A model based on a sinusoidal lineshape.
+
+ The model has three Parameters: `amplitude`, `frequency`, and `shift`.
+
+ .. math::
+
+ f(x; A, \phi, f) = A \sin (f x + \phi)
+
+ where the parameter `amplitude` corresponds to :math:`A`, `frequency` to
+ :math:`f`, and `shift` to :math:`\phi`. `amplitude` and `frequency` are
+ constrained to be non-negative, and `shift` to lie within
+ :math:`[-2\pi, 2\pi]`.
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(sine, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('amplitude', min=0)
+ self.set_param_hint('frequency', min=0)
+ self.set_param_hint('shift', min=-tau-1.e-5, max=tau+1.e-5)
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from the FFT of the data."""
+ data = data - data.mean()
+ # assume uniform spacing
+ frequencies = np.fft.fftfreq(len(x), abs(x[-1] - x[0]) / (len(x) - 1))
+ fft = abs(np.fft.fft(data))
+ argmax = abs(fft).argmax()
+ amplitude = 2.0 * fft[argmax] / len(fft)
+ frequency = tau * abs(frequencies[argmax])
+ # try shifts in the range [0, 2*pi) and take the one with best residual
+ shift_guesses = np.linspace(0, tau, 11, endpoint=False)
+ errors = [np.linalg.norm(self.eval(x=x, amplitude=amplitude,
+ frequency=frequency,
+ shift=shift_guess) - data)
+ for shift_guess in shift_guesses]
+ shift = shift_guesses[np.argmin(errors)]
+ pars = self.make_params(amplitude=amplitude, frequency=frequency,
+ shift=shift)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class GaussianModel(Model):
+ r"""A model based on a Gaussian or normal distribution lineshape.
+
+ The model has three Parameters: `amplitude`, `center`, and `sigma`.
+ In addition, parameters `fwhm` and `height` are included as
+ constraints to report full width at half maximum and maximum peak
+ height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}
+
+ where the parameter `amplitude` corresponds to :math:`A`, `center` to
+ :math:`\mu`, and `sigma` to :math:`\sigma`. The full width at half
+ maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
+ :math:`2.3548\sigma`.
+
+ For more information, see: https://en.wikipedia.org/wiki/Normal_distribution
+
+ """
+
+ fwhm_factor = 2*np.sqrt(2*np.log(2))
+ height_factor = 1./np.sqrt(2*np.pi)
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(gaussian, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('fwhm', expr=fwhm_expr(self))
+ self.set_param_hint('height', expr=height_expr(self))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
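+# Editorial sketch: guess() uses guess_from_peak() above, so a simple
+# peak can be fit without hand-tuned starting values (`x`, `y` assumed):
+#
+# >>> model = GaussianModel()
+# >>> result = model.fit(y, model.guess(y, x=x), x=x)
+# >>> fwhm = result.params['fwhm'].value  # set via the fwhm constraint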
+
+
+class Gaussian2dModel(Model):
+ r"""A model based on a two-dimensional Gaussian function.
+
+ The model has two independent variables `x` and `y` and five
+ Parameters: `amplitude`, `centerx`, `sigmax`, `centery`, and `sigmay`.
+ In addition, parameters `fwhmx`, `fwhmy`, and `height` are included as
+ constraints to report the maximum peak height and the two full width
+ at half maxima, respectively.
+
+ .. math::
+
+ f(x, y; A, \mu_x, \sigma_x, \mu_y, \sigma_y) =
+ A g(x; A=1, \mu_x, \sigma_x) g(y; A=1, \mu_y, \sigma_y)
+
+ where subfunction :math:`g(x; A, \mu, \sigma)` is a Gaussian lineshape:
+
+ .. math::
+
+ g(x; A, \mu, \sigma) =
+ \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}.
+
+ """
+
+ fwhm_factor = 2*np.sqrt(2*np.log(2))
+ height_factor = 1./(2*np.pi)
+
+ def __init__(self, independent_vars=['x', 'y'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(gaussian2d, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigmax', min=0)
+ self.set_param_hint('sigmay', min=0)
+ expr = fwhm_expr(self)
+ self.set_param_hint('fwhmx', expr=expr.replace('sigma', 'sigmax'))
+ self.set_param_hint('fwhmy', expr=expr.replace('sigma', 'sigmay'))
+ fmt = ("{factor:.7f}*{prefix:s}amplitude/(max({tiny}, {prefix:s}sigmax)"
+ + "*max({tiny}, {prefix:s}sigmay))")
+ expr = fmt.format(tiny=tiny, factor=self.height_factor, prefix=self.prefix)
+ self.set_param_hint('height', expr=expr)
+
+ def guess(self, data, x, y, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak2d(self, data, x, y, negative)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC.replace("['x']", "['x', 'y']")
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class LorentzianModel(Model):
+ r"""A model based on a Lorentzian or Cauchy-Lorentz distribution function.
+
+ The model has three Parameters: `amplitude`, `center`, and `sigma`. In
+ addition, parameters `fwhm` and `height` are included as constraints
+ to report full width at half maximum and maximum peak height,
+ respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+ where the parameter `amplitude` corresponds to :math:`A`, `center` to
+ :math:`\mu`, and `sigma` to :math:`\sigma`. The full width at half
+ maximum is :math:`2\sigma`.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Cauchy_distribution
+
+ """
+
+ fwhm_factor = 2.0
+ height_factor = 1./np.pi
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(lorentzian, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('fwhm', expr=fwhm_expr(self))
+ self.set_param_hint('height', expr=height_expr(self))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class SplitLorentzianModel(Model):
+ r"""A model based on a Lorentzian or Cauchy-Lorentz distribution function.
+
+ The model has four parameters: `amplitude`, `center`, `sigma`, and
+ `sigma_r`. In addition, parameters `fwhm` and `height` are included
+ as constraints to report full width at half maximum and maximum peak
+ height, respectively.
+
+ 'Split' means that the width of the distribution is different between
+ left and right slopes.
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \sigma_r) = \frac{2 A}{\pi (\sigma+\sigma_r)} \big[\frac{\sigma^2}{(x - \mu)^2 + \sigma^2} * H(\mu-x) + \frac{\sigma_r^2}{(x - \mu)^2 + \sigma_r^2} * H(x-\mu)\big]
+
+ where the parameter `amplitude` corresponds to :math:`A`, `center` to
+ :math:`\mu`, `sigma` to :math:`\sigma`, `sigma_r` to :math:`\sigma_r`,
+ and :math:`H(x)` is a Heaviside step function:
+
+ .. math::
+
+ H(x) = \begin{cases} 0, & x < 0 \\ 1, & x \geq 0 \end{cases}
+
+ The full width at half maximum is :math:`\sigma+\sigma_r`. Just as with
+ the Lorentzian model, the integral of this function from :math:`-\infty`
+ to :math:`+\infty` equals `amplitude`.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Cauchy_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(split_lorentzian, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ fwhm_expr = '{pre:s}sigma+{pre:s}sigma_r'
+ height_expr = '2*{pre:s}amplitude/{0:.7f}/max({1:.7f}, ({pre:s}sigma+{pre:s}sigma_r))'
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('sigma_r', min=0)
+ self.set_param_hint('fwhm', expr=fwhm_expr.format(pre=self.prefix))
+ self.set_param_hint('height', expr=height_expr.format(np.pi, tiny, pre=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
+ sigma = pars[f'{self.prefix}sigma']
+ pars[f'{self.prefix}sigma_r'].set(value=sigma.value, min=sigma.min, max=sigma.max)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class VoigtModel(Model):
+ r"""A model based on a Voigt distribution function.
+
+ The model has four Parameters: `amplitude`, `center`, `sigma`, and
+ `gamma`. By default, `gamma` is constrained to have a value equal to
+ `sigma`, though it can be varied independently. In addition,
+ parameters `fwhm` and `height` are included as constraints to report
+ full width at half maximum and maximum peak height, respectively. The
+ definition for the Voigt function used here is:
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A \textrm{Re}[w(z)]}{\sigma\sqrt{2 \pi}}
+
+ where
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ z &=& \frac{x-\mu +i\gamma}{\sigma\sqrt{2}} \\
+ w(z) &=& e^{-z^2}{\operatorname{erfc}}(-iz)
+ \end{eqnarray*}
+
+ and :func:`erfc` is the complementary error function. As above,
+ `amplitude` corresponds to :math:`A`, `center` to :math:`\mu`, and
+ `sigma` to :math:`\sigma`. The parameter `gamma` corresponds to
+ :math:`\gamma`. If `gamma` is kept at the default value (constrained
+ to `sigma`), the full width at half maximum is approximately
+ :math:`3.6013\sigma`.
+
+ For more information, see: https://en.wikipedia.org/wiki/Voigt_profile
+
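+    Examples
+    --------
+    A short sketch of freeing the default ``gamma = sigma`` constraint
+    so that `gamma` is fitted independently (hypothetical values):
+
+    >>> mod = VoigtModel()
+    >>> pars = mod.make_params(amplitude=10, center=0, sigma=1)
+    >>> pars['gamma'].set(value=0.7, vary=True, expr='')
+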
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(voigt, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('gamma', expr=f'{self.prefix}sigma')
+
+ fexpr = ("1.0692*{pre:s}gamma+" +
+ "sqrt(0.8664*{pre:s}gamma**2+5.545083*{pre:s}sigma**2)")
+ hexpr = ("({pre:s}amplitude/(max({0}, {pre:s}sigma*sqrt(2*pi))))*"
+ "wofz((1j*{pre:s}gamma)/(max({0}, {pre:s}sigma*sqrt(2)))).real")
+
+ self.set_param_hint('fwhm', expr=fexpr.format(pre=self.prefix))
+ self.set_param_hint('height', expr=hexpr.format(tiny, pre=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative,
+ ampscale=1.5, sigscale=0.65)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class PseudoVoigtModel(Model):
+ r"""A model based on a pseudo-Voigt distribution function.
+
+    This is a weighted sum of a Gaussian and a Lorentzian distribution
+    function that share values for `amplitude` (:math:`A`), `center`
+    (:math:`\mu`), and full width at half maximum `fwhm` (and so have
+    constrained values of `sigma` (:math:`\sigma`) and `height`, the
+    maximum peak height). The parameter `fraction` (:math:`\alpha`)
+    controls the relative weight of the Gaussian and Lorentzian
+    components, giving the full definition of:
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma_g\sqrt{2\pi}}
+ e^{[{-{(x-\mu)^2}/{{2\sigma_g}^2}}]}
+ + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+ where :math:`\sigma_g = {\sigma}/{\sqrt{2\ln{2}}}` so that the full
+ width at half maximum of each component and of the sum is
+ :math:`2\sigma`. The :meth:`guess` function always sets the starting
+ value for `fraction` at 0.5.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation
+
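+    Examples
+    --------
+    A short sketch of adjusting the Gaussian/Lorentzian mix after
+    creating the parameters (hypothetical values):
+
+    >>> mod = PseudoVoigtModel()
+    >>> pars = mod.make_params(amplitude=5, center=0, sigma=1)
+    >>> pars['fraction'].set(value=0.3)  # 30% Lorentzian, 70% Gaussian
+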
+ """
+
+ fwhm_factor = 2.0
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(pvoigt, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('fraction', value=0.5, min=0.0, max=1.0)
+ self.set_param_hint('fwhm', expr=fwhm_expr(self))
+ fmt = ("(((1-{prefix:s}fraction)*{prefix:s}amplitude)/"
+ "max({0}, ({prefix:s}sigma*sqrt(pi/log(2))))+"
+ "({prefix:s}fraction*{prefix:s}amplitude)/"
+ "max({0}, (pi*{prefix:s}sigma)))")
+ self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
+ pars[f'{self.prefix}fraction'].set(value=0.5, min=0.0, max=1.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class MoffatModel(Model):
+ r"""A model based on the Moffat distribution function.
+
+ The model has four Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), a width parameter `sigma` (:math:`\sigma`), and an
+ exponent `beta` (:math:`\beta`). In addition, parameters `fwhm` and
+ `height` are included as constraints to report full width at half
+ maximum and maximum peak height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \beta) = A \big[(\frac{x-\mu}{\sigma})^2+1\big]^{-\beta}
+
+    where the full width at half maximum is :math:`2\sigma\sqrt{2^{1/\beta}-1}`.
+ The :meth:`guess` function always sets the starting value for `beta`
+ to 1.
+
+    Note that for :math:`\beta=1` the Moffat has a Lorentzian shape. For
+ more information, see:
+ https://en.wikipedia.org/wiki/Moffat_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(moffat, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('beta')
+ self.set_param_hint('fwhm', expr=f"2*{self.prefix}sigma*sqrt(2**(1.0/max(1e-3, {self.prefix}beta))-1)")
+ self.set_param_hint('height', expr=f"{self.prefix}amplitude")
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative, ampscale=0.5, sigscale=1.)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class Pearson4Model(Model):
+ r"""A model based on a Pearson IV distribution.
+
+    The model has five parameters: `amplitude` (:math:`A`), `center`
+    (:math:`\mu`), `sigma` (:math:`\sigma`), `expon` (:math:`m`), and
+    `skew` (:math:`\nu`). In addition, parameters `fwhm`, `height`, and
+    `position` are included as constraints to report estimates for the
+    approximate full width at half maximum, the maximum peak height, and
+    the peak position (the position of the maximal function value),
+    respectively. The `fwhm` estimate has an error of about 20% over
+    the parameter range `expon`: (0.5, 1000], `skew`: [-1000, 1000].
+
+ .. math::
+
+ f(x;A,\mu,\sigma,m,\nu)=A \frac{\left|\frac{\Gamma(m+i\tfrac{\nu}{2})}{\Gamma(m)}\right|^2}{\sigma\beta(m-\tfrac{1}{2},\tfrac{1}{2})}\left[1+\frac{(x-\mu)^2}{\sigma^2}\right]^{-m}\exp\left(-\nu \arctan\left(\frac{x-\mu}{\sigma}\right)\right)
+
+ where :math:`\beta` is the beta function (see :scipydoc:`special.beta`).
+ The :meth:`guess` function always gives a starting value of 1.5 for `expon`,
+ and 0 for `skew`.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_IV_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(pearson4, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('expon', value=1.5, min=0.5 + tiny, max=1000)
+ self.set_param_hint('skew', value=0.0, min=-1000, max=1000)
+ fmt = ("{prefix:s}sigma*sqrt(2**(1/{prefix:s}expon)-1)*pi/arctan2(exp(1)*{prefix:s}expon, {prefix:s}skew)")
+ self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
+ fmt = ("({prefix:s}amplitude / {prefix:s}sigma) * exp(2 * (real(loggammafcn({prefix:s}expon + {prefix:s}skew * 0.5j)) - loggammafcn({prefix:s}expon)) - betalnfnc({prefix:s}expon-0.5, 0.5) - "
+ "{prefix:s}expon * log1p(square({prefix:s}skew/(2*{prefix:s}expon))) - {prefix:s}skew * arctan(-{prefix:s}skew/(2*{prefix:s}expon)))")
+        self.set_param_hint('height', expr=fmt.format(prefix=self.prefix))
+ fmt = ("{prefix:s}center-{prefix:s}sigma*{prefix:s}skew/(2*{prefix:s}expon)")
+ self.set_param_hint('position', expr=fmt.format(prefix=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ pars[f'{self.prefix}expon'].set(value=1.5)
+ pars[f'{self.prefix}skew'].set(value=0.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class Pearson7Model(Model):
+ r"""A model based on a Pearson VII distribution.
+
+ The model has four parameters: `amplitude` (:math:`A`), `center`
+    (:math:`\mu`), `sigma` (:math:`\sigma`), and `expon` (:math:`m`).
+ In addition, parameters `fwhm` and `height` are included as
+ constraints to report estimates for the full width at half maximum and
+ maximum peak height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2} \bigr]^{-m}
+
+ where :math:`\beta` is the beta function (see :scipydoc:`special.beta`).
+    The :meth:`guess` function always gives a starting value for `expon`
+    of 1.5.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(pearson7, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('expon', value=1.5, max=100)
+ fmt = ("sqrt(2**(1/{prefix:s}expon)-1)*2*{prefix:s}sigma")
+ self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
+ fmt = ("{prefix:s}amplitude * gamfcn({prefix:s}expon)/"
+ "max({0}, (gamfcn(0.5)*gamfcn({prefix:s}expon-0.5)*{prefix:s}sigma))")
+ self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ pars[f'{self.prefix}expon'].set(value=1.5)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class StudentsTModel(Model):
+ r"""A model based on a Student's t-distribution function.
+
+ The model has three Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), and `sigma` (:math:`\sigma`). In addition, parameters
+ `fwhm` and `height` are included as constraints to report full width
+ at half maximum and maximum peak height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A \Gamma(\frac{\sigma+1}{2})} {\sqrt{\sigma\pi}\,\Gamma(\frac{\sigma}{2})} \Bigl[1+\frac{(x-\mu)^2}{\sigma}\Bigr]^{-\frac{\sigma+1}{2}}
+
+ where :math:`\Gamma(x)` is the gamma function.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Student%27s_t-distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(students_t, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0.0, max=100)
+ fmt = ("{prefix:s}amplitude*gamfcn(({prefix:s}sigma+1)/2)/"
+ "(sqrt({prefix:s}sigma*pi)*gamfcn({prefix:s}sigma/2))")
+ self.set_param_hint('height', expr=fmt.format(prefix=self.prefix))
+ fmt = ("2*sqrt(2**(2/({prefix:s}sigma+1))*"
+ "{prefix:s}sigma-{prefix:s}sigma)")
+ self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class BreitWignerModel(Model):
+ r"""A model based on a Breit-Wigner-Fano function.
+
+ The model has four Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), `sigma` (:math:`\sigma`), and `q` (:math:`q`).
+
+ .. math::
+
+ f(x; A, \mu, \sigma, q) = \frac{A (q\sigma/2 + x - \mu)^2}{(\sigma/2)^2 + (x - \mu)^2}
+
+ For more information, see: https://en.wikipedia.org/wiki/Fano_resonance
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(breit_wigner, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0.0)
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ pars[f'{self.prefix}q'].set(value=1.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class LognormalModel(Model):
+ r"""A model based on the Log-normal distribution function.
+
+    The model has three Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), and `sigma` (:math:`\sigma`). In addition, parameters
+ `fwhm` and `height` are included as constraints to report estimates of
+ full width at half maximum and maximum peak height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}}\frac{e^{-(\ln(x) - \mu)^2/ 2\sigma^2}}{x}
+
+ For more information, see: https://en.wikipedia.org/wiki/Lognormal
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(lognormal, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+
+ fmt = ("{prefix:s}amplitude/max({0}, ({prefix:s}sigma*sqrt(2*pi)))"
+ "*exp({prefix:s}sigma**2/2-{prefix:s}center)")
+ self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
+ fmt = ("exp({prefix:s}center-{prefix:s}sigma**2+{prefix:s}sigma*sqrt("
+ "2*log(2)))-"
+ "exp({prefix:s}center-{prefix:s}sigma**2-{prefix:s}sigma*sqrt("
+ "2*log(2)))")
+ self.set_param_hint('fwhm', expr=fmt.format(prefix=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
+ pars[f'{self.prefix}sigma'].set(min=0.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class DampedOscillatorModel(Model):
+ r"""A model based on the Damped Harmonic Oscillator Amplitude.
+
+ The model has three Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), and `sigma` (:math:`\sigma`). In addition, the
+ parameter `height` is included as a constraint to report the maximum
+ peak height.
+
+ .. math::
+
+ f(x; A, \mu, \sigma) = \frac{A}{\sqrt{ [1 - (x/\mu)^2]^2 + (2\sigma x/\mu)^2}}
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part
+
+ """
+
+ height_factor = 0.5
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(damped_oscillator, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('height', expr=height_expr(self))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative,
+ ampscale=0.1, sigscale=0.1)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class DampedHarmonicOscillatorModel(Model):
+ r"""A model based on a variation of the Damped Harmonic Oscillator.
+
+ The model follows the definition given in DAVE/PAN (see:
+ https://www.ncnr.nist.gov/dave) and has four Parameters: `amplitude`
+ (:math:`A`), `center` (:math:`\mu`), `sigma` (:math:`\sigma`), and
+ `gamma` (:math:`\gamma`). In addition, parameters `fwhm` and `height`
+ are included as constraints to report estimates for full width at half
+ maximum and maximum peak height, respectively.
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A\sigma}{\pi [1 - \exp(-x/\gamma)]}
+ \Big[ \frac{1}{(x-\mu)^2 + \sigma^2} - \frac{1}{(x+\mu)^2 + \sigma^2} \Big]
+
+    where :math:`\gamma = kT`, `k` is the Boltzmann constant in
+    :math:`\mathrm{eV}\,\mathrm{K}^{-1}`, and `T` is the temperature in
+    :math:`\mathrm{K}`.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Harmonic_oscillator
+
+ """
+
+ fwhm_factor = 2.0
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(dho, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('center', min=0)
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('gamma', min=1.e-19)
+ fmt = ("({prefix:s}amplitude*{prefix:s}sigma)/"
+ "max({0}, (pi*(1-exp(-{prefix:s}center/max({0}, {prefix:s}gamma)))))*"
+ "(1/max({0}, {prefix:s}sigma**2)-1/"
+ "max({0}, (4*{prefix:s}center**2+{prefix:s}sigma**2)))")
+ self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
+ self.set_param_hint('fwhm', expr=fwhm_expr(self))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative,
+ ampscale=0.1, sigscale=0.1)
+ pars[f'{self.prefix}gamma'].set(value=1.0, min=0.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class ExponentialGaussianModel(Model):
+ r"""A model of an Exponentially modified Gaussian distribution.
+
+ The model has four Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), `sigma` (:math:`\sigma`), and `gamma` (:math:`\gamma`).
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
+ \exp\bigl[\gamma({\mu - x + \gamma\sigma^2/2})\bigr]
+ {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
+
+
+ where :func:`erfc` is the complementary error function.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(expgaussian, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('gamma', min=0, max=20)
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class SkewedGaussianModel(Model):
+ r"""A skewed Gaussian model, using a skewed normal distribution.
+
+ The model has four Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), `sigma` (:math:`\sigma`), and `gamma` (:math:`\gamma`).
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
+ e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
+ {\operatorname{erf}}\bigl[
+ \frac{{\gamma}(x-\mu)}{\sigma\sqrt{2}}
+ \bigr] \Bigr\}
+
+ where :func:`erf` is the error function.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Skew_normal_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(skewed_gaussian, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class SkewedVoigtModel(Model):
+ r"""A skewed Voigt model, modified using a skewed normal distribution.
+
+    The model has five Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), `sigma` (:math:`\sigma`), and `gamma` (:math:`\gamma`),
+ as usual for a Voigt distribution, and adds a new Parameter `skew`.
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma, \rm{skew}) = {\rm{Voigt}}(x; A, \mu, \sigma, \gamma)
+ \Bigl\{ 1 + {\operatorname{erf}}\bigl[
+ \frac{{\rm{skew}}(x-\mu)}{\sigma\sqrt{2}}
+ \bigr] \Bigr\}
+
+ where :func:`erf` is the error function.
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Skew_normal_distribution
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(skewed_voigt, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('sigma', min=0)
+ self.set_param_hint('gamma', expr=f'{self.prefix}sigma')
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class ThermalDistributionModel(Model):
+ r"""Return a thermal distribution function.
+
+ Variable `form` defines the kind of distribution as below with three
+ Parameters: `amplitude` (:math:`A`), `center` (:math:`x_0`), and `kt`
+ (:math:`kt`). The following distributions are available:
+
+ - `'bose'` : Bose-Einstein distribution (default)
+ - `'maxwell'` : Maxwell-Boltzmann distribution
+ - `'fermi'` : Fermi-Dirac distribution
+
+ The functional forms are defined as:
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ & f(x; A, x_0, kt, {\mathrm{form={}'bose{}'}}) & = \frac{1}{A \exp(\frac{x - x_0}{kt}) - 1} \\
+ & f(x; A, x_0, kt, {\mathrm{form={}'maxwell{}'}}) & = \frac{1}{A \exp(\frac{x - x_0}{kt})} \\
+        & f(x; A, x_0, kt, {\mathrm{form={}'fermi{}'}}) & = \frac{1}{A \exp(\frac{x - x_0}{kt}) + 1}
+ \end{eqnarray*}
+
+ Notes
+ -----
+ - `kt` should be defined in the same units as `x` (:math:`k_B =
+ 8.617\times10^{-5}` eV/K).
+ - set :math:`kt<0` to implement the energy loss convention common in
+ scattering research.
+
+ For more information, see:
+ http://hyperphysics.phy-astr.gsu.edu/hbase/quantum/disfcn.html
+
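+    Examples
+    --------
+    A minimal sketch selecting the Fermi-Dirac form (hypothetical
+    energy axis; assumes NumPy is imported as ``np``):
+
+    >>> import numpy as np
+    >>> x = np.linspace(0, 2, 101)
+    >>> mod = ThermalDistributionModel(form='fermi')
+    >>> pars = mod.make_params(amplitude=1.0, center=1.0, kt=0.04)
+    >>> y = mod.eval(pars, x=x)
+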
+ """
+
+ valid_forms = ('bose', 'maxwell', 'fermi')
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ form='bose', **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'form': form, 'independent_vars': independent_vars})
+ super().__init__(thermal_distribution, **kwargs)
+ self._set_paramhints_prefix()
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ center = np.mean(x)
+ kt = (max(x) - min(x))/10
+
+        pars = self.make_params(center=center, kt=kt)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class DoniachModel(Model):
+ r"""A model of an Doniach Sunjic asymmetric lineshape.
+
+ This model is used in photo-emission and has four Parameters:
+ `amplitude` (:math:`A`), `center` (:math:`\mu`), `sigma`
+ (:math:`\sigma`), and `gamma` (:math:`\gamma`). In addition, parameter
+ `height` is included as a constraint to report maximum peak height.
+
+ .. math::
+
+ f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma^{1-\gamma}}
+        \frac{\cos\bigl[\pi\gamma/2 + (1-\gamma)\arctan\bigl((x - \mu)/\sigma\bigr)\bigr]}
+        {\bigl[1 + \bigl((x-\mu)/\sigma\bigr)^2\bigr]^{(1-\gamma)/2}}
+
+ For more information, see:
+ https://www.casaxps.com/help_manual/line_shapes.htm
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(doniach, **kwargs)
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ fmt = ("{prefix:s}amplitude/max({0}, ({prefix:s}sigma**(1-{prefix:s}gamma)))"
+ "*cos(pi*{prefix:s}gamma/2)")
+ self.set_param_hint('height', expr=fmt.format(tiny, prefix=self.prefix))
+
+ def guess(self, data, x, negative=False, **kwargs):
+ """Estimate initial model parameter values from data."""
+ pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class PowerLawModel(Model):
+ r"""A model based on a Power Law.
+
+ The model has two Parameters: `amplitude` (:math:`A`) and `exponent`
+ (:math:`k`) and is defined as:
+
+ .. math::
+
+ f(x; A, k) = A x^k
+
+ For more information, see: https://en.wikipedia.org/wiki/Power_law
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(powerlaw, **kwargs)
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ try:
+ expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
+ except TypeError:
+ expon, amp = 1, np.log(abs(max(data)+1.e-9))
+
+ pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class ExponentialModel(Model):
+ r"""A model based on an exponential decay function.
+
+ The model has two Parameters: `amplitude` (:math:`A`) and `decay`
+ (:math:`\tau`) and is defined as:
+
+ .. math::
+
+ f(x; A, \tau) = A e^{-x/\tau}
+
+ For more information, see:
+ https://en.wikipedia.org/wiki/Exponential_decay
+
+ """
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'independent_vars': independent_vars})
+ super().__init__(exponential, **kwargs)
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ try:
+ sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
+ except TypeError:
+ sval, oval = 1., np.log(abs(max(data)+1.e-9))
+ pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class StepModel(Model):
+ r"""A model based on a Step function.
+
+ The model has three Parameters: `amplitude` (:math:`A`), `center`
+ (:math:`\mu`), and `sigma` (:math:`\sigma`).
+
+ There are four choices for `form`:
+
+ - `'linear'` (default)
+ - `'atan'` or `'arctan'` for an arc-tangent function
+ - `'erf'` for an error function
+ - `'logistic'` for a logistic function (for more information, see:
+ https://en.wikipedia.org/wiki/Logistic_function)
+
+    The step function starts at a value of 0, ends at a value of
+    :math:`A`, and rises through :math:`A/2` at :math:`\mu`, with
+    :math:`\sigma` setting the characteristic width. The functional
+    forms are defined as:
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) & = A \min{[1, \max{(0, \alpha + 1/2)}]} \\
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
+ & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A \left[1 - \frac{1}{1 + e^{\alpha}} \right]
+ \end{eqnarray*}
+
+ where :math:`\alpha = (x - \mu)/{\sigma}`.
+
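+    Examples
+    --------
+    A minimal sketch fitting an error-function step to synthetic data
+    (assumes NumPy is imported as ``np``):
+
+    >>> import numpy as np
+    >>> x = np.linspace(0, 10, 201)
+    >>> y = np.where(x < 5, 0.0, 1.0)
+    >>> mod = StepModel(form='erf')
+    >>> out = mod.fit(y, mod.guess(y, x=x), x=x)
+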
+ """
+
+ valid_forms = ('linear', 'atan', 'arctan', 'erf', 'logistic')
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ form='linear', **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'form': form, 'independent_vars': independent_vars})
+ super().__init__(step, **kwargs)
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ ymin, ymax = min(data), max(data)
+ xmin, xmax = min(x), max(x)
+ pars = self.make_params(amplitude=(ymax-ymin),
+ center=(xmax+xmin)/2.0)
+ pars[f'{self.prefix}sigma'].set(value=(xmax-xmin)/7.0, min=0.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class RectangleModel(Model):
+ r"""A model based on a Step-up and Step-down function.
+
+ The model has five Parameters: `amplitude` (:math:`A`), `center1`
+ (:math:`\mu_1`), `center2` (:math:`\mu_2`), `sigma1`
+ (:math:`\sigma_1`), and `sigma2` (:math:`\sigma_2`).
+
+ There are four choices for `form`, which is used for both the Step up
+ and the Step down:
+
+ - `'linear'` (default)
+ - `'atan'` or `'arctan'` for an arc-tangent function
+ - `'erf'` for an error function
+ - `'logistic'` for a logistic function (for more information, see:
+ https://en.wikipedia.org/wiki/Logistic_function)
+
+    The function starts at a value of 0, transitions to a value of
+    :math:`A`, taking the value :math:`A/2` at :math:`\mu_1` with
+    :math:`\sigma_1` setting the characteristic width, and then
+    transitions back to 0, taking the value :math:`A/2` at :math:`\mu_2`
+    with :math:`\sigma_2` setting the characteristic width. The
+    functional forms are defined as:
+
+ .. math::
+ :nowrap:
+
+ \begin{eqnarray*}
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}}) &= A \{ \min{[1, \max{(-1, \alpha_1)}]} + \min{[1, \max{(-1, \alpha_2)}]} \}/2 \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}}) &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}}) &= A \left[{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)\right]/2 \\
+ &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A \left[1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 + e^{\alpha_2}} \right]
+ \end{eqnarray*}
+
+
+ where :math:`\alpha_1 = (x - \mu_1)/{\sigma_1}` and
+ :math:`\alpha_2 = -(x - \mu_2)/{\sigma_2}`.
+
+ """
+
+ valid_forms = ('linear', 'atan', 'arctan', 'erf', 'logistic')
+
+ def __init__(self, independent_vars=['x'], prefix='', nan_policy='raise',
+ form='linear', **kwargs):
+ kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
+ 'form': form, 'independent_vars': independent_vars})
+ super().__init__(rectangle, **kwargs)
+
+ self._set_paramhints_prefix()
+
+ def _set_paramhints_prefix(self):
+ self.set_param_hint('center1')
+ self.set_param_hint('center2')
+ self.set_param_hint('midpoint',
+ expr=f'({self.prefix}center1+{self.prefix}center2)/2.0')
+
+ def guess(self, data, x, **kwargs):
+ """Estimate initial model parameter values from data."""
+ ymin, ymax = min(data), max(data)
+ xmin, xmax = min(x), max(x)
+ pars = self.make_params(amplitude=(ymax-ymin),
+ center1=(xmax+xmin)/4.0,
+ center2=3*(xmax+xmin)/4.0)
+ pars[f'{self.prefix}sigma1'].set(value=(xmax-xmin)/7.0, min=0.0)
+ pars[f'{self.prefix}sigma2'].set(value=(xmax-xmin)/7.0, min=0.0)
+ return update_param_vals(pars, self.prefix, **kwargs)
+
+ __init__.__doc__ = COMMON_INIT_DOC
+ guess.__doc__ = COMMON_GUESS_DOC
+
+
+class ExpressionModel(Model):
+ """ExpressionModel class."""
+
+ idvar_missing = "No independent variable found in\n {}"
+ idvar_notfound = "Cannot find independent variables '{}' in\n {}"
+ no_prefix = "ExpressionModel does not support `prefix` argument"
+
+ def __init__(self, expr, independent_vars=None, init_script=None,
+ nan_policy='raise', **kws):
+ """Generate a model from user-supplied expression.
+
+ Parameters
+ ----------
+ expr : str
+ Mathematical expression for model.
+ independent_vars : :obj:`list` of :obj:`str` or None, optional
+ Variable names to use as independent variables.
+ init_script : str or None, optional
+ Initial script to run in asteval interpreter.
+        nan_policy : {'raise', 'propagate', 'omit'}, optional
+ How to handle NaN and missing values in data. See Notes below.
+ **kws : optional
+ Keyword arguments to pass to :class:`Model`.
+
+ Notes
+ -----
+ 1. each instance of ExpressionModel will create and use its own
+ version of an asteval interpreter.
+
+ 2. `prefix` is **not supported** for ExpressionModel.
+
+ 3. `nan_policy` sets what to do when a NaN or missing value is
+ seen in the data. Should be one of:
+
+ - `'raise'` : raise a `ValueError` (default)
+ - `'propagate'` : do nothing
+ - `'omit'` : drop missing data
+
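+        Examples
+        --------
+        A minimal sketch with a user-supplied damped sine; parameter
+        names are discovered from the expression:
+
+        >>> mod = ExpressionModel('off + amp * exp(-x/x0) * sin(x*phase)')
+        >>> params = mod.make_params(off=0.25, amp=1.0, x0=2.0, phase=0.04)
+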
+ """
+ # create ast evaluator, load custom functions
+ self.asteval = Interpreter()
+ for name in lineshapes.functions:
+ self.asteval.symtable[name] = getattr(lineshapes, name, None)
+ if init_script is not None:
+ self.asteval.eval(init_script)
+
+ # save expr as text, parse to ast, save for later use
+ self.expr = expr.strip()
+ self.astcode = self.asteval.parse(self.expr)
+
+        # find all symbol names used in the expression
+ sym_names = get_ast_names(self.astcode)
+
+ if independent_vars is None and 'x' in sym_names:
+ independent_vars = ['x']
+ if independent_vars is None:
+ raise ValueError(self.idvar_missing.format(self.expr))
+
+ # determine which named symbols are parameter names,
+ # try to find all independent variables
+ idvar_found = [False]*len(independent_vars)
+ param_names = []
+ for name in sym_names:
+ if name in independent_vars:
+ idvar_found[independent_vars.index(name)] = True
+ elif name not in param_names and name not in self.asteval.symtable:
+ param_names.append(name)
+
+        # make sure all independent variables were found in the expression
+ if not all(idvar_found):
+ lost = []
+ for ix, found in enumerate(idvar_found):
+ if not found:
+ lost.append(independent_vars[ix])
+ lost = ', '.join(lost)
+ raise ValueError(self.idvar_notfound.format(lost, self.expr))
+
+ kws['independent_vars'] = independent_vars
+ if 'prefix' in kws:
+ raise Warning(self.no_prefix)
+
+ def _eval(**kwargs):
+ for name, val in kwargs.items():
+ self.asteval.symtable[name] = val
+ self.asteval.start_time = time.time()
+ return self.asteval.run(self.astcode)
+
+ kws["nan_policy"] = nan_policy
+
+ super().__init__(_eval, **kws)
+
+ # set param names here, and other things normally
+ # set in _parse_params(), which will be short-circuited.
+ self.independent_vars = independent_vars
+ self._func_allargs = independent_vars + param_names
+ self._param_names = param_names
+ self._func_haskeywords = True
+ self.def_vals = {}
+
+ def __repr__(self):
+ """Return printable representation of ExpressionModel."""
+ return f"<lmfit.ExpressionModel('{self.expr}')>"
+
+ def _parse_params(self):
+ """Over-write ExpressionModel._parse_params with `pass`.
+
+ This prevents normal parsing of function for parameter names.
+
+ """
+
+
+lmfit_models = {'Constant': ConstantModel,
+ 'Complex Constant': ComplexConstantModel,
+ 'Linear': LinearModel,
+ 'Quadratic': QuadraticModel,
+ 'Polynomial': PolynomialModel,
+ 'Spline': SplineModel,
+ 'Gaussian': GaussianModel,
+ 'Gaussian-2D': Gaussian2dModel,
+ 'Lorentzian': LorentzianModel,
+ 'Split-Lorentzian': SplitLorentzianModel,
+ 'Voigt': VoigtModel,
+ 'PseudoVoigt': PseudoVoigtModel,
+ 'Moffat': MoffatModel,
+ 'Pearson4': Pearson4Model,
+ 'Pearson7': Pearson7Model,
+ 'StudentsT': StudentsTModel,
+ 'Breit-Wigner': BreitWignerModel,
+ 'Log-Normal': LognormalModel,
+ 'Damped Oscillator': DampedOscillatorModel,
+ 'Damped Harmonic Oscillator': DampedHarmonicOscillatorModel,
+ 'Exponential Gaussian': ExponentialGaussianModel,
+ 'Skewed Gaussian': SkewedGaussianModel,
+ 'Skewed Voigt': SkewedVoigtModel,
+ 'Thermal Distribution': ThermalDistributionModel,
+ 'Doniach': DoniachModel,
+ 'Power Law': PowerLawModel,
+ 'Exponential': ExponentialModel,
+ 'Step': StepModel,
+ 'Rectangle': RectangleModel,
+ 'Expression': ExpressionModel}
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
new file mode 100644
index 0000000..d595997
--- /dev/null
+++ b/lmfit/parameter.py
@@ -0,0 +1,1084 @@
+"""Parameter class."""
+
+from copy import deepcopy
+import json
+
+from asteval import Interpreter, get_ast_names, valid_symbol_name
+from numpy import arcsin, array, cos, inf, isclose, sin, sqrt
+import scipy.special
+
+from .jsonutils import decode4js, encode4js
+from .lineshapes import tiny
+from .printfuncs import params_html_table
+
+SCIPY_FUNCTIONS = {'gamfcn': scipy.special.gamma,
+                   'loggammafcn': scipy.special.loggamma,
+                   'betalnfnc': scipy.special.betaln}
+for fnc_name in ('erf', 'erfc', 'wofz'):
+ SCIPY_FUNCTIONS[fnc_name] = getattr(scipy.special, fnc_name)
+
+
+def check_ast_errors(expr_eval):
+ """Check for errors derived from asteval."""
+ if len(expr_eval.error) > 0:
+ expr_eval.raise_exception(None)
+
+
+class Parameters(dict):
+ """A dictionary of Parameter objects.
+
+ It should contain all Parameter objects that are required to specify
+ a fit model. All minimization and Model fitting routines in lmfit will
+ use exactly one Parameters object, typically given as the first
+ argument to the objective function.
+
+ All keys of a Parameters() instance must be strings and valid Python
+ symbol names, so that the name must match ``[a-z_][a-z0-9_]*`` and
+ cannot be a Python reserved word.
+
+ All values of a Parameters() instance must be Parameter objects.
+
+ A Parameters() instance includes an `asteval` Interpreter used for
+ evaluation of constrained Parameters.
+
+ Parameters() support copying and pickling, and have methods to convert
+ to and from serializations using json strings.
+
+ """
+
+ def __init__(self, usersyms=None):
+ """
+        Parameters
+        ----------
+ usersyms : dict, optional
+ Dictionary of symbols to add to the
+ :class:`asteval.Interpreter` (default is None).
+
+ """
+ super().__init__(self)
+
+ self._asteval = Interpreter()
+
+ _syms = {}
+ _syms.update(SCIPY_FUNCTIONS)
+ if usersyms is not None:
+ _syms.update(usersyms)
+ for key, val in _syms.items():
+ self._asteval.symtable[key] = val
+
+ def copy(self):
+ """Parameters.copy() should always be a deepcopy."""
+ return self.__deepcopy__(None)
+
+ def update(self, other):
+ """Update values and symbols with another Parameters object."""
+ if not isinstance(other, Parameters):
+ raise ValueError(f"'{other}' is not a Parameters object")
+ self.add_many(*other.values())
+ for sym in other._asteval.user_defined_symbols():
+ self._asteval.symtable[sym] = other._asteval.symtable[sym]
+ return self
+
+ def __copy__(self):
+ """Parameters.copy() should always be a deepcopy."""
+ return self.__deepcopy__(None)
+
+ def __deepcopy__(self, memo):
+ """Implementation of Parameters.deepcopy().
+
+ The method needs to make sure that `asteval` is available and that
+ all individual Parameter objects are copied.
+
+ """
+ _pars = self.__class__()
+
+ # find the symbols that were added by users, not during construction
+ unique_symbols = {}
+ for key in self._asteval.user_defined_symbols():
+ try:
+ val = deepcopy(self._asteval.symtable[key])
+ unique_symbols[key] = val
+ except (TypeError, ValueError):
+ unique_symbols[key] = self._asteval.symtable[key]
+
+ _pars._asteval.symtable.update(unique_symbols)
+
+        # we're just about to add a lot of Parameter objects to the newly
+        # created Parameters instance
+ parameter_list = []
+ for key, par in self.items():
+ if isinstance(par, Parameter):
+ param = Parameter(name=par.name,
+ value=par.value,
+ min=par.min,
+ max=par.max)
+ param.vary = par.vary
+ param.brute_step = par.brute_step
+ param.stderr = par.stderr
+ param.correl = deepcopy(par.correl)
+ param.init_value = par.init_value
+ param.expr = par.expr
+ param.user_data = deepcopy(par.user_data)
+ parameter_list.append(param)
+
+ _pars.add_many(*parameter_list)
+
+ return _pars
+
+ def __setitem__(self, key, par):
+ """Set items of Parameters object."""
+ if key not in self and not valid_symbol_name(key):
+ raise KeyError(f"'{key}' is not a valid Parameters name")
+ if par is not None and not isinstance(par, Parameter):
+ raise ValueError(f"'{par}' is not a Parameter")
+ dict.__setitem__(self, key, par)
+ par.name = key
+ par._expr_eval = self._asteval
+ self._asteval.symtable[key] = par.value
+
+ def __add__(self, other):
+ """Add Parameters objects."""
+ if not isinstance(other, Parameters):
+ raise ValueError(f"'{other}' is not a Parameters object")
+ out = deepcopy(self)
+ out.add_many(*other.values())
+ for sym in other._asteval.user_defined_symbols():
+ if sym not in out._asteval.symtable:
+ out._asteval.symtable[sym] = other._asteval.symtable[sym]
+ return out
+
+ def __iadd__(self, other):
+ """Add/assign Parameters objects."""
+ self.update(other)
+ return self
+
+ def __array__(self):
+ """Convert Parameters to array."""
+ return array([float(k) for k in self.values()])
+
+ def __reduce__(self):
+ """Reduce Parameters instance such that it can be pickled."""
+ # make a list of all the parameters
+ params = [self[k] for k in self]
+
+ # find the symbols from _asteval.symtable, that need to be remembered.
+ sym_unique = self._asteval.user_defined_symbols()
+ unique_symbols = {key: deepcopy(self._asteval.symtable[key])
+ for key in sym_unique}
+
+ return self.__class__, (), {'unique_symbols': unique_symbols,
+ 'params': params}
+
+ def __setstate__(self, state):
+ """Unpickle a Parameters instance.
+
+ Parameters
+ ----------
+ state : dict
+ state['unique_symbols'] is a dictionary containing symbols
+ that need to be injected into `_asteval.symtable`.
+ state['params'] is a list of Parameter instances to be added.
+
+ """
+        # first update the Interpreter symbol table. This needs to be done
+        # first because Parameters early in the list may depend on later
+        # ones. This matters because add_many eventually leads to a
+        # Parameter value being retrieved with _getval, which, if the
+        # dependent value has not yet been added to the symtable, raises
+        # an error. An alternative would be to remove all the expressions
+        # from the Parameter instances before they are added, then restore
+        # them afterwards.
+
+ symtab = self._asteval.symtable
+ for key, val in state['unique_symbols'].items():
+ if key not in symtab:
+ symtab[key] = val
+
+ # then add all the parameters
+ self.add_many(*state['params'])
+
+ def __repr__(self):
+ """__repr__ from OrderedDict."""
+ if not self:
+ return f'{self.__class__.__name__}()'
+ return f'{self.__class__.__name__}({list(self.items())!r})'
+
+ def eval(self, expr):
+ """Evaluate a statement using the `asteval` Interpreter.
+
+ Parameters
+ ----------
+ expr : str
+ An expression containing parameter names and other symbols
+ recognizable by the `asteval` Interpreter.
+
+ Returns
+ -------
+ float
+ The result of evaluating the expression.
+
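+        Examples
+        --------
+        A small sketch with two hypothetical parameters:
+
+        >>> pars = Parameters()
+        >>> pars.add('a', value=2.0)
+        >>> pars.add('b', value=3.0)
+        >>> val = pars.eval('2*a + b/2')  # 5.5
+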
+ """
+ return self._asteval.eval(expr)
+
+ def update_constraints(self):
+ """Update all constrained parameters.
+
+ This method ensures that dependencies are evaluated as needed.
+
+ """
+ requires_update = {name for name, par in self.items() if par._expr is
+ not None}
+ updated_tracker = set(requires_update)
+
+ def _update_param(name):
+ """Update a parameter value, including setting bounds.
+
+ For a constrained parameter (one with an `expr` defined), this
+ first updates (recursively) all parameters on which the
+ parameter depends (using the 'deps' field).
+
+ """
+ par = self.__getitem__(name)
+ if par._expr_eval is None:
+ par._expr_eval = self._asteval
+ for dep in par._expr_deps:
+ if dep in updated_tracker:
+ _update_param(dep)
+ self._asteval.symtable[name] = par.value
+ updated_tracker.discard(name)
+
+ for name in requires_update:
+ _update_param(name)
+
+ def pretty_repr(self, oneline=False):
+ """Return a pretty representation of a Parameters class.
+
+ Parameters
+ ----------
+ oneline : bool, optional
+ If True prints a one-line parameters representation (default
+ is False).
+
+ Returns
+ -------
+        s : str
+ Parameters representation.
+
+ """
+ if oneline:
+ return self.__repr__()
+ s = "Parameters({\n"
+ for key in self.keys():
+ s += f" '{key}': {self[key]}, \n"
+ s += " })\n"
+ return s
+
+ def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
+ columns=['value', 'min', 'max', 'stderr', 'vary', 'expr',
+ 'brute_step']):
+ """Pretty-print of parameters data.
+
+ Parameters
+ ----------
+ oneline : bool, optional
+ If True prints a one-line parameters representation (default
+ is False).
+ colwidth : int, optional
+ Column width for all columns specified in `columns` (default
+ is 8).
+ precision : int, optional
+ Number of digits to be printed after floating point (default
+ is 4).
+ fmt : {'g', 'e', 'f'}, optional
+ Single-character numeric formatter. Valid values are: `'g'`
+ floating point and exponential (default), `'e'` exponential,
+ or `'f'` floating point.
+ columns : :obj:`list` of :obj:`str`, optional
+ List of :class:`Parameter` attribute names to print (default
+ is to show all attributes).
+
+ """
+ if oneline:
+ print(self.pretty_repr(oneline=oneline))
+ return
+
+ name_len = max(len(s) for s in self)
+ allcols = ['name'] + columns
+ title = '{:{name_len}} ' + len(columns) * ' {:>{n}}'
+ print(title.format(*allcols, name_len=name_len, n=colwidth).title())
+ numstyle = '{%s:>{n}.{p}{f}}' # format for numeric columns
+ otherstyles = dict(name='{name:<{name_len}} ', stderr='{stderr!s:>{n}}',
+ vary='{vary!s:>{n}}', expr='{expr!s:>{n}}',
+ brute_step='{brute_step!s:>{n}}')
+ line = ' '.join(otherstyles.get(k, numstyle % k) for k in allcols)
+ for name, values in sorted(self.items()):
+ pvalues = {k: getattr(values, k) for k in columns}
+ pvalues['name'] = name
+            # stderr and brute_step are special cases: each is either
+            # numeric or None (and then rendered via the '!s' format)
+            if 'stderr' in columns and pvalues['stderr'] is not None:
+                pvalues['stderr'] = (numstyle % '').format(
+                    pvalues['stderr'], n=colwidth, p=precision, f=fmt)
+            if 'brute_step' in columns and pvalues['brute_step'] is not None:
+                pvalues['brute_step'] = (numstyle % '').format(
+                    pvalues['brute_step'], n=colwidth, p=precision, f=fmt)
+ print(line.format(name_len=name_len, n=colwidth, p=precision,
+ f=fmt, **pvalues))
+
+ def _repr_html_(self):
+ """Return a HTML representation of parameters data."""
+ return params_html_table(self)
+
+ def set(self, **kws):
+ """Set Parameter values and other attributes.
+
+ Parameters
+ ----------
+ **kws : optional
+ Parameter names and initial values or dictionaries of
+ values and attributes.
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+        1. keyword argument names are used as parameter names.
+ 2. values can either be numbers (floats or integers) to set the
+ parameter value, or can be dictionaries with any of the following
+ keywords: ``value``, ``vary``, ``min``, ``max``, ``expr``,
+ ``brute_step``, or ``is_init_value`` to set those parameter attributes.
+ 3. for each parameter, ``is_init_value`` controls whether to set
+ ``init_value`` when setting ``value``, and defaults to True.
+
+ Examples
+ --------
+ >>> params = Parameters()
+ >>> params.add('xvar', value=0.50, min=0, max=1)
+ >>> params.add('yvar', expr='1.0 - xvar')
+ >>> params.set(xvar=0.80, zvar={'value':3, 'min':0})
+
+ """
+ for name, val in kws.items():
+ if name not in self:
+ self.__setitem__(name, Parameter(value=-inf, name=name,
+ vary=True, min=-inf, max=inf,
+ expr=None, brute_step=None))
+ par = self.__getitem__(name)
+ if isinstance(val, (float, int)):
+ val = {'value': val}
+ if 'is_init_value' not in val:
+ val['is_init_value'] = True
+ par.set(**val)
+
+ def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None,
+ brute_step=None):
+ """Add a Parameter.
+
+ Parameters
+ ----------
+ name : str or Parameter
+            If ``name`` refers to a Parameter object it will be added directly
+            to the Parameters instance, otherwise a new Parameter object with
+            that name is created before adding it. In both cases, ``name`` must
+ match ``[a-z_][a-z0-9_]*`` and cannot be a Python reserved word.
+ value : float, optional
+ Numerical Parameter value, typically the *initial value*.
+ vary : bool, optional
+ Whether the Parameter is varied during a fit (default is True).
+ min : float, optional
+ Lower bound for value (default is ``-numpy.inf``, no lower
+ bound).
+ max : float, optional
+ Upper bound for value (default is ``numpy.inf``, no upper
+ bound).
+ expr : str, optional
+ Mathematical expression used to constrain the value during the
+ fit (default is None).
+ brute_step : float, optional
+ Step size for grid points in the `brute` method (default is
+ None).
+
+ Examples
+ --------
+ >>> params = Parameters()
+ >>> params.add('xvar', value=0.50, min=0, max=1)
+ >>> params.add('yvar', expr='1.0 - xvar')
+
+ which is equivalent to:
+
+ >>> params = Parameters()
+ >>> params['xvar'] = Parameter(name='xvar', value=0.50, min=0, max=1)
+ >>> params['yvar'] = Parameter(name='yvar', expr='1.0 - xvar')
+
+ """
+ if isinstance(name, Parameter):
+ self.__setitem__(name.name, name)
+ else:
+ self.__setitem__(name, Parameter(value=value, name=name, vary=vary,
+ min=min, max=max, expr=expr,
+ brute_step=brute_step))
+
+ def add_many(self, *parlist):
+ """Add many parameters, using a sequence of tuples.
+
+ Parameters
+ ----------
+ *parlist : :obj:`sequence` of :obj:`tuple` or Parameter
+ A sequence of tuples, or a sequence of `Parameter` instances.
+ If it is a sequence of tuples, then each tuple must contain at
+ least a `name`. The order in each tuple must be
+ ``(name, value, vary, min, max, expr, brute_step)``.
+
+ Examples
+ --------
+ >>> params = Parameters()
+ # add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP)
+ >>> params.add_many(('amp', 10, True, None, None, None, None),
+ ... ('cen', 4, True, 0.0, None, None, None),
+ ... ('wid', 1, False, None, None, None, None),
+ ... ('frac', 0.5))
+ # add a sequence of Parameters
+ >>> f = Parameter('par_f', 100)
+ >>> g = Parameter('par_g', 2.)
+ >>> params.add_many(f, g)
+
+ """
+ __params = []
+ for par in parlist:
+ if not isinstance(par, Parameter):
+ par = Parameter(*par)
+ __params.append(par)
+ par._delay_asteval = True
+ self.__setitem__(par.name, par)
+
+ for para in __params:
+ para._delay_asteval = False
+
+ def valuesdict(self):
+ """Return an ordered dictionary of parameter values.
+
+ Returns
+ -------
+ dict
+ A dictionary of :attr:`name`::attr:`value` pairs for each
+ Parameter.
+
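+        Examples
+        --------
+        A small sketch:
+
+        >>> pars = Parameters()
+        >>> pars.add('amp', value=10.0)
+        >>> vals = pars.valuesdict()  # {'amp': 10.0}
+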
+ """
+ return {p.name: p.value for p in self.values()}
+
+ def dumps(self, **kws):
+ """Represent Parameters as a JSON string.
+
+ Parameters
+ ----------
+ **kws : optional
+ Keyword arguments that are passed to `json.dumps`.
+
+ Returns
+ -------
+ str
+ JSON string representation of Parameters.
+
+ See Also
+ --------
+ dump, loads, load, json.dumps
+
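+        Examples
+        --------
+        A round-trip sketch, serializing and restoring:
+
+        >>> pars = Parameters()
+        >>> pars.add('t1', value=3.0, min=0)
+        >>> saved = pars.dumps()
+        >>> restored = Parameters().loads(saved)
+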
+ """
+ params = [p.__getstate__() for p in self.values()]
+ sym_unique = self._asteval.user_defined_symbols()
+ unique_symbols = {key: encode4js(deepcopy(self._asteval.symtable[key]))
+ for key in sym_unique}
+ return json.dumps({'unique_symbols': unique_symbols,
+ 'params': params}, **kws)
+
+ def loads(self, s, **kws):
+ """Load Parameters from a JSON string.
+
+ Parameters
+ ----------
+        s : str
+            JSON string containing serialized Parameters.
+        **kws : optional
+            Keyword arguments that are passed to `json.loads`.
+
+ Returns
+ -------
+ Parameters
+ Updated Parameters from the JSON string.
+
+ Notes
+ -----
+ Current Parameters will be cleared before loading the data from
+ the JSON string.
+
+ See Also
+ --------
+ dump, dumps, load, json.loads
+
+ """
+ self.clear()
+
+ tmp = json.loads(s, **kws)
+ unique_symbols = {key: decode4js(tmp['unique_symbols'][key]) for key
+ in tmp['unique_symbols']}
+
+ state = {'unique_symbols': unique_symbols, 'params': []}
+ for parstate in tmp['params']:
+ _par = Parameter(name='')
+ _par.__setstate__(parstate)
+ state['params'].append(_par)
+ self.__setstate__(state)
+ return self
+
+ def dump(self, fp, **kws):
+ """Write JSON representation of Parameters to a file-like object.
+
+ Parameters
+ ----------
+ fp : file-like object
+ An open and `.write()`-supporting file-like object.
+ **kws : optional
+ Keyword arguments that are passed to `dumps`.
+
+ Returns
+ -------
+ int
+ Return value from `fp.write()`: the number of characters
+ written.
+
+ See Also
+ --------
+ dumps, load, json.dump
+
+ """
+ return fp.write(self.dumps(**kws))
+
+ def load(self, fp, **kws):
+ """Load JSON representation of Parameters from a file-like object.
+
+ Parameters
+ ----------
+ fp : file-like object
+ An open and `.read()`-supporting file-like object.
+ **kws : optional
+ Keyword arguments that are passed to `loads`.
+
+ Returns
+ -------
+ Parameters
+ Updated Parameters loaded from `fp`.
+
+ See Also
+ --------
+ dump, loads, json.load
+
+ """
+ return self.loads(fp.read(), **kws)
+
+
+class Parameter:
+ """A Parameter is an object that can be varied in a fit.
+
+ It is a central component of lmfit, and all minimization and modeling
+ methods use Parameter objects.
+
+ A Parameter has a `name` attribute, and a scalar floating point
+ `value`. It also has a `vary` attribute that describes whether the
+ value should be varied during the minimization. Finite bounds can be
+ placed on the Parameter's value by setting its `min` and/or `max`
+ attributes. A Parameter can also have its value determined by a
+ mathematical expression of other Parameter values held in the `expr`
+    attribute. Additional attributes include `brute_step`, used as the
+    step size in a brute-force minimization, and `user_data`, reserved
+    exclusively for the user's needs.
+
+ After a minimization, a Parameter may also gain other attributes,
+ including `stderr` holding the estimated standard error in the
+ Parameter's value, and `correl`, a dictionary of correlation values
+ with other Parameters used in the minimization.
+
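+    Examples
+    --------
+    A small sketch (hypothetical values):
+
+    >>> par = Parameter('amp', value=10.0, min=0, max=100.0)
+    >>> par.set(vary=False)
+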
+ """
+
+ def __init__(self, name, value=None, vary=True, min=-inf, max=inf,
+ expr=None, brute_step=None, user_data=None):
+ """
+ Parameters
+ ----------
+ name : str
+ Name of the Parameter.
+ value : float, optional
+ Numerical Parameter value.
+ vary : bool, optional
+ Whether the Parameter is varied during a fit (default is True).
+ min : float, optional
+ Lower bound for value (default is ``-numpy.inf``, no lower
+ bound).
+ max : float, optional
+ Upper bound for value (default is ``numpy.inf``, no upper
+ bound).
+ expr : str, optional
+ Mathematical expression used to constrain the value during the
+ fit (default is None).
+ brute_step : float, optional
+ Step size for grid points in the `brute` method (default is
+ None).
+ user_data : optional
+ User-definable extra attribute used for a Parameter (default
+ is None).
+
+ Attributes
+ ----------
+ stderr : float
+ The estimated standard error for the best-fit value.
+ correl : dict
+ A dictionary of the correlation with the other fitted
+ Parameters of the form::
+
+ {'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
+
+ """
+ self.name = name
+ self.user_data = user_data
+ self.init_value = value
+ self.min = min
+ self.max = max
+ self.brute_step = brute_step
+ self._vary = vary
+ self._expr = expr
+ self._expr_ast = None
+ self._expr_eval = None
+ self._expr_deps = []
+ self._delay_asteval = False
+ self.stderr = None
+ self.correl = None
+ self.from_internal = lambda val: val
+ self._val = value
+ self._init_bounds()
+
+ def set(self, value=None, vary=None, min=None, max=None, expr=None,
+ brute_step=None, is_init_value=True):
+ """Set or update Parameter attributes.
+
+ Parameters
+ ----------
+ value : float, optional
+ Numerical Parameter value.
+ vary : bool, optional
+ Whether the Parameter is varied during a fit.
+ min : float, optional
+ Lower bound for value. To remove a lower bound you must use
+ ``-numpy.inf``.
+ max : float, optional
+ Upper bound for value. To remove an upper bound you must use
+ ``numpy.inf``.
+ expr : str, optional
+ Mathematical expression used to constrain the value during the
+ fit. To remove a constraint you must supply an empty string.
+ brute_step : float, optional
+ Step size for grid points in the `brute` method. To remove the
+ step size you must use ``0``.
+        is_init_value : bool, optional
+            Whether to set the given value as `init_value` when setting
+            `value` (default is True).
+
+ Notes
+ -----
+ Each argument to `set()` has a default value of None, which will
+ leave the current value for the attribute unchanged. Thus, to lift
+ a lower or upper bound, passing in None will not work. Instead,
+ you must set these to ``-numpy.inf`` or ``numpy.inf``, as with::
+
+ par.set(min=None) # leaves lower bound unchanged
+ par.set(min=-numpy.inf) # removes lower bound
+
+        Similarly, to clear an expression, pass a blank string (not
+        None!), as with::
+
+ par.set(expr=None) # leaves expression unchanged
+ par.set(expr='') # removes expression
+
+ Explicitly setting a value or setting ``vary=True`` will also
+ clear the expression.
+
+ Finally, to clear the brute_step size, pass ``0``, not None::
+
+ par.set(brute_step=None) # leaves brute_step unchanged
+ par.set(brute_step=0) # removes brute_step
+
+ """
+ if vary is not None:
+ self._vary = vary
+ if vary:
+ self.__set_expression('')
+
+ if min is not None:
+ self.min = min
+
+ if max is not None:
+ self.max = max
+
+ # need to set this after min and max, so that it will use new
+ # bounds in the setter for value
+ if value is not None:
+ is_init_value = is_init_value or self.value in (None, -inf, inf)
+ self.value = value
+ if is_init_value:
+ self.init_value = value
+ self.__set_expression("")
+
+ if expr is not None:
+ self.__set_expression(expr)
+
+ if brute_step is not None:
+ if brute_step == 0.0:
+ self.brute_step = None
+ else:
+ self.brute_step = brute_step
+
+ def _init_bounds(self):
+ """Make sure initial bounds are self-consistent."""
+        # a _val of None is treated as -infinity.
+ if self.max is None:
+ self.max = inf
+ if self.min is None:
+ self.min = -inf
+ if self._val is None:
+ self._val = -inf
+ if self.min > self.max:
+ self.min, self.max = self.max, self.min
+ if isclose(self.min, self.max, atol=1e-13, rtol=1e-13):
+ raise ValueError(f"Parameter '{self.name}' has min == max")
+ if self._val > self.max:
+ self._val = self.max
+ if self._val < self.min:
+ self._val = self.min
+ self.setup_bounds()
+
+ def __getstate__(self):
+ """Get state for pickle."""
+ return (self.name, self.value, self._vary, self.expr, self.min,
+ self.max, self.brute_step, self.stderr, self.correl,
+ self.init_value, self.user_data)
+
+ def __setstate__(self, state):
+ """Set state for pickle."""
+ (self.name, _value, self._vary, self.expr, self.min, self.max,
+ self.brute_step, self.stderr, self.correl, self.init_value,
+ self.user_data) = state
+ self._expr_ast = None
+ self._expr_eval = None
+ self._expr_deps = []
+ self._delay_asteval = False
+ self._val = _value
+ self._init_bounds()
+ self.value = _value
+
+ def __repr__(self):
+ """Return printable representation of a Parameter object."""
+ s = []
+ sval = f"value={repr(self._getval())}"
+ if not self._vary and self._expr is None:
+ sval += " (fixed)"
+ elif self.stderr is not None:
+ sval += f" +/- {self.stderr:.3g}"
+ s.append(sval)
+ s.append(f"bounds=[{repr(self.min)}:{repr(self.max)}]")
+ if self._expr is not None:
+ s.append(f"expr='{self.expr}'")
+ if self.brute_step is not None:
+ s.append(f"brute_step={self.brute_step}")
+ return f"<Parameter '{self.name}', {', '.join(s)}>"
+
+ def setup_bounds(self):
+ """Set up Minuit-style internal/external parameter transformation
+ of min/max bounds.
+
+ As a side-effect, this also defines the self.from_internal method
+ used to re-calculate self.value from the internal value, applying
+ the inverse Minuit-style transformation. This method should be
+ called prior to passing a Parameter to the user-defined objective
+ function.
+
+        This code borrows heavily from JJ Helmus' leastsqbound.py.
+
+ Returns
+ -------
+ _val : float
+ The internal value for parameter from `self.value` (which holds
+ the external, user-expected value). This internal value should
+ actually be used in a fit.
+
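+        Notes
+        -----
+        As a sketch of the mapping (readable from the code below), the
+        inverse transformations applied by `from_internal` are::
+
+            min only :  value = min - 1 + sqrt(internal**2 + 1)
+            max only :  value = max + 1 - sqrt(internal**2 + 1)
+            min, max :  value = min + (sin(internal) + 1) * (max - min) / 2
+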
+ """
+ if self.min is None:
+ self.min = -inf
+ if self.max is None:
+ self.max = inf
+ if self.min == -inf and self.max == inf:
+ self.from_internal = lambda val: val
+ _val = self._val
+ elif self.max == inf:
+ self.from_internal = lambda val: self.min - 1.0 + sqrt(val*val + 1)
+ _val = sqrt((self._val - self.min + 1.0)**2 - 1)
+ elif self.min == -inf:
+ self.from_internal = lambda val: self.max + 1 - sqrt(val*val + 1)
+ _val = sqrt((self.max - self._val + 1.0)**2 - 1)
+ else:
+ self.from_internal = lambda val: self.min + (sin(val) + 1) * \
+ (self.max - self.min) / 2.0
+ _val = arcsin(2*(self._val - self.min)/(self.max - self.min) - 1)
+ if abs(_val) < tiny:
+ _val = 0.0
+ return _val
+
+ def scale_gradient(self, val):
+ """Return scaling factor for gradient.
+
+ Parameters
+ ----------
+ val : float
+ Numerical Parameter value.
+
+ Returns
+ -------
+        float
+            Scaling factor for the gradient according to the Minuit-style
+            transformation: the derivative of the `from_internal` mapping.
+
+ """
+ if self.min == -inf and self.max == inf:
+ return 1.0
+ if self.max == inf:
+ return val / sqrt(val*val + 1)
+ if self.min == -inf:
+ return -val / sqrt(val*val + 1)
+ return cos(val) * (self.max - self.min) / 2.0
+
+ def _getval(self):
+ """Get value, with bounds applied."""
+ # Note assignment to self._val has been changed to self.value
+ # The self.value property setter makes sure that the
+ # _expr_eval.symtable is kept up-to-date.
+ # If you just assign to self._val then _expr_eval.symtable[self.name]
+ # becomes stale if parameter.expr is not None.
+ if self._expr is not None:
+ if self._expr_ast is None:
+ self.__set_expression(self._expr)
+ if self._expr_eval is not None and not self._delay_asteval:
+ self.value = self._expr_eval(self._expr_ast)
+ check_ast_errors(self._expr_eval)
+ return self._val
+
+ @property
+ def value(self):
+ """Return the numerical Parameter value, with bounds applied."""
+ return self._getval()
+
+ @value.setter
+ def value(self, val):
+ """Set the numerical Parameter value."""
+ self._val = val
+ if self._val is not None:
+ if self._val > self.max:
+ self._val = self.max
+ elif self._val < self.min:
+ self._val = self.min
+ if not hasattr(self, '_expr_eval'):
+ self._expr_eval = None
+ if self._expr_eval is not None:
+ self._expr_eval.symtable[self.name] = self._val
+
+ @property
+ def vary(self):
+        """Return whether the parameter is variable."""
+ return self._vary
+
+ @vary.setter
+ def vary(self, val):
+        """Set whether the parameter is varied."""
+ self._vary = val
+ if val:
+ self.__set_expression('')
+
+ @property
+ def expr(self):
+        """Return the mathematical expression used to constrain the value during the fit."""
+ return self._expr
+
+ @expr.setter
+ def expr(self, val):
+        """Set the mathematical expression used to constrain the value during the fit.
+
+ To remove a constraint you must supply an empty string.
+
+ """
+ self.__set_expression(val)
+
+ def __set_expression(self, val):
+ if val == '':
+ val = None
+ self._expr = val
+ if val is not None:
+ self._vary = False
+ if not hasattr(self, '_expr_eval'):
+ self._expr_eval = None
+ if val is None:
+ self._expr_ast = None
+ if val is not None and self._expr_eval is not None:
+ self._expr_eval.error = []
+ self._expr_eval.error_msg = None
+ self._expr_ast = self._expr_eval.parse(val)
+ check_ast_errors(self._expr_eval)
+ self._expr_deps = get_ast_names(self._expr_ast)
+
+ def __array__(self):
+ """array"""
+ return array(float(self._getval()))
+
+ def __str__(self):
+ """string"""
+ return self.__repr__()
+
+ def __abs__(self):
+ """abs"""
+ return abs(self._getval())
+
+ def __neg__(self):
+ """neg"""
+ return -self._getval()
+
+ def __pos__(self):
+ """positive"""
+ return +self._getval()
+
+ def __bool__(self):
+ """bool"""
+ return self._getval() != 0
+
+ def __int__(self):
+ """int"""
+ return int(self._getval())
+
+ def __float__(self):
+ """float"""
+ return float(self._getval())
+
+ def __trunc__(self):
+ """trunc"""
+ return self._getval().__trunc__()
+
+ def __add__(self, other):
+ """+"""
+ return self._getval() + other
+
+ def __sub__(self, other):
+ """-"""
+ return self._getval() - other
+
+ def __truediv__(self, other):
+ """/"""
+ return self._getval() / other
+
+ def __floordiv__(self, other):
+ """//"""
+ return self._getval() // other
+
+ def __divmod__(self, other):
+ """divmod"""
+ return divmod(self._getval(), other)
+
+ def __mod__(self, other):
+ """%"""
+ return self._getval() % other
+
+ def __mul__(self, other):
+ """*"""
+ return self._getval() * other
+
+ def __pow__(self, other):
+ """**"""
+ return self._getval() ** other
+
+ def __gt__(self, other):
+ """>"""
+ return self._getval() > other
+
+ def __ge__(self, other):
+ """>="""
+ return self._getval() >= other
+
+ def __le__(self, other):
+ """<="""
+ return self._getval() <= other
+
+ def __lt__(self, other):
+ """<"""
+ return self._getval() < other
+
+ def __eq__(self, other):
+ """=="""
+ return self._getval() == other
+
+ def __ne__(self, other):
+ """!="""
+ return self._getval() != other
+
+ def __radd__(self, other):
+ """+ (right)"""
+ return other + self._getval()
+
+ def __rtruediv__(self, other):
+ """/ (right)"""
+ return other / self._getval()
+
+ def __rdivmod__(self, other):
+ """divmod (right)"""
+ return divmod(other, self._getval())
+
+ def __rfloordiv__(self, other):
+ """// (right)"""
+ return other // self._getval()
+
+ def __rmod__(self, other):
+ """% (right)"""
+ return other % self._getval()
+
+ def __rmul__(self, other):
+ """* (right)"""
+ return other * self._getval()
+
+ def __rpow__(self, other):
+ """** (right)"""
+ return other ** self._getval()
+
+ def __rsub__(self, other):
+ """- (right)"""
+ return other - self._getval()
+
+
+def create_params(**kws):
+ """Create lmfit.Parameters instance and set initial values and attributes.
+
+ Parameters
+ ----------
+    **kws
+        Keywords are parameter names; values are either numerical values
+        or dictionaries of Parameter values and attributes.
+
+ Returns
+ -------
+    Parameters
+        Parameters instance with the given names, values, and attributes.
+
+ Notes
+ -----
+ 1. keyword arguments will be used to create parameter names.
+ 2. values can either be numbers (floats or integers) to set the parameter
+ value, or can be dictionaries with any of the following keywords:
+ ``value``, ``vary``, ``min``, ``max``, ``expr``, ``brute_step``, or
+ ``is_init_value`` to set those parameter attributes.
+ 3. for each parameter, ``is_init_value`` controls whether to set
+ ``init_value`` when setting ``value``, and defaults to True.
+
+ Examples
+ --------
+    >>> params = create_params(amplitude=2, center=200,
+    ...                        sigma={'value': 3, 'min': 0},
+    ...                        fwhm={'expr': '2.0*sigma'})
+ """
+ params = Parameters()
+ params.set(**kws)
+ return params
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
new file mode 100644
index 0000000..dbdc98b
--- /dev/null
+++ b/lmfit/printfuncs.py
@@ -0,0 +1,460 @@
+"""Functions to display fitting results and confidence intervals."""
+
+from math import log10
+import re
+
+import numpy as np
+
+try:
+ import numdifftools # noqa: F401
+ HAS_NUMDIFFTOOLS = True
+except ImportError:
+ HAS_NUMDIFFTOOLS = False
+
+
+def alphanumeric_sort(s, _nsre=re.compile('([0-9]+)')):
+    """Return a key for natural (alphanumeric) sorting."""
+ return [int(text) if text.isdigit() else text.lower()
+ for text in re.split(_nsre, s)]
+
+
+def getfloat_attr(obj, attr, length=11):
+ """Format an attribute of an object for printing."""
+ val = getattr(obj, attr, None)
+ if val is None:
+ return 'unknown'
+ if isinstance(val, int):
+ return f'{val}'
+ if isinstance(val, float):
+ return gformat(val, length=length).strip()
+ return repr(val)
+
+
+def gformat(val, length=11):
+ """Format a number with '%g'-like format.
+
+    Except that:
+    a) the output string will be of the requested length.
+    b) positive numbers will have a leading blank.
+    c) the precision will be as high as possible.
+    d) trailing zeros will not be trimmed.
+
+ The precision will typically be ``length-7``.
+
+ Parameters
+ ----------
+ val : float
+ Value to be formatted.
+ length : int, optional
+ Length of output string (default is 11).
+
+ Returns
+ -------
+ str
+ String of specified length.
+
+    Notes
+    -----
+    Positive values will have a leading blank.
+
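+    Examples
+    --------
+    Illustrative outputs with the default length of 11::
+
+        >>> gformat(1.25)
+        ' 1.25000000'
+        >>> gformat(1.e-5)
+        ' 1.0000e-05'
+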
+ """
+ if val is None or isinstance(val, bool):
+ return f'{repr(val):>{length}s}'
+ try:
+ expon = int(log10(abs(val)))
+ except (OverflowError, ValueError):
+ expon = 0
+ except TypeError:
+ return f'{repr(val):>{length}s}'
+
+ length = max(length, 7)
+ form = 'e'
+ prec = length - 7
+ if abs(expon) > 99:
+ prec -= 1
+ elif ((expon > 0 and expon < (prec+4)) or
+ (expon <= 0 and -expon < (prec-1))):
+ form = 'f'
+ prec += 4
+ if expon > 0:
+ prec -= expon
+ return f'{val:{length}.{prec}{form}}'
+
+
+def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
+ sort_pars=False, correl_mode='list'):
+ """Generate a report of the fitting results.
+
+ The report contains the best-fit values for the parameters and their
+ uncertainties and correlations.
+
+ Parameters
+ ----------
+ inpars : Parameters
+ Input Parameters from fit or MinimizerResult returned from a fit.
+ modelpars : Parameters, optional
+ Known Model Parameters.
+ show_correl : bool, optional
+        Whether to show a list of sorted correlations (default is True).
+ min_correl : float, optional
+ Smallest correlation in absolute value to show (default is 0.1).
+ sort_pars : bool or callable, optional
+ Whether to show parameter names sorted in alphanumerical order. If
+ False (default), then the parameters will be listed in the order
+ they were added to the Parameters dictionary. If callable, then
+ this (one argument) function is used to extract a comparison key
+ from each list element.
+    correl_mode : {'list', 'table'}, optional
+ Mode for how to show correlations. Can be either 'list' (default)
+ to show a sorted (if ``sort_pars`` is True) list of correlation
+ values, or 'table' to show a complete, formatted table of
+ correlations.
+
+ Returns
+ -------
+ str
+ Multi-line text of fit report.
+
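+    Examples
+    --------
+    A minimal sketch, assuming ``result`` is a ``MinimizerResult`` from a
+    previous fit::
+
+        print(fit_report(result, show_correl=True, min_correl=0.3))
+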
+ """
+ from .parameter import Parameters
+ if isinstance(inpars, Parameters):
+ result, params = None, inpars
+ if hasattr(inpars, 'params'):
+ result = inpars
+ params = inpars.params
+
+ if sort_pars:
+ if callable(sort_pars):
+ key = sort_pars
+ else:
+ key = alphanumeric_sort
+ parnames = sorted(params, key=key)
+ else:
+ # dict.keys() returns a KeysView in py3, and they're indexed
+ # further down
+ parnames = list(params.keys())
+
+ buff = []
+ add = buff.append
+ namelen = max(len(n) for n in parnames)
+ if result is not None:
+ add("[[Fit Statistics]]")
+ add(f" # fitting method = {result.method}")
+ add(f" # function evals = {getfloat_attr(result, 'nfev')}")
+ add(f" # data points = {getfloat_attr(result, 'ndata')}")
+ add(f" # variables = {getfloat_attr(result, 'nvarys')}")
+ add(f" chi-square = {getfloat_attr(result, 'chisqr')}")
+ add(f" reduced chi-square = {getfloat_attr(result, 'redchi')}")
+ add(f" Akaike info crit = {getfloat_attr(result, 'aic')}")
+ add(f" Bayesian info crit = {getfloat_attr(result, 'bic')}")
+ if hasattr(result, 'rsquared'):
+ add(f" R-squared = {getfloat_attr(result, 'rsquared')}")
+ if not result.errorbars:
+ add("## Warning: uncertainties could not be estimated:")
+ if result.method in ('leastsq', 'least_squares') or HAS_NUMDIFFTOOLS:
+ parnames_varying = [par for par in result.params
+ if result.params[par].vary]
+ for name in parnames_varying:
+ par = params[name]
+ space = ' '*(namelen-len(name))
+ if par.init_value and np.allclose(par.value, par.init_value):
+ add(f' {name}:{space} at initial value')
+ if (np.allclose(par.value, par.min) or np.allclose(par.value, par.max)):
+ add(f' {name}:{space} at boundary')
+ else:
+ add(" this fitting method does not natively calculate uncertainties")
+ add(" and numdifftools is not installed for lmfit to do this. Use")
+ add(" `pip install numdifftools` for lmfit to estimate uncertainties")
+ add(" with this fitting method.")
+
+ add("[[Variables]]")
+ for name in parnames:
+ par = params[name]
+ space = ' '*(namelen-len(name))
+ nout = f"{name}:{space}"
+ inval = '(init = ?)'
+ if par.init_value is not None:
+ inval = f'(init = {par.init_value:.7g})'
+ if modelpars is not None and name in modelpars:
+ inval = f'{inval}, model_value = {modelpars[name].value:.7g}'
+ try:
+ sval = gformat(par.value)
+ except (TypeError, ValueError):
+ sval = ' Non Numeric Value?'
+ if par.stderr is not None:
+ serr = gformat(par.stderr)
+ try:
+ spercent = f'({abs(par.stderr/par.value):.2%})'
+ except ZeroDivisionError:
+ spercent = ''
+ sval = f'{sval} +/-{serr} {spercent}'
+
+ if par.vary:
+ add(f" {nout} {sval} {inval}")
+ elif par.expr is not None:
+ add(f" {nout} {sval} == '{par.expr}'")
+ else:
+ add(f" {nout} {par.value: .7g} (fixed)")
+
+ if show_correl and correl_mode.startswith('tab'):
+ add('[[Correlations]] ')
+ for line in correl_table(params).split('\n'):
+            add(f' {line}')
+ elif show_correl:
+ correls = {}
+ for i, name in enumerate(parnames):
+ par = params[name]
+ if not par.vary:
+ continue
+ if hasattr(par, 'correl') and par.correl is not None:
+ for name2 in parnames[i+1:]:
+ if (name != name2 and name2 in par.correl and
+ abs(par.correl[name2]) > min_correl):
+ correls[f"{name}, {name2}"] = par.correl[name2]
+
+ sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
+ sort_correl.reverse()
+ if len(sort_correl) > 0:
+ add('[[Correlations]] (unreported correlations are < '
+ f'{min_correl:.3f})')
+ maxlen = max(len(k) for k in list(correls.keys()))
+ for name, val in sort_correl:
+ lspace = max(0, maxlen - len(name))
+ add(f" C({name}){(' '*30)[:lspace]} = {val:+.4f}")
+ return '\n'.join(buff)
+
+
+def fitreport_html_table(result, show_correl=True, min_correl=0.1):
+ """Generate a report of the fitting result as an HTML table.
+
+ Parameters
+ ----------
+ result : MinimizerResult or ModelResult
+ Object containing the optimized parameters and several
+ goodness-of-fit statistics.
+ show_correl : bool, optional
+        Whether to show a list of sorted correlations (default is True).
+ min_correl : float, optional
+ Smallest correlation in absolute value to show (default is 0.1).
+
+ Returns
+ -------
+ str
+ Multi-line HTML code of fit report.
+
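+    Examples
+    --------
+    A minimal sketch, assuming ``result`` from a previous fit (the file
+    name is illustrative)::
+
+        with open('report.html', 'w') as fh:
+            fh.write(fitreport_html_table(result))
+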
+ """
+ html = []
+ add = html.append
+
+ def stat_row(label, val, val2=''):
+ add(f'<tr><td>{label}</td><td>{val}</td><td>{val2}</td></tr>')
+
+ add('<h2>Fit Statistics</h2>')
+ add('<table>')
+ stat_row('fitting method', result.method)
+ stat_row('# function evals', result.nfev)
+ stat_row('# data points', result.ndata)
+ stat_row('# variables', result.nvarys)
+ stat_row('chi-square', gformat(result.chisqr))
+ stat_row('reduced chi-square', gformat(result.redchi))
+ stat_row('Akaike info crit.', gformat(result.aic))
+ stat_row('Bayesian info crit.', gformat(result.bic))
+ if hasattr(result, 'rsquared'):
+ stat_row('R-squared', gformat(result.rsquared))
+ add('</table>')
+ add('<h2>Variables</h2>')
+ add(params_html_table(result.params))
+ if show_correl:
+ correls = []
+ parnames = list(result.params.keys())
+ for i, name in enumerate(result.params):
+ par = result.params[name]
+ if not par.vary:
+ continue
+ if hasattr(par, 'correl') and par.correl is not None:
+ for name2 in parnames[i+1:]:
+ if (name != name2 and name2 in par.correl and
+ abs(par.correl[name2]) > min_correl):
+ correls.append((name, name2, par.correl[name2]))
+ if len(correls) > 0:
+ sort_correls = sorted(correls, key=lambda val: abs(val[2]))
+ sort_correls.reverse()
+ extra = f'(unreported correlations are < {min_correl:.3f})'
+ add(f'<h2>Correlations {extra}</h2>')
+ add('<table>')
+ for name1, name2, val in sort_correls:
+ stat_row(name1, name2, f"{val:+.4f}")
+ add('</table>')
+ return ''.join(html)
+
+
+def correl_table(params):
+ """Return a printable correlation table for a Parameters object."""
+ varnames = [vname for vname in params if params[vname].vary]
+ nwid = max(8, max([len(vname) for vname in varnames])) + 1
+
+ def sfmt(a):
+ return f" {a:{nwid}s}"
+
+ def ffmt(a):
+ return sfmt(f"{a:+.4f}")
+
+ title = ['', sfmt('Variable')]
+ title.extend([sfmt(vname) for vname in varnames])
+
+ title = '|'.join(title) + '|'
+ bar = [''] + ['-'*(nwid+1) for i in range(len(varnames)+1)] + ['']
+ bar = '+'.join(bar)
+
+ buff = [bar, title, bar]
+
+ for vname, par in params.items():
+ if not par.vary:
+ continue
+ line = ['', sfmt(vname)]
+ for vother in varnames:
+ if vother == vname:
+ line.append(ffmt(1))
+ elif vother in par.correl:
+ line.append(ffmt(par.correl[vother]))
+ else:
+                line.append(sfmt('unknown'))
+ buff.append('|'.join(line) + '|')
+ buff.append(bar)
+ return '\n'.join(buff)
+
+
+def params_html_table(params):
+ """Return an HTML representation of Parameters.
+
+ Parameters
+ ----------
+ params : Parameters
+ Object containing the Parameters of the model.
+
+ Returns
+ -------
+ str
+ Multi-line HTML code of fitting parameters.
+
+ """
+ has_err = any(p.stderr is not None for p in params.values())
+ has_expr = any(p.expr is not None for p in params.values())
+ has_brute = any(p.brute_step is not None for p in params.values())
+
+ html = []
+ add = html.append
+
+ def cell(x, cat='td'):
+ return add(f'<{cat}> {x} </{cat}>')
+
+ add('<table><tr>')
+ headers = ['name', 'value']
+ if has_err:
+ headers.extend(['standard error', 'relative error'])
+ headers.extend(['initial value', 'min', 'max', 'vary'])
+ if has_expr:
+ headers.append('expression')
+ if has_brute:
+ headers.append('brute step')
+ for h in headers:
+ cell(h, cat='th')
+ add('</tr>')
+
+ for par in params.values():
+ rows = [par.name, gformat(par.value)]
+ if has_err:
+ serr = ''
+ spercent = ''
+ if par.stderr is not None:
+ serr = gformat(par.stderr)
+ try:
+ spercent = f'({abs(par.stderr/par.value):.2%})'
+ except ZeroDivisionError:
+ pass
+ rows.extend([serr, spercent])
+ rows.extend((par.init_value, gformat(par.min),
+ gformat(par.max), f'{par.vary}'))
+ if has_expr:
+ expr = ''
+ if par.expr is not None:
+ expr = par.expr
+ rows.append(expr)
+ if has_brute:
+ brute_step = 'None'
+ if par.brute_step is not None:
+ brute_step = gformat(par.brute_step)
+ rows.append(brute_step)
+
+ add('<tr>')
+ for r in rows:
+ cell(r)
+ add('</tr>')
+ add('</table>')
+ return ''.join(html)
+
+
+def report_fit(params, **kws):
+ """Print a report of the fitting results."""
+ print(fit_report(params, **kws))
+
+
+def ci_report(ci, with_offset=True, ndigits=5):
+ """Return text of a report for confidence intervals.
+
+ Parameters
+ ----------
+ ci : dict
+ The result of :func:`~lmfit.confidence.conf_interval`: a dictionary
+ containing a list of ``(sigma, vals)``-tuples for each parameter.
+ with_offset : bool, optional
+ Whether to subtract best value from all other values (default is
+ True).
+ ndigits : int, optional
+ Number of significant digits to show (default is 5).
+
+ Returns
+ -------
+ str
+ Text of formatted report on confidence intervals.
+
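+    Examples
+    --------
+    A minimal sketch, assuming ``ci`` was returned by a previous call to
+    ``conf_interval``::
+
+        print(ci_report(ci, with_offset=False, ndigits=3))
+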
+ """
+ maxlen = max(len(i) for i in ci)
+ buff = []
+ add = buff.append
+
+ def convp(x):
+ """Convert probabilities into header for CI report."""
+ if abs(x[0]) < 1.e-2:
+ return "_BEST_"
+ return f"{x[0] * 100:.2f}%"
+
+ title_shown = False
+ fmt_best = fmt_diff = "{0:.%if}" % ndigits
+ if with_offset:
+ fmt_diff = "{0:+.%if}" % ndigits
+ for name, row in ci.items():
+ if not title_shown:
+ add("".join([''.rjust(maxlen+1)] + [i.rjust(ndigits+5)
+ for i in map(convp, row)]))
+ title_shown = True
+ thisrow = [f" {name.ljust(maxlen)}:"]
+ offset = 0.0
+ if with_offset:
+ for cval, val in row:
+ if abs(cval) < 1.e-2:
+ offset = val
+ for cval, val in row:
+ if cval < 1.e-2:
+ sval = fmt_best.format(val)
+ else:
+ sval = fmt_diff.format(val-offset)
+ thisrow.append(sval.rjust(ndigits+5))
+ add("".join(thisrow))
+
+ return '\n'.join(buff)
+
+
+def report_ci(ci):
+ """Print a report for confidence intervals."""
+ print(ci_report(ci))
diff --git a/lmfit/version.py b/lmfit/version.py
new file mode 100644
index 0000000..7972c15
--- /dev/null
+++ b/lmfit/version.py
@@ -0,0 +1,4 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = '1.2.1'
+__version_tuple__ = version_tuple = (1, 2, 1)
diff --git a/publish_docs.sh b/publish_docs.sh
new file mode 100755
index 0000000..5ed4247
--- /dev/null
+++ b/publish_docs.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env sh
+
+# shell script for building the documentation and updating GitHub Pages
+
+cd doc
+echo '# Building lmfit documentation (PDF/EPUB/HTML)'
+make clean
+make all
+cd ../
+
+echo '# Building tarball of documentation'
+tar czf lmfit_docs.tar.gz -C doc/_build/html .
+
+echo "# Switching to gh-pages branch"
+git checkout gh-pages
+
+if [ $? -ne 0 ] ; then
+ echo ' failed.'
+    exit 1
+fi
+
+echo '# Clean-up old documentation files'
+rm -rf *.html *.js
+rm -rf _download _images _sources _static
+
+echo '# Unpack new documentation files'
+tar xzf lmfit_docs.tar.gz
+rm -f lmfit_docs.tar.gz
+rm -f .buildinfo
+
+echo '# Commit changes to gh-pages branch'
+export version=`git tag | sort -V | tail -1`
+git add *
+PRE_COMMIT_ALLOW_NO_CONFIG=1 git commit -am "DOC: update documentation for ${version}" --no-verify
+
+if [ $? -ne 0 ] ; then
+ echo ' failed.'
+    exit 1
+fi
+
+echo '# Please check the commit and if everything looks good, push the changes:'
+echo 'for example by doing: `git push` or `git push upstream`'
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..5a184b9
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,7 @@
+[build-system]
+requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools_scm]
+write_to = "lmfit/version.py"
+version_scheme = "post-release"
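+
+# With this configuration, distributions can typically be built with the
+# `build` package (listed in the `dev` extra in setup.cfg), e.g.:
+#   python -m build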
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..050d2f3
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,104 @@
+[metadata]
+name = lmfit
+description = Least-Squares Minimization with Bounds and Constraints
+long_description = file: README.rst
+long_description_content_type = text/x-rst
+author = LMFit Development Team
+author_email = matt.newville@gmail.com
+url = https://lmfit.github.io//lmfit-py/
+license = BSD 3-Clause
+platforms = any
+classifiers =
+ Development Status :: 5 - Production/Stable
+ Intended Audience :: Science/Research
+ Topic :: Scientific/Engineering
+ License :: OSI Approved :: BSD License
+ Operating System :: OS Independent
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3 :: Only
+ Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
+ Programming Language :: Python :: 3.11
+ Programming Language :: Python :: Implementation :: CPython
+ Programming Language :: Python :: Implementation :: PyPy
+keywords = curve-fitting, least-squares minimization
+project_urls =
+ Source = https://github.com/lmfit/lmfit-py
+ Changelog = https://lmfit.github.io/lmfit-py/whatsnew.html
+ Documentation = https://lmfit.github.io/lmfit-py/
+ Tracker = https://github.com/lmfit/lmfit-py/issues
+
+[options]
+packages = find:
+python_requires = >=3.7
+setup_requires = setuptools_scm
+install_requires =
+ asteval>=0.9.28
+ numpy>=1.19
+ scipy>=1.6
+ uncertainties>=3.1.4
+
+[options.packages.find]
+include =
+ lmfit
+
+[options.extras_require]
+dev =
+ build
+ check-wheel-contents
+ pre-commit
+ twine
+doc =
+ cairosvg
+ corner
+ dill
+ emcee>=3.0.0
+ ipykernel
+ jupyter_sphinx>=0.2.4
+ matplotlib
+ numdifftools
+ pandas
+ Pillow
+ pycairo;platform_system=="Windows"
+ Sphinx
+ sphinx-gallery>=0.10
+ sphinxcontrib-svg2pdfconverter
+ sympy
+test =
+ coverage
+ flaky
+ pytest
+ pytest-cov
+all =
+ %(dev)s
+ %(test)s
+ %(doc)s
+
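+# Illustrative installs using the extras defined above:
+#   pip install lmfit          (runtime dependencies only)
+#   pip install "lmfit[all]"   (adds the dev, test, and doc extras)
+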
+[isort]
+skip = lmfit/__init__.py,doc/conf.py
+known_third_party = asteval,dill,emcee,IPython,matplotlib,numdifftools,numpy,NISTModels,pandas,pytest,scipy,uncertainties
+known_first_party = lmfit,lmfit_testutils
+force_sort_within_sections = True
+
+[rstcheck]
+report = warning
+ignore_substitutions = release
+ignore_roles = scipydoc,numpydoc
+ignore_directives = autoclass,autodoc,autofunction,automethod,jupyter-execute,math
+
+[flake8]
+ignore = E121,E123,E126,E226,W503,W504,E501,E731
+exclude = doc/conf.py,lmfit/__init__.py
+
+[coverage:run]
+omit = tests/*
+
+[tool:pytest]
+addopts = --cov=lmfit --cov-report html
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..bac24a4
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+import setuptools
+
+if __name__ == "__main__":
+ setuptools.setup()
diff --git a/tests/NISTModels.py b/tests/NISTModels.py
new file mode 100644
index 0000000..792da57
--- /dev/null
+++ b/tests/NISTModels.py
@@ -0,0 +1,241 @@
+import os
+
+from numpy import arctan, array, cos, exp, log, sin
+
+from lmfit import Parameters
+
+thisdir, thisfile = os.path.split(__file__)
+NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD')
+
+
+def read_params(params):
+ if isinstance(params, Parameters):
+ return [par.value for par in params.values()]
+ else:
+ return params
+
+
+def Bennet5(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] * (b[1]+x)**(-1/b[2])
+
+
+def BoxBOD(b, x, y=0):
+ b = read_params(b)
+ model = b[0]*(1-exp(-b[1]*x))
+ return model - y
+
+
+def Chwirut(b, x, y=0):
+ b = read_params(b)
+ model = exp(-b[0]*x)/(b[1]+b[2]*x)
+ return model - y
+
+
+def DanWood(b, x, y=0):
+ b = read_params(b)
+ model = b[0]*x**b[1]
+ return model - y
+
+
+def ENSO(b, x, y=0):
+ b = read_params(b)
+ pi = 3.141592653589793238462643383279
+ model = b[0] + (b[1]*cos(2*pi*x/12) + b[2]*sin(2*pi*x/12) +
+ b[4]*cos(2*pi*x/b[3]) + b[5]*sin(2*pi*x/b[3]) +
+ b[7]*cos(2*pi*x/b[6]) + b[8]*sin(2*pi*x/b[6]))
+ return model - y
+
+
+def Eckerle4(b, x, y=0):
+ b = read_params(b)
+ model = (b[0]/b[1]) * exp(-0.5*((x-b[2])/b[1])**2)
+ return model - y
+
+
+def Gauss(b, x, y=0):
+ b = read_params(b)
+ model = (b[0]*exp(-b[1]*x) + (b[2]*exp(-(x-b[3])**2 / b[4]**2) +
+ b[5]*exp(-(x-b[6])**2 / b[7]**2)))
+ return model - y
+
+
+def Hahn1(b, x, y=0):
+ b = read_params(b)
+ model = (b[0]+b[1]*x+b[2]*x**2+b[3]*x**3) / (1+b[4]*x+b[5]*x**2+b[6]*x**3)
+ return model - y
+
+
+def Kirby(b, x, y=0):
+ b = read_params(b)
+ model = (b[0] + b[1]*x + b[2]*x**2) / (1 + b[3]*x + b[4]*x**2)
+ return model - y
+
+
+def Lanczos(b, x, y=0):
+ b = read_params(b)
+ model = b[0]*exp(-b[1]*x) + b[2]*exp(-b[3]*x) + b[4]*exp(-b[5]*x)
+ return model - y
+
+
+def MGH09(b, x, y=0):
+ b = read_params(b)
+ model = b[0]*(x**2+x*b[1]) / (x**2+x*b[2]+b[3])
+ return model - y
+
+
+def MGH10(b, x, y=0):
+ b = read_params(b)
+ model = b[0] * exp(b[1]/(x+b[2]))
+ return model - y
+
+
+def MGH17(b, x, y=0):
+ b = read_params(b)
+ model = b[0] + b[1]*exp(-x*b[3]) + b[2]*exp(-x*b[4])
+ return model - y
+
+
+def Misra1a(b, x, y=0):
+ b = read_params(b)
+ model = b[0]*(1-exp(-b[1]*x))
+ return model - y
+
+
+def Misra1b(b, x, y=0):
+ b = read_params(b)
+ model = b[0] * (1-(1+b[1]*x/2)**(-2))
+ return model - y
+
+
+def Misra1c(b, x, y=0):
+ b = read_params(b)
+ model = b[0] * (1-(1+2*b[1]*x)**(-.5))
+ return model - y
+
+
+def Misra1d(b, x, y=0):
+ b = read_params(b)
+ model = b[0]*b[1]*x*((1+b[1]*x)**(-1))
+ return model - y
+
+
+def Nelson(b, x, y=None):
+ b = read_params(b)
+ x1 = x[:, 0]
+ x2 = x[:, 1]
+ model = b[0] - b[1]*x1 * exp(-b[2]*x2)
+ return model - log(y)
+
+
+def Rat42(b, x, y=0):
+ b = read_params(b)
+ model = b[0] / (1+exp(b[1]-b[2]*x))
+ return model - y
+
+
+def Rat43(b, x, y=0):
+ b = read_params(b)
+ model = b[0] / ((1+exp(b[1]-b[2]*x))**(1/b[3]))
+ return model - y
+
+
+def Roszman1(b, x, y=0):
+ b = read_params(b)
+ pi = 3.141592653589793238462643383279
+ model = b[0] - b[1]*x - arctan(b[2]/(x-b[3]))/pi
+ return model - y
+
+
+def Thurber(b, x, y=0):
+ b = read_params(b)
+ model = ((b[0] + b[1]*x + b[2]*x**2 + b[3]*x**3) /
+ (1 + b[4]*x + b[5]*x**2 + b[6]*x**3))
+ return model - y
+
+
+# Model name fcn, #fitting params, dim of x
+Models = {'Bennett5': (Bennet5, 3, 1),
+ 'BoxBOD': (BoxBOD, 2, 1),
+ 'Chwirut1': (Chwirut, 3, 1),
+ 'Chwirut2': (Chwirut, 3, 1),
+ 'DanWood': (DanWood, 2, 1),
+ 'ENSO': (ENSO, 9, 1),
+ 'Eckerle4': (Eckerle4, 3, 1),
+ 'Gauss1': (Gauss, 8, 1),
+ 'Gauss2': (Gauss, 8, 1),
+ 'Gauss3': (Gauss, 8, 1),
+ 'Hahn1': (Hahn1, 7, 1),
+ 'Kirby2': (Kirby, 5, 1),
+ 'Lanczos1': (Lanczos, 6, 1),
+ 'Lanczos2': (Lanczos, 6, 1),
+ 'Lanczos3': (Lanczos, 6, 1),
+ 'MGH09': (MGH09, 4, 1),
+ 'MGH10': (MGH10, 3, 1),
+ 'MGH17': (MGH17, 5, 1),
+ 'Misra1a': (Misra1a, 2, 1),
+ 'Misra1b': (Misra1b, 2, 1),
+ 'Misra1c': (Misra1c, 2, 1),
+ 'Misra1d': (Misra1d, 2, 1),
+ 'Nelson': (Nelson, 3, 2),
+ 'Rat42': (Rat42, 3, 1),
+ 'Rat43': (Rat43, 4, 1),
+ 'Roszman1': (Roszman1, 4, 1),
+ 'Thurber': (Thurber, 7, 1)}
+
+
+def ReadNistData(dataset):
+ """NIST STRD data is in a simple, fixed format with
+ line numbers being significant!
+ """
+ finp = open(os.path.join(NIST_DIR, f"{dataset}.dat"))
+ lines = [line[:-1] for line in finp.readlines()]
+ finp.close()
+ ModelLines = lines[30:39]
+ ParamLines = lines[40:58]
+ DataLines = lines[60:]
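+    # The slices above follow the fixed NIST StRD layout: the model
+    # description sits on file lines 31-39, the parameter/statistics
+    # block on lines 41-58, and the data table from line 61 onward.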
+
+ words = ModelLines[1].strip().split()
+ nparams = int(words[0])
+
+ start1 = [0]*nparams
+ start2 = [0]*nparams
+ certval = [0]*nparams
+ certerr = [0]*nparams
+ for i, text in enumerate(ParamLines[:nparams]):
+ [s1, s2, val, err] = [float(x) for x in text.split('=')[1].split()]
+ start1[i] = s1
+ start2[i] = s2
+ certval[i] = val
+ certerr[i] = err
+
+ for t in ParamLines[nparams:]:
+ t = t.strip()
+ if ':' not in t:
+ continue
+ val = float(t.split(':')[1])
+ if t.startswith('Residual Sum of Squares'):
+ sum_squares = val
+ elif t.startswith('Residual Standard Deviation'):
+ std_dev = val
+ elif t.startswith('Degrees of Freedom'):
+ nfree = int(val)
+ elif t.startswith('Number of Observations'):
+ ndata = int(val)
+
+ y, x = [], []
+ for d in DataLines:
+ vals = [float(i) for i in d.strip().split()]
+ y.append(vals[0])
+ if len(vals) > 2:
+ x.append(vals[1:])
+ else:
+ x.append(vals[1])
+
+ y = array(y)
+ x = array(x)
+ out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata,
+ 'nfree': nfree, 'start1': start1, 'start2': start2,
+ 'sum_squares': sum_squares, 'std_dev': std_dev,
+ 'cert': certval, 'cert_values': certval, 'cert_stderr': certerr}
+ return out
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..ac69033
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,32 @@
+import os
+
+import numpy as np
+import pytest
+
+import lmfit
+
+
+@pytest.fixture
+def minimizer_Alpine02():
+ """Return a lmfit Minimizer object for the Alpine02 function."""
+ def residual_Alpine02(params):
+ x0 = params['x0'].value
+ x1 = params['x1'].value
+ return np.prod(np.sqrt(x0) * np.sin(x0)) * np.prod(np.sqrt(x1) *
+ np.sin(x1))
+
+ # create Parameters and set initial values and bounds
+ pars = lmfit.Parameters()
+ pars.add_many(('x0', 1., True, 0.0, 10.0),
+ ('x1', 1., True, 0.0, 10.0))
+
+ mini = lmfit.Minimizer(residual_Alpine02, pars)
+ return mini
+
+
+@pytest.fixture
+def peakdata():
+ """Return the peak-like test data."""
+ data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'test_peak.dat'))
+ return data.T
diff --git a/tests/gauss_modelresult_lmfit100.sav b/tests/gauss_modelresult_lmfit100.sav
new file mode 100644
index 0000000..5756f2b
--- /dev/null
+++ b/tests/gauss_modelresult_lmfit100.sav
@@ -0,0 +1 @@
+{"__class__": "lmfit.ModelResult", "__version__": "1", "model": {"__class__": "Tuple", "value": [{"__class__": "Tuple", "value": ["gaussian", {"__class__": "Callable", "__name__": "gaussian", "pyversion": "3.10", "value": "gASVIQAAAAAAAACMEGxtZml0LmxpbmVzaGFwZXOUjAhnYXVzc2lhbpSTlC4=", "importer": null}, "gaussian", "", {"__class__": "List", "value": ["x"]}, {"__class__": "List", "value": ["amplitude", "center", "sigma"]}, {"__class__": "Dict", "sigma": {"__class__": "Dict", "min": 0.0}, "fwhm": {"__class__": "Dict", "expr": "2.3548200*sigma"}, "height": {"__class__": "Dict", "expr": "0.3989423*amplitude/max(2.220446049250313e-16, sigma)"}}, "raise", {"__class__": "Dict"}]}, null, null]}, "params": [["amplitude", 8.880222770531812, true, null, -Infinity, Infinity, null, 0.11359552348525037, {"center": 8.013053184908601e-10, "sigma": 0.5773502771310342}, 5, null], ["center", 5.658660808233311, true, null, -Infinity, Infinity, null, 0.010305056229246058, {"amplitude": 8.013053184908601e-10, "sigma": -5.9073974220763886e-08}, 5, null], ["sigma", 0.6976553846499309, true, null, 0, Infinity, null, 0.010305029116553194, {"amplitude": 0.5773502771310342, "center": -5.9073974220763886e-08}, 1, null], ["fwhm", 1.6428528528813504, false, "2.3548200*sigma", -Infinity, Infinity, null, 0.024266488625497608, null, 2.35482, null], ["height", 5.078003516544183, false, "0.3989423*amplitude/max(2.220446049250313e-16, sigma)", -Infinity, Infinity, null, 0.06495781093054068, null, 1.9947115000000002, null]], "unique_symbols": {"NAN": NaN, "None": null, "erf": {"__class__": "Callable", "__name__": "erf", "pyversion": "3.10", "value": "gASVIQAAAAAAAACMFXNjaXB5LnNwZWNpYWwuX3VmdW5jc5SMA2VyZpSTlC4=", "importer": null}, "fwhm": 1.6428528528813504, "e": 2.718281828459045, "pi": 3.141592653589793, "inf": Infinity, "nan": NaN, "height": 5.078003516544183, "amplitude": 8.880222770531812, "gamfcn": {"__class__": "Callable", "__name__": "gamma", "pyversion": "3.10", "value": "gASVIwAAAAAAAACMFXNjaXB5LnNwZWNpYWwuX3VmdW5jc5SMBWdhbW1hlJOULg==", "importer": null}, "infty": Infinity, "newaxis": null, "Inf": Infinity, "False": 0.0, "erfc": {"__class__": "Callable", "__name__": "erfc", "pyversion": "3.10", "value": "gASVIgAAAAAAAACMFXNjaXB5LnNwZWNpYWwuX3VmdW5jc5SMBGVyZmOUk5Qu", "importer": null}, "little_endian": 1.0, "wofz": {"__class__": "Callable", "__name__": "wofz", "pyversion": "3.10", "value": "gASVIgAAAAAAAACMFXNjaXB5LnNwZWNpYWwuX3VmdW5jc5SMBHdvZnqUk5Qu", "importer": null}, "True": 1.0, "sigma": 0.6976553846499309, "center": 5.658660808233311}, "aborted": 0.0, "aic": -336.26371326528914, "best_values": {"__class__": "Dict", "amplitude": 8.880222770531812, "center": 5.658660808233311, "sigma": 0.6976553846499309}, "bic": -328.4183517147654, "chisqr": 3.408835988076817, "ci_out": null, "col_deriv": 0.0, "covar": {"__class__": "NDArray", "__shape__": [3, 3], "__dtype__": "float64", "value": [0.012903942955888068, 9.3801462212821e-13, 0.0006758492233667576, 9.3801462212821e-13, 0.00010619418388792298, -6.273295976250341e-12, 0.0006758492233667576, -6.273295976250341e-12, 0.00010619362509300911]}, "errorbars": 1.0, "flatchain": null, "ier": 1.0, "init_values": {"__class__": "Dict", "amplitude": 5.0, "center": 5.0, "sigma": 1.0}, "lmdif_message": "Both actual and predicted relative reductions in the sum of squares\n are at most 0.000000", "message": "Fit succeeded.", "method": "leastsq", "nan_policy": "raise", "ndata": 101.0, "nfev": 29.0, "nfree": 98.0, "nvarys": 3.0, "redchi": 0.0347840406946614, "scale_covar": 1.0, 
"calc_covar": 1.0, "success": 1.0, "userargs": {"__class__": "Tuple", "value": [{"__class__": "NDArray", "__shape__": [101], "__dtype__": "float64", "value": [-0.305196, 0.004932, 0.192535, 0.100639, 0.244992, -0.001095, -0.01719, -0.13833, -0.065546, 0.150089, 0.021981, 0.23161, 0.186122, 0.224188, 0.355904, -0.069747, 0.062342, -0.025591, 0.05208, -0.329106, -0.012132, 0.205438, 0.118093, 0.018204, -0.113374, -0.086265, -0.074747, 0.179214, 0.168398, 0.067954, 0.076506, 0.433768, 0.019097, 0.239973, 0.006607, -0.121174, 0.162577, 0.04203, 0.288718, 0.13744, 0.593153, 0.480413, 0.901715, 0.868281, 1.301646, 1.093022, 1.53177, 1.772498, 2.346719, 2.716594, 3.333042, 3.688503, 3.821775, 4.583784, 4.805664, 5.125762, 4.964982, 4.988856, 4.854896, 4.738134, 4.815129, 4.070525, 3.983041, 3.107054, 2.841105, 2.610117, 2.146078, 1.683386, 1.317547, 0.789538, 0.585832, 0.494665, 0.447038, 0.441926, 0.393547, -0.0339, 0.042947, -0.116248, 0.061516, 0.183615, -0.127174, 0.368512, 0.194381, 0.301574, 0.045097, 0.110543, 0.263164, 0.190722, 0.425007, 0.253164, 0.201519, 0.132292, 0.304519, 0.129096, 0.269171, 0.189405, 0.243728, 0.411963, 0.080682, 0.332672, -0.0671]}, null]}, "userkws": {"__class__": "Dict", "x": {"__class__": "NDArray", "__shape__": [101], "__dtype__": "float64", "value": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9, 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7.0, 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 8.0, 8.1, 8.2, 8.3, 8.4, 8.5, 8.6, 8.7, 8.8, 8.9, 9.0, 9.1, 9.2, 9.3, 9.4, 9.5, 9.6, 9.7, 9.8, 9.9, 10.0]}}, "values": {"__class__": "Dict", "amplitude": 8.880222770531812, "center": 5.658660808233311, "sigma": 0.6976553846499309, "fwhm": 1.6428528528813504, "height": 5.078003516544183}, "var_names": {"__class__": "List", "value": ["amplitude", "center", "sigma"]}, "weights": null, "user_options": null}
diff --git a/tests/test_1variable.py b/tests/test_1variable.py
new file mode 100644
index 0000000..b17d19e
--- /dev/null
+++ b/tests/test_1variable.py
@@ -0,0 +1,49 @@
+# test of fitting one variable
+# From Nick Schurch
+
+import numpy
+from numpy.testing import assert_allclose
+
+import lmfit
+
+
+def linear_chisq(params, x, data, errs=None):
+    """Calculate chi-squared residuals for a linear model."""
+ if not isinstance(params, lmfit.parameter.Parameters):
+ msg = "Params argument is not a lmfit parameter set"
+ raise TypeError(msg)
+
+ if "m" not in params.keys():
+ msg = "No slope parameter (m) defined in the model"
+ raise KeyError(msg)
+
+ if "c" not in params.keys():
+ msg = "No intercept parameter (c) defined in the model"
+ raise KeyError(msg)
+
+ model = params["m"]*x + params["c"]
+ residuals = (data-model)
+ if errs is not None:
+ residuals = residuals/errs
+
+ return residuals
+
+
+def test_1var():
+ rands = [-0.21698284, 0.41900591, 0.02349374, -0.218552, -0.3513699,
+ 0.33418304, 0.04226855, 0.213303, 0.45948731, 0.33587736]
+
+ x = numpy.arange(10)+1
+ y = numpy.arange(10)+1+rands
+
+ params = lmfit.Parameters()
+ params.add(name="m", value=1.0, vary=True)
+ params.add(name="c", value=0.0, vary=False)
+
+ out = lmfit.minimize(linear_chisq, params, args=(x, y))
+
+ assert_allclose(params['m'].value, 1.025, rtol=0.02, atol=0.02)
+ assert len(params) == 2
+ assert out.nvarys == 1
+ assert out.chisqr > 0.01
+ assert out.chisqr < 5.00
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
new file mode 100644
index 0000000..2b4b56b
--- /dev/null
+++ b/tests/test_NIST_Strd.py
@@ -0,0 +1,297 @@
+import math
+from optparse import OptionParser
+
+from numpy.testing import assert_allclose
+
+from lmfit import Parameters, minimize
+
+from .NISTModels import Models, ReadNistData
+
+
+def ndig(a, b):
+    """Return the number of digits to which a and b agree (NIST precision)."""
+ return round(-math.log10((abs(abs(a)-abs(b)) + 1.e-15) / abs(b)))
+
+
+ABAR = ' |----------------+----------------+------------------+-------------------|'
+
+
+def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
+ buff = [' ======================================',
+ f' {DataSet}: ',
+ ' | Parameter Name | Value Found | Certified Value | # Matching Digits |']
+ buff.append(ABAR)
+
+ val_dig_min = 200
+ err_dig_min = 200
+ fmt = ' | %s | % -.7e | % -.7e | %2i |'
+ for i in range(NISTdata['nparams']):
+ parname = f'b{i+1}'
+ par = params[parname]
+ thisval = par.value
+ certval = NISTdata['cert_values'][i]
+ vdig = ndig(thisval, certval)
+ pname = (parname + ' value ' + ' '*14)[:14]
+ buff.append(fmt % (pname, thisval, certval, vdig))
+ val_dig_min = min(val_dig_min, vdig)
+
+ thiserr = par.stderr
+ certerr = NISTdata['cert_stderr'][i]
+ if thiserr is not None and myfit.errorbars:
+ edig = ndig(thiserr, certerr)
+ ename = (parname + ' stderr' + ' '*14)[:14]
+ buff.append(fmt % (ename, thiserr, certerr, edig))
+ err_dig_min = min(err_dig_min, edig)
+
+ buff.append(ABAR)
+ sumsq = NISTdata['sum_squares']
+ try:
+ chi2 = myfit.chisqr
+ buff.append(' | Sum of Squares | %.7e | %.7e | %2i |'
+ % (chi2, sumsq, ndig(chi2, sumsq)))
+ except Exception:
+ pass
+ buff.append(ABAR)
+ if not myfit.errorbars:
+ buff.append(' | * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * * |')
+ err_dig_min = 0
+ if err_dig_min < 199:
+ buff.append(f' Worst agreement: {val_dig_min} digits for value, '
+ f'{err_dig_min} digits for error ')
+ else:
+ buff.append(f' Worst agreement: {val_dig_min} digits')
+ return val_dig_min, '\n'.join(buff)
+
+
+def NIST_Dataset(DataSet, method='leastsq', start='start2',
+ plot=False, verbose=False):
+
+ NISTdata = ReadNistData(DataSet)
+ resid, npar, dimx = Models[DataSet]
+ y = NISTdata['y']
+ x = NISTdata['x']
+
+ params = Parameters()
+ for i in range(npar):
+ pname = f'b{i+1}'
+ pval1 = NISTdata[start][i]
+ params.add(pname, value=pval1)
+ try:
+ myfit = minimize(resid, params, method=method, args=(x,), kws={'y': y}, nan_policy='raise')
+ except ValueError:
+ if verbose:
+ print("Fit failed... nans?")
+ return False
+ digs, buff = Compare_NIST_Results(DataSet, myfit, myfit.params, NISTdata)
+ if verbose:
+ print(buff)
+
+ return digs > 2
+
+
+def build_usage():
+ modelnames = []
+ ms = ''
+ for d in sorted(Models.keys()):
+ ms = ms + f' {d} '
+ if len(ms) > 55:
+ modelnames.append(ms)
+ ms = ' '
+ modelnames.append(ms)
+ modelnames = '\n'.join(modelnames)
+
+ usage = f"""
+ === Test Fit to NIST StRD Models ===
+
+usage:
+------
+ python fit_NIST.py [options] Model Start
+
+where Start is one of 'start1', 'start2', or 'cert', for different
+starting values, and Model is one of
+
+ {modelnames}
+
+if Model = 'all', all models and starting values will be run.
+
+options:
+--------
+ -m name of fitting method. One of:
+ leastsq, nelder, powell, lbfgsb, bfgs,
+ tnc, cobyla, slsqp, cg, newton-cg
+ leastsq (Levenberg-Marquardt) is the default
+"""
+ return usage
+############################
+
+
+def run_interactive():
+ usage = build_usage()
+ parser = OptionParser(usage=usage, prog="fit-NIST.py")
+
+ parser.add_option("-m", "--method", dest="method",
+ metavar='METH',
+ default='leastsq',
+ help="set method name, default = 'leastsq'")
+
+ (opts, args) = parser.parse_args()
+ dset = ''
+ start = 'start2'
+ if len(args) > 0:
+ dset = args[0]
+ if len(args) > 1:
+ start = args[1]
+
+ if dset.lower() == 'all':
+ tpass = 0
+ tfail = 0
+ failures = []
+ dsets = sorted(Models.keys())
+ for dset in dsets:
+ for start in ('start1', 'start2', 'cert'):
+ if NIST_Dataset(dset, method=opts.method, start=start,
+ plot=False, verbose=True):
+ tpass += 1
+ else:
+ tfail += 1
+ failures.append(f" {dset} (starting at '{start}')")
+ print('--------------------------------------')
+ print(f' Fit Method: {opts.method} ')
+ print(f' Final Results: {tpass} pass, {tfail} fail.')
+ print(' Tests Failed for:\n %s' % '\n '.join(failures))
+ print('--------------------------------------')
+ elif dset not in Models:
+ print(usage)
+ else:
+ return NIST_Dataset(dset, method=opts.method,
+ start=start, plot=False, verbose=True)
+
+
+def RunNIST_Model(model):
+
+ dset = ReadNistData(model)
+ func, npar, dimx = Models[model]
+ rss = (func(dset['cert_values'], x=dset['x'], y=dset['y'])**2).sum()
+ tiny_rss = 1.e-16
+ print(rss, dset['sum_squares'], tiny_rss)
+
+ if dset['sum_squares'] < tiny_rss:
+ assert rss < tiny_rss
+ else:
+ assert_allclose(rss, dset['sum_squares'])
+
+ out1 = NIST_Dataset(model, start='start1', plot=False, verbose=False)
+ out2 = NIST_Dataset(model, start='start2', plot=False, verbose=False)
+ assert (out1 or out2)
+
+
+def test_Bennett5():
+ RunNIST_Model('Bennett5')
+
+
+def test_BoxBOD():
+ RunNIST_Model('BoxBOD')
+
+
+def test_Chwirut1():
+ RunNIST_Model('Chwirut1')
+
+
+def test_Chwirut2():
+ RunNIST_Model('Chwirut2')
+
+
+def test_DanWood():
+ RunNIST_Model('DanWood')
+
+
+def test_ENSO():
+ RunNIST_Model('ENSO')
+
+
+def test_Eckerle4():
+ RunNIST_Model('Eckerle4')
+
+
+def test_Gauss1():
+ RunNIST_Model('Gauss1')
+
+
+def test_Gauss2():
+ RunNIST_Model('Gauss2')
+
+
+def test_Gauss3():
+ RunNIST_Model('Gauss3')
+
+
+def test_Hahn1():
+ RunNIST_Model('Hahn1')
+
+
+def test_Kirby2():
+ RunNIST_Model('Kirby2')
+
+
+def test_Lanczos1():
+ RunNIST_Model('Lanczos1')
+
+
+def test_Lanczos2():
+ RunNIST_Model('Lanczos2')
+
+
+def test_Lanczos3():
+ RunNIST_Model('Lanczos3')
+
+
+def test_MGH09():
+ RunNIST_Model('MGH09')
+
+
+def test_MGH10():
+ RunNIST_Model('MGH10')
+
+
+def test_MGH17():
+ RunNIST_Model('MGH17')
+
+
+def test_Misra1a():
+ RunNIST_Model('Misra1a')
+
+
+def test_Misra1b():
+ RunNIST_Model('Misra1b')
+
+
+def test_Misra1c():
+ RunNIST_Model('Misra1c')
+
+
+def test_Misra1d():
+ RunNIST_Model('Misra1d')
+
+
+def test_Nelson():
+ RunNIST_Model('Nelson')
+
+
+def test_Rat42():
+ RunNIST_Model('Rat42')
+
+
+def test_Rat43():
+ RunNIST_Model('Rat43')
+
+
+def test_Roszman1():
+ RunNIST_Model('Roszman1')
+
+
+def test_Thurber():
+ RunNIST_Model('Thurber')
+
+
+if __name__ == '__main__':
+ run_interactive()
diff --git a/tests/test_algebraic_constraint.py b/tests/test_algebraic_constraint.py
new file mode 100644
index 0000000..94dc407
--- /dev/null
+++ b/tests/test_algebraic_constraint.py
@@ -0,0 +1,113 @@
+"""Tests for algebraic parameter constraints."""
+import numpy as np
+import pytest
+
+from lmfit import Minimizer, Model, Parameters
+from lmfit.lineshapes import gaussian, lorentzian
+
+
+@pytest.fixture
+def minimizer():
+ """Return the Minimizer object."""
+ def residual(pars, x, sigma=None, data=None):
+ """Define objective function."""
+ yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
+ yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])
+
+ model = yg + yl + pars['line_off'] + x * pars['line_slope']
+
+ if data is None:
+ return model
+ if sigma is None:
+ return model - data
+ return (model-data) / sigma
+
+ # generate synthetic data
+ n = 601
+ xmin = 0.
+ xmax = 20.0
+ x = np.linspace(xmin, xmax, n)
+
+ data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) +
+ np.random.normal(scale=0.23, size=n) + x*0.5)
+
+ # create initial Parameters
+ pars = Parameters()
+ pars.add(name='amp_g', value=10)
+ pars.add(name='cen_g', value=9)
+ pars.add(name='wid_g', value=1)
+ pars.add(name='amp_tot', value=20)
+ pars.add(name='amp_l', expr='amp_tot - amp_g')
+ pars.add(name='cen_l', expr='1.5+cen_g')
+ pars.add(name='wid_l', expr='2*wid_g')
+ pars.add(name='line_slope', value=0.0)
+ pars.add(name='line_off', value=0.0)
+
+ sigma = 0.021 # estimate of data error (for all data points)
+
+ mini = Minimizer(residual, pars, fcn_args=(x,), fcn_kws={'sigma': sigma,
+ 'data': data})
+
+ return mini
+
+
+def test_algebraic_constraints(minimizer):
+ """Test algebraic constraints."""
+ result = minimizer.minimize(method='leastsq')
+
+ pfit = result.params
+ assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value
+ assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value
+ assert pfit['wid_l'].value == 2.0 * pfit['wid_g'].value
+
+
+def test_algebraic_constraints_function(minimizer):
+ """Test constraints with a user-defined function added to symbol table."""
+ def width_func(wpar):
+ return 2.5*wpar
+
+ minimizer.params._asteval.symtable['wfun'] = width_func
+ minimizer.params.add(name='wid_l', expr='wfun(wid_g)')
+ result = minimizer.minimize(method='leastsq')
+
+ pfit = result.params
+ assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value
+ assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value
+ assert pfit['wid_l'].value == 2.5 * pfit['wid_g'].value
+
+
+def test_constraints_function_call():
+ """Test a constraint with simple function call in Model class."""
+ x = [1723, 1773, 1823, 1523, 1773, 1033.03078, 1042.98077, 1047.90937,
+ 1053.95899, 1057.94906, 1063.13788, 1075.74218, 1086.03102]
+ y = [0.79934, -0.31876, -0.46852, 0.05, -0.21, 11.1708, 10.31844, 9.73069,
+ 9.21319, 9.12457, 9.05243, 8.66407, 8.29664]
+
+ def VFT(T, ninf=-3, A=5e3, T0=800):
+ return ninf + A/(T-T0)
+
+ vftModel = Model(VFT)
+ vftModel.set_param_hint('D', vary=False, expr=r'A*log(10)/T0')
+ result = vftModel.fit(y, T=x)
+
+ assert 2600.0 < result.params['A'].value < 2650.0
+ assert 7.0 < result.params['D'].value < 7.5
+
+
+def test_constraints(minimizer):
+ """Test changing of algebraic constraints."""
+ result = minimizer.minimize(method='leastsq')
+
+ pfit = result.params
+ assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value
+ assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value
+ assert pfit['wid_l'].value == 2.0*pfit['wid_g'].value
+
+ # now, change fit slightly and re-run
+ minimizer.params['wid_l'].expr = '1.25*wid_g'
+ result = minimizer.minimize(method='leastsq')
+ pfit = result.params
+
+ assert pfit['cen_l'].value == 1.5 + pfit['cen_g'].value
+ assert pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value
+ assert pfit['wid_l'].value == 1.25*pfit['wid_g'].value
diff --git a/tests/test_ampgo.py b/tests/test_ampgo.py
new file mode 100644
index 0000000..3faf405
--- /dev/null
+++ b/tests/test_ampgo.py
@@ -0,0 +1,132 @@
+"""Tests for the AMPGO global minimization algorithm."""
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from scipy import __version__ as scipy_version
+
+import lmfit
+from lmfit._ampgo import ampgo, tunnel
+
+# correct result for Alpine02 function
+global_optimum = [7.91705268, 4.81584232]
+fglob = -6.12950
+
+
+@pytest.mark.parametrize("tabustrategy", ['farthest', 'oldest'])
+def test_ampgo_Alpine02(minimizer_Alpine02, tabustrategy):
+ """Test AMPGO algorithm on Alpine02 function."""
+ kws = {'tabustrategy': tabustrategy}
+ out = minimizer_Alpine02.minimize(method='ampgo', **kws)
+ out_x = np.array([out.params['x0'].value, out.params['x1'].value])
+
+ assert_allclose(out.residual, fglob, rtol=1e-5)
+ assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
+ assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
+ assert 'global' in out.ampgo_msg
+
+
+def test_ampgo_bounds(minimizer_Alpine02):
+ """Test AMPGO algorithm with bounds."""
+ # change boundaries of parameters
+ pars_bounds = lmfit.Parameters()
+ pars_bounds.add_many(('x0', 1., True, 5.0, 15.0),
+ ('x1', 1., True, 2.5, 7.5))
+
+ out = minimizer_Alpine02.minimize(params=pars_bounds, method='ampgo')
+ assert 5.0 <= out.params['x0'].value <= 15.0
+ assert 2.5 <= out.params['x1'].value <= 7.5
+
+
+def test_ampgo_disp_true(minimizer_Alpine02, capsys):
+ """Test AMPGO algorithm with disp is True."""
+ # disp to False for L-BFGS-B to avoid too much output...
+ kws = {'disp': True, 'local_opts': {'disp': False}}
+ minimizer_Alpine02.minimize(method='ampgo', **kws)
+ captured = capsys.readouterr()
+ assert "Starting MINIMIZATION Phase" in captured.out
+
+
+def test_ampgo_maxfunevals(minimizer_Alpine02):
+ """Test AMPGO algorithm with maxfunevals."""
+ # disp to False for L-BFGS-B to avoid too much output...
+ kws = {'maxfunevals': 5, 'disp': True, 'local_opts': {'disp': False}}
+ out = minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+ assert out.ampgo_msg == 'Maximum number of function evaluations exceeded'
+
+
+def test_ampgo_local_solver(minimizer_Alpine02):
+ """Test AMPGO algorithm with local solver."""
+ kws = {'local': 'Nelder-Mead'}
+
+ # bounds in Nelder-Mead are supported since SciPy v1.7.0
+ # FIXME: clean this up after we require SciPy >= 1.7.0
+ if int(scipy_version.split('.')[1]) < 7:
+ msg = r'Method Nelder-Mead cannot handle constraints nor bounds'
+ with pytest.warns(RuntimeWarning, match=msg):
+ out = minimizer_Alpine02.minimize(method='ampgo', **kws)
+ else:
+ out = minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+ out_x = np.array([out.params['x0'].value, out.params['x1'].value])
+
+    assert 'ampgo' in out.method and 'Nelder-Mead' in out.method
+ assert_allclose(out.residual, fglob, rtol=1e-5)
+ assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
+ assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
+ assert 'global' in out.ampgo_msg
+
+
+def test_ampgo_invalid_local_solver(minimizer_Alpine02):
+ """Test AMPGO algorithm with invalid local solvers."""
+ kws = {'local': 'leastsq'}
+ with pytest.raises(Exception, match=r'Invalid local solver selected'):
+ minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+
+def test_ampgo_invalid_tabulistsize(minimizer_Alpine02):
+ """Test AMPGO algorithm with invalid tabulistsize."""
+ kws = {'tabulistsize': 0}
+ with pytest.raises(Exception, match=r'Invalid tabulistsize specified'):
+ minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+
+def test_ampgo_invalid_tabustrategy(minimizer_Alpine02):
+ """Test AMPGO algorithm with invalid tabustrategy."""
+ kws = {'tabustrategy': 'unknown'}
+ with pytest.raises(Exception, match=r'Invalid tabustrategy specified'):
+ minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+
+def test_ampgo_local_opts(minimizer_Alpine02):
+ """Test AMPGO algorithm, pass local_opts to solver."""
+ # use local_opts to pass maxfun to the local optimizer: providing a string
+ # whereas an integer is required, this should throw an error.
+ kws = {'local_opts': {'maxfun': 'string'}}
+ with pytest.raises(TypeError):
+ minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+ # for coverage: make sure that both occurrences are reached
+ kws = {'local_opts': {'maxfun': 10}, 'maxfunevals': 50}
+ minimizer_Alpine02.minimize(method='ampgo', **kws)
+
+
+def test_ampgo_incorrect_length_for_bounds():
+ """Test for ValueError when bounds are given for only some parameters."""
+ def func(x):
+ return x[0]**2 + 10.0*x[1]
+
+ msg = r'length of x0 != length of bounds'
+ with pytest.raises(ValueError, match=msg):
+ ampgo(func, x0=[0, 1], bounds=[(-10, 10)])
+
+
+def test_ampgo_tunnel_more_than_three_arguments():
+ """Test AMPGO tunnel function with more than three arguments."""
+ def func(x, val):
+ return x[0]**2 + val*x[1]
+
+ args = [func, 0.1, np.array([1, 2]), 5.0]
+ out = tunnel(np.array([10, 5]), *args)
+ assert_allclose(out, 185.386275588)
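+
+# A hand-calculation sketch for the expected value above, assuming the
+# tunneling transform T(x) = (f(x) - aspiration)**2 / prod_t ||x - t|| over
+# the tabu points t (consistent with the assertion):
+# f([10, 5], 5.0) = 10**2 + 5.0*5 = 125
+# numerator = (125 - 0.1)**2 = 15600.01
+# denominator = ||[10, 5] - 1|| * ||[10, 5] - 2|| = sqrt(97)*sqrt(73) ~= 84.1487
+# 15600.01 / 84.1487 ~= 185.386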
diff --git a/tests/test_basicfit.py b/tests/test_basicfit.py
new file mode 100644
index 0000000..00d8f79
--- /dev/null
+++ b/tests/test_basicfit.py
@@ -0,0 +1,39 @@
+import numpy as np
+from numpy.testing import assert_allclose
+
+from lmfit import Parameters, minimize
+
+
+def test_basic():
+ # create data to be fitted
+ x = np.linspace(0, 15, 301)
+ data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=len(x), scale=0.2))
+
+ # define objective function: returns the array to be minimized
+ def fcn2min(params, x, data):
+ """ model decaying sine wave, subtract data"""
+ amp = params['amp']
+ shift = params['shift']
+ omega = params['omega']
+ decay = params['decay']
+
+ model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+ # create a set of Parameters
+ params = Parameters()
+ params.add('amp', value=10, min=0)
+ params.add('decay', value=0.1)
+ params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
+ params.add('omega', value=3.0)
+
+ # do fit, here with the default leastsq method
+ result = minimize(fcn2min, params, args=(x, data))
+
+ assert result.nfev > 5
+ assert result.nfev < 500
+ assert result.chisqr > 1
+ assert result.nvarys == 4
+ assert_allclose(result.params['amp'], 5.03, rtol=0.05)
+ assert_allclose(result.params['omega'], 2.0, rtol=0.05)
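+
+# A minimal usage sketch (not part of the assertions): lmfit can print a text
+# summary of best-fit values, uncertainties and correlations for a result like
+# the one above, e.g.
+# from lmfit import fit_report
+# print(fit_report(result))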
diff --git a/tests/test_basinhopping.py b/tests/test_basinhopping.py
new file mode 100644
index 0000000..89b7bad
--- /dev/null
+++ b/tests/test_basinhopping.py
@@ -0,0 +1,111 @@
+"""Tests for the basinhopping minimization algorithm."""
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from scipy import __version__ as scipy_version
+from scipy.optimize import basinhopping
+
+import lmfit
+
+
+def test_basinhopping_lmfit_vs_scipy():
+ """Test basinhopping in lmfit versus scipy."""
+ # SciPy
+ def func(x):
+ return np.cos(14.5*x - 0.3) + (x+0.2) * x
+
+ minimizer_kwargs = {'method': 'L-BFGS-B'}
+ x0 = [1.]
+
+ ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs, seed=7)
+
+ # lmfit
+ def residual(params):
+ x = params['x'].value
+ return np.cos(14.5*x - 0.3) + (x+0.2) * x
+
+ pars = lmfit.Parameters()
+ pars.add_many(('x', 1.))
+ kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7}
+ mini = lmfit.Minimizer(residual, pars)
+ out = mini.minimize(method='basinhopping', **kws)
+
+ assert_allclose(out.residual, ret.fun)
+ assert_allclose(out.params['x'].value, ret.x, rtol=1e-5)
+
+
+def test_basinhopping_2d_lmfit_vs_scipy():
+ """Test basinhopping in lmfit versus scipy."""
+ # SciPy
+ def func2d(x):
+ return np.cos(14.5*x[0] - 0.3) + (x[1]+0.2) * x[1] + (x[0]+0.2) * x[0]
+
+ minimizer_kwargs = {'method': 'L-BFGS-B'}
+ x0 = [1.0, 1.0]
+
+ ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs, seed=7)
+
+ # lmfit
+ def residual_2d(params):
+ x0 = params['x0'].value
+ x1 = params['x1'].value
+ return np.cos(14.5*x0 - 0.3) + (x1+0.2) * x1 + (x0+0.2) * x0
+
+ pars = lmfit.Parameters()
+ pars.add_many(('x0', 1.), ('x1', 1.))
+
+ mini = lmfit.Minimizer(residual_2d, pars)
+ kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7}
+ out = mini.minimize(method='basinhopping', **kws)
+
+ assert_allclose(out.residual, ret.fun)
+ assert_allclose(out.params['x0'].value, ret.x[0], rtol=1e-5)
+ assert_allclose(out.params['x1'].value, ret.x[1], rtol=1e-5)
+
+ # FIXME: update when SciPy requirement is >= 1.8
+ if int(scipy_version.split('.')[1]) >= 8:
+ assert 'target_accept_rate' in out.call_kws
+ assert 'stepwise_factor' in out.call_kws
+
+
+def test_basinhopping_Alpine02(minimizer_Alpine02):
+ """Test basinhopping on Alpine02 function."""
+ global_optimum = [7.91705268, 4.81584232]
+ fglob = -6.12950
+
+ kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7}
+ out = minimizer_Alpine02.minimize(method='basinhopping', **kws)
+ out_x = np.array([out.params['x0'].value, out.params['x1'].value])
+
+ assert_allclose(out.residual, fglob, rtol=1e-5)
+ assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
+ assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
+ assert out.method == 'basinhopping'
+
+
+def test_basinhopping_bounds(minimizer_Alpine02):
+ """Test basinhopping algorithm with bounds."""
+ # change boundaries of parameters
+ pars_bounds = lmfit.Parameters()
+ pars_bounds.add_many(('x0', 1., True, 5.0, 15.0),
+ ('x1', 1., True, 2.5, 7.5))
+
+ kws = {'minimizer_kwargs': {'method': 'L-BFGS-B'}, 'seed': 7}
+ out = minimizer_Alpine02.minimize(params=pars_bounds,
+ method='basinhopping', **kws)
+ assert 5.0 <= out.params['x0'].value <= 15.0
+ assert 2.5 <= out.params['x1'].value <= 7.5
+
+
+def test_basinhopping_solver_options(minimizer_Alpine02):
+ """Test basinhopping algorithm, pass incorrect options to solver."""
+ # use minimizer_kwargs to pass an invalid method for local solver to
+ # scipy.basinhopping
+ kws = {'minimizer_kwargs': {'method': 'unknown'}}
+ with pytest.raises(ValueError, match=r'Unknown solver'):
+ minimizer_Alpine02.minimize(method='basinhopping', **kws)
+
+ # pass an incorrect value for niter to scipy.basinhopping
+ kws = {'niter': 'string'}
+ with pytest.raises(TypeError):
+ minimizer_Alpine02.minimize(method='basinhopping', **kws)
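+
+# note: the lmfit-vs-scipy comparisons above pass the same seed=7 to both code
+# paths, so the wrapped and the direct scipy.optimize.basinhopping runs are
+# expected to take identical random steps; that is what makes the element-wise
+# assert_allclose comparisons meaningful.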
diff --git a/tests/test_bounded_jacobian.py b/tests/test_bounded_jacobian.py
new file mode 100644
index 0000000..d3d8f33
--- /dev/null
+++ b/tests/test_bounded_jacobian.py
@@ -0,0 +1,70 @@
+import numpy as np
+from numpy.testing import assert_allclose
+
+from lmfit import Parameters, minimize
+
+
+def test_bounded_jacobian():
+ pars = Parameters()
+ pars.add('x0', value=2.0)
+ pars.add('x1', value=2.0, min=1.5)
+
+ global jac_count
+
+ jac_count = 0
+
+ def resid(params):
+ x0 = params['x0']
+ x1 = params['x1']
+ return np.array([10 * (x1 - x0*x0), 1-x0])
+
+ def jac(params):
+ global jac_count
+ jac_count += 1
+ x0 = params['x0']
+ return np.array([[-20*x0, 10], [-1, 0]])
+
+ out0 = minimize(resid, pars, Dfun=None)
+
+ assert_allclose(out0.params['x0'], 1.2243, rtol=1.0e-4)
+ assert_allclose(out0.params['x1'], 1.5000, rtol=1.0e-4)
+ assert jac_count == 0
+
+ out1 = minimize(resid, pars, Dfun=jac)
+
+ assert_allclose(out1.params['x0'], 1.2243, rtol=1.0e-4)
+ assert_allclose(out1.params['x1'], 1.5000, rtol=1.0e-4)
+ assert jac_count > 5
+
+
+def test_bounded_jacobian_CG():
+ pars = Parameters()
+ pars.add('x0', value=2.0)
+ pars.add('x1', value=2.0, min=1.5)
+
+ global jac_count
+
+ jac_count = 0
+
+ def resid(params):
+ x0 = params['x0']
+ x1 = params['x1']
+ return np.array([10 * (x1 - x0*x0), 1-x0])
+
+ def jac(params):
+ global jac_count
+ jac_count += 1
+ x0 = params['x0']
+ # Jacobian of the *error*, i.e. the summed squared residuals
+ return 2 * np.sum(np.array([[-20 * x0, 10],
+ [-1, 0]]).T * resid(params), axis=1)
+
+ out0 = minimize(resid, pars, method='CG')
+ assert_allclose(out0.params['x0'], 1.2243, rtol=1.0e-4)
+ assert_allclose(out0.params['x1'], 1.5000, rtol=1.0e-4)
+ assert jac_count == 0
+
+ out1 = minimize(resid, pars, method='CG', Dfun=jac)
+ assert_allclose(out1.params['x0'], 1.2243, rtol=1.0e-4)
+ assert_allclose(out1.params['x1'], 1.5000, rtol=1.0e-4)
+ assert jac_count > 5
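+
+# A sketch of the convention assumed by the two tests above: for 'leastsq' the
+# Dfun callback returns the Jacobian J of the residual vector itself, whereas
+# scalar minimizers such as 'CG' need the gradient of the scalar cost
+# sum(r**2); by the chain rule
+# d/dp sum_i r_i(p)**2 = 2 * sum_i r_i * dr_i/dp,
+# which is the 2 * J.T @ r contraction computed in the second jac() above.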
diff --git a/tests/test_bounds.py b/tests/test_bounds.py
new file mode 100644
index 0000000..f5c893b
--- /dev/null
+++ b/tests/test_bounds.py
@@ -0,0 +1,49 @@
+from numpy import exp, linspace, pi, random, sign, sin
+from numpy.testing import assert_allclose
+
+from lmfit import Parameters, minimize
+
+
+def test_bounds():
+ p_true = Parameters()
+ p_true.add('amp', value=14.0)
+ p_true.add('period', value=5.4321)
+ p_true.add('shift', value=0.12345)
+ p_true.add('decay', value=0.01000)
+
+ def residual(pars, x, data=None):
+ amp = pars['amp']
+ per = pars['period']
+ shift = pars['shift']
+ decay = pars['decay']
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+
+ model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return model - data
+
+ n = 1500
+ xmin = 0.
+ xmax = 250.0
+ random.seed(0)
+ noise = random.normal(scale=2.80, size=n)
+ x = linspace(xmin, xmax, n)
+ data = residual(p_true, x) + noise
+
+ fit_params = Parameters()
+ fit_params.add('amp', value=13.0, max=20, min=0.0)
+ fit_params.add('period', value=2, max=10)
+ fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
+ fit_params.add('decay', value=0.02, max=0.10, min=0.00)
+
+ out = minimize(residual, fit_params, args=(x,), kws={'data': data})
+
+ assert out.nfev > 10
+ assert out.nfree > 50
+ assert out.chisqr > 1.0
+
+ assert_allclose(out.params['decay'], 0.01, rtol=2.e-2)
+ assert_allclose(out.params['shift'], 0.123, rtol=2.e-2)
diff --git a/tests/test_brute.py b/tests/test_brute.py
new file mode 100644
index 0000000..dbad75b
--- /dev/null
+++ b/tests/test_brute.py
@@ -0,0 +1,290 @@
+"""Tests for the brute force algorithm (aka 'grid-search').
+
+Use example problem described in the SciPy documentation:
+https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html
+
+"""
+import pickle
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+import pytest
+from scipy import optimize
+
+import lmfit
+
+
+def func_scipy(z, *params):
+ x, y = z
+ a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+
+ f1 = a * x**2 + b*x*y + c * y**2 + d*x + e*y + f
+ f2 = -g*np.exp(-((x-h)**2 + (y-i)**2) / scale)
+ f3 = -j*np.exp(-((x-k)**2 + (y-l)**2) / scale)
+
+ return f1 + f2 + f3
+
+
+params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
+
+
+def func_lmfit(p):
+ par = p.valuesdict()
+ f1 = (par['a'] * par['x']**2 + par['b']*par['x']*par['y'] +
+ par['c'] * par['y']**2 + par['d']*par['x'] + par['e']*par['y'] +
+ par['f'])
+ f2 = (-1.0*par['g']*np.exp(-((par['x']-par['h'])**2 +
+ (par['y']-par['i'])**2) / par['scale']))
+ f3 = (-1.0*par['j']*np.exp(-((par['x']-par['k'])**2 +
+ (par['y']-par['l'])**2) / par['scale']))
+
+ return f1 + f2 + f3
+
+
+@pytest.fixture
+def params_lmfit():
+ """Return lmfit.Parameters class with initial values and bounds."""
+ params = lmfit.Parameters()
+ params.add_many(
+ ('a', 2, False), ('b', 3, False), ('c', 7, False), ('d', 8, False),
+ ('e', 9, False), ('f', 10, False), ('g', 44, False), ('h', -1, False),
+ ('i', 2, False), ('j', 26, False), ('k', 1, False), ('l', -2, False),
+ ('scale', 0.5, False), ('x', -4.0, True, -4.0, 4.0, None, None),
+ ('y', -2.0, True, -2.0, 2.0, None, None))
+ return params
+
+
+def test_brute_lmfit_vs_scipy_default(params_lmfit):
+ """TEST 1: using finite bounds with Ns=20, keep=50 and brute_step=None."""
+ assert params_lmfit['x'].brute_step is None
+ assert params_lmfit['y'].brute_step is None
+
+ rranges = ((-4, 4), (-2, 2))
+ ret = optimize.brute(func_scipy, rranges, args=params, full_output=True,
+ Ns=20, finish=None)
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute', Ns=20)
+
+ assert out.method == 'brute'
+ assert_equal(out.nfev, 20**len(out.var_names)) # Ns ** number of varying params
+ assert_equal(len(out.candidates), 50) # top-50 candidates are stored
+
+ assert_equal(ret[2], out.brute_grid) # grid identical
+ assert_equal(ret[3], out.brute_Jout) # function values on grid identical
+
+ # best-fit values identical / stored correctly in MinimizerResult
+ assert_equal(ret[0][0], out.brute_x0[0])
+ assert_equal(ret[0][0], out.params['x'].value)
+
+ assert_equal(ret[0][1], out.brute_x0[1])
+ assert_equal(ret[0][1], out.params['y'].value)
+
+ assert_equal(ret[1], out.brute_fval)
+ assert_equal(ret[1], out.residual)
+
+
+def test_brute_lmfit_vs_scipy_Ns(params_lmfit):
+ """TEST 2: using finite bounds, with Ns=40 and brute_step=None."""
+ rranges = ((-4, 4), (-2, 2))
+ ret = optimize.brute(func_scipy, rranges, args=params, full_output=True,
+ Ns=40, finish=None)
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute', Ns=40)
+
+ assert_equal(ret[2], out.brute_grid) # grid identical
+ assert_equal(ret[3], out.brute_Jout) # function values on grid identical
+ assert_equal(out.nfev, 40**len(out.var_names)) # Ns ** number of varying params
+
+ # best-fit values and function value identical
+ assert_equal(ret[0][0], out.brute_x0[0])
+ assert_equal(ret[0][1], out.brute_x0[1])
+ assert_equal(ret[1], out.brute_fval)
+
+
+def test_brute_lmfit_vs_scipy_stepsize(params_lmfit):
+ """TEST 3: using finite bounds and brute_step for both parameters."""
+ # set brute_step for parameters and assert whether that worked correctly
+ params_lmfit['x'].set(brute_step=0.25)
+ params_lmfit['y'].set(brute_step=0.25)
+ assert_equal(params_lmfit['x'].brute_step, 0.25)
+ assert_equal(params_lmfit['y'].brute_step, 0.25)
+
+ rranges = (slice(-4, 4, 0.25), slice(-2, 2, 0.25))
+ ret = optimize.brute(func_scipy, rranges, args=params, full_output=True,
+ Ns=20, finish=None)
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute')
+
+ assert_equal(ret[2], out.brute_grid) # grid identical
+ assert_equal(ret[3], out.brute_Jout) # function values on grid identical
+
+ # best-fit values and function value identical
+ assert_equal(ret[0][0], out.brute_x0[0])
+ assert_equal(ret[0][1], out.brute_x0[1])
+ assert_equal(ret[1], out.brute_fval)
+
+ points_x = np.arange(rranges[0].start, rranges[0].stop, rranges[0].step).size
+ points_y = np.arange(rranges[1].start, rranges[1].stop, rranges[1].step).size
+ nmb_evals = points_x * points_y
+ assert_equal(out.nfev, nmb_evals)
+
+
+def test_brute_lmfit_vs_scipy_Ns_stepsize(params_lmfit):
+ """TEST 4: using finite bounds, using Ns, brute_step for 'x'."""
+ # set brute_step for x to 0.15 and reset to None for y and assert result
+ params_lmfit['x'].set(brute_step=0.15)
+ assert_equal(params_lmfit['x'].brute_step, 0.15)
+
+ rranges = (slice(-4, 4, 0.15), (-2, 2))
+ ret = optimize.brute(func_scipy, rranges, args=params, full_output=True,
+ Ns=10, finish=None)
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute', Ns=10)
+
+ assert_equal(ret[2], out.brute_grid) # grid identical
+ assert_equal(ret[3], out.brute_Jout) # function values on grid identical
+
+ points_x = np.arange(rranges[0].start, rranges[0].stop, rranges[0].step).size
+ points_y = 10
+ nmb_evals = points_x * points_y
+ assert_equal(out.nfev, nmb_evals)
+
+ # best-fit values and function value identical
+ assert_equal(ret[0][0], out.brute_x0[0])
+ assert_equal(ret[0][1], out.brute_x0[1])
+ assert_equal(ret[1], out.brute_fval)
+
+
+def test_brute_upper_bounds_and_brute_step(params_lmfit):
+ """TEST 5: using finite upper bounds, Ns=20, and brute_step specified."""
+ Ns = 20
+ params_lmfit['x'].set(min=-np.inf)
+ params_lmfit['x'].set(brute_step=0.25)
+
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute', Ns=Ns)
+
+ assert_equal(out.params['x'].min, -np.inf)
+ assert_equal(out.params['x'].brute_step, 0.25)
+
+ grid_x_expected = np.linspace(params_lmfit['x'].max -
+ Ns*params_lmfit['x'].brute_step,
+ params_lmfit['x'].max, Ns, False)
+ grid_x = np.unique([par.ravel() for par in out.brute_grid][0])
+ assert_allclose(grid_x, grid_x_expected)
+
+ grid_y_expected = np.linspace(params_lmfit['y'].min,
+ params_lmfit['y'].max, Ns)
+ grid_y = np.unique([par.ravel() for par in out.brute_grid][1])
+ assert_allclose(grid_y, grid_y_expected)
+
+
+def test_brute_lower_bounds_and_brute_step(params_lmfit):
+ """TEST 6: using finite lower bounds, Ns=15, and brute_step specified."""
+ Ns = 15
+ params_lmfit['y'].set(max=np.inf)
+ params_lmfit['y'].set(brute_step=0.1)
+
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute', Ns=Ns)
+
+ grid_x_expected = np.linspace(params_lmfit['x'].min,
+ params_lmfit['x'].max, Ns)
+ grid_x = np.unique([par.ravel() for par in out.brute_grid][0])
+ assert_allclose(grid_x, grid_x_expected)
+
+ grid_y_expected = np.linspace(params_lmfit['y'].min,
+ params_lmfit['y'].min +
+ Ns*params_lmfit['y'].brute_step, Ns, False)
+ grid_y = np.unique([par.ravel() for par in out.brute_grid][1])
+ assert_allclose(grid_y, grid_y_expected)
+
+
+def test_brute_no_bounds_with_brute_step(params_lmfit):
+ """TEST 7: using no bounds, but brute_step specified (Ns=15)."""
+ Ns = 15
+ params_lmfit['x'].set(min=-np.inf, max=np.inf, brute_step=0.1)
+ params_lmfit['y'].set(min=-np.inf, max=np.inf, brute_step=0.2)
+
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute', Ns=15)
+
+ grid_x_expected = np.linspace(params_lmfit['x'].value -
+ (Ns//2)*params_lmfit['x'].brute_step,
+ params_lmfit['x'].value +
+ (Ns//2)*params_lmfit['x'].brute_step, Ns)
+ grid_x = np.unique([par.ravel() for par in out.brute_grid][0])
+ assert_allclose(grid_x, grid_x_expected)
+
+ grid_y_expected = np.linspace(params_lmfit['y'].value -
+ (Ns//2)*params_lmfit['y'].brute_step,
+ params_lmfit['y'].value +
+ (Ns//2)*params_lmfit['y'].brute_step, Ns)
+ grid_y = np.unique([par.ravel() for par in out.brute_grid][1])
+ assert_allclose(grid_y, grid_y_expected)
+
+
+def test_brute_no_bounds_no_brute_step(params_lmfit):
+ """TEST 8: insufficient information provided."""
+ params_lmfit['x'].set(min=-np.inf, max=np.inf)
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+
+ msg = r'Not enough information provided for the brute force method.'
+ with pytest.raises(ValueError, match=msg):
+ mini.minimize(method='brute')
+
+
+def test_brute_one_parameter(params_lmfit):
+ """TEST 9: only one varying parameter."""
+ params_lmfit['x'].set(vary=False)
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute')
+ assert out.candidates[0].score <= out.candidates[1].score
+ assert isinstance(out.candidates[0], lmfit.minimizer.Candidate)
+ assert isinstance(out.candidates[0].params, lmfit.Parameters)
+ assert isinstance(out.candidates[0].score, float)
+
+
+def test_brute_keep(params_lmfit, capsys):
+ """TEST 10: using 'keep' argument and check candidates attribute."""
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute')
+ assert_equal(len(out.candidates), 50) # default
+
+ out_keep_all = mini.minimize(method='brute', keep='all')
+ assert_equal(len(out_keep_all.candidates),
+ len(out_keep_all.brute_Jout.ravel()))
+
+ out_keep10 = mini.minimize(method='brute', keep=10)
+ assert_equal(len(out_keep10.candidates), 10)
+
+ assert isinstance(out.candidates[0], lmfit.minimizer.Candidate)
+ assert isinstance(out.candidates[0].params, lmfit.Parameters)
+ assert isinstance(out.candidates[0].score, float)
+
+ with pytest.raises(ValueError, match=r"'candidate_nmb' should be between"):
+ out_keep10.show_candidates(25)
+
+ with pytest.raises(ValueError, match=r"'candidate_nmb' should be between"):
+ out_keep10.show_candidates(0)
+
+ out_keep10.show_candidates(5)
+ captured = capsys.readouterr()
+ assert 'Candidate #5' in captured.out
+
+ # for coverage and to make sure the 'all' argument works; no assert...
+ out_keep10.show_candidates('all')
+
+
+def test_brute_pickle(params_lmfit):
+ """TEST 11: make sure the MinimizerResult can be pickle'd."""
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit)
+ out = mini.minimize(method='brute')
+ pickle.dumps(out)
+
+
+def test_nfev_workers(params_lmfit):
+ """TEST 12: make sure the nfev is correct for workers != 1."""
+ mini = lmfit.Minimizer(func_lmfit, params_lmfit, workers=-1)
+ out = mini.minimize(method='brute')
+ assert_equal(out.nfev, 20**len(out.var_names))
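+
+# A summary sketch of the grid rules exercised in TESTs 5-7 above, matching
+# the expected grids those tests construct:
+# - finite min and max, no brute_step: np.linspace(min, max, Ns)
+# - finite max + brute_step: np.linspace(max - Ns*step, max, Ns, endpoint=False)
+# - finite min + brute_step: np.linspace(min, min + Ns*step, Ns, endpoint=False)
+# - no bounds + brute_step: np.linspace(value - (Ns//2)*step, value + (Ns//2)*step, Ns)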
diff --git a/tests/test_builtin_models.py b/tests/test_builtin_models.py
new file mode 100644
index 0000000..aedde97
--- /dev/null
+++ b/tests/test_builtin_models.py
@@ -0,0 +1,312 @@
+"""Tests for built-in models."""
+
+import inspect
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from scipy.optimize import fsolve
+
+from lmfit import lineshapes, models
+from lmfit.models import GaussianModel
+
+
+def check_height_fwhm(x, y, lineshape, model):
+ """Check height and fwhm parameters."""
+ pars = model.guess(y, x=x)
+ out = model.fit(y, pars, x=x)
+
+ # account for functions whose centers are not mu
+ mu = out.params['center'].value
+ if lineshape is lineshapes.lognormal:
+ cen = np.exp(mu - out.params['sigma']**2)
+ elif lineshape is lineshapes.pearson4:
+ cen = out.params['position']
+ else:
+ cen = mu
+
+ # get arguments for lineshape
+ sig = inspect.signature(lineshape)
+ args = {key: out.best_values[key] for key in sig.parameters.keys()
+ if key != 'x'}
+
+ # output format for assertion errors
+ fmt = ("Program calculated values and real values do not match!\n"
+ "{:^20s}{:^20s}{:^20s}{:^20s}\n"
+ "{:^20s}{:^20f}{:^20f}{:^20f}")
+
+ if 'height' in out.params:
+ height_pro = out.params['height'].value
+ height_act = lineshape(cen, **args)
+ diff = height_act - height_pro
+
+ assert abs(diff) < 0.001, fmt.format(model._name, 'Actual', 'program',
+ 'Difference', 'Height',
+ height_act, height_pro, diff)
+
+ if 'fwhm' in out.params:
+ fwhm_pro = out.params['fwhm'].value
+ func = lambda x: lineshape(x, **args) - 0.5*height_act
+ ret = fsolve(func, [cen - fwhm_pro/4, cen + fwhm_pro/2])
+ fwhm_act = ret[1] - ret[0]
+ diff = fwhm_act - fwhm_pro
+
+ assert abs(diff) < 0.5, fmt.format(model._name, 'Actual',
+ 'program', 'Difference',
+ 'FWHM', fwhm_act, fwhm_pro,
+ diff)
+
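+# note: the FWHM check above is numeric: fsolve searches for the two points
+# where the lineshape crosses half of the computed height, one on either side
+# of the center, and their separation is compared against the model's 'fwhm'
+# parameter. The 'peakdata' fixture used below is presumably provided by
+# tests/conftest.py (not shown in this hunk).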
+
+def test_height_fwhm_calculation(peakdata):
+ """Test for correctness of height and FWHM calculation."""
+ # mu = 0
+ # variance = 1.0
+ # sigma = np.sqrt(variance)
+ # x = np.linspace(mu - 20*sigma, mu + 20*sigma, 100.0)
+ # y = norm.pdf(x, mu, 1)
+ x = peakdata[0]
+ y = peakdata[1]
+ check_height_fwhm(x, y, lineshapes.voigt, models.VoigtModel())
+ check_height_fwhm(x, y, lineshapes.pvoigt, models.PseudoVoigtModel())
+ check_height_fwhm(x, y, lineshapes.pearson4, models.Pearson4Model())
+ check_height_fwhm(x, y, lineshapes.pearson7, models.Pearson7Model())
+ check_height_fwhm(x, y, lineshapes.moffat, models.MoffatModel())
+ check_height_fwhm(x, y, lineshapes.students_t, models.StudentsTModel())
+ check_height_fwhm(x, y, lineshapes.breit_wigner, models.BreitWignerModel())
+ check_height_fwhm(x, y, lineshapes.damped_oscillator,
+ models.DampedOscillatorModel())
+ check_height_fwhm(x, y, lineshapes.dho,
+ models.DampedHarmonicOscillatorModel())
+ check_height_fwhm(x, y, lineshapes.expgaussian,
+ models.ExponentialGaussianModel())
+ check_height_fwhm(x, y, lineshapes.skewed_gaussian,
+ models.SkewedGaussianModel())
+ check_height_fwhm(x, y, lineshapes.doniach, models.DoniachModel())
+ # this test fails after allowing 'center' to be negative (see PR #645)
+ # it's a bit strange to fit a LognormalModel to a Voigt-like lineshape
+ # anyway, so disable the test for now
+ # x = x-9 # Lognormal will only fit peaks with centers < 1
+ # check_height_fwhm(x, y, lineshapes.lognormal, models.LognormalModel())
+
+
+def test_height_and_fwhm_expression_evaluation_in_builtin_models():
+ """Assert models do not throw a ZeroDivisionError."""
+ mod = models.GaussianModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)
+ params.update_constraints()
+
+ mod = models.LorentzianModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)
+ params.update_constraints()
+
+ mod = models.SplitLorentzianModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.0)
+ params.update_constraints()
+
+ mod = models.VoigtModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=1.0)
+ params.update_constraints()
+
+ mod = models.PseudoVoigtModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, fraction=0.5)
+ params.update_constraints()
+
+ mod = models.MoffatModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, beta=0.0)
+ params.update_constraints()
+
+ mod = models.Pearson4Model()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, expon=1.0, skew=5.0)
+ params.update_constraints()
+
+ mod = models.Pearson7Model()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, expon=1.0)
+ params.update_constraints()
+
+ mod = models.StudentsTModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)
+ params.update_constraints()
+
+ mod = models.BreitWignerModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, q=0.0)
+ params.update_constraints()
+
+ mod = models.LognormalModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)
+ params.update_constraints()
+
+ mod = models.DampedOscillatorModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)
+ params.update_constraints()
+
+ mod = models.DampedHarmonicOscillatorModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)
+ params.update_constraints()
+
+ mod = models.ExponentialGaussianModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)
+ params.update_constraints()
+
+ mod = models.SkewedGaussianModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)
+ params.update_constraints()
+
+ mod = models.SkewedVoigtModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0,
+ skew=0.0)
+ params.update_constraints()
+
+ mod = models.DoniachModel()
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)
+ params.update_constraints()
+
+ mod = models.StepModel()
+ for f in ('linear', 'arctan', 'erf', 'logistic'):
+ params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, form=f)
+ params.update_constraints()
+
+ mod = models.RectangleModel()
+ for f in ('linear', 'arctan', 'erf', 'logistic'):
+ params = mod.make_params(amplitude=1.0, center1=0.0, sigma1=0.0,
+ center2=0.0, sigma2=0.0, form=f)
+ params.update_constraints()
+
+ mod = models.Gaussian2dModel()
+ params = mod.make_params(amplitude=1.0, centerx=0.0, sigmax=0.9,
+ centery=0.0, sigmay=0.9)
+ params.update_constraints()
+
+
+def test_guess_modelparams():
+ """Tests for the 'guess' function of built-in models."""
+ x = np.linspace(-10, 10, 501)
+
+ mod = models.ConstantModel()
+ y = 6.0 + x*0.005
+ pars = mod.guess(y)
+ assert_allclose(pars['c'].value, 6.0, rtol=0.01)
+
+ mod = models.ComplexConstantModel(prefix='f_')
+ y = 6.0 + x*0.005 + (4.0 - 0.02*x)*1j
+ pars = mod.guess(y)
+ assert_allclose(pars['f_re'].value, 6.0, rtol=0.01)
+ assert_allclose(pars['f_im'].value, 4.0, rtol=0.01)
+
+ mod = models.QuadraticModel(prefix='g_')
+ y = -0.2 + 3.0*x + 0.005*x**2
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['g_a'].value, 0.005, rtol=0.01)
+ assert_allclose(pars['g_b'].value, 3.0, rtol=0.01)
+ assert_allclose(pars['g_c'].value, -0.2, rtol=0.01)
+
+ mod = models.PolynomialModel(4, prefix='g_')
+ y = -0.2 + 3.0*x + 0.005*x**2 - 3.3e-6*x**3 + 1.e-9*x**4
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['g_c0'].value, -0.2, rtol=0.01)
+ assert_allclose(pars['g_c1'].value, 3.0, rtol=0.01)
+ assert_allclose(pars['g_c2'].value, 0.005, rtol=0.1)
+ assert_allclose(pars['g_c3'].value, -3.3e-6, rtol=0.1)
+ assert_allclose(pars['g_c4'].value, 1.e-9, rtol=0.1)
+
+ mod = models.GaussianModel(prefix='g_')
+ y = lineshapes.gaussian(x, amplitude=2.2, center=0.25, sigma=1.3)
+ y += np.random.normal(size=len(x), scale=0.004)
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['g_amplitude'].value, 3, rtol=2)
+ assert_allclose(pars['g_center'].value, 0.25, rtol=1)
+ assert_allclose(pars['g_sigma'].value, 1.3, rtol=1)
+
+ mod = models.LorentzianModel(prefix='l_')
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['l_amplitude'].value, 3, rtol=2)
+ assert_allclose(pars['l_center'].value, 0.25, rtol=1)
+ assert_allclose(pars['l_sigma'].value, 1.3, rtol=1)
+
+ mod = models.Pearson4Model(prefix='g_')
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['g_amplitude'].value, 3, rtol=2)
+ assert_allclose(pars['g_center'].value, 0.25, rtol=1)
+ assert_allclose(pars['g_sigma'].value, 1.3, rtol=1)
+
+ mod = models.SplitLorentzianModel(prefix='s_')
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['s_amplitude'].value, 3, rtol=2)
+ assert_allclose(pars['s_center'].value, 0.25, rtol=1)
+ assert_allclose(pars['s_sigma'].value, 1.3, rtol=1)
+ assert_allclose(pars['s_sigma_r'].value, 1.3, rtol=1)
+
+ mod = models.VoigtModel(prefix='l_')
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['l_amplitude'].value, 3, rtol=2)
+ assert_allclose(pars['l_center'].value, 0.25, rtol=1)
+ assert_allclose(pars['l_sigma'].value, 1.3, rtol=1)
+
+ mod = models.SkewedVoigtModel(prefix='l_')
+ pars = mod.guess(y, x=x)
+ assert_allclose(pars['l_amplitude'].value, 3, rtol=2)
+ assert_allclose(pars['l_center'].value, 0.25, rtol=1)
+ assert_allclose(pars['l_sigma'].value, 1.3, rtol=1)
+
+
+def test_splitlorentzian_prefix():
+ """Regression test for SplitLorentzian model (see GH #566)."""
+ mod1 = models.SplitLorentzianModel()
+ par1 = mod1.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.3)
+ par1.update_constraints()
+
+ mod2 = models.SplitLorentzianModel(prefix='prefix_')
+ par2 = mod2.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.3)
+ par2.update_constraints()
+
+
+def test_guess_from_peak():
+ """Regression test for guess_from_peak function (see GH #627)."""
+ x = np.linspace(-5, 5)
+ amplitude = 0.8
+ center = 1.7
+ sigma = 0.3
+ y = lineshapes.lorentzian(x, amplitude=amplitude, center=center, sigma=sigma)
+
+ model = models.LorentzianModel()
+ guess_increasing_x = model.guess(y, x=x)
+ guess_decreasing_x = model.guess(y[::-1], x=x[::-1])
+
+ assert guess_increasing_x == guess_decreasing_x
+
+ for param, value in zip(['amplitude', 'center', 'sigma'],
+ [amplitude, center, sigma]):
+ assert np.abs((guess_increasing_x[param].value - value)/value) < 0.5
+
+
+def test_guess_from_peak2d():
+ """Regression test for guess_from_peak2d function (see GH #627)."""
+ x = np.linspace(-5, 5)
+ y = np.linspace(-5, 5)
+ amplitude = 0.8
+ centerx = 1.7
+ sigmax = 0.3
+ centery = 1.3
+ sigmay = 0.2
+ z = lineshapes.gaussian2d(x, y, amplitude=amplitude,
+ centerx=centerx, sigmax=sigmax,
+ centery=centery, sigmay=sigmay)
+
+ model = models.Gaussian2dModel()
+ guess_increasing_x = model.guess(z, x=x, y=y)
+ guess_decreasing_x = model.guess(z[::-1], x=x[::-1], y=y[::-1])
+
+ assert guess_increasing_x == guess_decreasing_x
+
+ for param, value in zip(['centerx', 'centery'], [centerx, centery]):
+ assert np.abs((guess_increasing_x[param].value - value)/value) < 0.5
+
+
+def test_guess_requires_x():
+ """Regression test for GH #747."""
+ x = np.arange(100)
+ y = np.exp(-(x-50)**2/(2*10**2))
+
+ mod = GaussianModel()
+ msg = r"guess\(\) missing 1 required positional argument: 'x'"
+ with pytest.raises(TypeError, match=msg):
+ mod.guess(y)
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
new file mode 100644
index 0000000..78e22c9
--- /dev/null
+++ b/tests/test_confidence.py
@@ -0,0 +1,261 @@
+"""Tests for the calculation of confidence intervals."""
+import copy
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from scipy.stats import f
+
+import lmfit
+
+
+@pytest.fixture
+def data():
+ """Generate synthetic data."""
+ x = np.linspace(0.3, 10, 100)
+ np.random.seed(0)
+ y = 1.0 / (0.1 * x) + 2.0 + 0.1 * np.random.randn(x.size)
+ return (x, y)
+
+
+@pytest.fixture
+def pars():
+ """Create and initialize parameter set."""
+ parameters = lmfit.Parameters()
+ parameters.add_many(('a', 0.1), ('b', 1))
+ return parameters
+
+
+def residual(params, x, data):
+ """Define objective function for the minimization."""
+ model = 1.0 / (params['a'] * x) + params['b']
+ return data - model
+
+
+def test_default_f_compare(data, pars):
+ """Test the default f_compare function: F-test."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+
+ # "fixing" a parameter, keeping the chisqr the same
+ out2 = copy.deepcopy(out)
+ out2.nvarys = 1
+ prob = lmfit.confidence.f_compare(out, out2)
+ assert_allclose(prob, 0.0)
+
+ # "fixing" a parameter, increasing the chisqr
+ out2.chisqr = 1.0015*out.chisqr
+ prob = lmfit.confidence.f_compare(out, out2)
+ assert_allclose(prob, 0.2977506)
+
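+# A sketch of where 0.2977506 above comes from, assuming the standard F-test
+# form also used by my_f_compare at the bottom of this file:
+# nfree = 100 - 2 = 98, nfix = 2 - 1 = 1, dchi = 1.0015 - 1 = 0.0015, so
+# prob = scipy.stats.f.cdf(dchi * nfree / nfix, nfix, nfree)
+#      = scipy.stats.f.cdf(0.147, 1, 98) ~= 0.29775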
+
+def test_copy_and_restore_vals(data, pars):
+ """Test functions to save and restore parameter values and stderrs."""
+ # test copy_vals without/with stderr present
+ copy_pars = lmfit.confidence.copy_vals(pars)
+
+ assert isinstance(copy_pars, dict)
+ for _, par in enumerate(pars):
+ assert_allclose(pars[par].value, copy_pars[par][0])
+ assert copy_pars[par][1] is None # no stderr present
+
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+ copy_pars_out = lmfit.confidence.copy_vals(out.params)
+
+ assert isinstance(copy_pars_out, dict)
+ for _, par in enumerate(out.params):
+ assert_allclose(out.params[par].value, copy_pars_out[par][0])
+ assert_allclose(out.params[par].stderr, copy_pars_out[par][1])
+
+ # test restore_vals to the original parameter set after changing them first
+ pars['a'].set(value=1.0)
+ pars['b'].set(value=10)
+ lmfit.confidence.restore_vals(copy_pars, pars)
+
+ assert isinstance(pars, lmfit.parameter.Parameters)
+ assert_allclose(pars['a'].value, 0.1)
+ assert_allclose(pars['b'].value, 1.0)
+ assert pars['a'].stderr is None
+ assert pars['b'].stderr is None
+
+ lmfit.confidence.restore_vals(copy_pars_out, pars)
+ for _, par in enumerate(pars):
+ assert_allclose(pars[par].value, out.params[par].value)
+ assert_allclose(pars[par].stderr, out.params[par].stderr)
+
+
+@pytest.mark.parametrize("verbose", [False, True])
+def test_confidence_leastsq(data, pars, verbose, capsys):
+ """Calculate confidence interval after leastsq minimization."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+
+ assert 5 < out.nfev < 500
+ assert out.chisqr < 3.0
+ assert out.nvarys == 2
+ assert_allclose(out.params['a'], 0.1, rtol=0.01)
+ assert_allclose(out.params['b'], 2.0, rtol=0.01)
+
+ ci = lmfit.conf_interval(minimizer, out, verbose=verbose)
+ assert_allclose(ci['b'][0][0], 0.997, rtol=0.01)
+ assert_allclose(ci['b'][0][1], 1.947, rtol=0.01)
+ assert_allclose(ci['b'][2][0], 0.683, rtol=0.01)
+ assert_allclose(ci['b'][2][1], 1.972, rtol=0.01)
+ assert_allclose(ci['b'][5][0], 0.95, rtol=0.01)
+ assert_allclose(ci['b'][5][1], 2.01, rtol=0.01)
+
+ if verbose:
+ captured = capsys.readouterr()
+ assert 'Calculating CI for' in captured.out
+
+
+def test_confidence_pnames(data, pars):
+ """Test if pnames works as expected."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+
+ assert_allclose(out.params['a'], 0.1, rtol=0.01)
+ assert_allclose(out.params['b'], 2.0, rtol=0.01)
+
+ ci = lmfit.conf_interval(minimizer, out, p_names=['a'])
+ assert 'a' in ci
+ assert 'b' not in ci
+
+
+def test_confidence_bounds_reached(data, pars):
+ """Check if conf_interval handles bounds correctly"""
+
+ # Should work
+ pars['a'].max = 0.2
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+ out.params['a'].stderr = 1
+ lmfit.conf_interval(minimizer, out, verbose=True)
+
+ # Should warn (i.e., limit < para.min)
+ pars['b'].max = 2.03
+ pars['b'].min = 1.97
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+ out.params['b'].stderr = 0.005
+ out.params['a'].stderr = 0.01
+ with pytest.warns(UserWarning, match="Bound reached"):
+ lmfit.conf_interval(minimizer, out, verbose=True)
+
+ # Should warn (i.e., limit > para.max)
+ out.params['b'].stderr = 0.1
+ with pytest.warns(UserWarning, match="Bound reached"):
+ lmfit.conf_interval(minimizer, out, verbose=True)
+
+
+def test_confidence_sigma_vs_prob(data, pars):
+ """Calculate confidence by specifying sigma or probability."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+
+ ci_sigma_None = lmfit.conf_interval(minimizer, out, sigmas=None)
+ ci_sigmas = lmfit.conf_interval(minimizer, out, sigmas=[1, 2, 3])
+ ci_1sigma = lmfit.conf_interval(minimizer, out, sigmas=[1])
+ ci_probs = lmfit.conf_interval(minimizer, out,
+ sigmas=[0.68269, 0.9545, 0.9973])
+
+ assert ci_sigma_None == ci_sigmas
+ assert_allclose(ci_sigmas['a'][0][1], ci_probs['a'][0][1], rtol=0.01)
+ assert_allclose(ci_sigmas['b'][2][1], ci_probs['b'][2][1], rtol=0.01)
+ assert len(ci_1sigma['a']) == 3
+ assert len(ci_probs['a']) == 7
+
+
+def test_confidence_exceptions(data, pars):
+ """Make sure the proper exceptions are raised when needed."""
+ minimizer = lmfit.Minimizer(residual, pars, calc_covar=False,
+ fcn_args=data)
+ out = minimizer.minimize(method='nelder')
+ out_lsq = minimizer.minimize(params=out.params, method='leastsq')
+
+ # no uncertainty estimated
+ msg = 'Cannot determine Confidence Intervals without sensible uncertainty'
+ with pytest.raises(lmfit.MinimizerException, match=msg):
+ lmfit.conf_interval(minimizer, out)
+
+ # uncertainty is NaN
+ out_lsq.params['a'].stderr = np.nan
+ with pytest.raises(lmfit.MinimizerException, match=msg):
+ lmfit.conf_interval(minimizer, out_lsq)
+
+ # only one varying parameter
+ out_lsq.params['a'].vary = False
+ msg = r'Cannot determine Confidence Intervals with < 2 variables'
+ with pytest.raises(lmfit.MinimizerException, match=msg):
+ lmfit.conf_interval(minimizer, out_lsq)
+
+
+def test_confidence_warnings(data, pars):
+ """Make sure the proper warnings are emitted when needed."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.minimize(method='leastsq')
+
+ with pytest.warns(UserWarning) as record:
+ lmfit.conf_interval(minimizer, out, maxiter=1)
+ assert 'maxiter=1 reached and prob' in str(record[0].message)
+
+
+def test_confidence_with_trace(data, pars):
+ """Test calculation of confidence intervals with trace."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.leastsq()
+
+ ci, tr = lmfit.conf_interval(minimizer, out, sigmas=[0.6827], trace=True)
+ for p in out.params:
+ diff1 = ci[p][1][1] - ci[p][0][1]
+ diff2 = ci[p][2][1] - ci[p][1][1]
+ stderr = out.params[p].stderr
+ assert abs(diff1 - stderr) / stderr < 0.05
+ assert abs(diff2 - stderr) / stderr < 0.05
+
+ assert p in tr.keys()
+ assert 'prob' in tr[p].keys()
+
+
+def test_confidence_2d(data, pars):
+ """Test the 2D confidence interval calculation."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.minimize(method='leastsq')
+
+ cx, cy, grid = lmfit.conf_interval2d(minimizer, out, 'a', 'b', 30, 20)
+ assert len(cx.ravel()) == 30
+ assert len(cy.ravel()) == 20
+ assert grid.shape == (20, 30)
+
+
+def test_confidence_2d_limits(data, pars):
+ """Test the 2D confidence interval calculation using limits."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.minimize(method='leastsq')
+
+ lim = ((1.0e-6, 0.02), (1.0e-6, -4.0))
+ cx, cy, grid = lmfit.conf_interval2d(minimizer, out, 'a', 'b', limits=lim)
+ assert grid.shape == (10, 10)
+ assert_allclose(min(cx.ravel()), 1.0e-6)
+ assert_allclose(max(cx.ravel()), 0.02)
+ assert_allclose(min(cy.ravel()), -4.0)
+ assert_allclose(max(cy.ravel()), 1.0e-6)
+
+
+def test_confidence_prob_func(data, pars):
+ """Test conf_interval with alternate prob_func."""
+ minimizer = lmfit.Minimizer(residual, pars, fcn_args=data)
+ out = minimizer.minimize(method='leastsq')
+ called = 0
+
+ def my_f_compare(best_fit, new_fit):
+ nonlocal called
+ called += 1
+ nfree = best_fit.nfree
+ nfix = best_fit.nvarys - new_fit.nvarys
+ dchi = new_fit.chisqr / best_fit.chisqr - 1.0
+ return f.cdf(dchi * nfree / nfix, nfix, nfree)
+
+ lmfit.conf_interval(minimizer, out, sigmas=[1], prob_func=my_f_compare)
+ assert called > 10
diff --git a/tests/test_covariance_matrix.py b/tests/test_covariance_matrix.py
new file mode 100644
index 0000000..8c7d733
--- /dev/null
+++ b/tests/test_covariance_matrix.py
@@ -0,0 +1,243 @@
+import os
+
+import numpy as np
+from numpy import pi
+from numpy.testing import assert_allclose, assert_almost_equal
+import pytest
+
+from lmfit import Parameters, minimize
+from lmfit.lineshapes import exponential
+from lmfit.models import ExponentialModel, LinearModel, VoigtModel
+
+
+def check(para, real_val, sig=3):
+ err = abs(para.value - real_val)
+ assert err < sig * para.stderr
+
+
+def test_bounded_parameters():
+ # create data to be fitted
+ np.random.seed(1)
+ x = np.linspace(0, 15, 301)
+ data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=len(x), scale=0.2))
+
+ # define objective function: returns the array to be minimized
+ def fcn2min(params, x, data):
+ """ model decaying sine wave, subtract data"""
+ amp = params['amp']
+ shift = params['shift']
+ omega = params['omega']
+ decay = params['decay']
+
+ model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+ # create a set of Parameters
+ params = Parameters()
+ params.add('amp', value=10, min=0, max=50)
+ params.add('decay', value=0.1, min=0, max=10)
+ params.add('shift', value=0.0, min=-pi/2., max=pi/2.)
+ params.add('omega', value=3.0, min=0, max=np.inf)
+
+ # do fit, here with leastsq model
+ result = minimize(fcn2min, params, args=(x, data),
+ epsfcn=1.e-14)
+
+ # assert that the real parameters are found
+ for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
+ check(para, val)
+
+ # assert that the covariance matrix is correct [cf. lmfit v0.9.10]
+ cov_x = np.array([
+ [1.42428250e-03, 9.45395985e-06, -4.33997922e-05, 1.07362106e-05],
+ [9.45395985e-06, 1.84110424e-07, -2.90588963e-07, 7.19107184e-08],
+ [-4.33997922e-05, -2.90588963e-07, 9.53427031e-05, -2.37750362e-05],
+ [1.07362106e-05, 7.19107184e-08, -2.37750362e-05, 9.60952336e-06]])
+ assert_allclose(result.covar, cov_x, rtol=1.e-3, atol=1.e-6)
+
+ # assert that stderr and correlations are correct [cf. lmfit v0.9.10]
+ assert_almost_equal(result.params['amp'].stderr, 0.03773967, decimal=4)
+ assert_almost_equal(result.params['decay'].stderr, 4.2908e-04, decimal=4)
+ assert_almost_equal(result.params['shift'].stderr, 0.00976436, decimal=4)
+ assert_almost_equal(result.params['omega'].stderr, 0.00309992, decimal=4)
+
+ assert_almost_equal(result.params['amp'].correl['decay'],
+ 0.5838166760743324, decimal=4)
+ assert_almost_equal(result.params['amp'].correl['shift'],
+ -0.11777303073961824, decimal=4)
+ assert_almost_equal(result.params['amp'].correl['omega'],
+ 0.09177027400788784, decimal=4)
+ assert_almost_equal(result.params['decay'].correl['shift'],
+ -0.0693579417651835, decimal=4)
+ assert_almost_equal(result.params['decay'].correl['omega'],
+ 0.05406342001021014, decimal=4)
+ assert_almost_equal(result.params['shift'].correl['omega'],
+ -0.7854644476455469, decimal=4)
+
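+# The stderr and correl values asserted above are consistent with the usual
+# reading of the covariance matrix: stderr_i = sqrt(covar[i, i]) and
+# correl[i][j] = covar[i, j] / (stderr_i * stderr_j); e.g.
+# sqrt(1.42428250e-03) ~= 0.037740 (amp.stderr) and
+# 9.45395985e-06 / (0.037740 * 4.2908e-04) ~= 0.5838 (amp-decay correlation).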
+
+def test_bounds_expression():
+ # load data to be fitted
+ data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples',
+ 'test_peak.dat'))
+ x = data[:, 0]
+ y = data[:, 1]
+
+ # define the model and initialize parameters
+ mod = VoigtModel()
+ params = mod.guess(y, x=x)
+ params['amplitude'].set(min=0, max=100)
+ params['center'].set(min=5, max=10)
+
+ # do fit, here with the default leastsq method
+ result = mod.fit(y, params, x=x, fit_kws={'epsfcn': 1.e-14})
+
+ # assert that stderr and correlations are correct [cf. lmfit v0.9.10]
+ assert_almost_equal(result.params['sigma'].stderr, 0.00368468, decimal=4)
+ assert_almost_equal(result.params['center'].stderr, 0.00505496, decimal=4)
+ assert_almost_equal(result.params['amplitude'].stderr, 0.13861506,
+ decimal=4)
+ assert_almost_equal(result.params['gamma'].stderr, 0.00368468, decimal=4)
+ assert_almost_equal(result.params['fwhm'].stderr, 0.01326862028, decimal=4)
+ assert_almost_equal(result.params['height'].stderr, 0.0395990, decimal=4)
+
+ assert_almost_equal(result.params['sigma'].correl['center'],
+ -4.6623973788006615e-05, decimal=4)
+ assert_almost_equal(result.params['sigma'].correl['amplitude'],
+ 0.651304091954038, decimal=4)
+ assert_almost_equal(result.params['center'].correl['amplitude'],
+ -4.390334984618851e-05, decimal=4)
+
+
+@pytest.mark.parametrize("fit_method", ['nelder', 'lbfgs'])
+def test_numdifftools_no_bounds(fit_method):
+ pytest.importorskip("numdifftools")
+
+ np.random.seed(7)
+ x = np.linspace(0, 100, num=50)
+ noise = np.random.normal(scale=0.25, size=x.size)
+ y = exponential(x, amplitude=5, decay=15) + noise
+
+ mod = ExponentialModel()
+ params = mod.guess(y, x=x)
+
+ # do fit, first with the leastsq method as the reference result
+ result = mod.fit(y, params, x=x, method='leastsq')
+
+ result_ndt = mod.fit(y, params, x=x, method=fit_method)
+
+ # assert that fit converged to the same result
+ vals = [result.params[p].value for p in result.params.valuesdict()]
+ vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()]
+ assert_allclose(vals_ndt, vals, rtol=0.1)
+ assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5)
+
+ # assert that parameter uncertainties from leastsq and calculated from
+ # the covariance matrix using numdifftools are very similar
+ stderr = [result.params[p].stderr for p in result.params.valuesdict()]
+ stderr_ndt = [result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()]
+
+ perr = np.array(stderr) / np.array(vals)
+ perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
+ assert_almost_equal(perr_ndt, perr, decimal=3)
+
+ # assert that parameter correlations from leastsq and calculated from
+ # the covariance matrix using numdifftools are very similar
+ for par1 in result.var_names:
+ cor = [result.params[par1].correl[par2] for par2 in
+ result.params[par1].correl.keys()]
+ cor_ndt = [result_ndt.params[par1].correl[par2] for par2 in
+ result_ndt.params[par1].correl.keys()]
+ assert_almost_equal(cor_ndt, cor, decimal=2)
+
+
+@pytest.mark.parametrize("fit_method", ['nelder', 'basinhopping', 'ampgo',
+ 'shgo', 'dual_annealing'])
+def test_numdifftools_with_bounds(fit_method):
+ pytest.importorskip("numdifftools")
+
+ # load data to be fitted
+ data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples',
+ 'test_peak.dat'))
+ x = data[:, 0]
+ y = data[:, 1]
+
+ # define the model and initialize parameters
+ mod = VoigtModel()
+ params = mod.guess(y, x=x)
+ params['amplitude'].set(min=25, max=70)
+ params['sigma'].set(max=1)
+ params['center'].set(min=5, max=15)
+
+ # do fit, first with the leastsq method as the reference result
+ result = mod.fit(y, params, x=x, method='leastsq')
+
+ result_ndt = mod.fit(y, params, x=x, method=fit_method)
+
+ # assert that fit converged to the same result
+ vals = [result.params[p].value for p in result.params.valuesdict()]
+ vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()]
+ assert_allclose(vals_ndt, vals, rtol=0.1)
+ assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5)
+
+ # assert that parameter uncertainties from leastsq and calculated from
+ # the covariance matrix using numdifftools are very similar
+ stderr = [result.params[p].stderr for p in result.params.valuesdict()]
+ stderr_ndt = [result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()]
+
+ perr = np.array(stderr) / np.array(vals)
+ perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
+ assert_almost_equal(perr_ndt, perr, decimal=3)
+
+ # assert that parameter correlations from leastsq and calculated from
+ # the covariance matrix using numdifftools are very similar
+ for par1 in result.var_names:
+ cor = [result.params[par1].correl[par2] for par2 in
+ result.params[par1].correl.keys()]
+ cor_ndt = [result_ndt.params[par1].correl[par2] for par2 in
+ result_ndt.params[par1].correl.keys()]
+ assert_almost_equal(cor_ndt, cor, decimal=2)
+
+
+def test_numdifftools_calc_covar_false():
+ pytest.importorskip("numdifftools")
+ # load data to be fitted
+ data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..', 'examples',
+ 'test_peak.dat'))
+ x = data[:, 0]
+ y = data[:, 1]
+
+ # define the model and initialize parameters
+ mod = VoigtModel()
+ params = mod.guess(y, x=x)
+ params['sigma'].set(min=-np.inf)
+
+ # do fit, with leastsq and nelder
+ result = mod.fit(y, params, x=x, method='leastsq')
+ result_ndt = mod.fit(y, params, x=x, method='nelder', calc_covar=False)
+
+ # assert that fit converged to the same result
+ vals = [result.params[p].value for p in result.params.valuesdict()]
+ vals_ndt = [result_ndt.params[p].value for p in result_ndt.params.valuesdict()]
+ assert_allclose(vals_ndt, vals, rtol=5e-3)
+ assert_allclose(result_ndt.chisqr, result.chisqr)
+
+ assert result_ndt.covar is None
+ assert result_ndt.errorbars is False
+
+
+def test_final_parameter_values():
+ model = LinearModel()
+ params = model.make_params()
+ params['intercept'].set(value=-1, min=-20, max=0)
+ params['slope'].set(value=1, min=-100, max=400)
+
+ np.random.seed(78281)
+ x = np.linspace(0, 9, 10)
+ y = x * 1.34 - 4.5 + np.random.normal(scale=0.05, size=x.size)
+
+ result = model.fit(y, x=x, method='nelder', params=params)
+
+ assert_almost_equal(result.chisqr, 0.014625543, decimal=6)
+ assert_almost_equal(result.params['intercept'].value, -4.511087126, decimal=6)
+ assert_almost_equal(result.params['slope'].value, 1.339685514, decimal=6)
diff --git a/tests/test_custom_independentvar.py b/tests/test_custom_independentvar.py
new file mode 100644
index 0000000..4be2f53
--- /dev/null
+++ b/tests/test_custom_independentvar.py
@@ -0,0 +1,45 @@
+import numpy as np
+
+from lmfit.lineshapes import gaussian
+from lmfit.models import Model
+
+
+class Stepper:
+ def __init__(self, start, stop, npts):
+ self.start = start
+ self.stop = stop
+ self.npts = npts
+
+ def get_x(self):
+ return np.linspace(self.start, self.stop, self.npts)
+
+
+def gaussian_mod(obj, amplitude, center, sigma):
+ return gaussian(obj.get_x(), amplitude, center, sigma)
+
+
+def test_custom_independentvar():
+ """Tests using a non-trivial object as an independent variable."""
+ npts = 501
+ xmin = 1
+ xmax = 21
+ cen = 8
+ obj = Stepper(xmin, xmax, npts)
+ y = gaussian(obj.get_x(), amplitude=3.0, center=cen, sigma=2.5)
+ y += np.random.normal(scale=0.2, size=npts)
+
+ gmod = Model(gaussian_mod)
+
+ params = gmod.make_params(amplitude=2, center=5, sigma=8)
+ out = gmod.fit(y, params, obj=obj)
+
+ assert out.nvarys == 3
+ assert out.nfev > 10
+ assert out.chisqr > 1
+ assert out.chisqr < 100
+ assert out.params['sigma'].value < 3
+ assert out.params['sigma'].value > 2
+ assert out.params['center'].value > xmin
+ assert out.params['center'].value < xmax
+ assert out.params['amplitude'].value > 1
+ assert out.params['amplitude'].value < 5
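+
+# note: this works because lmfit.Model treats a model-function argument that
+# is not turned into a Parameter as an independent variable, so 'obj' can be
+# an arbitrary Python object as long as the model function knows how to turn
+# it into numeric data (here via obj.get_x()).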
diff --git a/tests/test_default_kws.py b/tests/test_default_kws.py
new file mode 100644
index 0000000..8ca90ff
--- /dev/null
+++ b/tests/test_default_kws.py
@@ -0,0 +1,24 @@
+import numpy as np
+
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel
+
+
+def test_default_inputs_gauss():
+ area = 1
+ cen = 0
+ std = 0.2
+ x = np.arange(-3, 3, 0.01)
+ y = gaussian(x, area, cen, std)
+
+ g = GaussianModel()
+
+ fit_option1 = {'xtol': 1e-2}
+ result1 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
+ max_nfev=5000, fit_kws=fit_option1)
+
+ fit_option2 = {'xtol': 1e-6}
+ result2 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5,
+ max_nfev=5000, fit_kws=fit_option2)
+
+ assert result1.values != result2.values
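+
+# note: fit_kws is forwarded to the underlying solver, here scipy's leastsq,
+# where xtol is the relative-error tolerance on the solution; the much looser
+# xtol=1e-2 run stops earlier, which is why the two results are expected to
+# differ.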
diff --git a/tests/test_dual_annealing.py b/tests/test_dual_annealing.py
new file mode 100644
index 0000000..c3e5ca0
--- /dev/null
+++ b/tests/test_dual_annealing.py
@@ -0,0 +1,74 @@
+"""Tests for the Dual Annealing algorithm."""
+
+import numpy as np
+from numpy.testing import assert_allclose
+import scipy
+from scipy import __version__ as scipy_version
+
+import lmfit
+
+
+def eggholder(x):
+ return (-(x[1] + 47.0) * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
+ - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))))
+
+
+def eggholder_lmfit(params):
+ x0 = params['x0'].value
+ x1 = params['x1'].value
+
+ return (-(x1 + 47.0) * np.sin(np.sqrt(abs(x0/2.0 + (x1 + 47.0))))
+ - x0 * np.sin(np.sqrt(abs(x0 - (x1 + 47.0)))))
+
+
+def test_da_scipy_vs_lmfit():
+ """Test DA algorithm in lmfit versus SciPy."""
+ bounds = [(-512, 512), (-512, 512)]
+ result_scipy = scipy.optimize.dual_annealing(eggholder, bounds, seed=7)
+
+ pars = lmfit.Parameters()
+ pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512))
+ mini = lmfit.Minimizer(eggholder_lmfit, pars)
+ result = mini.minimize(method='dual_annealing', seed=7)
+ out_x = np.array([result.params['x0'].value, result.params['x1'].value])
+
+ assert_allclose(result_scipy.fun, result.residual)
+ assert_allclose(result_scipy.x, out_x)
+
+
+# TODO: add scipy example from docstring after the reproducibility issue in
+# https://github.com/scipy/scipy/issues/9732 is resolved.
+
+# correct result for Alpine02 function
+global_optimum = [7.91705268, 4.81584232]
+fglob = -6.12950
+
+
+def test_da_Alpine02(minimizer_Alpine02):
+ """Test dual_annealing algorithm on Alpine02 function."""
+ out = minimizer_Alpine02.minimize(method='dual_annealing')
+ out_x = np.array([out.params['x0'].value, out.params['x1'].value])
+
+ assert_allclose(out.residual, fglob, rtol=1e-5)
+ assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
+ assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
+ assert out.method == 'dual_annealing'
+
+ # FIXME: update when SciPy requirement is >= 1.8
+ # ``local_search_options`` deprecated in favor of ``minimizer_kwargs``
+ if int(scipy_version.split('.')[1]) >= 8:
+ assert 'minimizer_kwargs' in out.call_kws
+ else:
+ assert 'local_search_options' in out.call_kws
+
+
+def test_da_bounds(minimizer_Alpine02):
+ """Test dual_annealing algorithm with bounds."""
+ pars_bounds = lmfit.Parameters()
+ pars_bounds.add_many(('x0', 1., True, 5.0, 15.0),
+ ('x1', 1., True, 2.5, 7.5))
+
+ out = minimizer_Alpine02.minimize(params=pars_bounds,
+ method='dual_annealing')
+ assert 5.0 <= out.params['x0'].value <= 15.0
+ assert 2.5 <= out.params['x1'].value <= 7.5
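+
+# note: scipy's dual_annealing requires finite bounds on every variable, which
+# is why all varying parameters in these tests carry explicit min/max values.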
diff --git a/tests/test_itercb.py b/tests/test_itercb.py
new file mode 100644
index 0000000..16901a3
--- /dev/null
+++ b/tests/test_itercb.py
@@ -0,0 +1,130 @@
+"""Tests for the Iteration Callback Function."""
+
+import numpy as np
+import pytest
+
+from lmfit.lineshapes import gaussian
+from lmfit.minimizer import Minimizer
+from lmfit.models import GaussianModel, LinearModel
+
+try:
+ import numdifftools # noqa: F401
+ calc_covar_options = [False, True]
+except ImportError:
+ calc_covar_options = [False]
+
+
+np.random.seed(7)
+x = np.linspace(0, 20, 401)
+y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+y -= 0.20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))
+mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+
+def residual(pars, x, data):
+ parvals = pars.valuesdict()
+ gauss = gaussian(x, parvals['peak_amplitude'], parvals['peak_center'],
+ parvals['peak_sigma'])
+ linear = parvals['bkg_slope']*x + parvals['bkg_intercept']
+ return data - gauss - linear
+
+
+pars = mod.make_params(peak_amplitude=21.0, peak_center=7.0,
+ peak_sigma=2.0, bkg_intercept=2, bkg_slope=0.0)
+
+# set bounds for use with 'differential_evolution' and 'brute'
+pars['bkg_intercept'].set(min=0, max=10)
+pars['bkg_slope'].set(min=-5, max=5)
+pars['peak_amplitude'].set(min=20, max=25)
+pars['peak_center'].set(min=5, max=10)
+pars['peak_sigma'].set(min=0.5, max=2)
+
+
+def per_iteration(pars, iteration, resid, *args, **kws):
+ """Iteration callback, will abort at iteration 23."""
+ return iteration == 17
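+
+# Note: lmfit aborts a fit as soon as ``iter_cb`` returns a truthy value, so
+# the callback above stops every solver after 17 function evaluations.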
+
+
+fitmethods = ['ampgo', 'brute', 'basinhopping', 'differential_evolution',
+ 'leastsq', 'least_squares', 'nelder', 'shgo', 'dual_annealing']
+
+
+@pytest.mark.parametrize("calc_covar", calc_covar_options)
+@pytest.mark.parametrize("method", fitmethods)
+def test_itercb_model_class(method, calc_covar):
+ """Test the iteration callback for all solvers."""
+ out = mod.fit(y, pars, x=x, method=method, iter_cb=per_iteration,
+ calc_covar=calc_covar)
+
+ assert out.nfev == 17
+ assert out.aborted
+ assert not out.errorbars
+ assert not out.success
+
+
+@pytest.mark.parametrize("calc_covar", calc_covar_options)
+@pytest.mark.parametrize("method", fitmethods)
+def test_itercb_minimizer_class(method, calc_covar):
+ """Test the iteration callback for all solvers."""
+ if method in ('nelder', 'differential_evolution'):
+ pytest.xfail("scalar_minimizers behave differently, but shouldn't!!")
+
+ mini = Minimizer(residual, pars, fcn_args=(x, y), iter_cb=per_iteration,
+ calc_covar=calc_covar)
+ out = mini.minimize(method=method)
+
+ assert out.nfev == 17
+ assert out.aborted
+ assert not out.errorbars
+ assert not out.success
+ assert mini._abort
+
+
+fitmethods = ['leastsq', 'least_squares']
+
+
+@pytest.mark.parametrize("method", fitmethods)
+def test_itercb_reset_abort(method):
+ """Regression test for GH Issue #756.
+
+ Make sure that ``self._abort`` is reset to ``False`` at the start of each
+ fit.
+
+ """
+ if method in ('nelder', 'differential_evolution'):
+ pytest.xfail("scalar_minimizers behave differently, but shouldn't!!")
+
+ must_stop = True
+
+ def callback(*args, **kwargs):
+ return must_stop
+
+ # perform minimization with ``iter_cb``
+ out_model = mod.fit(y, pars, x=x, method=method, iter_cb=callback)
+
+ mini = Minimizer(residual, pars, fcn_args=(x, y), iter_cb=callback)
+ out_minimizer = mini.minimize(method=method)
+
+ assert out_model.aborted is must_stop
+ assert out_model.errorbars is not must_stop
+ assert out_model.success is not must_stop
+ assert out_minimizer.aborted is must_stop
+ assert out_minimizer.errorbars is not must_stop
+ assert out_minimizer.success is not must_stop
+ assert mini._abort is must_stop
+
+ # perform another minimization now without ``iter_cb``
+ must_stop = False
+ out_minimizer_no_callback = mini.minimize(method=method)
+ assert out_minimizer_no_callback.aborted is must_stop
+ assert out_minimizer_no_callback.errorbars is not must_stop
+ assert out_minimizer_no_callback.success is not must_stop
+ assert mini._abort is must_stop
+
+ # reset to mini._abort to False and call the optimization method directly
+ func = getattr(mini, method)
+ out_no_callback = func()
+ assert out_no_callback.aborted is must_stop
+ assert out_no_callback.errorbars is not must_stop
+ assert out_no_callback.success is not must_stop
+ assert mini._abort is must_stop
diff --git a/tests/test_jsonutils.py b/tests/test_jsonutils.py
new file mode 100644
index 0000000..802f045
--- /dev/null
+++ b/tests/test_jsonutils.py
@@ -0,0 +1,87 @@
+"""Tests for the JSON utilities."""
+from types import BuiltinFunctionType, FunctionType
+
+import numpy as np
+import pytest
+from scipy.optimize import basinhopping
+
+import lmfit
+from lmfit.jsonutils import decode4js, encode4js, find_importer, import_from
+from lmfit.printfuncs import alphanumeric_sort
+
+
+@pytest.mark.parametrize('obj', [alphanumeric_sort, np.array, basinhopping])
+def test_import_from(obj):
+ """Check return value of find_importer function."""
+ importer = find_importer(obj)
+ assert isinstance(import_from(importer, obj.__name__),
+ (BuiltinFunctionType, FunctionType))
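+
+# ``find_importer(obj)`` returns the name of the module that provides ``obj``;
+# ``import_from(module, name)`` performs the reverse lookup. Together they are
+# used by encode4js/decode4js to serialize references to plain functions.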
+
+
+# A test case for a string object that triggers a UnicodeError is still
+# missing; we cannot find a way to raise that exception (it may no longer be
+# possible in Python 3).
+objects = [('test_string', (str,)),
+ (np.array([7.0]), np.ndarray),
+ (np.array([1.0+2.0j]), np.ndarray),
+ (123.456, float),
+ (10, int),
+ ('café', (str,)),
+ (10.0-5.0j, complex),
+ (['a', 'b', 'c'], list),
+ (('a', 'b', 'c'), tuple),
+ ({'a': 1.0, 'b': 2.0, 'c': 3.0}, dict),
+ (lmfit.lineshapes.gaussian, FunctionType),
+ (np.array(['a', np.array([1, 2, 3])], dtype=object), np.ndarray)]
+
+
+@pytest.mark.parametrize('obj, obj_type', objects)
+def test_encode_decode(obj, obj_type):
+ """Test encoding/decoding of the various object types to/from JSON."""
+ encoded = encode4js(obj)
+ decoded = decode4js(encoded)
+
+ if isinstance(obj, np.ndarray) and obj.dtype == 'object':
+ assert decoded[0] == obj[0]
+ assert np.all(decoded[1] == obj[1])
+ else:
+ assert decoded == obj
+
+ assert isinstance(decoded, obj_type)
+
+
+def test_encode_decode_pandas():
+ """Test encoding/decoding of various pandas objects to/from JSON."""
+ pytest.importorskip('pandas')
+ import pandas as pd
+
+ obj_df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
+ columns=['a', 'b', 'c'])
+ encoded_df = encode4js(obj_df)
+ decoded_df = decode4js(encoded_df)
+ assert np.all(pd.DataFrame.eq(obj_df, decoded_df))
+ assert isinstance(decoded_df, pd.DataFrame)
+
+ obj_ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
+ encoded_ser = encode4js(obj_ser)
+ decoded_ser = decode4js(encoded_ser)
+ assert np.all(pd.Series.eq(obj_ser, decoded_ser))
+ assert isinstance(decoded_ser, pd.Series)
+
+
+def test_altered_params_json():
+ """Regression test for loading altered JSON Parameters (see GH #739)."""
+ pars = lmfit.Parameters()
+ pars.add('a', 3.0, min=0)
+ pars.add('b', 10.0, max=1000)
+ pars.add('c', 20.0)
+ pars.add('d', expr='c - b/a')
+
+ # mangle JSON as JavaScript or others might:
+ json_rep = pars.dumps().replace('-Infinity', 'null').replace('Infinity', 'null')
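+ # on loading, lmfit must map the ``null`` values back to -inf/+inf bounds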
+
+ new = lmfit.Parameters()
+ new.loads(json_rep)
+ for vname in ('a', 'b', 'c', 'd'):
+ assert new[vname].value == pars[vname].value
+ assert new[vname].min == pars[vname].min
+ assert new[vname].max == pars[vname].max
diff --git a/tests/test_least_squares.py b/tests/test_least_squares.py
new file mode 100644
index 0000000..bb592c7
--- /dev/null
+++ b/tests/test_least_squares.py
@@ -0,0 +1,171 @@
+"""Tests for the least_squares minimization algorithm."""
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from scipy.sparse import bsr_matrix
+from scipy.sparse.linalg import aslinearoperator
+
+import lmfit
+from lmfit.models import VoigtModel
+
+
+def test_least_squares_with_bounds():
+ """Test least_squares algorihhm with bounds."""
+ # define "true" parameters
+ p_true = lmfit.Parameters()
+ p_true.add('amp', value=14.0)
+ p_true.add('period', value=5.4321)
+ p_true.add('shift', value=0.12345)
+ p_true.add('decay', value=0.01000)
+
+ def residual(pars, x, data=None):
+ """Objective function of decaying sine wave."""
+ amp = pars['amp']
+ per = pars['period']
+ shift = pars['shift']
+ decay = pars['decay']
+
+ if abs(shift) > np.pi/2:
+ shift = shift - np.sign(shift)*np.pi
+
+ model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return model - data
+
+ # generate synthetic data
+ np.random.seed(0)
+ x = np.linspace(0.0, 250.0, 1500)
+ noise = np.random.normal(scale=2.80, size=x.size)
+ data = residual(p_true, x) + noise
+
+ # create Parameters and set initial values and bounds
+ fit_params = lmfit.Parameters()
+ fit_params.add('amp', value=13.0, min=0.0, max=20)
+ fit_params.add('period', value=2, max=10)
+ fit_params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
+ fit_params.add('decay', value=0.02, min=0.0, max=0.10)
+
+ mini = lmfit.Minimizer(residual, fit_params, fcn_args=(x, data))
+ out = mini.minimize(method='least_squares')
+
+ assert out.method == 'least_squares'
+ assert out.nfev > 10
+ assert out.nfree > 50
+ assert out.chisqr > 1.0
+ assert out.errorbars
+ assert out.success
+ assert_allclose(out.params['decay'], p_true['decay'], rtol=1e-2)
+ assert_allclose(out.params['shift'], p_true['shift'], rtol=1e-2)
+
+
+@pytest.mark.parametrize("bounds", [False, True])
+def test_least_squares_cov_x(peakdata, bounds):
+ """Test calculation of cov. matrix from Jacobian, with/without bounds."""
+ x = peakdata[0]
+ y = peakdata[1]
+
+ # define the model and initialize parameters
+ mod = VoigtModel()
+ params = mod.guess(y, x=x)
+
+ if bounds:
+ params['amplitude'].set(min=25, max=70)
+ params['sigma'].set(min=0, max=1)
+ params['center'].set(min=5, max=15)
+ else:
+ params['sigma'].set(min=-np.inf)
+
+ # do fit with least_squares and leastsq algorithm
+ result = mod.fit(y, params, x=x, method='least_squares')
+ result_lsq = mod.fit(y, params, x=x, method='leastsq',
+ fit_kws={'epsfcn': 1.e-14})
+
+ # assert that fit converged to the same result
+ vals = [result.params[p].value for p in result.params.valuesdict()]
+ vals_lsq = [result_lsq.params[p].value for p in
+ result_lsq.params.valuesdict()]
+ assert_allclose(vals, vals_lsq, rtol=1e-5)
+ assert_allclose(result.chisqr, result_lsq.chisqr)
+
+ # assert that parameter uncertainties obtained from the leastsq method and
+ # those from the covariance matrix estimated from the Jacobian matrix in
+ # least_squares are similar
+ stderr = [result.params[p].stderr for p in result.params.valuesdict()]
+ stderr_lsq = [result_lsq.params[p].stderr for p in
+ result_lsq.params.valuesdict()]
+ assert_allclose(stderr, stderr_lsq, rtol=1e-4)
+
+ # assert that parameter correlations obtained from the leastsq method and
+ # those from the covariance matrix estimated from the Jacobian matrix in
+ # least_squares are similar
+ for par1 in result.var_names:
+ cor = [result.params[par1].correl[par2] for par2 in
+ result.params[par1].correl.keys()]
+ cor_lsq = [result_lsq.params[par1].correl[par2] for par2 in
+ result_lsq.params[par1].correl.keys()]
+
+ assert_allclose(cor, cor_lsq, rtol=0.01, atol=1.e-6)
+
+
+def test_least_squares_solver_options(peakdata, capsys):
+ """Test least_squares algorithm, pass options to solver."""
+ x = peakdata[0]
+ y = peakdata[1]
+ mod = VoigtModel()
+ params = mod.guess(y, x=x)
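+ # ``fit_kws`` is passed straight through to scipy.optimize.least_squares;
+ # verbose=2 makes the solver print per-iteration progress and the final cost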
+ solver_kws = {'verbose': 2}
+ mod.fit(y, params, x=x, method='least_squares', fit_kws=solver_kws)
+ captured = capsys.readouterr()
+
+ assert 'Iteration' in captured.out
+ assert 'final cost' in captured.out
+
+
+def test_least_squares_jacobian_types():
+ """Test support for Jacobian of all types supported by least_squares."""
+ # Build function
+ # f(x, y) = (x - a)^2 + (y - b)^2
+ np.random.seed(42)
+ a = np.random.normal(0, 1, 50)
+ np.random.seed(43)
+ b = np.random.normal(0, 1, 50)
+
+ def f(params):
+ return (params['x'] - a)**2 + (params['y'] - b)**2
+
+ # Build analytic Jacobian functions with the different possible return types
+ # numpy.ndarray, scipy.sparse.spmatrix, scipy.sparse.linalg.LinearOperator
+ # J = [ 2x - 2a , 2y - 2b ]
+ def jac_array(params, *args, **kwargs):
+ return np.column_stack((2 * params[0] - 2 * a, 2 * params[1] - 2 * b))
+
+ def jac_sparse(params, *args, **kwargs):
+ return bsr_matrix(jac_array(params, *args, **kwargs))
+
+ def jac_operator(params, *args, **kwargs):
+ return aslinearoperator(jac_array(params, *args, **kwargs))
+ # Build parameters
+ params = lmfit.Parameters()
+ params.add('x', value=0)
+ params.add('y', value=0)
+ # Solve model for numerical Jacobian and each analytic Jacobian function
+ result = lmfit.minimize(f, params, method='least_squares')
+ result_array = lmfit.minimize(
+ f, params, method='least_squares',
+ jac=jac_array)
+ result_sparse = lmfit.minimize(
+ f, params, method='least_squares',
+ jac=jac_sparse)
+ result_operator = lmfit.minimize(
+ f, params, method='least_squares',
+ jac=jac_operator)
+ # Check that all have uncertainties
+ assert result.errorbars
+ assert result_array.errorbars
+ assert result_sparse.errorbars
+ assert result_operator.errorbars
+ # Check that all have ~equal covariance matrix
+ assert_allclose(result.covar, result_array.covar)
+ assert_allclose(result.covar, result_sparse.covar)
+ assert_allclose(result.covar, result_operator.covar)
diff --git a/tests/test_lineshapes.py b/tests/test_lineshapes.py
new file mode 100644
index 0000000..1d28aad
--- /dev/null
+++ b/tests/test_lineshapes.py
@@ -0,0 +1,146 @@
+"""Tests for lineshape functions."""
+
+import inspect
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+import pytest
+
+import lmfit
+from lmfit.lineshapes import not_zero, tiny
+
+
+@pytest.mark.parametrize(
+ "value, expected_result",
+ [(1, 1.0), (-1, -1.0), (0, tiny), (-0.0, -tiny), (np.array([1]), 1.0)],
+)
+def test_not_zero(value, expected_result):
+ """Test that not_zero gives the expected results"""
+ assert_almost_equal(not_zero(value), expected_result)
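+
+# For reference: ``not_zero(value)`` nudges values away from zero, roughly
+# ``copysign(max(tiny, abs(value)), value)``, so that lineshapes can divide
+# by it without raising a ZeroDivisionError.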
+
+
+@pytest.mark.parametrize("lineshape", lmfit.lineshapes.functions)
+def test_no_ZeroDivisionError_and_finite_output(lineshape):
+ """Tests for finite output and ZeroDivisionError is not raised."""
+ xvals = np.linspace(0, 10, 100)
+
+ func = getattr(lmfit.lineshapes, lineshape)
+ assert callable(func)
+ sig = inspect.signature(func)
+
+ # set the following function arguments:
+ # x = xvals
+ # center = 0.5*mean(xvals)
+ # center1 = 0.25*mean(xvals)
+ # center2 = 0.75*mean(xvals)
+ # form = default value (i.e., 'linear' or 'bose')
+ xvals_mid_range = xvals.mean()
+ zero_pars = [par_name for par_name in sig.parameters.keys() if par_name
+ not in ('x', 'form')]
+
+ for par_zero in zero_pars:
+ fnc_args = []
+ for par in sig.parameters.keys():
+ if par == 'x':
+ fnc_args.append(xvals)
+ elif par == 'center':
+ fnc_args.append(0.5*xvals_mid_range)
+ elif par == 'center1':
+ fnc_args.append(0.25*xvals_mid_range)
+ elif par == 'center2':
+ fnc_args.append(0.75*xvals_mid_range)
+ elif par == par_zero:
+ fnc_args.append(0.0)
+ else:
+ fnc_args.append(sig.parameters[par].default)
+
+ fnc_output = func(*fnc_args)
+ assert len(xvals) == len(fnc_output)
+ assert np.all(np.isfinite(fnc_output))
+
+
+@pytest.mark.parametrize("lineshape", lmfit.lineshapes.functions)
+def test_x_float_value(lineshape):
+ """Test lineshapes when x is not an array but a float."""
+ xval = 7.0
+
+ func = getattr(lmfit.lineshapes, lineshape)
+ sig = inspect.signature(func)
+
+ fnc_args = [xval]
+
+ for par in [par_name for par_name in sig.parameters.keys()
+ if par_name != 'x']:
+ fnc_args.append(sig.parameters[par].default)
+
+ fnc_output = func(*fnc_args)
+ assert isinstance(fnc_output, float)
+
+
+rising_form = ['erf', 'logistic', 'atan', 'arctan', 'linear', 'unknown']
+
+
+@pytest.mark.parametrize("form", rising_form)
+@pytest.mark.parametrize("lineshape", ['step', 'rectangle'])
+def test_form_argument_step_rectangle(form, lineshape):
+ """Test 'form' argument for step- and rectangle-functions."""
+ xvals = np.linspace(0, 10, 100)
+
+ func = getattr(lmfit.lineshapes, lineshape)
+ sig = inspect.signature(func)
+
+ fnc_args = [xvals]
+ for par in [par_name for par_name in sig.parameters.keys()
+ if par_name != 'x']:
+ if par == 'form':
+ fnc_args.append(form)
+ else:
+ fnc_args.append(sig.parameters[par].default)
+
+ if form == 'unknown':
+ msg = r"Invalid value .* for argument .*; should be one of .*"
+ with pytest.raises(ValueError, match=msg):
+ func(*fnc_args)
+ else:
+ fnc_output = func(*fnc_args)
+ assert len(fnc_output) == len(xvals)
+
+
+@pytest.mark.parametrize('form', rising_form)
+@pytest.mark.parametrize('lineshape', ['step', 'rectangle'])
+def test_value_step_rectangle(form, lineshape):
+ """Test values at mu1/mu2 for step- and rectangle-functions."""
+ func = getattr(lmfit.lineshapes, lineshape)
+ # at the step's center (the rectangle's center1) the value is amplitude/2
+ assert_almost_equal(func(0), 0.5)
+ # a rectangle takes the same value at its second edge, center2
+ if lineshape == 'rectangle':
+ assert_almost_equal(func(1), 0.5)
+
+
+thermal_form = ['bose', 'maxwell', 'fermi', 'Bose-Einstein', 'unknown']
+
+
+@pytest.mark.parametrize("form", thermal_form)
+def test_form_argument_thermal_distribution(form):
+ """Test 'form' argument for thermal_distribution function."""
+ xvals = np.linspace(0, 10, 100)
+
+ func = lmfit.lineshapes.thermal_distribution
+ sig = inspect.signature(lmfit.lineshapes.thermal_distribution)
+
+ fnc_args = [xvals]
+ for par in [par_name for par_name in sig.parameters.keys()
+ if par_name != 'x']:
+ if par == 'form':
+ fnc_args.append(form)
+ else:
+ fnc_args.append(sig.parameters[par].default)
+
+ if form == 'unknown':
+ msg = r"Invalid value .* for argument .*; should be one of .*"
+ with pytest.raises(ValueError, match=msg):
+ func(*fnc_args)
+ else:
+ fnc_output = func(*fnc_args)
+ assert len(fnc_output) == len(xvals)
diff --git a/tests/test_manypeaks_speed.py b/tests/test_manypeaks_speed.py
new file mode 100644
index 0000000..5c51bd5
--- /dev/null
+++ b/tests/test_manypeaks_speed.py
@@ -0,0 +1,35 @@
+"""Test speed of building complex model."""
+from copy import deepcopy
+import sys
+import time
+
+import numpy as np
+import pytest
+
+from lmfit import Model
+from lmfit.lineshapes import gaussian
+
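+# a composite of 500 models nests CompositeModel objects deeply enough that
+# deepcopy can exceed the default recursion limit, hence the higher limit here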
+sys.setrecursionlimit(2000)
+
+
+@pytest.mark.flaky(max_runs=5)
+def test_manypeaks_speed():
+ model = None
+ t0 = time.time()
+ for i in np.arange(500):
+ g = Model(gaussian, prefix=f'g{i}')
+ if model is None:
+ model = g
+ else:
+ model += g
+ t1 = time.time()
+ pars = model.make_params()
+ t2 = time.time()
+ _cpars = deepcopy(pars) # noqa: F841
+ t3 = time.time()
+
+ # these are very conservative tests that
+ # should be satisfied on nearly any machine
+ assert (t3-t2) < 0.5
+ assert (t2-t1) < 0.5
+ assert (t1-t0) < 5.0
diff --git a/tests/test_max_nfev.py b/tests/test_max_nfev.py
new file mode 100644
index 0000000..2e6c820
--- /dev/null
+++ b/tests/test_max_nfev.py
@@ -0,0 +1,98 @@
+"""Tests for maximum number of function evaluations (max_nfev)."""
+
+import numpy as np
+import pytest
+
+from lmfit.lineshapes import gaussian
+from lmfit.minimizer import Minimizer
+from lmfit.models import GaussianModel, LinearModel
+
+nvarys = 5
+methods = ['leastsq', 'least_squares', 'nelder', 'brute', 'ampgo',
+ 'basinhopping', 'differential_evolution', 'shgo', 'dual_annealing']
+
+
+@pytest.fixture
+def modelGaussian():
+ """Return data, parameters and Model class for Gaussian + Linear model."""
+ # generate data with random noise added
+ np.random.seed(7)
+ x = np.linspace(0, 20, 401)
+ y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+ y -= 0.20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))
+
+ mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+ # make parameters and set bounds
+ pars = mod.make_params(peak_amplitude=21.0, peak_center=7.0,
+ peak_sigma=2.0, bkg_intercept=2, bkg_slope=0.0)
+
+ pars['bkg_intercept'].set(min=0, max=10, brute_step=5.0)
+ pars['bkg_slope'].set(min=-5, max=5, brute_step=5.0)
+ pars['peak_amplitude'].set(min=20, max=25, brute_step=2.5)
+ pars['peak_center'].set(min=5, max=10, brute_step=2.5)
+ pars['peak_sigma'].set(min=0.5, max=2, brute_step=0.5)
+
+ return x, y, mod, pars
+
+
+@pytest.fixture
+def minimizerGaussian(modelGaussian):
+ """Return a Mininizer class for the Gaussian + Linear model."""
+ x, y, _, pars = modelGaussian
+
+ def residual(params, x, y):
+ pars = params.valuesdict()
+ model = (gaussian(x, pars['peak_amplitude'], pars['peak_center'],
+ pars['peak_sigma']) +
+ pars['bkg_intercept'] + x*pars['bkg_slope'])
+ return y - model
+
+ mini = Minimizer(residual, pars, fcn_args=(x, y))
+
+ return mini
+
+
+@pytest.mark.parametrize("method", methods)
+def test_max_nfev_Minimizer(minimizerGaussian, method):
+ """Test the max_nfev argument for all solvers using Minimizer interface."""
+ result = minimizerGaussian.minimize(method=method, max_nfev=10)
+ assert minimizerGaussian.max_nfev == 10
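+ # some solvers evaluate the objective a few extra times while aborting, so
+ # allow a small margin above max_nfev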
+ assert result.nfev < 15
+ assert result.aborted
+ assert not result.errorbars
+ assert not result.success
+
+
+@pytest.mark.parametrize("method", methods)
+def test_max_nfev_Model(modelGaussian, minimizerGaussian, method):
+ """Test the max_nfev argument for all solvers using Model interfce."""
+ x, y, mod, pars = modelGaussian
+ out = mod.fit(y, pars, x=x, method=method, max_nfev=10)
+
+ assert out.max_nfev == 10
+ assert out.nfev < 15
+ assert out.aborted
+ assert not out.errorbars
+ assert not out.success
+
+
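+# the default max_nfev is 2000*(nvarys+1) for leastsq, least_squares, nelder,
+# and differential_evolution, and 200000*(nvarys+1) for the remaining global
+# solvers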
+@pytest.mark.parametrize("method, default_max_nfev",
+ [('leastsq', 2000*(nvarys+1)),
+ ('least_squares', 2000*(nvarys+1)),
+ ('nelder', 2000*(nvarys+1)),
+ ('differential_evolution', 2000*(nvarys+1)),
+ ('ampgo', 200000*(nvarys+1)),
+ ('brute', 200000*(nvarys+1)),
+ ('basinhopping', 200000*(nvarys+1)),
+ ('shgo', 200000*(nvarys+1)),
+ ('dual_annealing', 200000*(nvarys+1))])
+def test_default_max_nfev(modelGaussian, minimizerGaussian, method,
+ default_max_nfev):
+ """Test the default values when setting max_nfev=None."""
+ x, y, mod, pars = modelGaussian
+ result = mod.fit(y, pars, x=x, method=method, max_nfev=None)
+ assert result.max_nfev == default_max_nfev
+
+ _ = minimizerGaussian.minimize(method=method, max_nfev=None)
+ assert minimizerGaussian.max_nfev == default_max_nfev
diff --git a/tests/test_minimizer.py b/tests/test_minimizer.py
new file mode 100644
index 0000000..cb5e5b2
--- /dev/null
+++ b/tests/test_minimizer.py
@@ -0,0 +1,20 @@
+from lmfit import Minimizer, Parameters
+
+
+def test_scalar_minimize_neg_value():
+ x0 = 3.14
+ fmin = -1.1
+ xtol = 0.001
+ ftol = 2.0 * xtol
+
+ def objective(pars):
+ return (pars['x'] - x0) ** 2.0 + fmin
+
+ params = Parameters()
+ params.add('x', value=2*x0)
+
+ minr = Minimizer(objective, params)
+ result = minr.scalar_minimize(method='Nelder-Mead',
+ options={'xatol': xtol, 'fatol': ftol})
+ assert abs(result.params['x'].value - x0) < xtol
+ assert abs(result.fun - fmin) < ftol
diff --git a/tests/test_model.py b/tests/test_model.py
new file mode 100644
index 0000000..2978360
--- /dev/null
+++ b/tests/test_model.py
@@ -0,0 +1,1435 @@
+"""Tests for the Model, CompositeModel, and ModelResult classes."""
+
+import functools
+import unittest
+import warnings
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from scipy import __version__ as scipy_version
+
+import lmfit
+from lmfit import Model, models
+from lmfit.lineshapes import gaussian, lorentzian
+from lmfit.model import get_reducer, propagate_err
+from lmfit.models import PseudoVoigtModel
+
+
+@pytest.fixture()
+def gmodel():
+ """Return a Gaussian model."""
+ return Model(gaussian)
+
+
+def test_get_reducer_invalid_option():
+ """Tests for ValueError when using an unsupported option."""
+ option = 'unknown'
+ msg = r'Invalid option'
+ with pytest.raises(ValueError, match=msg):
+ get_reducer(option)
+
+
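+# expected real-valued views of [1.0, 1.0+10j, 2.0, 2.0+20j] for each option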
+test_data_get_reducer = [('real', [1.0, 1.0, 2.0, 2.0]),
+ ('imag', [0.0, 10.0, 0.0, 20.0]),
+ ('abs', [1.0, 10.04987562, 2.0, 20.09975124]),
+ ('angle', [0.0, 1.47112767, 0.0, 1.471127670])]
+
+
+@pytest.mark.parametrize('option, expected_array', test_data_get_reducer)
+def test_get_reducer(option, expected_array):
+ """Tests for ValueError when using an unsupported option."""
+ complex_array = np.array([1.0, 1.0+10j, 2.0, 2.0+20j], dtype='complex')
+ func = get_reducer(option)
+ real_array = func(complex_array)
+
+ assert np.all(np.isreal(real_array))
+ assert_allclose(real_array, expected_array)
+
+ # nothing should happen to an array that only contains real data
+ assert_allclose(func(real_array), real_array)
+
+
+def test_propagate_err_invalid_option():
+ """Tests for ValueError when using an unsupported option."""
+ z = np.array([0, 1, 2, 3, 4, 5])
+ dz = np.random.normal(size=z.size, scale=0.1)
+ option = 'unknown'
+ msg = r'Invalid option'
+ with pytest.raises(ValueError, match=msg):
+ propagate_err(z, dz, option)
+
+
+def test_propagate_err_unequal_shape_z_dz():
+ """Tests for ValueError when using unequal arrays for z and dz."""
+ z = np.array([0, 1, 2, 3, 4, 5])
+ dz = np.random.normal(size=z.size-1, scale=0.1)
+ msg = r'shape of z:'
+ with pytest.raises(ValueError, match=msg):
+ propagate_err(z, dz, option='abs')
+
+
+@pytest.mark.parametrize('option', ['real', 'imag', 'abs', 'angle'])
+def test_propagate_err(option):
+ """Tests for ValueError when using an unsupported option."""
+ np.random.seed(2020)
+ z = np.array([1.0, 1.0+10j, 2.0, 2.0+20j], dtype='complex')
+ dz = np.random.normal(z.size, scale=0.1)*z
+
+ # if `z` is real, assume that `dz` is also real and return it as-is
+ err = propagate_err(np.real(z), np.real(dz), option)
+ assert_allclose(err, np.real(dz))
+
+ # if `z` is complex, but `dz` is real apply the err to both real/imag
+ err_complex_real = propagate_err(z, np.real(dz), option)
+ assert np.all(np.isreal(err_complex_real))
+ dz_used = np.real(dz)+1j*np.real(dz)
+ if option == 'real':
+ assert_allclose(err_complex_real, np.real(dz_used))
+ elif option == 'imag':
+ assert_allclose(err_complex_real, np.imag(dz_used))
+ elif option == 'abs':
+ assert_allclose(err_complex_real,
+ [3.823115, 3.823115, 7.646231, 7.646231],
+ rtol=1.0e-5)
+ elif option == 'angle':
+ assert_allclose(err_complex_real,
+ [3.823115, 0.380414, 3.823115, 0.380414],
+ rtol=1.0e-5)
+
+ # both `z` and `dz` are complex
+ err_complex_complex = propagate_err(z, dz, option)
+ assert np.all(np.isreal(err_complex_complex))
+ if option == 'real':
+ assert_allclose(err_complex_complex, np.real(dz))
+ elif option == 'imag':
+ assert_allclose(err_complex_complex, np.imag(dz))
+ elif option == 'abs':
+ assert_allclose(err_complex_complex,
+ [3.823115, 38.043322, 7.646231, 76.086645],
+ rtol=1.0e-5)
+ elif option == 'angle':
+ assert_allclose(err_complex_complex, [0., 0.535317, 0., 0.535317],
+ rtol=1.0e-5)
+
+
+def test_initialize_Model_class_default_arguments(gmodel):
+ """Test for Model class initialized with default arguments."""
+ assert gmodel.prefix == ''
+ assert gmodel._param_root_names == ['amplitude', 'center', 'sigma']
+ assert gmodel.param_names == ['amplitude', 'center', 'sigma']
+ assert gmodel.independent_vars == ['x']
+ assert gmodel.nan_policy == 'raise'
+ assert gmodel.name == 'Model(gaussian)'
+ assert gmodel.opts == {}
+ assert gmodel.def_vals == {'amplitude': 1.0, 'center': 0.0, 'sigma': 1.0}
+
+
+def test_initialize_Model_class_independent_vars():
+ """Test for Model class initialized with independent_vars."""
+ model = Model(gaussian, independent_vars=['amplitude'])
+ assert model._param_root_names == ['x', 'center', 'sigma']
+ assert model.param_names == ['x', 'center', 'sigma']
+ assert model.independent_vars == ['amplitude']
+
+
+def test_initialize_Model_class_param_names():
+ """Test for Model class initialized with param_names."""
+ model = Model(gaussian, param_names=['amplitude'])
+
+ assert model._param_root_names == ['amplitude']
+ assert model.param_names == ['amplitude']
+
+
+@pytest.mark.parametrize("policy", ['raise', 'omit', 'propagate'])
+def test_initialize_Model_class_nan_policy(policy):
+ """Test for Model class initialized with nan_policy."""
+ model = Model(gaussian, nan_policy=policy)
+
+ assert model.nan_policy == policy
+
+
+def test_initialize_Model_class_prefix():
+ """Test for Model class initialized with prefix."""
+ model = Model(gaussian, prefix='test_')
+
+ assert model.prefix == 'test_'
+ assert model._param_root_names == ['amplitude', 'center', 'sigma']
+ assert model.param_names == ['test_amplitude', 'test_center', 'test_sigma']
+ assert model.name == "Model(gaussian, prefix='test_')"
+
+ model = Model(gaussian, prefix=None)
+
+ assert model.prefix == ''
+
+
+def test_initialize_Model_name():
+ """Test for Model class initialized with name."""
+ model = Model(gaussian, name='test_function')
+
+ assert model.name == 'Model(test_function)'
+
+
+def test_initialize_Model_kws():
+ """Test for Model class initialized with **kws."""
+ kws = {'amplitude': 10.0}
+ model = Model(gaussian,
+ independent_vars=['x', 'amplitude'], **kws)
+
+ assert model._param_root_names == ['center', 'sigma']
+ assert model.param_names == ['center', 'sigma']
+ assert model.independent_vars == ['x', 'amplitude']
+ assert model.opts == kws
+
+
+test_reprstring_data = [(False, 'Model(gaussian)'),
+ (True, "Model(gaussian, amplitude='10.0')")]
+
+
+@pytest.mark.parametrize("option, expected", test_reprstring_data)
+def test_Model_reprstring(option, expected):
+ """Test for Model class function _reprstring."""
+ kws = {'amplitude': 10.0}
+ model = Model(gaussian,
+ independent_vars=['x', 'amplitude'], **kws)
+
+ assert model._reprstring(option) == expected
+
+
+def test_Model_get_state(gmodel):
+ """Test for Model class function _get_state."""
+ out = gmodel._get_state()
+
+ assert isinstance(out, tuple)
+ assert out[1] == out[2] is None
+ assert (out[0][1] is not None) == lmfit.jsonutils.HAS_DILL
+
+ assert out[0][0] == 'gaussian'
+ assert out[0][2:] == ('gaussian', '', ['x'],
+ ['amplitude', 'center', 'sigma'], {}, 'raise', {})
+
+
+def test_Model_set_state(gmodel):
+ """Test for Model class function _set_state.
+
+ This function is just calling `_buildmodel`, which will be tested
+ below together with the use of `funcdefs`.
+
+ """
+ out = gmodel._get_state()
+
+ new_model = Model(lorentzian)
+ new_model = new_model._set_state(out)
+
+ assert new_model.prefix == gmodel.prefix
+ assert new_model._param_root_names == gmodel._param_root_names
+ assert new_model.param_names == gmodel.param_names
+ assert new_model.independent_vars == gmodel.independent_vars
+ assert new_model.nan_policy == gmodel.nan_policy
+ assert new_model.name == gmodel.name
+ assert new_model.opts == gmodel.opts
+
+
+def test_Model_dumps_loads(gmodel):
+ """Test for Model class functions dumps and loads.
+
+ These functions are used when saving/loading the Model class and will be
+ tested more thoroughly in test_model_saveload.py.
+
+ """
+ model_json = gmodel.dumps()
+ _ = gmodel.loads(model_json)
+
+
+def test_Model_getter_setter_name(gmodel):
+ """Test for Model class getter/setter functions for name."""
+ assert gmodel.name == 'Model(gaussian)'
+
+ gmodel.name = 'test_gaussian'
+ assert gmodel.name == 'Model(test_gaussian)'
+
+
+def test_Model_getter_setter_prefix(gmodel):
+ """Test for Model class getter/setter functions for prefix."""
+ assert gmodel.prefix == ''
+ assert gmodel.param_names == ['amplitude', 'center', 'sigma']
+
+ gmodel.prefix = 'g1_'
+ assert gmodel.prefix == 'g1_'
+ assert gmodel.param_names == ['g1_amplitude', 'g1_center', 'g1_sigma']
+
+ gmodel.prefix = ''
+ assert gmodel.prefix == ''
+ assert gmodel.param_names == ['amplitude', 'center', 'sigma']
+
+
+def test_Model_getter_param_names(gmodel):
+ """Test for Model class getter function for param_names."""
+ assert gmodel.param_names == ['amplitude', 'center', 'sigma']
+
+
+def test_Model__repr__(gmodel):
+ """Test for Model class __repr__ method."""
+ assert gmodel.__repr__() == '<lmfit.Model: Model(gaussian)>'
+
+
+def test_Model_copy(gmodel):
+ """Test for Model class copy method."""
+ msg = 'Model.copy does not work. Make a new Model'
+ with pytest.raises(NotImplementedError, match=msg):
+ gmodel.copy()
+
+
+def test__parse_params_func_None():
+ """Test for _parse_params function with func=None."""
+ mod = Model(None)
+
+ assert mod._prefix == ''
+ assert mod.func is None
+ assert mod._func_allargs == []
+ assert mod._func_haskeywords is False
+ assert mod.independent_vars == []
+
+
+def test__parse_params_asteval_functions():
+ """Test for _parse_params function with asteval functions."""
+ # TODO: cannot find a use-case for this....
+ pass
+
+
+def test__parse_params_inspect_signature():
+ """Test for _parse_params function using inspect.signature."""
+ # 1. function with a positional argument
+ def func_var_positional(a, *b):
+ pass
+
+ with pytest.raises(ValueError, match=r"varargs '\*b' is not supported"):
+ Model(func_var_positional)
+
+ # 2. function with a keyword argument
+ def func_keyword(a, b, **c):
+ pass
+
+ mod = Model(func_keyword)
+ assert mod._func_allargs == ['a', 'b']
+ assert mod._func_haskeywords is True
+ assert mod.independent_vars == ['a']
+ assert mod.def_vals == {}
+
+ # 3. function with keyword argument only
+ def func_keyword_only(**b):
+ pass
+
+ mod = Model(func_keyword_only)
+ assert mod._func_allargs == []
+ assert mod._func_haskeywords is True
+ assert mod.independent_vars == []
+ assert mod._param_root_names is None
+
+ # 4. function with default value
+ def func_default_value(a, b, c=10):
+ pass
+
+ mod = Model(func_default_value)
+ assert mod._func_allargs == ['a', 'b', 'c']
+ assert mod._func_haskeywords is False
+ assert mod.independent_vars == ['a']
+
+ assert isinstance(mod.def_vals, dict)
+ assert_allclose(mod.def_vals['c'], 10)
+
+
+def test__parse_params_forbidden_variable_names():
+ """Tests for _parse_params function using invalid variable names."""
+
+ def func_invalid_var(data, a):
+ pass
+
+ def func_invalid_par(a, weights):
+ pass
+
+ msg = r"Invalid independent variable name \('data'\) for function func_invalid_var"
+ with pytest.raises(ValueError, match=msg):
+ Model(func_invalid_var)
+
+ msg = r"Invalid parameter name \('weights'\) for function func_invalid_par"
+ with pytest.raises(ValueError, match=msg):
+ Model(func_invalid_par)
+
+
+input_dtypes = [(np.int32, np.int32), (np.float32, np.float32),
+ (np.complex64, np.complex64), ('list', np.float64),
+ ('tuple', np.float64), ('pandas-real', np.float64),
+ ('pandas-complex', np.complex128)]
+
+
+@pytest.mark.parametrize('input_dtype, expected_dtype', input_dtypes)
+def test_coercion_of_input_data(peakdata, input_dtype, expected_dtype):
+ """Test for coercion of 'data' and 'independent_vars'.
+
+ - 'data' should become 'float64' or 'complex128'
+ - dtype for 'independent_vars' is only changed when the input is a list,
+ tuple, numpy.ndarray, or pandas.Series
+
+ """
+ x, y = peakdata
+ model = lmfit.Model(gaussian)
+ pars = model.make_params()
+
+ if (not lmfit.minimizer.HAS_PANDAS and input_dtype in ['pandas-real',
+ 'pandas-complex']):
+ return
+
+ elif input_dtype == 'pandas-real':
+ result = model.fit(lmfit.model.Series(y, dtype=np.float32), pars,
+ x=lmfit.model.Series(x, dtype=np.float32))
+ elif input_dtype == 'pandas-complex':
+ result = model.fit(lmfit.model.Series(y, dtype=np.complex64), pars,
+ x=lmfit.model.Series(x, dtype=np.complex64))
+ elif input_dtype == 'list':
+ result = model.fit(y.tolist(), pars, x=x.tolist())
+ elif input_dtype == 'tuple':
+ result = model.fit(tuple(y), pars, x=tuple(x))
+ else:
+ result = model.fit(np.asarray(y, dtype=input_dtype), pars,
+ x=np.asarray(x, dtype=input_dtype))
+
+ assert result.__dict__['userkws']['x'].dtype == expected_dtype
+ assert result.__dict__['userargs'][0].dtype == expected_dtype
+
+
+def test_figure_default_title(peakdata):
+ """Test default figure title."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit()
+ assert ax.axes.get_title() == 'Model(pvoigt)'
+
+ ax = result.plot_residuals()
+ assert ax.axes.get_title() == 'Model(pvoigt)'
+
+ fig = result.plot()
+ assert fig.axes[0].get_title() == 'Model(pvoigt)' # default model.name
+ assert fig.axes[1].get_title() == '' # no title for fit subplot
+
+
+def test_figure_title_using_title_keyword_argument(peakdata):
+ """Test setting figure title using title keyword argument."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit(title='test')
+ assert ax.axes.get_title() == 'test'
+
+ ax = result.plot_residuals(title='test')
+ assert ax.axes.get_title() == 'test'
+
+ fig = result.plot(title='test')
+ assert fig.axes[0].get_title() == 'test'
+ assert fig.axes[1].get_title() == '' # no title for fit subplot
+
+
+def test_figure_title_using_title_to_ax_kws(peakdata):
+ """Test setting figure title by supplying ax_{fit,res}_kws."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit(ax_kws={'title': 'ax_kws'})
+ assert ax.axes.get_title() == 'ax_kws'
+
+ ax = result.plot_residuals(ax_kws={'title': 'ax_kws'})
+ assert ax.axes.get_title() == 'ax_kws'
+
+ fig = result.plot(ax_res_kws={'title': 'ax_res_kws'})
+ assert fig.axes[0].get_title() == 'ax_res_kws'
+ assert fig.axes[1].get_title() == ''
+
+ fig = result.plot(ax_fit_kws={'title': 'ax_fit_kws'})
+ assert fig.axes[0].get_title() == 'Model(pvoigt)' # default model.name
+ assert fig.axes[1].get_title() == '' # no title for fit subplot
+
+
+def test_priority_setting_figure_title(peakdata):
+ """Test for setting figure title: title keyword argument has priority."""
+ pytest.importorskip('matplotlib')
+
+ x, y = peakdata
+ pvmodel = PseudoVoigtModel()
+ params = pvmodel.guess(y, x=x)
+ result = pvmodel.fit(y, params, x=x)
+
+ ax = result.plot_fit(ax_kws={'title': 'ax_kws'}, title='test')
+ assert ax.axes.get_title() == 'test'
+
+ ax = result.plot_residuals(ax_kws={'title': 'ax_kws'}, title='test')
+ assert ax.axes.get_title() == 'test'
+
+ fig = result.plot(ax_res_kws={'title': 'ax_res_kws'}, title='test')
+ assert fig.axes[0].get_title() == 'test'
+ assert fig.axes[1].get_title() == ''
+
+ fig = result.plot(ax_fit_kws={'title': 'ax_fit_kws'}, title='test')
+ assert fig.axes[0].get_title() == 'test'
+ assert fig.axes[1].get_title() == ''
+
+
+def test_eval_with_kwargs():
+ # Check eval() with both params and kwargs, even when there are
+ # constraints
+ x = np.linspace(0, 30, 301)
+ np.random.seed(13)
+ y1 = (gaussian(x, amplitude=10, center=12.0, sigma=2.5) +
+ gaussian(x, amplitude=20, center=19.0, sigma=2.5))
+
+ y2 = (gaussian(x, amplitude=10, center=12.0, sigma=1.5) +
+ gaussian(x, amplitude=20, center=19.0, sigma=2.5))
+
+ model = Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')
+ params = model.make_params(g1_amplitude=10, g1_center=12.0, g1_sigma=1,
+ g2_amplitude=20, g2_center=19.0,
+ g2_sigma={'expr': 'g1_sigma'})
+
+ r1 = model.eval(params, g1_sigma=2.5, x=x)
+ assert_allclose(r1, y1, atol=1.e-3)
+
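+ # keyword overrides passed to eval() apply only to that call; the
+ # Parameters object itself is left unchanged: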
+ assert params['g2_sigma'].value == 1
+ assert params['g1_sigma'].value == 1
+
+ params['g1_sigma'].value = 1.5
+ params['g2_sigma'].expr = None
+ params['g2_sigma'].value = 2.5
+
+ r2 = model.eval(params, x=x)
+ assert_allclose(r2, y2, atol=1.e-3)
+
+
+def test_guess_requires_x():
+ """Test to make sure that ``guess()`` method requires the argument ``x``.
+
+ The ``guess`` method needs ``x`` values (i.e., the independent variable)
+ to estimate initial parameters, but this was not a required argument.
+ See GH #747.
+
+ """
+ mod = lmfit.model.Model(gaussian)
+
+ msg = r"guess\(\) missing 2 required positional arguments: 'data' and 'x'"
+ with pytest.raises(TypeError, match=msg):
+ mod.guess()
+
+
+# Below is the content of the original test_model.py file. These tests still
+# need to be checked and possibly updated to the pytest style. They work fine,
+# though, so leave them in for now.
+
+def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03, err_msg='',
+ verbose=True):
+ for param_name, value in desired.items():
+ assert_allclose(actual[param_name], value, rtol, atol, err_msg,
+ verbose)
+
+
+def firstarg_ndarray(func):
+ """a simple wrapper used for testing that wrapped
+ functions can be model functions"""
+ @functools.wraps(func)
+ def wrapper(x, *args, **kws):
+ x = np.asarray(x)
+ return func(x, *args, **kws)
+ return wrapper
+
+
+@firstarg_ndarray
+def linear_func(x, a, b):
+ "test wrapped model function"
+ return a*x+b
+
+
+class CommonTests:
+ # to be subclassed for testing predefined models
+
+ def setUp(self):
+ np.random.seed(1)
+ self.noise = 0.0001*np.random.randn(self.x.size)
+ # Some Models need args (e.g., polynomial order), and others don't.
+ try:
+ args = self.args
+ except AttributeError:
+ self.model = self.model_constructor()
+ self.model_omit = self.model_constructor(nan_policy='omit')
+ self.model_raise = self.model_constructor(nan_policy='raise')
+ self.model_explicit_var = self.model_constructor(['x'])
+ func = self.model.func
+ else:
+ self.model = self.model_constructor(*args)
+ self.model_omit = self.model_constructor(*args, nan_policy='omit')
+ self.model_raise = self.model_constructor(*args, nan_policy='raise')
+ self.model_explicit_var = self.model_constructor(
+ *args, independent_vars=['x'])
+ func = self.model.func
+ self.data = func(x=self.x, **self.true_values()) + self.noise
+
+ @property
+ def x(self):
+ return np.linspace(1, 10, num=1000)
+
+ def test_fit(self):
+ model = self.model
+
+ # Pass Parameters object.
+ params = model.make_params(**self.guess())
+ result = model.fit(self.data, params, x=self.x)
+ assert_results_close(result.values, self.true_values())
+
+ # Pass individual Parameter objects as kwargs.
+ kwargs = dict(params.items())
+ result = self.model.fit(self.data, x=self.x, **kwargs)
+ assert_results_close(result.values, self.true_values())
+
+ # Pass guess values (not Parameter objects) as kwargs.
+ kwargs = {name: p.value for name, p in params.items()}
+ result = self.model.fit(self.data, x=self.x, **kwargs)
+ assert_results_close(result.values, self.true_values())
+
+ def test_explicit_independent_vars(self):
+ self.check_skip_independent_vars()
+ model = self.model_explicit_var
+ pars = model.make_params(**self.guess())
+ result = model.fit(self.data, pars, x=self.x)
+ assert_results_close(result.values, self.true_values())
+
+ def test_fit_with_weights(self):
+ model = self.model
+
+ # fit without weights
+ params = model.make_params(**self.guess())
+ out1 = model.fit(self.data, params, x=self.x)
+
+ # fit with weights
+ weights = 1.0/(0.5 + self.x**2)
+ out2 = model.fit(self.data, params, weights=weights, x=self.x)
+
+ max_diff = 0.0
+ for parname, val1 in out1.values.items():
+ val2 = out2.values[parname]
+ if max_diff < abs(val1-val2):
+ max_diff = abs(val1-val2)
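+ # fitting with weights must change the best-fit values (if only slightly)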
+ assert max_diff > 1.e-8
+
+ def test_result_attributes(self):
+ pars = self.model.make_params(**self.guess())
+ result = self.model.fit(self.data, pars, x=self.x)
+
+ # result.init_values
+ assert_results_close(result.values, self.true_values())
+ self.assertEqual(result.init_values, self.guess())
+
+ # result.init_params
+ params = self.model.make_params()
+ for param_name, value in self.guess().items():
+ params[param_name].value = value
+ self.assertEqual(result.init_params, params)
+
+ # result.best_fit
+ assert_allclose(result.best_fit, self.data, atol=self.noise.max())
+
+ # result.init_fit
+ init_fit = self.model.func(x=self.x, **self.guess())
+ assert_allclose(result.init_fit, init_fit)
+
+ # result.model
+ self.assertTrue(result.model is self.model)
+
+ def test_result_eval(self):
+ # Check eval() output against init_fit and best_fit.
+ pars = self.model.make_params(**self.guess())
+ result = self.model.fit(self.data, pars, x=self.x)
+ assert_allclose(result.eval(x=self.x, **result.values),
+ result.best_fit)
+ assert_allclose(result.eval(x=self.x, **result.init_values),
+ result.init_fit)
+
+ def test_result_eval_custom_x(self):
+ self.check_skip_independent_vars()
+ pars = self.model.make_params(**self.guess())
+ result = self.model.fit(self.data, pars, x=self.x)
+
+ # Check that the independent variable is respected.
+ short_eval = result.eval(x=np.array([0, 1, 2]), **result.values)
+ if hasattr(short_eval, '__len__'):
+ self.assertEqual(len(short_eval), 3)
+
+ def test_result_report(self):
+ pars = self.model.make_params(**self.guess())
+ result = self.model.fit(self.data, pars, x=self.x)
+ report = result.fit_report()
+ assert "[[Model]]" in report
+ assert "[[Variables]]" in report
+ assert "[[Fit Statistics]]" in report
+ assert " # function evals =" in report
+ assert " Akaike " in report
+ assert " chi-square " in report
+
+ def test_data_alignment(self):
+ pytest.importorskip('pandas')
+
+ from pandas import Series
+
+ # Align data and indep var of different lengths using pandas index.
+ data = Series(self.data.copy()).iloc[10:-10]
+ x = Series(self.x.copy())
+
+ model = self.model
+ params = model.make_params(**self.guess())
+ result = model.fit(data, params, x=x)
+ result = model.fit(data, params, x=x)
+ assert_results_close(result.values, self.true_values())
+
+ # Skip over missing (NaN) values, aligning via pandas index.
+ data.iloc[500:510] = np.nan
+ result = self.model_omit.fit(data, params, x=x)
+ assert_results_close(result.values, self.true_values())
+
+ # Raise if any NaN values are present.
+ raises = lambda: self.model_raise.fit(data, params, x=x)
+ self.assertRaises(ValueError, raises)
+
+ def check_skip_independent_vars(self):
+ # to be overridden for models that do not accept indep vars
+ pass
+
+ def test_aic(self):
+ model = self.model
+
+ # Pass Parameters object.
+ params = model.make_params(**self.guess())
+ result = model.fit(self.data, params, x=self.x)
+ aic = result.aic
+ self.assertTrue(aic < 0) # aic must be negative
+
+ # Pass extra unused Parameter.
+ params.add("unused_param", value=1.0, vary=True)
+ result = model.fit(self.data, params, x=self.x)
+ aic_extra = result.aic
+ self.assertTrue(aic_extra < 0) # aic must be negative
+ self.assertTrue(aic < aic_extra) # extra param should increase the aic
+
+ def test_bic(self):
+ model = self.model
+
+ # Pass Parameters object.
+ params = model.make_params(**self.guess())
+ result = model.fit(self.data, params, x=self.x)
+ bic = result.bic
+ self.assertTrue(bic < 0) # bic must be negative
+
+ # Compare to AIC
+ aic = result.aic
+ self.assertTrue(aic < bic) # aic should be lower than bic
+
+ # Pass extra unused Parameter.
+ params.add("unused_param", value=1.0, vary=True)
+ result = model.fit(self.data, params, x=self.x)
+ bic_extra = result.bic
+ self.assertTrue(bic_extra < 0) # bic must be negative
+ self.assertTrue(bic < bic_extra) # extra param should increase the bic
+
+
+class TestUserDefinedModel(CommonTests, unittest.TestCase):
+ # mainly aimed at checking that the API does what it says it does
+ # and raises the right exceptions or warnings when things are not right
+ def setUp(self):
+ self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
+ self.guess = lambda: dict(amplitude=5, center=2, sigma=4)
+ # return a fresh copy
+ self.model_constructor = (
+ lambda *args, **kwargs: Model(gaussian, *args, **kwargs))
+ super().setUp()
+
+ @property
+ def x(self):
+ return np.linspace(-10, 10, num=1000)
+
+ def test_lists_become_arrays(self):
+ # smoke test
+ self.model.fit([1, 2, 3], x=[1, 2, 3], **self.guess())
+ pytest.raises(ValueError,
+ self.model.fit,
+ [1, 2, None, 3],
+ x=[1, 2, 3, 4],
+ **self.guess())
+
+ def test_missing_param_raises_error(self):
+ # using keyword argument parameters
+ guess_missing_sigma = self.guess()
+ del guess_missing_sigma['sigma']
+ # f = lambda: self.model.fit(self.data, x=self.x, **guess_missing_sigma)
+ # self.assertRaises(ValueError, f)
+
+ # using Parameters
+ params = self.model.make_params()
+ for param_name, value in guess_missing_sigma.items():
+ params[param_name].value = value
+ self.model.fit(self.data, params, x=self.x)
+
+ def test_extra_param_issues_warning(self):
+ # The function accepts extra params, Model will warn but not raise.
+ def flexible_func(x, amplitude, center, sigma, **kwargs):
+ return gaussian(x, amplitude, center, sigma)
+
+ flexible_model = Model(flexible_func)
+ pars = flexible_model.make_params(**self.guess())
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ flexible_model.fit(self.data, pars, x=self.x, extra=5)
+ self.assertTrue(len(w) == 1)
+ self.assertTrue(issubclass(w[-1].category, UserWarning))
+
+ def test_missing_independent_variable_raises_error(self):
+ pars = self.model.make_params(**self.guess())
+ f = lambda: self.model.fit(self.data, pars)
+ self.assertRaises(KeyError, f)
+
+ def test_bounding(self):
+ true_values = self.true_values()
+ true_values['center'] = 1.3 # as close as it's allowed to get
+ pars = self.model.make_params(**self.guess())
+ pars['center'].set(value=2, min=1.3)
+ result = self.model.fit(self.data, pars, x=self.x)
+ assert_results_close(result.values, true_values, rtol=0.05)
+
+ def test_vary_false(self):
+ true_values = self.true_values()
+ true_values['center'] = 1.3
+ pars = self.model.make_params(**self.guess())
+ pars['center'].set(value=1.3, vary=False)
+ result = self.model.fit(self.data, pars, x=self.x)
+ assert_results_close(result.values, true_values, rtol=0.05)
+
+ # testing model addition...
+
+ def test_user_defined_gaussian_plus_constant(self):
+ data = self.data + 5.0
+ model = self.model + models.ConstantModel()
+ guess = self.guess()
+ pars = model.make_params(c=10.1, **guess)
+ true_values = self.true_values()
+ true_values['c'] = 5.0
+
+ result = model.fit(data, pars, x=self.x)
+ assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+ def test_model_with_prefix(self):
+ # model with prefix of 'a' and 'b'
+ mod = models.GaussianModel(prefix='a')
+ vals = {'center': 2.45, 'sigma': 0.8, 'amplitude': 3.15}
+ data = gaussian(x=self.x, **vals) + self.noise/3.0
+ pars = mod.guess(data, x=self.x)
+ self.assertTrue('aamplitude' in pars)
+ self.assertTrue('asigma' in pars)
+ out = mod.fit(data, pars, x=self.x)
+ self.assertTrue(out.params['aamplitude'].value > 2.0)
+ self.assertTrue(out.params['acenter'].value > 2.0)
+ self.assertTrue(out.params['acenter'].value < 3.0)
+
+ mod = models.GaussianModel(prefix='b')
+ data = gaussian(x=self.x, **vals) + self.noise/3.0
+ pars = mod.guess(data, x=self.x)
+ self.assertTrue('bamplitude' in pars)
+ self.assertTrue('bsigma' in pars)
+
+ def test_change_prefix(self):
+ "should pass!"
+ mod = models.GaussianModel(prefix='b')
+ set_prefix_failed = None
+ try:
+ mod.prefix = 'c'
+ set_prefix_failed = False
+ except AttributeError:
+ set_prefix_failed = True
+ except Exception:
+ set_prefix_failed = None
+ self.assertFalse(set_prefix_failed)
+
+ new_expr = mod.param_hints['fwhm']['expr']
+ self.assertTrue('csigma' in new_expr)
+ self.assertFalse('bsigma' in new_expr)
+
+ def test_model_name(self):
+ # test setting the name for built-in models
+ mod = models.GaussianModel(name='user_name')
+ self.assertEqual(mod.name, "Model(user_name)")
+
+ def test_sum_of_two_gaussians(self):
+ # two user-defined gaussians
+ model1 = self.model
+ f2 = lambda x, amp, cen, sig: gaussian(x, amplitude=amp, center=cen,
+ sigma=sig)
+ model2 = Model(f2)
+ values1 = self.true_values()
+ values2 = {'cen': 2.45, 'sig': 0.8, 'amp': 3.15}
+
+ data = (gaussian(x=self.x, **values1) + f2(x=self.x, **values2) +
+ self.noise/3.0)
+ model = self.model + model2
+ pars = model.make_params()
+ pars['sigma'].set(value=2, min=0)
+ pars['center'].set(value=1, min=0.2, max=1.8)
+ pars['amplitude'].set(value=3, min=0)
+ pars['sig'].set(value=1, min=0)
+ pars['cen'].set(value=2.4, min=2, max=3.5)
+ pars['amp'].set(value=1, min=0)
+
+ true_values = dict(list(values1.items()) + list(values2.items()))
+ result = model.fit(data, pars, x=self.x)
+ assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+ # user-defined models with common parameter names
+ # cannot be added, and should raise
+ f = lambda: model1 + model1
+ self.assertRaises(NameError, f)
+
+ # two predefined_gaussians, using suffix to differentiate
+ model1 = models.GaussianModel(prefix='g1_')
+ model2 = models.GaussianModel(prefix='g2_')
+ model = model1 + model2
+ true_values = {'g1_center': values1['center'],
+ 'g1_amplitude': values1['amplitude'],
+ 'g1_sigma': values1['sigma'],
+ 'g2_center': values2['cen'],
+ 'g2_amplitude': values2['amp'],
+ 'g2_sigma': values2['sig']}
+ pars = model.make_params()
+ pars['g1_sigma'].set(2)
+ pars['g1_center'].set(1)
+ pars['g1_amplitude'].set(3)
+ pars['g2_sigma'].set(1)
+ pars['g2_center'].set(2.4)
+ pars['g2_amplitude'].set(1)
+
+ result = model.fit(data, pars, x=self.x)
+ assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+ # without suffix, the names collide and Model should raise
+ model1 = models.GaussianModel()
+ model2 = models.GaussianModel()
+ f = lambda: model1 + model2
+ self.assertRaises(NameError, f)
+
+ def test_sum_composite_models(self):
+ # test components of composite model created adding composite model
+ model1 = models.GaussianModel(prefix='g1_')
+ model2 = models.GaussianModel(prefix='g2_')
+ model3 = models.GaussianModel(prefix='g3_')
+ model4 = models.GaussianModel(prefix='g4_')
+
+ model_total1 = (model1 + model2) + model3
+ for mod in [model1, model2, model3]:
+ self.assertTrue(mod in model_total1.components)
+
+ model_total2 = model1 + (model2 + model3)
+ for mod in [model1, model2, model3]:
+ self.assertTrue(mod in model_total2.components)
+
+ model_total3 = (model1 + model2) + (model3 + model4)
+ for mod in [model1, model2, model3, model4]:
+ self.assertTrue(mod in model_total3.components)
+
+ def test_eval_components(self):
+ model1 = models.GaussianModel(prefix='g1_')
+ model2 = models.GaussianModel(prefix='g2_')
+ model3 = models.ConstantModel(prefix='bkg_')
+ mod = model1 + model2 + model3
+ pars = mod.make_params()
+
+ values1 = dict(amplitude=7.10, center=1.1, sigma=2.40)
+ values2 = dict(amplitude=12.2, center=2.5, sigma=0.5)
+ data = (1.01 + gaussian(x=self.x, **values1) +
+ gaussian(x=self.x, **values2) + 0.05*self.noise)
+
+ pars['g1_sigma'].set(2)
+ pars['g1_center'].set(1, max=1.5)
+ pars['g1_amplitude'].set(3)
+ pars['g2_sigma'].set(1)
+ pars['g2_center'].set(2.6, min=2.0)
+ pars['g2_amplitude'].set(1)
+ pars['bkg_c'].set(1.88)
+
+ result = mod.fit(data, params=pars, x=self.x)
+
+ self.assertTrue(abs(result.params['g1_amplitude'].value - 7.1) < 1.5)
+ self.assertTrue(abs(result.params['g2_amplitude'].value - 12.2) < 1.5)
+ self.assertTrue(abs(result.params['g1_center'].value - 1.1) < 0.2)
+ self.assertTrue(abs(result.params['g2_center'].value - 2.5) < 0.2)
+ self.assertTrue(abs(result.params['bkg_c'].value - 1.0) < 0.25)
+
+ comps = mod.eval_components(x=self.x)
+ assert 'bkg_' in comps
+
+ def test_composite_has_bestvalues(self):
+ # test that a composite model has non-empty best_values
+ model1 = models.GaussianModel(prefix='g1_')
+ model2 = models.GaussianModel(prefix='g2_')
+
+ mod = model1 + model2
+ pars = mod.make_params()
+
+ values1 = dict(amplitude=7.10, center=1.1, sigma=2.40)
+ values2 = dict(amplitude=12.2, center=2.5, sigma=0.5)
+ data = (gaussian(x=self.x, **values1) + gaussian(x=self.x, **values2)
+ + 0.1*self.noise)
+
+ pars['g1_sigma'].set(value=2)
+ pars['g1_center'].set(value=1, max=1.5)
+ pars['g1_amplitude'].set(value=3)
+ pars['g2_sigma'].set(value=1)
+ pars['g2_center'].set(value=2.6, min=2.0)
+ pars['g2_amplitude'].set(value=1)
+
+ result = mod.fit(data, params=pars, x=self.x)
+
+ self.assertTrue(len(result.best_values) == 6)
+
+ self.assertTrue(abs(result.params['g1_amplitude'].value - 7.1) < 0.5)
+ self.assertTrue(abs(result.params['g2_amplitude'].value - 12.2) < 0.5)
+ self.assertTrue(abs(result.params['g1_center'].value - 1.1) < 0.2)
+ self.assertTrue(abs(result.params['g2_center'].value - 2.5) < 0.2)
+
+ for _, par in pars.items():
+ assert len(repr(par)) > 5
+
+ @pytest.mark.skipif(not lmfit.model._HAS_MATPLOTLIB,
+ reason="requires matplotlib.pyplot")
+ def test_composite_plotting(self):
+ # test that a composite model has non-empty best_values
+ import matplotlib
+ matplotlib.use('Agg')
+
+ model1 = models.GaussianModel(prefix='g1_')
+ model2 = models.GaussianModel(prefix='g2_')
+
+ mod = model1 + model2
+ pars = mod.make_params()
+
+ values1 = dict(amplitude=7.10, center=1.1, sigma=2.40)
+ values2 = dict(amplitude=12.2, center=2.5, sigma=0.5)
+ data = (gaussian(x=self.x, **values1) + gaussian(x=self.x, **values2)
+ + 0.1*self.noise)
+
+ pars['g1_sigma'].set(2)
+ pars['g1_center'].set(1, max=1.5)
+ pars['g1_amplitude'].set(3)
+ pars['g2_sigma'].set(1)
+ pars['g2_center'].set(2.6, min=2.0)
+ pars['g2_amplitude'].set(1)
+
+ result = mod.fit(data, params=pars, x=self.x)
+ fig = result.plot(show_init=True)
+
+ assert isinstance(fig, matplotlib.figure.Figure)
+
+ comps = result.eval_components(x=self.x)
+ assert len(comps) == 2
+ assert 'g1_' in comps
+
+ def test_hints_in_composite_models(self):
+ # test propagation of hints from base models to composite model
+ def func(x, amplitude):
+ pass
+
+ m1 = Model(func, prefix='p1_')
+ m2 = Model(func, prefix='p2_')
+
+ m1.set_param_hint('amplitude', value=1)
+ m2.set_param_hint('amplitude', value=2)
+
+ mx = (m1 + m2)
+ params = mx.make_params()
+ param_values = {name: p.value for name, p in params.items()}
+ self.assertEqual(param_values['p1_amplitude'], 1)
+ self.assertEqual(param_values['p2_amplitude'], 2)
+
+ def test_hints_for_peakmodels(self):
+ # test that height/fwhm do not cause asteval errors.
+
+ x = np.linspace(-10, 10, 101)
+ y = np.sin(x / 3) + x/100.
+
+ m1 = models.LinearModel(prefix='m1_')
+ params = m1.guess(y, x=x)
+
+ m2 = models.GaussianModel(prefix='m2_')
+ params.update(m2.make_params())
+
+ _m = m1 + m2 # noqa: F841
+
+ param_values = {name: p.value for name, p in params.items()}
+        self.assertTrue(param_values['m1_intercept'] < 0.0)
+ self.assertEqual(param_values['m2_amplitude'], 1)
+
+ def test_weird_param_hints(self):
+ # tests Github Issue 312, a very weird way to access param_hints
+ def func(x, amp):
+ return amp*x
+
+ m = Model(func)
+        # use a dict name that does not shadow the imported `models` module
+        mods = {}
+        for i in range(2):
+            m.set_param_hint('amp', value=1)
+            m.set_param_hint('amp', value=25)
+
+            mods[i] = Model(func, prefix=f'mod{i}')
+            mods[i].param_hints['amp'] = m.param_hints['amp']
+
+        self.assertEqual(mods[0].param_hints['amp'],
+                         mods[1].param_hints['amp'])
+
+ def test_param_hint_explicit_value(self):
+ # tests Github Issue 384
+ pmod = PseudoVoigtModel()
+ params = pmod.make_params(sigma=2, fraction=0.77)
+ assert_allclose(params['fraction'].value, 0.77, rtol=0.01)
+
+    def test_symmetric_bounds(self):
+ # tests Github Issue 700
+ np.random.seed(0)
+
+ x = np.linspace(0, 20, 51)
+ y = gaussian(x, amplitude=8.0, center=13, sigma=2.5)
+ y += np.random.normal(size=len(x), scale=0.1)
+
+ mod = Model(gaussian)
+ params = mod.make_params(sigma=2.2, center=10, amplitude=10)
+ # carefully selected to have inexact floating-point representation
+ params['sigma'].min = 2.2 - 0.95
+ params['sigma'].max = 2.2 + 0.95
+
+ result = mod.fit(y, params, x=x)
+ print(result.fit_report())
+ self.assertTrue(result.params['sigma'].value > 2.3)
+ self.assertTrue(result.params['sigma'].value < 2.7)
+ self.assertTrue(result.params['sigma'].stderr is not None)
+ self.assertTrue(result.params['amplitude'].stderr is not None)
+ self.assertTrue(result.params['sigma'].stderr > 0.02)
+ self.assertTrue(result.params['sigma'].stderr < 0.50)
+
+ def test_unprefixed_name_collisions(self):
+ # tests Github Issue 710
+ np.random.seed(0)
+ x = np.linspace(0, 20, 201)
+ y = 6 + x * 0.55 + gaussian(x, 4.5, 8.5, 2.1) + np.random.normal(size=len(x), scale=0.03)
+
+ def myline(x, a, b):
+ return a + b * x
+
+ def mygauss(x, a, b, c):
+ return gaussian(x, a, b, c)
+
+ mod = Model(myline, prefix='line_') + Model(mygauss, prefix='peak_')
+ pars = mod.make_params(line_a=5, line_b=1, peak_a=10, peak_b=10, peak_c=5)
+ pars.add('a', expr='line_a + peak_a')
+
+ result = mod.fit(y, pars, x=x)
+ self.assertTrue(result.params['peak_a'].value > 4)
+ self.assertTrue(result.params['peak_a'].value < 5)
+ self.assertTrue(result.params['peak_b'].value > 8)
+ self.assertTrue(result.params['peak_b'].value < 9)
+ self.assertTrue(result.params['peak_c'].value > 1.5)
+ self.assertTrue(result.params['peak_c'].value < 2.5)
+ self.assertTrue(result.params['line_a'].value > 5.5)
+ self.assertTrue(result.params['line_a'].value < 6.5)
+ self.assertTrue(result.params['line_b'].value > 0.25)
+ self.assertTrue(result.params['line_b'].value < 0.75)
+ self.assertTrue(result.params['a'].value > 10)
+ self.assertTrue(result.params['a'].value < 11)
+
+    def test_composite_model_with_expr_constraints(self):
+ """Smoke test for composite model fitting with expr constraints."""
+ y = [0, 0, 4, 2, 1, 8, 21, 21, 23, 35, 50, 54, 46, 70, 77, 87, 98,
+ 113, 148, 136, 185, 195, 194, 168, 170, 139, 155, 115, 132, 109,
+ 102, 85, 69, 81, 82, 80, 71, 64, 79, 88, 111, 97, 97, 73, 72, 62,
+ 41, 30, 13, 3, 9, 7, 0, 0, 0]
+ x = np.arange(-0.2, 1.2, 0.025)[:-1] + 0.5*0.025
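+        # shift by half the 0.025 bin width so the x values are bin centers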
+
+ def gauss(x, sigma, mu, A):
+ return A*np.exp(-(x-mu)**2/(2*sigma**2))
+
+ # Initial values
+ p1_mu = 0.2
+ p1_sigma = 0.1
+ p2_sigma = 0.1
+
+ peak1 = Model(gauss, prefix='p1_')
+ peak2 = Model(gauss, prefix='p2_')
+ model = peak1 + peak2
+
+ model.set_param_hint('p1_mu', value=p1_mu, min=-1, max=2)
+ model.set_param_hint('p1_sigma', value=p1_sigma, min=0.01, max=0.2)
+ model.set_param_hint('p2_sigma', value=p2_sigma, min=0.01, max=0.2)
+ model.set_param_hint('p1_A', value=100, min=0.01)
+ model.set_param_hint('p2_A', value=50, min=0.01)
+
+        # Constrain the distance between the peaks to be > 0
+ model.set_param_hint('pos_delta', value=0.3, min=0)
+ model.set_param_hint('p2_mu', min=-1, expr='p1_mu + pos_delta')
+
+ # Test fitting
+ result = model.fit(y, x=x)
+ self.assertTrue(result.params['pos_delta'].value > 0)
+
+ def test_model_nan_policy(self):
+ """Tests for nan_policy with NaN values in the input data."""
+ x = np.linspace(0, 10, 201)
+ np.random.seed(0)
+ y = gaussian(x, 10.0, 6.15, 0.8)
+ y += gaussian(x, 8.0, 6.35, 1.1)
+ y += gaussian(x, 0.25, 6.00, 7.5)
+ y += np.random.normal(size=len(x), scale=0.5)
+
+ # with NaN values in the input data
+ y[55] = y[91] = np.nan
+ mod = PseudoVoigtModel()
+ params = mod.make_params(amplitude=20, center=5.5,
+ sigma=1, fraction=0.25)
+ params['fraction'].vary = False
+
+ # with raise, should get a ValueError
+        do_fit = lambda: mod.fit(y, params, x=x, nan_policy='raise')
+        msg = ('NaN values detected in your input data or the output of your '
+               'objective/model function - fitting algorithms cannot handle this!')
+        self.assertRaisesRegex(ValueError, msg, do_fit)
+
+ # with propagate, should get no error, but bad results
+ result = mod.fit(y, params, x=x, nan_policy='propagate')
+
+ # for SciPy v1.10+ this results in an AbortFitException, even with
+ # `max_nfev=100000`:
+ # lmfit.minimizer.AbortFitException: fit aborted: too many function
+ # evaluations xxxxx
+        if int(scipy_version.split('.')[1]) < 10:
+            self.assertTrue(np.isnan(result.chisqr))
+            self.assertTrue(np.isnan(result.aic))
+            self.assertFalse(result.errorbars)
+            self.assertTrue(result.params['amplitude'].stderr is None)
+            self.assertTrue(abs(result.params['amplitude'].value - 20.0) < 0.001)
+
+ # with omit, should get good results
+ result = mod.fit(y, params, x=x, nan_policy='omit')
+ self.assertTrue(result.success)
+ self.assertTrue(result.chisqr > 2.0)
+ self.assertTrue(result.aic < -100)
+ self.assertTrue(result.errorbars)
+ self.assertTrue(result.params['amplitude'].stderr > 0.1)
+ self.assertTrue(abs(result.params['amplitude'].value - 20.0) < 5.0)
+ self.assertTrue(abs(result.params['center'].value - 6.0) < 0.5)
+
+ # with 'wrong_argument', should get a ValueError
+ err_msg = r"nan_policy must be 'propagate', 'omit', or 'raise'."
+ with pytest.raises(ValueError, match=err_msg):
+ mod.fit(y, params, x=x, nan_policy='wrong_argument')
+
+ def test_model_nan_policy_NaNs_by_model(self):
+ """Test for nan_policy with NaN values generated by the model function."""
+ def double_exp(x, a1, t1, a2, t2):
+ return a1*np.exp(-x/t1) + a2*np.exp(-(x-0.1) / t2)
+
+ model = Model(double_exp)
+
+ truths = (3.0, 2.0, -5.0, 10.0)
+ x = np.linspace(1, 10, 250)
+ np.random.seed(0)
+ y = double_exp(x, *truths) + 0.1*np.random.randn(x.size)
+
+ p = model.make_params(a1=4, t1=3, a2=4, t2=3)
+        do_fit = lambda: model.fit(data=y, params=p, x=x, method='Nelder',
+                                   nan_policy='raise')
+
+        msg = 'The model function generated NaN values and the fit aborted!'
+        self.assertRaisesRegex(ValueError, msg, do_fit)
+
+ def test_wrapped_model_func(self):
+ x = np.linspace(-1, 1, 51)
+ y = 2.0*x + 3 + 0.0003 * x*x
+ y += np.random.normal(size=len(x), scale=0.025)
+ mod = Model(linear_func)
+ pars = mod.make_params(a=1.5, b=2.5)
+
+ tmp = mod.eval(pars, x=x)
+
+ self.assertTrue(tmp.max() > 3)
+ self.assertTrue(tmp.min() > -20)
+
+ result = mod.fit(y, pars, x=x)
+ self.assertTrue(result.chisqr < 0.05)
+ self.assertTrue(result.aic < -350)
+ self.assertTrue(result.errorbars)
+
+ self.assertTrue(abs(result.params['a'].value - 2.0) < 0.05)
+ self.assertTrue(abs(result.params['b'].value - 3.0) < 0.41)
+
+    def test_different_independent_vars_composite_model(self):
+ """Regression test for different independent variables in CompositeModel.
+
+ See: https://github.com/lmfit/lmfit-py/discussions/787
+
+ """
+ def two_independent_vars(y, z, a):
+ return a * y + z
+
+ BackgroundModel = Model(two_independent_vars,
+ independent_vars=["y", "z"], prefix="yz_")
+ PeakModel = Model(gaussian, independent_vars=["x"], prefix="x_")
+ CompModel = BackgroundModel + PeakModel
+ assert CompModel.independent_vars == ['x', 'y', 'z']
+
+
+class TestLinear(CommonTests, unittest.TestCase):
+
+ def setUp(self):
+ self.true_values = lambda: dict(slope=5, intercept=2)
+ self.guess = lambda: dict(slope=10, intercept=6)
+ self.model_constructor = models.LinearModel
+ super().setUp()
+
+
+class TestParabolic(CommonTests, unittest.TestCase):
+
+ def setUp(self):
+ self.true_values = lambda: dict(a=5, b=2, c=8)
+ self.guess = lambda: dict(a=1, b=6, c=3)
+ self.model_constructor = models.ParabolicModel
+ super().setUp()
+
+
+class TestPolynomialOrder2(CommonTests, unittest.TestCase):
+ # class Polynomial constructed with order=2
+ def setUp(self):
+ self.true_values = lambda: dict(c2=5, c1=2, c0=8)
+ self.guess = lambda: dict(c1=1, c2=6, c0=3)
+ self.model_constructor = models.PolynomialModel
+ self.args = (2,)
+ super().setUp()
+
+
+class TestPolynomialOrder3(CommonTests, unittest.TestCase):
+ # class Polynomial constructed with order=3
+ def setUp(self):
+ self.true_values = lambda: dict(c3=2, c2=5, c1=2, c0=8)
+ self.guess = lambda: dict(c3=1, c1=1, c2=6, c0=3)
+ self.model_constructor = models.PolynomialModel
+ self.args = (3,)
+ super().setUp()
+
+
+class TestConstant(CommonTests, unittest.TestCase):
+ def setUp(self):
+ self.true_values = lambda: dict(c=5)
+ self.guess = lambda: dict(c=2)
+ self.model_constructor = models.ConstantModel
+ super().setUp()
+
+ def check_skip_independent_vars(self):
+ raise pytest.skip("ConstantModel has not independent_vars.")
+
+
+class TestPowerlaw(CommonTests, unittest.TestCase):
+ def setUp(self):
+ self.true_values = lambda: dict(amplitude=5, exponent=3)
+ self.guess = lambda: dict(amplitude=2, exponent=8)
+ self.model_constructor = models.PowerLawModel
+ super().setUp()
+
+
+class TestExponential(CommonTests, unittest.TestCase):
+ def setUp(self):
+ self.true_values = lambda: dict(amplitude=5, decay=3)
+ self.guess = lambda: dict(amplitude=2, decay=8)
+ self.model_constructor = models.ExponentialModel
+ super().setUp()
+
+
+class TestComplexConstant(CommonTests, unittest.TestCase):
+ def setUp(self):
+ self.true_values = lambda: dict(re=5, im=5)
+ self.guess = lambda: dict(re=2, im=2)
+ self.model_constructor = models.ComplexConstantModel
+ super().setUp()
+
+
+class TestExpression(CommonTests, unittest.TestCase):
+ def setUp(self):
+ self.true_values = lambda: dict(off_c=0.25, amp_c=1.0, x0=2.0)
+ self.guess = lambda: dict(off_c=0.20, amp_c=1.5, x0=2.5)
+ self.expression = "off_c + amp_c * exp(-x/x0)"
+ self.model_constructor = (
+ lambda *args, **kwargs: models.ExpressionModel(self.expression, *args, **kwargs))
+ super().setUp()
+
+ def test_composite_with_expression(self):
+ expression_model = models.ExpressionModel("exp(-x/x0)", name='exp')
+ amp_model = models.ConstantModel(prefix='amp_')
+ off_model = models.ConstantModel(prefix='off_', name="off")
+
+ comp_model = off_model + amp_model * expression_model
+
+ x = self.x
+ true_values = self.true_values()
+ data = comp_model.eval(x=x, **true_values) + self.noise
+ # data = 0.25 + 1 * np.exp(-x / 2.)
+
+ params = comp_model.make_params(**self.guess())
+
+ result = comp_model.fit(data, x=x, params=params)
+ assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+ data_components = comp_model.eval_components(x=x)
+ self.assertIn('exp', data_components)
+
+
+def test_make_params_valuetypes():
+ mod = lmfit.models.SineModel()
+
+ pars = mod.make_params(amplitude=1, frequency=1, shift=-0.2)
+
+ pars = mod.make_params(amplitude={'value': 0.9, 'min': 0},
+ frequency=1.03,
+ shift={'value': -0.2, 'vary': False})
+
+ val_i32 = np.arange(10, dtype=np.int32)
+ val_i64 = np.arange(10, dtype=np.int64)
+ # np.longdouble equals to np.float128 on Linux and macOS, np.float64 on Windows
+ val_ld = np.arange(10, dtype=np.longdouble)/3.0
+ val_c128 = np.arange(10, dtype=np.complex128)/3.0
+
+ pars = mod.make_params(amplitude=val_i64[2],
+ frequency=val_i32[3],
+ shift=-val_ld[4])
+
+ pars = mod.make_params(amplitude=val_c128[2],
+ frequency=val_i32[3],
+ shift=-val_ld[4])
+
+ assert pars is not None
+ with pytest.raises(ValueError):
+ pars = mod.make_params(amplitude='a string', frequency=2, shift=7)
+
+ with pytest.raises(TypeError):
+ pars = mod.make_params(amplitude={'v': 3}, frequency=2, shift=7)
+
+ with pytest.raises(TypeError):
+ pars = mod.make_params(amplitude={}, frequency=2, shift=7)
diff --git a/tests/test_model_saveload.py b/tests/test_model_saveload.py
new file mode 100644
index 0000000..c43cd3d
--- /dev/null
+++ b/tests/test_model_saveload.py
@@ -0,0 +1,318 @@
+"""Tests for saving/loading Models and ModelResults."""
+
+import json
+import os
+import time
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+
+from lmfit import Parameters
+import lmfit.jsonutils
+from lmfit.lineshapes import gaussian, lorentzian
+from lmfit.model import (Model, ModelResult, load_model, load_modelresult,
+ save_model, save_modelresult)
+from lmfit.models import (ExponentialModel, ExpressionModel, GaussianModel,
+ VoigtModel)
+
+y, x = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'NIST_Gauss2.dat')).T
+
+SAVE_MODEL = 'model_1.sav'
+SAVE_MODELRESULT = 'modelresult_1.sav'
+
+MODELRESULT_LMFIT_1_0 = 'gauss_modelresult_lmfit100.sav'
+
+
+def clear_savefile(fname):
+ """Remove save files so that tests start fresh."""
+ try:
+ os.unlink(fname)
+ except OSError:
+ pass
+
+
+def wait_for_file(fname, timeout=10):
+ """Check whether file is created within certain amount of time."""
+ end_time = time.time() + timeout
+ while time.time() < end_time:
+ if os.path.exists(fname):
+ return True
+ time.sleep(0.05)
+ return False
+
+
+def create_model_params(x, y):
+ """Create the model and parameters."""
+ exp_mod = ExponentialModel(prefix='exp_')
+ params = exp_mod.guess(y, x=x)
+
+ gauss1 = GaussianModel(prefix='g1_')
+ params.update(gauss1.make_params())
+
+ gauss2 = GaussianModel(prefix='g2_')
+ params.update(gauss2.make_params())
+
+ params['g1_center'].set(value=105, min=75, max=125)
+ params['g1_sigma'].set(value=15, min=3)
+ params['g1_amplitude'].set(value=2000, min=10)
+
+ params['g2_center'].set(value=155, min=125, max=175)
+ params['g2_sigma'].set(value=15, min=3)
+ params['g2_amplitude'].set(value=2000, min=10)
+
+ model = gauss1 + gauss2 + exp_mod
+ return model, params
+
+
+def check_fit_results(result):
+ """Check the result of optimization."""
+ assert result.nvarys == 8
+ assert_allclose(result.chisqr, 1247.528209, rtol=1.0e-5)
+ assert_allclose(result.aic, 417.864631, rtol=1.0e-5)
+
+ pars = result.params
+ assert_allclose(pars['exp_decay'], 90.950886, rtol=1.0e-5)
+ assert_allclose(pars['exp_amplitude'], 99.018328, rtol=1.0e-5)
+
+ assert_allclose(pars['g1_sigma'], 16.672575, rtol=1.0e-5)
+ assert_allclose(pars['g1_center'], 107.030954, rtol=1.0e-5)
+ assert_allclose(pars['g1_amplitude'], 4257.773192, rtol=1.0e-5)
+ assert_allclose(pars['g1_fwhm'], 39.260914, rtol=1.0e-5)
+ assert_allclose(pars['g1_height'], 101.880231, rtol=1.0e-5)
+
+ assert_allclose(pars['g2_sigma'], 13.806948, rtol=1.0e-5)
+ assert_allclose(pars['g2_center'], 153.270101, rtol=1.0e-5)
+ assert_allclose(pars['g2_amplitude'], 2493.417703, rtol=1.0e-5)
+ assert_allclose(pars['g2_fwhm'], 32.512878, rtol=1.0e-5)
+ assert_allclose(pars['g2_height'], 72.045593, rtol=1.0e-5)
+
+
+@pytest.mark.parametrize("dill", [False, True])
+def test_save_load_model(dill):
+ """Save/load Model with/without dill."""
+ if dill:
+ pytest.importorskip("dill")
+ else:
+ lmfit.jsonutils.HAS_DILL = False
+
+ # create/save Model and perform some tests
+ model, _pars = create_model_params(x, y)
+ save_model(model, SAVE_MODEL)
+
+ file_exists = wait_for_file(SAVE_MODEL, timeout=10)
+ assert file_exists
+
+ with open(SAVE_MODEL) as fh:
+ text = fh.read()
+ assert 1000 < len(text) < 2500
+
+ # load the Model, perform fit and assert results
+ saved_model = load_model(SAVE_MODEL)
+ params = saved_model.make_params()
+
+ params['exp_decay'].set(100)
+ params['exp_amplitude'].set(100)
+ params['g1_center'].set(105, min=75, max=125)
+ params['g1_sigma'].set(15, min=3)
+ params['g1_amplitude'].set(2000, min=10)
+
+ params['g2_center'].set(155, min=125, max=175)
+ params['g2_sigma'].set(15, min=3)
+ params['g2_amplitude'].set(2000, min=10)
+
+ result = saved_model.fit(y, params, x=x)
+ check_fit_results(result)
+
+ clear_savefile(SAVE_MODEL)
+
+
+@pytest.mark.parametrize("dill", [False, True])
+def test_save_load_modelresult(dill):
+ """Save/load ModelResult with/without dill."""
+ if dill:
+ pytest.importorskip("dill")
+ else:
+ lmfit.jsonutils.HAS_DILL = False
+
+ # create model, perform fit, save ModelResult and perform some tests
+ model, params = create_model_params(x, y)
+ result = model.fit(y, params, x=x)
+ save_modelresult(result, SAVE_MODELRESULT)
+
+ file_exists = wait_for_file(SAVE_MODELRESULT, timeout=10)
+ assert file_exists
+
+ text = ''
+ with open(SAVE_MODELRESULT) as fh:
+ text = fh.read()
+ assert 12000 < len(text) < 60000 # depending on whether dill is present
+
+ # load the saved ModelResult from file and compare results
+ result_saved = load_modelresult(SAVE_MODELRESULT)
+ assert result_saved.residual is not None
+ check_fit_results(result_saved)
+
+    clear_savefile(SAVE_MODELRESULT)
+
+
+def test_load_legacy_modelresult():
+ """Load legacy ModelResult."""
+ fname = os.path.join(os.path.dirname(__file__), MODELRESULT_LMFIT_1_0)
+ result_saved = load_modelresult(fname)
+ assert result_saved is not None
+
+
+def test_saveload_modelresult_attributes():
+ """Test for restoring all attributes of the ModelResult."""
+ model, params = create_model_params(x, y)
+ result = model.fit(y, params, x=x)
+ save_modelresult(result, SAVE_MODELRESULT)
+
+ time.sleep(0.25)
+ file_exists = wait_for_file(SAVE_MODELRESULT, timeout=10)
+ assert file_exists
+ time.sleep(0.25)
+
+ loaded = load_modelresult(SAVE_MODELRESULT)
+
+ assert len(result.data) == len(loaded.data)
+ assert_allclose(result.data, loaded.data)
+
+ for pname in result.params.keys():
+ assert_allclose(result.init_params[pname].value,
+ loaded.init_params[pname].value)
+
+ clear_savefile(SAVE_MODELRESULT)
+
+
+def test_saveload_modelresult_exception():
+ """Make sure the proper exceptions are raised when needed."""
+ model, _pars = create_model_params(x, y)
+ save_model(model, SAVE_MODEL)
+
+ with pytest.raises(AttributeError, match=r'needs saved ModelResult'):
+ load_modelresult(SAVE_MODEL)
+ clear_savefile(SAVE_MODEL)
+
+
+@pytest.mark.parametrize("method", ['leastsq', 'nelder', 'powell', 'cobyla',
+ 'bfgs', 'lbfgsb', 'differential_evolution',
+ 'brute', 'basinhopping', 'ampgo', 'shgo',
+ 'dual_annealing'])
+def test_saveload_modelresult_roundtrip(method):
+ """Test for modelresult.loads()/dumps() and repeating that."""
+ def mfunc(x, a, b):
+ return a * (x-b)
+
+ model = Model(mfunc)
+ params = model.make_params(a=0.1, b=3.0)
+ params['a'].set(min=.01, max=1, brute_step=0.01)
+ params['b'].set(min=.01, max=3.1, brute_step=0.01)
+
+ np.random.seed(2020)
+ xx = np.linspace(-5, 5, 201)
+ yy = 0.5 * (xx - 0.22) + np.random.normal(scale=0.01, size=xx.size)
+
+ result1 = model.fit(yy, params=params, x=xx, method=method)
+
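+    # round-trip: serialize result1 to JSON with dumps() and restore it into
+    # a fresh ModelResult; funcdefs maps the model-function name back to the
+    # actual callable, which is needed when dill is not available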
+ result2 = ModelResult(model, Parameters())
+ result2.loads(result1.dumps(), funcdefs={'mfunc': mfunc})
+
+ result3 = ModelResult(model, Parameters())
+ result3.loads(result2.dumps(), funcdefs={'mfunc': mfunc})
+
+ assert result3 is not None
+ assert_allclose(result2.params['a'], 0.5, rtol=1.0e-2)
+ assert_allclose(result2.params['b'], 0.22, rtol=1.0e-2)
+ assert_allclose(result3.params['a'], 0.50, rtol=1.0e-2)
+ assert_allclose(result3.params['b'], 0.22, rtol=1.0e-2)
+
+
+def test_saveload_modelresult_expression_model():
+ """Test for ModelResult.loads()/dumps() for ExpressionModel.
+
+ * make sure that the loaded ModelResult has `init_params` and `init_fit`.
+
+ """
+ savefile = 'expr_modres.txt'
+ x = np.linspace(-10, 10, 201)
+ amp, cen, wid = 3.4, 1.8, 0.5
+
+ y = amp * np.exp(-(x-cen)**2 / (2*wid**2)) / (np.sqrt(2*np.pi)*wid)
+ y = y + np.random.normal(size=x.size, scale=0.01)
+
+ gmod = ExpressionModel("amp * exp(-(x-cen)**2 /(2*wid**2))/(sqrt(2*pi)*wid)")
+ result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+ save_modelresult(result, savefile)
+ time.sleep(0.25)
+
+ result2 = load_modelresult(savefile)
+
+ assert result2 is not None
+ assert result2.residual is not None
+ assert result2.init_fit is not None
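+    # the summed difference should be ~0; adding 1.0 to both sides turns the
+    # relative tolerance into an effectively absolute one near zero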
+ assert_allclose((result2.init_fit - result.init_fit).sum() + 1.00, 1.00,
+ rtol=1.0e-2)
+ os.unlink(savefile)
+
+
+def test_saveload_usersyms():
+ """Test save/load of ModelResult with non-trivial user symbols.
+
+ This example uses a VoigtModel, where `wofz()` is used in a constraint
+ expression.
+
+ """
+ x = np.linspace(0, 20, 501)
+ y = gaussian(x, 1.1, 8.5, 2) + lorentzian(x, 1.7, 8.5, 1.5)
+ np.random.seed(20)
+ y = y + np.random.normal(size=len(x), scale=0.025)
+
+ model = VoigtModel()
+ pars = model.guess(y, x=x)
+ result = model.fit(y, pars, x=x)
+
+ savefile = 'tmpvoigt_modelresult.sav'
+ save_modelresult(result, savefile)
+
+ assert_allclose(result.params['sigma'], 1.075487, rtol=1.0e-5)
+ assert_allclose(result.params['center'], 8.489738, rtol=1.0e-5)
+ assert_allclose(result.params['height'], 0.557778, rtol=1.0e-5)
+
+ time.sleep(0.25)
+ result2 = load_modelresult(savefile)
+
+ assert result2.residual is not None
+ assert_allclose(result2.params['sigma'], 1.075487, rtol=1.0e-5)
+ assert_allclose(result2.params['center'], 8.489738, rtol=1.0e-5)
+ assert_allclose(result2.params['height'], 0.557778, rtol=1.0e-5)
+
+
+def test_modelresult_summary():
+ """Test summary() method of ModelResult.
+ """
+ x = np.linspace(0, 20, 501)
+ y = gaussian(x, 1.1, 8.5, 2) + lorentzian(x, 1.7, 8.5, 1.5)
+ np.random.seed(20)
+ y = y + np.random.normal(size=len(x), scale=0.025)
+
+ model = VoigtModel()
+ pars = model.guess(y, x=x)
+ result = model.fit(y, pars, x=x)
+
+ summary = result.summary()
+
+ assert isinstance(summary, dict)
+
+ for attr in ('ndata', 'nvarys', 'nfree', 'chisqr', 'redchi', 'aic',
+ 'bic', 'rsquared', 'nfev', 'max_nfev', 'aborted',
+ 'errorbars', 'success', 'message', 'lmdif_message', 'ier',
+ 'nan_policy', 'scale_covar', 'calc_covar', 'ci_out',
+ 'col_deriv', 'flatchain', 'call_kws', 'var_names',
+ 'user_options', 'kws', 'init_values', 'best_values'):
+ val = summary.get(attr, '__INVALID__')
+ assert val != '__INVALID__'
+
+ assert len(json.dumps(summary)) > 100
diff --git a/tests/test_model_uncertainties.py b/tests/test_model_uncertainties.py
new file mode 100644
index 0000000..4771e4e
--- /dev/null
+++ b/tests/test_model_uncertainties.py
@@ -0,0 +1,129 @@
+"""Tests of ModelResult.eval_uncertainty()"""
+import os
+
+import numpy as np
+from numpy.testing import assert_allclose
+
+from lmfit.lineshapes import gaussian
+from lmfit.models import ExponentialModel, GaussianModel, LinearModel
+
+
+def get_linearmodel(slope=0.8, intercept=0.5, noise=1.5):
+ # create data to be fitted
+ np.random.seed(88)
+ x = np.linspace(0, 10, 101)
+ y = intercept + x*slope
+ y = y + np.random.normal(size=len(x), scale=noise)
+
+ model = LinearModel()
+ params = model.make_params(intercept=intercept, slope=slope)
+
+ return x, y, model, params
+
+
+def get_gaussianmodel(amplitude=1.0, center=5.0, sigma=1.0, noise=0.1):
+ # create data to be fitted
+ np.random.seed(7392)
+ x = np.linspace(-20, 20, 201)
+ y = gaussian(x, amplitude, center=center, sigma=sigma)
+ y = y + np.random.normal(size=len(x), scale=noise)
+
+ model = GaussianModel()
+ params = model.make_params(amplitude=amplitude/5.0,
+ center=center-1.0,
+ sigma=sigma*2.0)
+ return x, y, model, params
+
+
+def test_linear_constant_intercept():
+ x, y, model, params = get_linearmodel(slope=4, intercept=-10)
+
+ params['intercept'].vary = False
+
+ ret = model.fit(y, params, x=x)
+
+ dely = ret.eval_uncertainty(sigma=1)
+ slope_stderr = ret.params['slope'].stderr
+
+ assert_allclose(dely.min(), 0, rtol=1.e-2)
+ assert_allclose(dely.max(), slope_stderr*x.max(), rtol=1.e-2)
+ assert_allclose(dely.mean(), slope_stderr*x.mean(), rtol=1.e-2)
+
+
+def test_linear_constant_slope():
+ x, y, model, params = get_linearmodel(slope=-4, intercept=2.3)
+
+ params['slope'].vary = False
+
+ ret = model.fit(y, params, x=x)
+
+ dely = ret.eval_uncertainty(sigma=1)
+
+ intercept_stderr = ret.params['intercept'].stderr
+
+ assert_allclose(dely.min(), intercept_stderr, rtol=1.e-2)
+ assert_allclose(dely.max(), intercept_stderr, rtol=1.e-2)
+
+
+def test_gauss_sigmalevel():
+ """Test that dely increases as sigma increases."""
+ x, y, model, params = get_gaussianmodel(amplitude=50.0, center=4.5,
+ sigma=0.78, noise=0.1)
+ ret = model.fit(y, params, x=x)
+
+ dely_sigma1 = ret.eval_uncertainty(sigma=1)
+ dely_sigma2 = ret.eval_uncertainty(sigma=2)
+ dely_sigma3 = ret.eval_uncertainty(sigma=3)
+
+ assert dely_sigma3.mean() > 1.5*dely_sigma2.mean()
+ assert dely_sigma2.mean() > 1.5*dely_sigma1.mean()
+
+
+def test_gauss_noiselevel():
+ """Test that dely increases as expected with changing noise level."""
+ lonoise = 0.05
+ hinoise = 10*lonoise
+ x, y, model, params = get_gaussianmodel(amplitude=20.0, center=2.1,
+ sigma=1.0, noise=lonoise)
+ ret1 = model.fit(y, params, x=x)
+ dely_lonoise = ret1.eval_uncertainty(sigma=1)
+
+ x, y, model, params = get_gaussianmodel(amplitude=20.0, center=2.1,
+ sigma=1.0, noise=hinoise)
+ ret2 = model.fit(y, params, x=x)
+ dely_hinoise = ret2.eval_uncertainty(sigma=1)
+
+ assert_allclose(dely_hinoise.mean(), 10*dely_lonoise.mean(), rtol=1.e-2)
+
+
+def test_component_uncertainties():
+ "test dely_comps"
+ y, x = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'NIST_Gauss2.dat')).T
+ model = (GaussianModel(prefix='g1_') +
+ GaussianModel(prefix='g2_') +
+ ExponentialModel(prefix='bkg_'))
+
+ params = model.make_params(bkg_amplitude=100, bkg_decay=80,
+ g1_amplitude=3000,
+ g1_center=100,
+ g1_sigma=10,
+ g2_amplitude=3000,
+ g2_center=150,
+ g2_sigma=10)
+
+ result = model.fit(y, params, x=x)
+ comps = result.eval_components(x=x)
+ dely = result.eval_uncertainty(sigma=3)
+
+ assert 'g1_' in comps
+ assert 'g2_' in comps
+ assert 'bkg_' in comps
+ assert dely.mean() > 0.8
+ assert dely.mean() < 2.0
+ assert result.dely_comps['g1_'].mean() > 0.5
+ assert result.dely_comps['g1_'].mean() < 1.5
+ assert result.dely_comps['g2_'].mean() > 0.5
+ assert result.dely_comps['g2_'].mean() < 1.5
+ assert result.dely_comps['bkg_'].mean() > 0.5
+ assert result.dely_comps['bkg_'].mean() < 1.5
diff --git a/tests/test_models.py b/tests/test_models.py
new file mode 100644
index 0000000..b491e7f
--- /dev/null
+++ b/tests/test_models.py
@@ -0,0 +1,154 @@
+import numpy as np
+
+import lmfit
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel, SplineModel
+
+
+def _isclose(name, expected_value, fit_value, atol, rtol):
+ """isclose with error message"""
+ assert np.isclose(expected_value, fit_value, atol=atol, rtol=rtol), \
+ f"bad value for {name}: expected {expected_value}, got {fit_value}."
+
+
+def check_fit(model, params, x, y, test_values, noise_scale=1.e-3, atol=0.1, rtol=0.05):
+ """Checks that a model fits noisy data well
+
+ Parameters
+ -----------
+ model: model to use
+ par: parameters to use
+ x: x data
+ y: y data
+ test_values: dict of 'true values'
+ noise_scale: float, optional
+ The standard deviation of noise that is added to the test data.
+ atol: float, optional
+ Absolute tolerance for considering fit parameters close to the
+ parameters test data was generated with.
+ rtol: float, optional
+ Relative tolerance for considering fit parameters close to the
+ parameters test data was generated with.
+
+ Returns
+ -------
+ fit result
+
+ Raises
+ -------
+ AssertionError
+ Any fit parameter that is not close to the parameter used to
+ generate the test data raises this error.
+ """
+ y += np.random.normal(scale=noise_scale, size=len(y))
+ result = model.fit(y, params, x=x)
+ fit_values = result.best_values
+ for name, test_val in test_values.items():
+ _isclose(name, test_val, fit_values[name], atol, rtol)
+ return result
+
+
+def testLinear():
+ mod = lmfit.models.LinearModel()
+ x = np.linspace(-1, 1, 201)
+ y = 10*x + 2
+ params = mod.make_params(intercept=1, slope=2)
+ check_fit(mod, params, x, y, dict(intercept=2, slope=10))
+
+
+def testQuadratic():
+ mod = lmfit.models.QuadraticModel()
+ x = np.linspace(-1, 1, 201)
+ y = 0.3*x*x + 10*x + 2
+ params = mod.make_params(a=0, b=5, c=1)
+ check_fit(mod, params, x, y, dict(a=0.3, b=10, c=2))
+
+
+def testSine_partialperiod():
+ mod = lmfit.models.SineModel()
+ x = np.linspace(-1, 1, 201)
+ pars = dict(amplitude=1.5, frequency=0.9, shift=0.4)
+
+ y = pars['amplitude']*np.sin(x*pars['frequency'] + pars['shift'])
+
+ params = mod.make_params(amplitude=1, frequency=1, shift=-0.2)
+ check_fit(mod, params, x, y, pars)
+
+
+def testSineWithLine():
+ mod = lmfit.models.SineModel() + lmfit.models.LinearModel()
+ x = np.linspace(-5, 5, 501)
+ pars = dict(amplitude=5.3, frequency=3.8, shift=0.1, intercept=8.2, slope=0.2)
+
+ y = pars['amplitude']*np.sin(x*pars['frequency'] + pars['shift'])
+ y += pars['intercept'] + x * pars['slope']
+
+ params = mod.make_params(amplitude=10, frequency=4.5, shift=0.1,
+ intercept=10, slope=0)
+
+ check_fit(mod, params, x, y, pars, noise_scale=0.02)
+
+
+def testSineManyShifts():
+ mod = lmfit.models.SineModel() + lmfit.models.LinearModel()
+ x = np.linspace(-5, 5, 501)
+ pars = dict(amplitude=5.3, frequency=3.8, intercept=8.2, slope=0.2)
+
+ for shift in (0.1, 0.5, 1.0, 1.5):
+ pars['shift'] = shift
+ y = pars['amplitude']*np.sin(x*pars['frequency'] + pars['shift'])
+ y += pars['intercept'] + x*pars['slope']
+
+ params = mod.make_params(amplitude=10, frequency=4.5, shift=0.8,
+ intercept=10, slope=0)
+
+ check_fit(mod, params, x, y, pars, noise_scale=0.02)
+
+
+def testSineModel_guess():
+ mod = lmfit.models.SineModel()
+ x = np.linspace(-10, 10, 201)
+ pars = dict(amplitude=1.5, frequency=0.5, shift=0.4)
+
+ y = pars['amplitude']*np.sin(x*pars['frequency'] + pars['shift'])
+
+ params = mod.guess(y, x=x)
+ assert params['amplitude'] > 0.5
+ assert params['amplitude'] < 5.0
+ assert params['frequency'] > 0.1
+ assert params['frequency'] < 1.5
+ assert params['shift'] > 0.0
+ assert params['shift'] < 1.0
+
+
+def testSineModel_make_params():
+ mod = lmfit.models.SineModel()
+ pars = mod.make_params(amplitude=1.5, frequency=0.5,
+ shift=dict(value=0.4, min=-10, max=10))
+
+ assert pars['amplitude'].value > 1.4
+ assert pars['amplitude'].value < 1.6
+ assert pars['amplitude'].min == 0
+ assert pars['amplitude'].max > 1e99
+ assert pars['shift'].value > 0.35
+ assert pars['shift'].value < 0.45
+ assert pars['shift'].min < -9.5
+ assert pars['shift'].max > 9.5
+
+
+def testSplineModel():
+ x = np.linspace(0, 25, 501)
+ y = gaussian(x, amplitude=10, center=16.2, sigma=0.8) + 3 + 0.03*x + np.sin(x/4)
+
+ model = GaussianModel(prefix='peak_')
+ params = model.make_params(amplitude=8, center=16, sigma=1)
+
+ knot_xvals = np.array([1, 3, 5, 7, 9, 11, 13, 19, 21, 23, 25])
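+    # note the gap between the knots at 13 and 19: with no knots under the
+    # peak near x=16.2, the spline can only model the smooth background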
+
+ bkg = SplineModel(prefix='bkg_', xknots=knot_xvals)
+ params.update(bkg.guess(y, x))
+
+ model = model + bkg
+
+ pars = dict(peak_amplitude=10, peak_center=16.2, peak_sigma=0.8)
+ check_fit(model, params, x, y, pars, noise_scale=0.05)
diff --git a/tests/test_multidatasets.py b/tests/test_multidatasets.py
new file mode 100644
index 0000000..3486c1a
--- /dev/null
+++ b/tests/test_multidatasets.py
@@ -0,0 +1,67 @@
+"""Example fitting to multiple (simulated) data sets"""
+
+import numpy as np
+
+from lmfit import Parameters, minimize
+from lmfit.lineshapes import gaussian
+
+
+def gauss_dataset(params, i, x):
+ """calc gaussian from params for data set i
+ using simple, hardwired naming convention"""
+ amp = params[f'amp_{i+1}']
+ cen = params[f'cen_{i+1}']
+ sig = params[f'sig_{i+1}']
+ return gaussian(x, amp, cen, sig)
+
+
+def objective(params, x, data):
+ """ calculate total residual for fits to several data sets held
+ in a 2-D array, and modeled by Gaussian functions"""
+ ndata, _ = data.shape
+ resid = 0.0*data[:]
+ # make residual per data set
+ for i in range(ndata):
+ resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
+ # now flatten this to a 1D array, as minimize() needs
+ return resid.flatten()
+
+
+def test_multidatasets():
+ # create 5 datasets
+ x = np.linspace(-1, 2, 151)
+ data = []
+ for _ in np.arange(5):
+ amp = 2.60 + 1.50*np.random.rand()
+ cen = -0.20 + 1.50*np.random.rand()
+ sig = 0.25 + 0.03*np.random.rand()
+ dat = gaussian(x, amp, cen, sig) + np.random.normal(size=len(x),
+ scale=0.1)
+ data.append(dat)
+
+ # data has shape (5, 151)
+ data = np.array(data)
+ assert data.shape == (5, 151)
+
+ # create 5 sets of parameters, one per data set
+ pars = Parameters()
+ for iy, _ in enumerate(data):
+ pars.add(f'amp_{iy+1}', value=0.5, min=0.0, max=200)
+ pars.add(f'cen_{iy+1}', value=0.4, min=-2.0, max=2.0)
+ pars.add(f'sig_{iy+1}', value=0.3, min=0.01, max=3.0)
+
+ # but now constrain all values of sigma to have the same value
+ # by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1
+ for iy in (2, 3, 4, 5):
+ pars[f'sig_{iy}'].expr = 'sig_1'
+
+ # run the global fit to all the data sets
+ out = minimize(objective, pars, args=(x, data))
+
+ assert len(pars) == 15
+ assert out.nvarys == 11
+ assert out.nfev > 15
+ assert out.chisqr > 1.0
+ assert pars['amp_1'].value > 0.1
+ assert pars['sig_1'].value > 0.1
+ assert pars['sig_2'].value == pars['sig_1'].value
diff --git a/tests/test_nose.py b/tests/test_nose.py
new file mode 100644
index 0000000..b8237f5
--- /dev/null
+++ b/tests/test_nose.py
@@ -0,0 +1,698 @@
+import sys
+import unittest
+
+import numpy as np
+from numpy import pi
+from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
+import pytest
+from scipy.optimize import rosen_der
+from uncertainties import ufloat
+
+from lmfit import Minimizer, Parameters, minimize
+from lmfit.lineshapes import gaussian
+from lmfit.minimizer import (HAS_EMCEE, SCALAR_METHODS, MinimizerResult,
+ _nan_policy)
+from lmfit.models import SineModel
+
+try:
+ import numdifftools # noqa: F401
+ HAS_NUMDIFFTOOLS = True
+except ImportError:
+ HAS_NUMDIFFTOOLS = False
+
+
+def check(para, real_val, sig=3):
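+    """Assert that a parameter is within sig standard errors of the real value."""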
+ err = abs(para.value - real_val)
+ assert err < sig * para.stderr
+
+
+def check_wo_stderr(para, real_val, sig=0.1):
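+    """Assert that a parameter is within an absolute tolerance sig of the real value."""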
+ err = abs(para.value - real_val)
+ assert err < sig
+
+
+def check_paras(para_fit, para_real, sig=3):
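+    """Apply check() to every parameter in para_fit against para_real."""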
+ for i in para_fit:
+ check(para_fit[i], para_real[i].value, sig=sig)
+
+
+def test_simple():
+ # create data to be fitted
+ np.random.seed(1)
+ x = np.linspace(0, 15, 301)
+ data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=len(x), scale=0.2))
+
+ # define objective function: returns the array to be minimized
+ def fcn2min(params, x, data):
+ """model decaying sine wave, subtract data"""
+ amp = params['amp']
+ shift = params['shift']
+ omega = params['omega']
+ decay = params['decay']
+
+ model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+ # create a set of Parameters
+ params = Parameters()
+ params.add('amp', value=10, min=0)
+ params.add('decay', value=0.1)
+ params.add('shift', value=0.0, min=-pi/2., max=pi/2)
+ params.add('omega', value=3.0)
+
+ # do fit, here with leastsq model
+ result = minimize(fcn2min, params, args=(x, data))
+
+ # assert that the real parameters are found
+ for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
+ check(para, val)
+
+
+def test_lbfgsb():
+ p_true = Parameters()
+ p_true.add('amp', value=14.0)
+ p_true.add('period', value=5.33)
+ p_true.add('shift', value=0.123)
+ p_true.add('decay', value=0.010)
+
+ def residual(pars, x, data=None):
+ amp = pars['amp']
+ per = pars['period']
+ shift = pars['shift']
+ decay = pars['decay']
+
+ if abs(shift) > pi/2:
+ shift = shift - np.sign(shift) * pi
+ model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
+ if data is None:
+ return model
+ return model - data
+
+ n = 2500
+ xmin = 0.
+ xmax = 250.0
+ noise = np.random.normal(scale=0.7215, size=n)
+ x = np.linspace(xmin, xmax, n)
+ data = residual(p_true, x) + noise
+
+ fit_params = Parameters()
+ fit_params.add('amp', value=11.0, min=5, max=20)
+ fit_params.add('period', value=5., min=1., max=7)
+ fit_params.add('shift', value=.10, min=0.0, max=0.2)
+ fit_params.add('decay', value=6.e-3, min=0, max=0.1)
+
+ out = minimize(residual, fit_params, method='lbfgsb', args=(x,),
+ kws={'data': data})
+
+ for para, true_para in zip(out.params.values(), p_true.values()):
+ check_wo_stderr(para, true_para.value)
+
+
+def test_derive():
+ def func(pars, x, data=None):
+ model = pars['a'] * np.exp(-pars['b'] * x) + pars['c']
+ if data is None:
+ return model
+ return model - data
+
+ def dfunc(pars, x, data=None):
+ v = np.exp(-pars['b']*x)
+ return np.array([v, -pars['a']*x*v, np.ones(len(x))])
+
+ def f(var, x):
+ return var[0] * np.exp(-var[1] * x) + var[2]
+
+ params1 = Parameters()
+ params1.add('a', value=10)
+ params1.add('b', value=10)
+ params1.add('c', value=10)
+
+ params2 = Parameters()
+ params2.add('a', value=10)
+ params2.add('b', value=10)
+ params2.add('c', value=10)
+
+ a, b, c = 2.5, 1.3, 0.8
+ x = np.linspace(0, 4, 50)
+ y = f([a, b, c], x)
+ data = y + 0.15*np.random.normal(size=len(x))
+
+ # fit without analytic derivative
+ min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data': data})
+ out1 = min1.leastsq()
+
+ # fit with analytic derivative
+ min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data': data})
+ out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
+
+ check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
+ check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
+ check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
+
+
+def test_peakfit():
+ def residual(pars, x, data=None):
+ g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
+ g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
+ model = g1 + g2
+ if data is None:
+ return model
+ return model - data
+
+ n = 601
+ xmin = 0.
+ xmax = 15.0
+ noise = np.random.normal(scale=.65, size=n)
+ x = np.linspace(xmin, xmax, n)
+
+ org_params = Parameters()
+ org_params.add_many(('a1', 12.0, True, None, None, None),
+ ('c1', 5.3, True, None, None, None),
+ ('w1', 1.0, True, None, None, None),
+ ('a2', 9.1, True, None, None, None),
+ ('c2', 8.1, True, None, None, None),
+ ('w2', 2.5, True, None, None, None))
+
+ data = residual(org_params, x) + noise
+
+ fit_params = Parameters()
+ fit_params.add_many(('a1', 8.0, True, None, 14., None),
+ ('c1', 5.0, True, None, None, None),
+ ('w1', 0.7, True, None, None, None),
+ ('a2', 3.1, True, None, None, None),
+ ('c2', 8.8, True, None, None, None))
+
+ fit_params.add('w2', expr='2.5*w1')
+
+ myfit = Minimizer(residual, fit_params, fcn_args=(x,),
+ fcn_kws={'data': data})
+
+ myfit.prepare_fit()
+ out = myfit.leastsq()
+ check_paras(out.params, org_params)
+
+
+def test_scalar_minimize_has_no_uncertainties():
+    # scalar_minimize doesn't calculate uncertainties itself.
+    # after a scalar_minimize run, the stderr and correl of each parameter
+    # should be None unless numdifftools is installed (stderr and correl
+    # are set to None when a Parameter is initialised).
+    # This requires a reset after a leastsq fit has been done.
+    # Only when scalar_minimize calculates stderr and correl can this test
+    # be removed.
+
+ np.random.seed(1)
+ x = np.linspace(0, 15, 301)
+ data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=len(x), scale=0.2))
+
+ # define objective function: returns the array to be minimized
+ def fcn2min(params, x, data):
+ """model decaying sine wave, subtract data"""
+ amp = params['amp']
+ shift = params['shift']
+ omega = params['omega']
+ decay = params['decay']
+
+ model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+ # create a set of Parameters
+ params = Parameters()
+ params.add('amp', value=10, min=0)
+ params.add('decay', value=0.1)
+ params.add('shift', value=0.0, min=-pi/2., max=pi/2)
+ params.add('omega', value=3.0)
+
+ mini = Minimizer(fcn2min, params, fcn_args=(x, data))
+ out = mini.minimize()
+ assert np.isfinite(out.params['amp'].stderr)
+ assert out.errorbars
+ out2 = mini.minimize(method='nelder-mead')
+
+ for par in ('amp', 'decay', 'shift', 'omega'):
+ assert HAS_NUMDIFFTOOLS == (out2.params[par].stderr is not None)
+ assert HAS_NUMDIFFTOOLS == (out2.params[par].correl is not None)
+
+ assert HAS_NUMDIFFTOOLS == out2.errorbars
+
+
+def test_scalar_minimize_reduce_fcn():
+ # test that the reduce_fcn option for scalar_minimize
+ # gives different and improved results with outliers
+
+ np.random.seed(2)
+ x = np.linspace(0, 10, 101)
+
+ yo = 1.0 + 2.0*np.sin(4*x) * np.exp(-x / 5)
+ y = yo + np.random.normal(size=len(yo), scale=0.250)
+ outliers = np.random.randint(int(len(x)/3.0), len(x), int(len(x)/12))
+ y[outliers] += 5*np.random.random(len(outliers))
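+    # roughly 8% of the points in the upper two-thirds of the x range are
+    # shifted upward to act as outliers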
+
+ # define objective function: returns the array to be minimized
+ def objfunc(pars, x, data):
+ decay = pars['decay']
+ offset = pars['offset']
+ omega = pars['omega']
+ amp = pars['amp']
+ model = offset + amp * np.sin(x*omega) * np.exp(-x/decay)
+ return model - data
+
+ # create a set of Parameters
+ params = Parameters()
+ params.add('offset', 2.0)
+ params.add('omega', 3.3)
+ params.add('amp', 2.5)
+ params.add('decay', 1.0)
+
+ method = 'L-BFGS-B'
+ out1 = minimize(objfunc, params, args=(x, y), method=method)
+ out2 = minimize(objfunc, params, args=(x, y), method=method,
+ reduce_fcn='neglogcauchy')
+
+ assert_allclose(out1.params['omega'].value, 4.0, rtol=0.01)
+ assert_allclose(out1.params['decay'].value, 7.6, rtol=0.01)
+
+ assert_allclose(out2.params['omega'].value, 4.0, rtol=0.01)
+ assert_allclose(out2.params['decay'].value, 5.8, rtol=0.01)
+
+
+def test_multidimensional_fit_GH205():
+ # test that you don't need to flatten the output from the objective
+ # function. Tests regression for GH205.
+ pos = np.linspace(0, 99, 100)
+ xv, yv = np.meshgrid(pos, pos)
+ f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1) +
+ np.cos(yv * lambda2))
+
+ data = f(xv, yv, 0.3, 3)
+    assert data.ndim == 2
+
+ def fcn2min(params, xv, yv, data):
+ """model decaying sine wave, subtract data"""
+ model = f(xv, yv, params['lambda1'], params['lambda2'])
+ return model - data
+
+ # create a set of Parameters
+ params = Parameters()
+ params.add('lambda1', value=0.4)
+ params.add('lambda2', value=3.2)
+
+ mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
+ mini.minimize()
+
+
+def test_ufloat():
+ """Test of ufloat from uncertainties."""
+ x = ufloat(1, 0.1)
+ assert_allclose(x.nominal_value, 1.0, rtol=1.e-7)
+ assert_allclose(x.std_dev, 0.1, rtol=1.e-7)
+
+ y = x*x
+ assert_allclose(y.nominal_value, 1.0, rtol=1.e-7)
+ assert_allclose(y.std_dev, 0.2, rtol=1.e-7)
+
+ y = x - x
+ assert_allclose(y.nominal_value, 0.0, rtol=1.e-7)
+ assert_allclose(y.std_dev, 0.0, rtol=1.e-7)
+
+
+def test_stderr_propagation():
+ """Test propagation of uncertainties to constraint expressions."""
+ model = SineModel()
+ params = model.make_params(amplitude=1, frequency=9.0, shift=0)
+ params.add("period", expr="1/frequency")
+ params.add("period_2a", expr="2*period")
+ params.add("period_2b", expr="2/frequency")
+ params.add("thing_1", expr="shift + frequency")
+ params.add("thing_2", expr="shift + 1/period")
+
+ np.random.seed(3)
+ xs = np.linspace(0, 1, 51)
+ ys = np.sin(7.45 * xs) + 0.01 * np.random.normal(size=xs.shape)
+
+ result = model.fit(ys, x=xs, params=params)
+
+ opars = result.params
+ assert_allclose(opars['period'].stderr, 1.1587e-04, rtol=1.e-3)
+ assert_allclose(opars['period_2b'].stderr, 2.3175e-04, rtol=1.e-3)
+ assert_allclose(opars['thing_1'].stderr, 0.0037291, rtol=1.e-3)
+
+ assert_allclose(opars['period_2a'].stderr, 2*opars['period'].stderr, rtol=1.e-5)
+ assert_allclose(opars['period_2b'].stderr, opars['period_2a'].stderr, rtol=1.e-5)
+ assert_allclose(opars['thing_1'].stderr, opars['thing_2'].stderr, rtol=1.e-5)
+
+
+class CommonMinimizerTest(unittest.TestCase):
+
+ def setUp(self):
+ """
+ test scale minimizers except newton-cg (needs jacobian) and
+ anneal (doesn't work out of the box).
+ """
+ p_true = Parameters()
+ p_true.add('amp', value=14.0)
+ p_true.add('period', value=5.33)
+ p_true.add('shift', value=0.123)
+ p_true.add('decay', value=0.010)
+ self.p_true = p_true
+
+ n = 2500
+ xmin = 0.
+ xmax = 250.0
+ noise = np.random.normal(scale=0.7215, size=n)
+ self.x = np.linspace(xmin, xmax, n)
+ self.data = self.residual(p_true, self.x) + noise
+
+ fit_params = Parameters()
+ fit_params.add('amp', value=11.0, min=5, max=20)
+ fit_params.add('period', value=5., min=1., max=7)
+ fit_params.add('shift', value=.10, min=0.0, max=0.2)
+ fit_params.add('decay', value=6.e-3, min=0, max=0.1)
+ self.fit_params = fit_params
+ self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])
+
+ def residual(self, pars, x, data=None):
+ amp = pars['amp']
+ per = pars['period']
+ shift = pars['shift']
+ decay = pars['decay']
+
+ if abs(shift) > pi/2:
+ shift = shift - np.sign(shift) * pi
+ model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return model - data
+
+ def test_diffev_bounds_check(self):
+ # You need finite (min, max) for each parameter if you're using
+ # differential_evolution.
+ self.fit_params['decay'].min = -np.inf
+ self.fit_params['decay'].vary = True
+ self.minimizer = 'differential_evolution'
+ pytest.raises(ValueError, self.scalar_minimizer)
+
+ # but only if a parameter is not fixed
+ self.fit_params['decay'].vary = False
+ self.mini.scalar_minimize(method='differential_evolution', max_nfev=1)
+
+ def test_scalar_minimizers(self):
+ # test all the scalar minimizers
+ for method in SCALAR_METHODS:
+ if method in ['newton', 'dogleg', 'trust-ncg', 'cg', 'trust-exact',
+ 'trust-krylov', 'trust-constr']:
+ continue
+ self.minimizer = SCALAR_METHODS[method]
+ if method == 'Nelder-Mead':
+ sig = 0.2
+ else:
+ sig = 0.15
+ self.scalar_minimizer(sig=sig)
+
+ def scalar_minimizer(self, sig=0.15):
+ out = self.mini.scalar_minimize(method=self.minimizer)
+
+ self.residual(out.params, self.x)
+
+ for para, true_para in zip(out.params.values(), self.p_true.values()):
+ check_wo_stderr(para, true_para.value, sig=sig)
+
+ def test_nan_policy(self):
+ # check that an error is raised if there are nan in
+ # the data returned by userfcn
+ self.data[0] = np.nan
+
+ for method in SCALAR_METHODS:
+ if method == 'cobyla' and sys.platform == 'darwin':
+ pytest.xfail("this aborts Python on macOS...")
+ elif method == 'differential_evolution':
+ pytest.raises(RuntimeError, self.mini.scalar_minimize,
+ SCALAR_METHODS[method])
+ else:
+ pytest.raises(ValueError, self.mini.scalar_minimize,
+ SCALAR_METHODS[method])
+
+ pytest.raises(ValueError, self.mini.minimize)
+
+ # now check that the fit proceeds if nan_policy is 'omit'
+ self.mini.nan_policy = 'omit'
+ res = self.mini.minimize()
+ assert_equal(res.ndata, np.size(self.data, 0) - 1)
+
+ for para, true_para in zip(res.params.values(), self.p_true.values()):
+ check_wo_stderr(para, true_para.value, sig=0.15)
+
+ def test_nan_policy_function(self):
+ a = np.array([0, 1, 2, 3, np.nan])
+ pytest.raises(ValueError, _nan_policy, a)
+ assert np.isnan(_nan_policy(a, nan_policy='propagate')[-1])
+ assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
+
+ a[-1] = np.inf
+ pytest.raises(ValueError, _nan_policy, a)
+ assert np.isposinf(_nan_policy(a, nan_policy='propagate')[-1])
+ assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
+ assert_equal(_nan_policy(a, handle_inf=False), a)
+
+ def test_emcee(self):
+ # test emcee
+ if not HAS_EMCEE:
+ return True
+
+ np.random.seed(123456)
+ out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10)
+
+ check_paras(out.params, self.p_true, sig=3)
+
+ def test_emcee_method_kwarg(self):
+ # test with emcee as method keyword argument
+ if not HAS_EMCEE:
+ return True
+ np.random.seed(123456)
+ out = self.mini.minimize(method='emcee',
+ nwalkers=50, steps=200,
+ burn=50, thin=10)
+ assert out.method == 'emcee'
+ assert out.nfev == 50*200
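+        # emcee evaluates the objective once per walker per step,
+        # so nfev is nwalkers*steps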
+
+ check_paras(out.params, self.p_true, sig=3)
+
+ out_unweighted = self.mini.minimize(method='emcee',
+ nwalkers=50, steps=200,
+ burn=50, thin=10,
+ is_weighted=False)
+ assert out_unweighted.method == 'emcee'
+
+ def test_emcee_multiprocessing(self):
+ # test multiprocessing runs
+ raise pytest.skip("Pytest fails with multiprocessing")
+ pytest.importorskip("dill")
+ if not HAS_EMCEE:
+ return True
+ self.mini.emcee(steps=50, workers=4, nwalkers=20)
+
+ def test_emcee_bounds_length(self):
+        # the log-probability functions check if the parameters are
+        # inside the bounds. Check that the bounds and parameters
+        # have matching lengths for that comparison; the two can
+        # differ when nvarys != nparams.
+ if not HAS_EMCEE:
+ return True
+ self.mini.params['amp'].vary = False
+ self.mini.params['period'].vary = False
+ self.mini.params['shift'].vary = False
+
+ self.mini.emcee(steps=10)
+
+ def test_emcee_partial_bounds(self):
+ # mcmc with partial bounds
+ if not HAS_EMCEE:
+ return True
+
+ np.random.seed(123456)
+ # test mcmc output vs lm, some parameters not bounded
+ self.fit_params['amp'].max = np.inf
+ # self.fit_params['amp'].min = -np.inf
+ out = self.mini.emcee(nwalkers=100, steps=300, burn=100, thin=10)
+
+ check_paras(out.params, self.p_true, sig=3)
+
+ def test_emcee_init_with_chain(self):
+ # can you initialise with a previous chain
+ if not HAS_EMCEE:
+ return True
+
+ out = self.mini.emcee(nwalkers=100, steps=5)
+ # can initialise with a chain
+ self.mini.emcee(nwalkers=100, steps=1, pos=out.chain)
+ # can initialise with a correct subset of a chain
+ self.mini.emcee(nwalkers=100, steps=1, pos=out.chain[-1, ...])
+
+ # but you can't initialise if the shape is wrong.
+ pytest.raises(ValueError,
+ self.mini.emcee,
+ nwalkers=100,
+ steps=1,
+ pos=out.chain[-1, :-1, ...])
+
+ def test_emcee_reuse_sampler(self):
+ if not HAS_EMCEE:
+ return True
+
+ self.mini.emcee(nwalkers=20, steps=25)
+
+ # if you've run the sampler the Minimizer object should have a _lastpos
+ # attribute
+ assert hasattr(self.mini, '_lastpos')
+
+ # now try and re-use sampler
+ out2 = self.mini.emcee(steps=10, reuse_sampler=True)
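+        # the chain grows from 25 to 25 + 10 = 35 steps, with 20 walkers
+        # and 4 varying parameters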
+ assert out2.chain.shape == (35, 20, 4)
+
+ # you shouldn't be able to reuse the sampler if nvarys has changed.
+ self.mini.params['amp'].vary = False
+ pytest.raises(ValueError, self.mini.emcee, reuse_sampler=True)
+
+ def test_emcee_lnpost(self):
+ # check ln likelihood is calculated correctly. It should be
+ # -0.5 * chi**2.
+ result = self.mini.minimize()
+
+ # obtain the numeric values
+ # note - in this example all the parameters are varied
+ fvars = np.array([par.value for par in result.params.values()])
+
+        # calculate the cost function with scaled values (parameters all
+        # have lower and upper bounds).
+ scaled_fvars = []
+ for par, fvar in zip(result.params.values(), fvars):
+ par.value = fvar
+ scaled_fvars.append(par.setup_bounds())
+
+ val = self.mini.penalty(np.array(scaled_fvars))
+
+ # calculate the log-likelihood value
+ bounds = np.array([(par.min, par.max)
+ for par in result.params.values()])
+
+ val2 = self.mini._lnprob(fvars, self.residual, result.params,
+ result.var_names, bounds,
+ userargs=(self.x, self.data))
+
+ assert_almost_equal(-0.5 * val, val2)
+
+ def test_emcee_output(self):
+ # test mcmc output
+ if not HAS_EMCEE:
+ return True
+ try:
+ from pandas import DataFrame
+ except ImportError:
+ return True
+ out = self.mini.emcee(nwalkers=10, steps=20, burn=5, thin=2)
+ assert isinstance(out, MinimizerResult)
+ assert isinstance(out.flatchain, DataFrame)
+
+ # check that we can access the chains via parameter name
+ assert out.flatchain['amp'].shape[0] == 70
+ assert out.errorbars
+ assert np.isfinite(out.params['amp'].correl['period'])
+
+ # the lnprob array should be the same as the chain size
+ assert np.size(out.chain)//out.nvarys == np.size(out.lnprob)
+
+ # test chain output shapes
+ print(out.lnprob.shape, out.chain.shape, out.flatchain.shape)
+ assert out.lnprob.shape == (7, 10)
+ assert out.chain.shape == (7, 10, 4)
+ assert out.flatchain.shape == (70, 4)
+
+ def test_emcee_float(self):
+ # test that it works if the residuals returns a float, not a vector
+ if not HAS_EMCEE:
+ return True
+
+ def resid(pars, x, data=None):
+ return -0.5 * np.sum(self.residual(pars, x, data=data)**2)
+
+ # just return chi2
+ def resid2(pars, x, data=None):
+ return np.sum(self.residual(pars, x, data=data)**2)
+
+ self.mini.userfcn = resid
+ np.random.seed(123456)
+ out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10)
+ check_paras(out.params, self.p_true, sig=3)
+
+ self.mini.userfcn = resid2
+ np.random.seed(123456)
+ out = self.mini.emcee(nwalkers=100, steps=200,
+ burn=50, thin=10, float_behavior='chi2')
+ check_paras(out.params, self.p_true, sig=3)
+
+ def test_emcee_seed(self):
+ # test emcee seeding can reproduce a sampling run
+ if not HAS_EMCEE:
+ return True
+
+ out = self.mini.emcee(params=self.fit_params,
+ nwalkers=100,
+ steps=1, seed=1)
+ out2 = self.mini.emcee(params=self.fit_params,
+ nwalkers=100,
+ steps=1, seed=1)
+
+ assert_almost_equal(out.chain, out2.chain)
+
+ def test_emcee_ntemps(self):
+ # check for DeprecationWarning when using ntemps > 1
+ if not HAS_EMCEE:
+ return True
+
+ with pytest.raises(DeprecationWarning):
+ _ = self.mini.emcee(params=self.fit_params, ntemps=5)
+
+ def test_emcee_custom_pool(self):
+ # tests use of a custom pool
+
+ if not HAS_EMCEE:
+ return True
+
+ global emcee_counter
+ emcee_counter = 0
+
+ class my_pool:
+ def map(self, f, arg):
+ global emcee_counter
+ emcee_counter += 1
+ return map(f, arg)
+
+ def cost_fun(params, **kwargs):
+ return rosen_der([params['a'], params['b']])
+
+ params = Parameters()
+ params.add('a', 1, min=-5, max=5, vary=True)
+ params.add('b', 1, min=-5, max=5, vary=True)
+
+ fitter = Minimizer(cost_fun, params)
+ fitter.emcee(workers=my_pool(), steps=1000, nwalkers=100)
+ assert emcee_counter > 500
+
+
+def residual_for_multiprocessing(pars, x, data=None):
+ # a residual function defined in the top level is needed for
+ # multiprocessing. bound methods don't work.
+ amp = pars['amp']
+ per = pars['period']
+ shift = pars['shift']
+ decay = pars['decay']
+
+ if abs(shift) > pi/2:
+ shift = shift - np.sign(shift) * pi
+ model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return model - data
diff --git a/tests/test_pandas.py b/tests/test_pandas.py
new file mode 100644
index 0000000..71af67a
--- /dev/null
+++ b/tests/test_pandas.py
@@ -0,0 +1,35 @@
+"""Tests for using data in pandas.[DataFrame|Series]."""
+import os
+
+import numpy as np
+import pytest
+
+import lmfit
+
+pandas = pytest.importorskip('pandas')
+
+
+def test_pandas_guess_from_peak():
+ """Regression test for failure in guess_from_peak with pandas (GH #629)."""
+ data = pandas.read_csv(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'peak.csv'))
+ xdat, ydat = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'peak.csv'),
+ unpack=True, skiprows=1, delimiter=',')
+
+ model = lmfit.models.LorentzianModel()
+ guess_pd = model.guess(data['y'], x=data['x'])
+ guess = model.guess(ydat, x=xdat)
+
+ assert guess_pd == guess
+
+
+def test_pandas_Voigt_model():
+ """Regression test for Series.real reported in GH Issues 727."""
+ data = pandas.read_csv(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'peak.csv'))
+ model = lmfit.models.VoigtModel()
+ params = model.make_params()
+ fit = model.fit(data['y'], params, x=data['x'])
+
+ assert fit.success
diff --git a/tests/test_parameter.py b/tests/test_parameter.py
new file mode 100644
index 0000000..a8bf0e3
--- /dev/null
+++ b/tests/test_parameter.py
@@ -0,0 +1,580 @@
+"""Tests for the Parameter class."""
+
+from math import trunc
+import pickle
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+
+import lmfit
+
+
+@pytest.fixture
+def parameters():
+ """Initialize a Parameters class for tests."""
+ pars = lmfit.Parameters()
+ pars.add(lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0,
+ max=100.0, expr=None, brute_step=5.0,
+ user_data=1))
+ pars.add(lmfit.Parameter(name='b', value=0.0, vary=True, min=-250.0,
+ max=250.0, expr="2.0*a", brute_step=25.0,
+ user_data=2.5))
+ exp_attr_values_A = ('a', 10.0, True, -100.0, 100.0, None, 5.0, 1)
+ exp_attr_values_B = ('b', 20.0, False, -250.0, 250.0, "2.0*a", 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+ return pars, exp_attr_values_A, exp_attr_values_B
+
+
+@pytest.fixture
+def parameter():
+ """Initialize parameter for tests."""
+ param = lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0,
+ max=100.0, expr=None, brute_step=5.0, user_data=1)
+ expected_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(param, expected_attribute_values)
+ return param, expected_attribute_values
+
+
+def assert_parameter_attributes(par, expected):
+ """Assert that parameter attributes have the expected values."""
+ par_attr_values = (par.name, par._val, par.vary, par.min, par.max,
+ par._expr, par.brute_step, par.user_data)
+ assert par_attr_values == expected
+
+
+in_out = [(lmfit.Parameter(name='a'), # set name
+ ('a', -np.inf, True, -np.inf, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', value=10.0), # set value
+ ('a', 10.0, True, -np.inf, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', vary=False), # fix parameter, set vary to False
+ ('a', -np.inf, False, -np.inf, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', min=-10.0), # set lower bound, value reset to min
+ ('a', -10.0, True, -10.0, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', value=-5.0, min=-10.0), # set lower bound
+ ('a', -5.0, True, -10.0, np.inf, None, None, None)),
+ (lmfit.Parameter(name='a', max=10.0), # set upper bound
+ ('a', -np.inf, True, -np.inf, 10.0, None, None, None)),
+ (lmfit.Parameter(name='a', value=25.0, max=10.0), # set upper bound, value reset
+ ('a', 10.0, True, -np.inf, 10.0, None, None, None)),
+          (lmfit.Parameter(name='a', expr="2.0*10.0"),  # set expression; __init__ leaves vary True
+ ('a', -np.inf, True, -np.inf, np.inf, '2.0*10.0', None, None)),
+ (lmfit.Parameter(name='a', brute_step=0.1), # set brute_step
+ ('a', -np.inf, True, -np.inf, np.inf, None, 0.1, None)),
+ (lmfit.Parameter(name='a', user_data={'b': {}}), # set user_data
+ ('a', -np.inf, True, -np.inf, np.inf, None, None, {'b': {}}))]
+
+
+@pytest.mark.parametrize('par, attr_values', in_out)
+def test_initialize_Parameter(par, attr_values):
+ """Test the initialization of the Parameter class."""
+ assert_parameter_attributes(par, attr_values)
+
+ # check for other default attributes
+ for attribute in ['_expr', '_expr_ast', '_expr_eval', '_expr_deps',
+ '_delay_asteval', 'stderr', 'correl', 'from_internal',
+ '_val']:
+ assert hasattr(par, attribute)
+
+
+def test_Parameter_no_name():
+ """Test for Parameter name, now required positional argument."""
+ msg = r"missing 1 required positional argument: 'name'"
+ with pytest.raises(TypeError, match=msg):
+ lmfit.Parameter()
+
+
+def test_init_bounds():
+ """Tests to make sure that initial bounds are consistent.
+
+ Only for specific cases not tested above with the initializations of the
+ Parameter class.
+
+ """
+ # test 1: min > max; should swap min and max
+ par = lmfit.Parameter(name='a', value=0.0, min=10.0, max=-10.0)
+ assert par.min == -10.0
+ assert par.max == 10.0
+
+ # test 2: min == max; should raise a ValueError
+ msg = r"Parameter 'a' has min == max"
+ with pytest.raises(ValueError, match=msg):
+ par = lmfit.Parameter(name='a', value=0.0, min=10.0, max=10.0)
+
+    # FIXME: ideally it should never be possible to end up in this state;
+    # perhaps min and max should get setter methods guarding against None?
+ # test 3: max or min is equal to None
+ par.min = None
+ par._init_bounds()
+ assert par.min == -np.inf
+
+ par.max = None
+ par._init_bounds()
+ assert par.max == np.inf
+
+
+def test_parameter_set_value(parameter):
+ """Test the Parameter.set() function with value."""
+ par, initial_attribute_values = parameter
+
+ par.set(value=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(value=5.0)
+ changed_attribute_values = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+ # check if set value works with new bounds, see issue#636
+ par.set(value=500.0, min=400, max=600)
+ changed_attribute_values2 = ('a', 500.0, True, 400.0, 600.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values2)
+
+
+def test_parameter_set_vary(parameter):
+ """Test the Parameter.set() function with vary."""
+ par, initial_attribute_values = parameter
+
+ par.set(vary=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(vary=False)
+ changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_min(parameter):
+ """Test the Parameter.set() function with min."""
+ par, initial_attribute_values = parameter
+
+ par.set(min=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(min=-50.0)
+ changed_attribute_values = ('a', 10.0, True, -50.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_max(parameter):
+ """Test the Parameter.set() function with max."""
+ par, initial_attribute_values = parameter
+
+ par.set(max=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(max=50.0)
+ changed_attribute_values = ('a', 10.0, True, -100.0, 50.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameter_set_expr(parameter):
+ """Test the Parameter.set() function with expr.
+
+ Of note, this only tests for setting/removal of the expression; nothing
+ else gets evaluated here... More specific tests that require a Parameters
+ class can be found below.
+
+ """
+ par, _ = parameter
+
+ par.set(expr='2.0*50.0') # setting an expression, vary --> False
+ changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, '2.0*50.0',
+ 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+ par.set(expr=None) # nothing should change
+ assert_parameter_attributes(par, changed_attribute_values)
+
+ par.set(expr='') # should remove the expression
+ changed_attribute_values = ('a', 10.0, False, -100.0, 100.0, None, 5.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_parameters_set_value_with_expr(parameters):
+ """Test the Parameter.set() function with value in presence of expr."""
+ pars, _, _ = parameters
+
+ pars['a'].set(value=5.0)
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_A = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1)
+ changed_attr_values_B = ('b', 10.0, False, -250.0, 250.0, "2.0*a", 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], changed_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+    # with an expression present, setting a value removes the expression
+    # but leaves vary=False
+ pars['b'].set(value=1.0)
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_A = ('a', 5.0, True, -100.0, 100.0, None, 5.0, 1)
+ changed_attr_values_B = ('b', 1.0, False, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], changed_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+
+def test_parameters_set_vary_with_expr(parameters):
+ """Test the Parameter.set() function with vary in presence of expr."""
+ pars, init_attr_values_A, _ = parameters
+
+ pars['b'].set(vary=True) # expression should get cleared
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_B = ('b', 20.0, True, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], init_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+
+def test_parameters_set_expr(parameters):
+ """Test the Parameter.set() function with expr."""
+ pars, init_attr_values_A, init_attr_values_B = parameters
+
+ pars['b'].set(expr=None) # nothing should change
+ pars.update_constraints() # update constraints/expressions
+ assert_parameter_attributes(pars['a'], init_attr_values_A)
+ assert_parameter_attributes(pars['b'], init_attr_values_B)
+
+ pars['b'].set(expr='') # expression should get cleared, vary still False
+ pars.update_constraints() # update constraints/expressions
+ changed_attr_values_B = ('b', 20.0, False, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], init_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+ pars['a'].set(expr="b/4.0") # expression should be set, vary --> False
+ pars.update_constraints()
+ changed_attr_values_A = ('a', 5.0, False, -100.0, 100.0, "b/4.0", 5.0, 1)
+ changed_attr_values_B = ('b', 20.0, False, -250.0, 250.0, None, 25.0, 2.5)
+ assert_parameter_attributes(pars['a'], changed_attr_values_A)
+ assert_parameter_attributes(pars['b'], changed_attr_values_B)
+
+
+def test_parameter_set_brute_step(parameter):
+ """Test the Parameter.set() function with brute_step."""
+ par, initial_attribute_values = parameter
+
+ par.set(brute_step=None) # nothing should change
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.set(brute_step=0.0) # brute_step set to None
+ changed_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, None, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+ par.set(brute_step=1.0)
+ changed_attribute_values = ('a', 10.0, True, -100.0, 100.0, None, 1.0, 1)
+ assert_parameter_attributes(par, changed_attribute_values)
+
+
+def test_getstate(parameter):
+ """Test for the __getstate__ method."""
+ par, _ = parameter
+ assert par.__getstate__() == ('a', 10.0, True, None, -100.0, 100.0, 5.0,
+ None, None, 10, 1)
+
+
+def test_setstate(parameter):
+ """Test for the __setstate__ method."""
+ par, initial_attribute_values = parameter
+ state = par.__getstate__()
+
+ par_new = lmfit.Parameter('new')
+ attributes_new = ('new', -np.inf, True, -np.inf, np.inf, None, None, None)
+ assert_parameter_attributes(par_new, attributes_new)
+
+ par_new.__setstate__(state)
+ assert_parameter_attributes(par_new, initial_attribute_values)
+
+
+def test_parameter_pickle(parameter):
+ """Test that we can pickle a Parameter."""
+ par, _ = parameter
+ pkl = pickle.dumps(par)
+ loaded_par = pickle.loads(pkl)
+
+ assert loaded_par == par
+
+
+def test_repr():
+ """Tests for the __repr__ method."""
+ par = lmfit.Parameter(name='test', value=10.0, min=0.0, max=20.0)
+ assert par.__repr__() == "<Parameter 'test', value=10.0, bounds=[0.0:20.0]>"
+
+ par = lmfit.Parameter(name='test', value=10.0, vary=False)
+ assert par.__repr__() == "<Parameter 'test', value=10.0 (fixed), bounds=[-inf:inf]>"
+
+ par.set(vary=True)
+ par.stderr = 0.1
+ assert par.__repr__() == "<Parameter 'test', value=10.0 +/- 0.1, bounds=[-inf:inf]>"
+
+ par = lmfit.Parameter(name='test', expr='10.0*2.5')
+ assert par.__repr__() == "<Parameter 'test', value=-inf, bounds=[-inf:inf], expr='10.0*2.5'>"
+
+ par = lmfit.Parameter(name='test', brute_step=0.1)
+ assert par.__repr__() == "<Parameter 'test', value=-inf, bounds=[-inf:inf], brute_step=0.1>"
+
+
+def test_setup_bounds_and_scale_gradient_methods():
+ """Tests for the setup_bounds and scale_gradient methods.
+
+    Make use of the MINUIT-style transformation to obtain the Parameter
+ values and scaling factor for the gradient.
+ See: https://lmfit.github.io/lmfit-py/bounds.html
+
+ """
+ # situation 1: no bounds
+ par_no_bounds = lmfit.Parameter('no_bounds', value=10.0)
+ assert_allclose(par_no_bounds.setup_bounds(), 10.0)
+ assert_allclose(par_no_bounds.scale_gradient(par_no_bounds.value), 1.0)
+
+ # situation 2: no bounds, min/max set to None after creating the parameter
+ # TODO: ideally this should never happen; perhaps use a setter here
+ par_no_bounds = lmfit.Parameter('no_bounds', value=10.0)
+ par_no_bounds.min = None
+ par_no_bounds.max = None
+ assert_allclose(par_no_bounds.setup_bounds(), 10.0)
+ assert_allclose(par_no_bounds.scale_gradient(par_no_bounds.value), 1.0)
+
+ # situation 3: upper bound
+ par_upper_bound = lmfit.Parameter('upper_bound', value=10.0, max=25.0)
+ assert_allclose(par_upper_bound.setup_bounds(), 15.968719422671311)
+ assert_allclose(par_upper_bound.scale_gradient(par_upper_bound.value),
+ -0.99503719, rtol=1.e-6)
+
+ # situation 4: lower bound
+    par_lower_bound = lmfit.Parameter('lower_bound', value=10.0, min=-25.0)
+ assert_allclose(par_lower_bound.setup_bounds(), 35.98610843)
+ assert_allclose(par_lower_bound.scale_gradient(par_lower_bound.value),
+ 0.995037, rtol=1.e-6)
+
+ # situation 5: both lower and upper bounds
+ par_both_bounds = lmfit.Parameter('both_bounds', value=10.0, min=-25.0,
+ max=25.0)
+ assert_allclose(par_both_bounds.setup_bounds(), 0.4115168460674879)
+ assert_allclose(par_both_bounds.scale_gradient(par_both_bounds.value),
+ -20.976788, rtol=1.e-6)
+
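+# The expected values above follow from the MINUIT-style transforms that the
+# docstring's link describes; a sketch of the formulas, for reference:
+#   upper bound only:  internal = sqrt((max - value + 1)**2 - 1)
+#   lower bound only:  internal = sqrt((value - min + 1)**2 - 1)
+#   both bounds:       internal = arcsin(2*(value - min)/(max - min) - 1)
+# e.g. situation 5: arcsin(2*(10 + 25)/50 - 1) = 0.41151684..., as asserted.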
+
+def test_value_setter(parameter):
+ """Tests for the value setter."""
+ par, initial_attribute_values = parameter
+ assert_parameter_attributes(par, initial_attribute_values)
+
+ par.value = 200.0 # above maximum
+ assert_allclose(par.value, 100.0)
+
+ par.value = -200.0 # below minimum
+ assert_allclose(par.value, -100.0)
+
+ del par._expr_eval
+ par.value = 10.0
+ assert_allclose(par.value, 10.0)
+ assert hasattr(par, '_expr_eval')
+
+
+# Tests for magic methods of the Parameter class
+def test__array__(parameter):
+ """Test the __array__ magic method."""
+ par, _ = parameter
+ assert np.array(par) == np.array(10.0)
+
+
+def test__str__(parameter):
+ """Test the __str__ magic method."""
+ par, _ = parameter
+ assert str(par) == "<Parameter 'a', value=10.0, bounds=[-100.0:100.0], brute_step=5.0>"
+
+
+def test__abs__(parameter):
+ """Test the __abs__ magic method."""
+ par, _ = parameter
+ assert_allclose(abs(par), 10.0)
+ par.set(value=-10.0)
+ assert_allclose(abs(par), 10.0)
+
+
+def test__neg__(parameter):
+ """Test the __neg__ magic method."""
+ par, _ = parameter
+ assert_allclose(-par, -10.0)
+ par.set(value=-10.0)
+ assert_allclose(-par, 10.0)
+
+
+def test__pos__(parameter):
+ """Test the __pos__ magic method."""
+ par, _ = parameter
+ assert_allclose(+par, 10.0)
+ par.set(value=-10.0)
+ assert_allclose(+par, -10.0)
+
+
+def test__bool__(parameter):
+ """Test the __bool__ magic method."""
+ par, _ = parameter
+ assert bool(par)
+
+
+def test__int__(parameter):
+ """Test the __int__ magic method."""
+ par, _ = parameter
+ assert isinstance(int(par), int)
+ assert_allclose(int(par), 10)
+
+
+def test__float__(parameter):
+ """Test the __float__ magic method."""
+ par, _ = parameter
+ par.set(value=5)
+ assert isinstance(float(par), float)
+ assert_allclose(float(par), 5.0)
+
+
+def test__trunc__(parameter):
+ """Test the __trunc__ magic method."""
+ par, _ = parameter
+ par.set(value=10.5)
+ assert isinstance(trunc(par), int)
+ assert_allclose(trunc(par), 10)
+
+
+def test__add__(parameter):
+ """Test the __add__ magic method."""
+ par, _ = parameter
+ assert_allclose(par + 5.25, 15.25)
+
+
+def test__sub__(parameter):
+ """Test the __sub__ magic method."""
+ par, _ = parameter
+ assert_allclose(par - 5.25, 4.75)
+
+
+def test__truediv__(parameter):
+ """Test the __truediv__ magic method."""
+ par, _ = parameter
+ assert_allclose(par / 1.25, 8.0)
+
+
+def test__floordiv__(parameter):
+ """Test the __floordiv__ magic method."""
+ par, _ = parameter
+ par.set(value=5)
+ assert_allclose(par // 2, 2)
+
+
+def test__divmod__(parameter):
+ """Test the __divmod__ magic method."""
+ par, _ = parameter
+ assert_allclose(divmod(par, 3), (3, 1))
+
+
+def test__mod__(parameter):
+ """Test the __mod__ magic method."""
+ par, _ = parameter
+ assert_allclose(par % 2, 0)
+ assert_allclose(par % 3, 1)
+
+
+def test__mul__(parameter):
+ """Test the __mul__ magic method."""
+ par, _ = parameter
+ assert_allclose(par * 2.5, 25.0)
+ assert_allclose(par * -0.1, -1.0)
+
+
+def test__pow__(parameter):
+ """Test the __pow__ magic method."""
+ par, _ = parameter
+ assert_allclose(par ** 0.5, 3.16227766)
+ assert_allclose(par ** 4, 1e4)
+
+
+def test__gt__(parameter):
+ """Test the __gt__ magic method."""
+ par, _ = parameter
+    assert par > 9
+    assert not par > 10
+
+
+def test__ge__(parameter):
+ """Test the __ge__ magic method."""
+ par, _ = parameter
+    assert par >= 9
+    assert par >= 10
+    assert not par >= 11
+
+
+def test__le__(parameter):
+ """Test the __le__ magic method."""
+ par, _ = parameter
+    assert par <= 11
+    assert par <= 10
+    assert not par <= 9
+
+
+def test__lt__(parameter):
+ """Test the __lt__ magic method."""
+ par, _ = parameter
+    assert par < 11
+    assert not par < 10
+
+
+def test__eq__(parameter):
+ """Test the __eq__ magic method."""
+ par, _ = parameter
+ assert par == 10
+ assert not par == 9
+
+
+def test__ne__(parameter):
+ """Test the __ne__ magic method."""
+ par, _ = parameter
+ assert par != 9
+ assert not par != 10
+
+
+def test__radd__(parameter):
+ """Test the __radd__ magic method."""
+ par, _ = parameter
+ assert_allclose(5.25 + par, 15.25)
+
+
+def test__rtruediv__(parameter):
+ """Test the __rtruediv__ magic method."""
+ par, _ = parameter
+ assert_allclose(1.25 / par, 0.125)
+
+
+def test__rdivmod__(parameter):
+ """Test the __rdivmod__ magic method."""
+ par, _ = parameter
+ assert_allclose(divmod(3, par), (0, 3))
+
+
+def test__rfloordiv__(parameter):
+ """Test the __rfloordiv__ magic method."""
+ par, _ = parameter
+ assert_allclose(2 // par, 0)
+ assert_allclose(20 // par, 2)
+
+
+def test__rmod__(parameter):
+ """Test the __rmod__ magic method."""
+ par, _ = parameter
+ assert_allclose(2 % par, 2)
+ assert_allclose(25 % par, 5)
+
+
+def test__rmul__(parameter):
+ """Test the __rmul__ magic method."""
+ par, _ = parameter
+ assert_allclose(2.5 * par, 25.0)
+ assert_allclose(-0.1 * par, -1.0)
+
+
+def test__rpow__(parameter):
+ """Test the __rpow__ magic method."""
+ par, _ = parameter
+ assert_allclose(0.5 ** par, 0.0009765625)
+ assert_allclose(4 ** par, 1048576)
+
+
+def test__rsub__(parameter):
+ """Test the __rsub__ magic method."""
+ par, _ = parameter
+ assert_allclose(5.25 - par, -4.75)
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
new file mode 100644
index 0000000..6fea71d
--- /dev/null
+++ b/tests/test_parameters.py
@@ -0,0 +1,641 @@
+"""Tests for the Parameters class."""
+
+from copy import copy, deepcopy
+import os
+import pickle
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+
+import lmfit
+from lmfit.models import VoigtModel
+
+
+@pytest.fixture
+def parameters():
+ """Initialize a Parameters class for tests."""
+ pars = lmfit.Parameters()
+ pars.add(lmfit.Parameter(name='a', value=10.0, vary=True, min=-100.0,
+ max=100.0, expr=None, brute_step=5.0,
+ user_data=1))
+ pars.add(lmfit.Parameter(name='b', value=0.0, vary=True, min=-250.0,
+ max=250.0, expr="2.0*a", brute_step=25.0,
+ user_data={'test': 123}))
+ exp_attr_values_A = ('a', 10.0, True, -100.0, 100.0, None, 5.0, 1)
+ exp_attr_values_B = ('b', 20.0, False, -250.0, 250.0, "2.0*a", 25.0, {'test': 123})
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+ return pars, exp_attr_values_A, exp_attr_values_B
+
+
+def assert_parameter_attributes(par, expected):
+ """Assert that parameter attributes have the expected values."""
+ par_attr_values = (par.name, par._val, par.vary, par.min, par.max,
+ par._expr, par.brute_step, par.user_data)
+ assert par_attr_values == expected
+
+
+def test_check_ast_errors():
+ """Assert that an exception is raised upon AST errors."""
+ pars = lmfit.Parameters()
+
+ msg = r"at expr='<_?ast.Module object at"
+ with pytest.raises(NameError, match=msg):
+ pars.add('par1', expr='2.0*par2')
+
+
+def test_parameters_init_with_usersyms():
+ """Test for initialization of the Parameters class with usersyms."""
+ pars = lmfit.Parameters(usersyms={'test': np.sin})
+ assert 'test' in pars._asteval.symtable
+
+
+def test_parameters_copy(parameters):
+ """Tests for copying a Parameters class; all use the __deepcopy__ method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ copy_pars = copy(pars)
+ pars_copy = pars.copy()
+ pars__copy__ = pars.__copy__()
+
+ # modifying the original parameters should not modify the copies
+ pars['a'].set(value=100)
+ pars['b'].user_data['test'] = 456
+
+ for copied in [copy_pars, pars_copy, pars__copy__]:
+ assert isinstance(copied, lmfit.Parameters)
+ assert copied != pars
+ assert copied._asteval is not None
+ assert copied._asteval.symtable is not None
+ assert_parameter_attributes(copied['a'], exp_attr_values_A)
+ assert_parameter_attributes(copied['b'], exp_attr_values_B)
+
+
+def test_parameters_deepcopy(parameters):
+ """Tests for deepcopy of a Parameters class."""
+ pars, _, _ = parameters
+
+ deepcopy_pars = deepcopy(pars)
+ assert isinstance(deepcopy_pars, lmfit.Parameters)
+ assert deepcopy_pars == pars
+
+ # check that we can add a symbol to the interpreter
+ pars['b'].expr = 'sin(1)'
+ pars['b'].value = 10
+ assert_allclose(pars['b'].value, np.sin(1))
+ assert_allclose(pars._asteval.symtable['b'], np.sin(1))
+
+ # check that the symbols in the interpreter are still the same after
+ # deepcopying
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+ deepcopy_pars = deepcopy(pars)
+
+ unique_symbols_pars = pars._asteval.user_defined_symbols()
+ unique_symbols_copied = deepcopy_pars._asteval.user_defined_symbols()
+ assert unique_symbols_copied == unique_symbols_pars
+
+ for unique_symbol in unique_symbols_copied:
+ if pars._asteval.symtable[unique_symbol] is not np.nan:
+ assert (pars._asteval.symtable[unique_symbol] ==
+ deepcopy_pars._asteval.symtable[unique_symbol])
+
+
+def test_parameters_deepcopy_subclass():
+ """Test that a subclass of parameters is preserved when performing a deepcopy"""
+ class ParametersSubclass(lmfit.Parameters):
+ pass
+
+ parameters = ParametersSubclass()
+ assert isinstance(parameters, ParametersSubclass)
+
+ parameterscopy = deepcopy(parameters)
+ assert isinstance(parameterscopy, ParametersSubclass)
+
+
+def test_parameters_update(parameters):
+ """Tests for updating a Parameters class."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ msg = r"'test' is not a Parameters object"
+ with pytest.raises(ValueError, match=msg):
+ pars.update('test')
+
+ pars2 = lmfit.Parameters()
+ pars2.add(lmfit.Parameter(name='c', value=7.0, vary=True, min=-70.0,
+ max=70.0, expr=None, brute_step=0.7,
+ user_data=7))
+ exp_attr_values_C = ('c', 7.0, True, -70.0, 70.0, None, 0.7, 7)
+
+ pars_updated = pars.update(pars2)
+
+ assert_parameter_attributes(pars_updated['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars_updated['b'], exp_attr_values_B)
+ assert_parameter_attributes(pars_updated['c'], exp_attr_values_C)
+
+
+def test_parameters__setitem__(parameters):
+ """Tests for __setitem__ method of a Parameters class."""
+ pars, _, exp_attr_values_B = parameters
+
+ msg = r"'10' is not a valid Parameters name"
+ with pytest.raises(KeyError, match=msg):
+ pars.__setitem__('10', None)
+
+ msg = r"'not_a_parameter' is not a Parameter"
+ with pytest.raises(ValueError, match=msg):
+ pars.__setitem__('a', 'not_a_parameter')
+
+ par = lmfit.Parameter('b', value=10, min=-25.0, brute_step=1)
+ pars.__setitem__('b', par)
+
+ exp_attr_values_B = ('b', 10, True, -25.0, np.inf, None, 1, None)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+
+
+def test_parameters__add__(parameters):
+ """Test the __add__ magic method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ msg = r"'other' is not a Parameters object"
+ with pytest.raises(ValueError, match=msg):
+ _ = pars + 'other'
+
+ pars2 = lmfit.Parameters()
+ pars2.add_many(('c', 1., True, None, None, None),
+ ('d', 2., True, None, None, None))
+ exp_attr_values_C = ('c', 1, True, -np.inf, np.inf, None, None, None)
+ exp_attr_values_D = ('d', 2, True, -np.inf, np.inf, None, None, None)
+
+ pars_added = pars + pars2
+
+ assert_parameter_attributes(pars_added['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars_added['b'], exp_attr_values_B)
+ assert_parameter_attributes(pars_added['c'], exp_attr_values_C)
+ assert_parameter_attributes(pars_added['d'], exp_attr_values_D)
+
+
+def test_parameters__iadd__(parameters):
+ """Test the __iadd__ magic method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+
+ msg = r"'other' is not a Parameters object"
+ with pytest.raises(ValueError, match=msg):
+ pars += 'other'
+
+ pars2 = lmfit.Parameters()
+ pars2.add_many(('c', 1., True, None, None, None),
+ ('d', 2., True, None, None, None))
+ exp_attr_values_C = ('c', 1, True, -np.inf, np.inf, None, None, None)
+ exp_attr_values_D = ('d', 2, True, -np.inf, np.inf, None, None, None)
+
+ pars += pars2
+
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+ assert_parameter_attributes(pars['c'], exp_attr_values_C)
+ assert_parameter_attributes(pars['d'], exp_attr_values_D)
+
+
+def test_parameters_add_with_symtable():
+ """Regression test for GitHub Issue 607."""
+ pars1 = lmfit.Parameters()
+ pars1.add('a', value=1.0)
+
+ def half(x):
+ return 0.5*x
+
+ pars2 = lmfit.Parameters(usersyms={"half": half})
+ pars2.add("b", value=3.0)
+ pars2.add("c", expr="half(b)")
+
+ params = pars1 + pars2
+ assert_allclose(params['c'].value, 1.5)
+
+ params = pars2 + pars1
+ assert_allclose(params['c'].value, 1.5)
+
+ params = deepcopy(pars1)
+ params.update(pars2)
+ assert_allclose(params['c'].value, 1.5)
+
+ pars1 += pars2
+    assert_allclose(pars1['c'].value, 1.5)
+
+
+def test_parameters__array__(parameters):
+ """Test the __array__ magic method."""
+ pars, _, _ = parameters
+
+ assert_allclose(np.array(pars), np.array([10.0, 20.0]))
+
+
+def test_parameters__reduce__(parameters):
+ """Test the __reduce__ magic method."""
+ pars, _, _ = parameters
+ reduced = pars.__reduce__()
+
+ assert isinstance(reduced[2], dict)
+ assert 'unique_symbols' in reduced[2].keys()
+ assert reduced[2]['unique_symbols']['b'] == 20
+ assert 'params' in reduced[2].keys()
+ assert isinstance(reduced[2]['params'][0], lmfit.Parameter)
+
+
+def test_parameters__setstate__(parameters):
+ """Test the __setstate__ magic method."""
+ pars, exp_attr_values_A, exp_attr_values_B = parameters
+ reduced = pars.__reduce__()
+
+ pars_setstate = lmfit.Parameters()
+ pars_setstate.__setstate__(reduced[2])
+
+ assert isinstance(pars_setstate, lmfit.Parameters)
+ assert_parameter_attributes(pars_setstate['a'], exp_attr_values_A)
+ assert_parameter_attributes(pars_setstate['b'], exp_attr_values_B)
+
+
+def test_pickle_parameters():
+ """Test that we can pickle a Parameters object."""
+ p = lmfit.Parameters()
+ p.add('a', 10, True, 0, 100)
+ p.add('b', 10, True, 0, 100, 'a * sin(1)')
+ p.update_constraints()
+ p._asteval.symtable['abc'] = '2 * 3.142'
+
+ pkl = pickle.dumps(p, -1)
+ q = pickle.loads(pkl)
+
+ q.update_constraints()
+ assert p == q
+ assert p is not q
+
+ # now test if the asteval machinery survived
+ assert q._asteval.symtable['abc'] == '2 * 3.142'
+
+ # check that unpickling of Parameters is not affected by expr that
+ # refer to Parameter that are added later on. In the following
+ # example var_0.expr refers to var_1, which is a Parameter later
+ # on in the Parameters dictionary.
+ p = lmfit.Parameters()
+ p.add('var_0', value=1)
+ p.add('var_1', value=2)
+ p['var_0'].expr = 'var_1'
+ pkl = pickle.dumps(p)
+ q = pickle.loads(pkl)
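+
+    # a light sanity check (assumes the deferred expression survives the
+    # round-trip): after rebuilding constraints, var_0 should track var_1
+    q.update_constraints()
+    assert q['var_0'].value == 2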
+
+
+def test_parameters_eval(parameters):
+ """Test the eval method."""
+ pars, _, _ = parameters
+ evaluated = pars.eval('10.0*a+b')
+ assert_allclose(evaluated, 120)
+
+ # check that eval() works with usersyms and parameter values
+ def myfun(x):
+ return 2.0 * x
+
+ pars2 = lmfit.Parameters(usersyms={"myfun": myfun})
+ pars2.add('a', value=4.0)
+ pars2.add('b', value=3.0)
+ assert_allclose(pars2.eval('myfun(2.0) * a'), 16)
+ assert_allclose(pars2.eval('b / myfun(3.0)'), 0.5)
+
+
+def test_parameters_pretty_repr(parameters):
+ """Test the pretty_repr method."""
+ pars, _, _ = parameters
+ output = pars.pretty_repr()
+ output_oneline = pars.pretty_repr(oneline=True)
+
+ split_output = output.split('\n')
+ assert len(split_output) == 5
+ assert 'Parameters' in split_output[0]
+ assert "Parameter 'a'" in split_output[1]
+ assert "Parameter 'b'" in split_output[2]
+
+ oneliner = ("Parameters([('a', <Parameter 'a', value=10.0, "
+ "bounds=[-100.0:100.0], brute_step=5.0>), ('b', <Parameter "
+ "'b', value=20.0, bounds=[-250.0:250.0], expr='2.0*a', "
+ "brute_step=25.0>)])")
+ assert output_oneline == oneliner
+
+
+def test_parameters_pretty_print(parameters, capsys):
+ """Test the pretty_print method."""
+ pars, _, _ = parameters
+
+ # oneliner
+ pars.pretty_print(oneline=True)
+ captured = capsys.readouterr()
+ oneliner = ("Parameters([('a', <Parameter 'a', value=10.0, "
+ "bounds=[-100.0:100.0], brute_step=5.0>), ('b', <Parameter "
+ "'b', value=20.0, bounds=[-250.0:250.0], expr='2.0*a', "
+ "brute_step=25.0>)])")
+ assert oneliner in captured.out
+
+ # default
+ pars.pretty_print()
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert len(captured_split) == 4
+ header = ('Name Value Min Max Stderr Vary '
+ 'Expr Brute_Step')
+ assert captured_split[0] == header
+
+ # specify columnwidth
+ pars.pretty_print(colwidth=12)
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ header = ('Name Value Min Max Stderr '
+ ' Vary Expr Brute_Step')
+ assert captured_split[0] == header
+
+ # specify columns
+ pars['a'].stderr = 0.01
+ pars.pretty_print(columns=['value', 'min', 'max', 'stderr'])
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert captured_split[0] == 'Name Value Min Max Stderr'
+ assert captured_split[1] == 'a 10 -100 100 0.01'
+ assert captured_split[2] == 'b 20 -250 250 None'
+
+ # specify fmt
+ pars.pretty_print(fmt='e', columns=['value', 'min', 'max'])
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert captured_split[0] == 'Name Value Min Max'
+ assert captured_split[1] == 'a 1.0000e+01 -1.0000e+02 1.0000e+02'
+ assert captured_split[2] == 'b 2.0000e+01 -2.5000e+02 2.5000e+02'
+
+ # specify precision
+ pars.pretty_print(precision=2, fmt='e', columns=['value', 'min', 'max'])
+ captured = capsys.readouterr()
+ captured_split = captured.out.split('\n')
+ assert captured_split[0] == 'Name Value Min Max'
+ assert captured_split[1] == 'a 1.00e+01 -1.00e+02 1.00e+02'
+ assert captured_split[2] == 'b 2.00e+01 -2.50e+02 2.50e+02'
+
+
+def test_parameters__repr_html_(parameters):
+ """Test _repr_html method to generate HTML table for Parameters class."""
+ pars, _, _ = parameters
+ repr_html = pars._repr_html_()
+
+ assert isinstance(repr_html, str)
+ assert '<table><tr><th> name </th><th> value </th>' in repr_html
+
+
+def test_parameters_add():
+ """Tests for adding a Parameter to the Parameters class."""
+ pars = lmfit.Parameters()
+ pars_from_par = lmfit.Parameters()
+
+ pars.add('a')
+ exp_attr_values_A = ('a', -np.inf, True, -np.inf, np.inf, None, None, None)
+ assert_parameter_attributes(pars['a'], exp_attr_values_A)
+
+ pars_from_par.add(lmfit.Parameter('a'))
+ assert pars_from_par == pars
+
+ pars.add('b', value=1, vary=False, min=-5.0, max=5.0, brute_step=0.1)
+ exp_attr_values_B = ('b', 1.0, False, -5.0, 5.0, None, 0.1, None)
+ assert_parameter_attributes(pars['b'], exp_attr_values_B)
+
+ pars_from_par.add(lmfit.Parameter('b', value=1, vary=False, min=-5.0,
+ max=5.0, brute_step=0.1))
+ assert pars_from_par == pars
+
+
+def test_add_params_expr_outoforder():
+ """Regression test for GitHub Issue 560."""
+ params1 = lmfit.Parameters()
+ params1.add("a", value=1.0)
+
+ params2 = lmfit.Parameters()
+ params2.add("b", value=1.0)
+ params2.add("c", value=2.0)
+ params2['b'].expr = 'c/2'
+
+ params = params1 + params2
+ assert 'b' in params
+ assert_allclose(params['b'].value, 1.0)
+
+
+def test_parameters_add_many():
+ """Tests for add_many method."""
+ a = lmfit.Parameter('a', 1)
+ b = lmfit.Parameter('b', 2)
+
+ par = lmfit.Parameters()
+ par.add_many(a, b)
+
+ par_with_tuples = lmfit.Parameters()
+ par_with_tuples.add_many(('a', 1), ('b', 2))
+
+ assert list(par.keys()) == ['a', 'b']
+ assert par == par_with_tuples
+
+
+def test_parameters_valuesdict(parameters):
+ """Test for valuesdict method."""
+ pars, _, _ = parameters
+ vals_dict = pars.valuesdict()
+
+ assert isinstance(vals_dict, dict)
+ assert_allclose(vals_dict['a'], pars['a'].value)
+ assert_allclose(vals_dict['b'], pars['b'].value)
+
+
+def test_dumps_loads_parameters(parameters):
+ """Test for dumps and loads methods for a Parameters class."""
+ pars, _, _ = parameters
+
+ dumps = pars.dumps()
+ assert isinstance(dumps, str)
+ newpars = lmfit.Parameters().loads(dumps)
+ assert newpars == pars
+
+ newpars['a'].value = 100.0
+ assert_allclose(newpars['b'].value, 200.0)
+
+
+def test_dump_load_parameters(parameters):
+ """Test for dump and load methods for a Parameters class."""
+ pars, _, _ = parameters
+
+ with open('parameters.sav', 'w') as outfile:
+ pars.dump(outfile)
+
+ with open('parameters.sav') as infile:
+ newpars = pars.load(infile)
+
+ assert newpars == pars
+ newpars['a'].value = 100.0
+ assert_allclose(newpars['b'].value, 200.0)
+
+
+def test_dumps_loads_parameters_usersyms():
+ """Test for dumps/loads methods for a Parameters class with usersyms."""
+ def half(x):
+ return 0.5*x
+
+ pars = lmfit.Parameters(usersyms={"half": half, 'my_func': np.sqrt})
+ pars.add(lmfit.Parameter(name='a', value=9.0, min=-100.0, max=100.0))
+ pars.add(lmfit.Parameter(name='b', value=100.0, min=-250.0, max=250.0))
+ pars.add("c", expr="half(b) + my_func(a)")
+
+ dumps = pars.dumps()
+ assert isinstance(dumps, str)
+ assert '"half": {' in dumps
+ assert '"my_func": {' in dumps
+
+ newpars = lmfit.Parameters().loads(dumps)
+ assert 'half' in newpars._asteval.symtable
+ assert 'my_func' in newpars._asteval.symtable
+ assert_allclose(newpars['a'].value, 9.0)
+ assert_allclose(newpars['b'].value, 100.0)
+
+    # within the pytest environment the encoding of the function 'half' does
+    # not work correctly: its repr changes from "<function half at 0x?????????>"
+    # to "<function test_dumps_loads_parameters_usersyms.<locals>.half at 0x?????????>",
+    # which results in the "importer" being set to None, so the final
+    # "decode4js" cannot reconstruct the function.
+    #
+    # Note that this is only an issue within the pytest framework; it works
+    # correctly in a normal Python interpreter. It is also not an issue when
+    # dill is installed, in which case the two asserts below will pass.
+ if lmfit.jsonutils.HAS_DILL:
+ assert newpars == pars
+ assert_allclose(newpars['c'].value, 53.0)
+
+
+def test_parameters_expr_and_constraints():
+ """Regression tests for GitHub Issue #265. Test that parameters are re-
+ evaluated if they have bounds and expr.
+
+ """
+ p = lmfit.Parameters()
+ p.add(lmfit.Parameter('a', 10, True))
+ p.add(lmfit.Parameter('b', 10, True, 0, 20))
+
+ assert_allclose(p['b'].min, 0)
+ assert_allclose(p['b'].max, 20)
+
+ p['a'].expr = '2 * b'
+ assert_allclose(p['a'].value, 20)
+
+ p['b'].value = 15
+ assert_allclose(p['b'].value, 15)
+ assert_allclose(p['a'].value, 30)
+
+ p['b'].value = 30
+ assert_allclose(p['b'].value, 20)
+ assert_allclose(p['a'].value, 40)
+
+
+def test_parameters_usersyms():
+ """Test for passing usersyms to Parameters()."""
+ def myfun(x):
+ return x**3
+
+ params = lmfit.Parameters(usersyms={"myfun": myfun})
+ params.add("a", value=2.3)
+ params.add("b", expr="myfun(a)")
+
+ np.random.seed(2020)
+ xx = np.linspace(0, 1, 10)
+ yy = 3 * xx + np.random.normal(scale=0.002, size=xx.size)
+
+ model = lmfit.Model(lambda x, a: a * x)
+ result = model.fit(yy, params=params, x=xx)
+ assert_allclose(result.params['a'].value, 3.0, rtol=1e-3)
+    assert 3 < result.nfev < 300
+
+
+def test_parameters_expr_with_bounds():
+ """Test Parameters using an expression with bounds, without value."""
+ pars = lmfit.Parameters()
+ pars.add('c1', value=0.2)
+ pars.add('c2', value=0.2)
+ pars.add('c3', value=0.2)
+ pars.add('csum', value=0.8)
+
+ # this should not raise TypeError:
+ pars.add('c4', expr='csum-c1-c2-c3', min=0, max=1)
+ assert_allclose(pars['c4'].value, 0.2)
+
+
+def test_invalid_expr_exceptions():
+ """Regression test for GitHub Issue #486: check that an exception is
+ raised for invalid expressions.
+
+ """
+ p1 = lmfit.Parameters()
+ p1.add('t', 2.0, min=0.0, max=5.0)
+ p1.add('x', 10.0)
+
+ with pytest.raises(SyntaxError):
+ p1.add('y', expr='x*t + sqrt(t)/')
+ assert len(p1['y']._expr_eval.error) > 0
+
+ p1.add('y', expr='x*t + sqrt(t)/3.0')
+ p1['y'].set(expr='x*3.0 + t**2')
+ assert 'x*3' in p1['y'].expr
+ assert len(p1['y']._expr_eval.error) == 0
+
+ with pytest.raises(SyntaxError):
+ p1['y'].set(expr='t+')
+ assert len(p1['y']._expr_eval.error) > 0
+ assert_allclose(p1['y'].value, 34.0)
+
+
+def test_create_params():
+ """Tests for create_params() function."""
+ pars1 = lmfit.create_params(a=8, b=9,
+ c=dict(value=3, min=0, max=10),
+ d=dict(expr='a+b/c'),
+ e=dict(value=10000, brute_step=4))
+
+ assert pars1['a'].value == 8
+ assert pars1['b'].value == 9
+ assert pars1['c'].value == 3
+ assert pars1['c'].min == 0
+ assert pars1['c'].max == 10
+ assert pars1['d'].expr == 'a+b/c'
+ assert pars1['d'].value == 11
+ assert pars1['e'].value == 10000
+ assert pars1['e'].brute_step == 4
+
+
+def test_unset_constrained_param():
+ """test 'unsetting' a constrained parameter by
+ just setting `param.vary = True`
+
+ """
+ data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
+ 'examples', 'test_peak.dat'))
+ x = data[:, 0]
+ y = data[:, 1]
+
+ # initial fit
+ mod = VoigtModel()
+ params = mod.guess(y, x=x)
+ out1 = mod.fit(y, params, x=x)
+
+ assert out1.nvarys == 3
+ assert out1.chisqr < 20.0
+
+    # now let gamma vary freely; setting vary=True clears its constraint
+ params['gamma'].vary = True
+ out2 = mod.fit(y, params, x=x)
+
+ assert out2.nvarys == 4
+ assert out2.chisqr < out1.chisqr
+ assert out2.rsquared > out1.rsquared
+ assert out2.params['gamma'].correl['sigma'] < -0.6
diff --git a/tests/test_printfuncs.py b/tests/test_printfuncs.py
new file mode 100644
index 0000000..30314e7
--- /dev/null
+++ b/tests/test_printfuncs.py
@@ -0,0 +1,411 @@
+"""Tests for the print/report functions."""
+import numpy as np
+import pytest
+
+import lmfit
+from lmfit import (Minimizer, Parameters, ci_report, conf_interval, fit_report,
+ report_ci, report_fit)
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel
+from lmfit.printfuncs import (alphanumeric_sort, correl_table,
+ fitreport_html_table, getfloat_attr, gformat)
+
+np.random.seed(0)
+
+
+@pytest.fixture
+def params():
+ """Return a lmfit.Parameters class with initial values."""
+ pars = Parameters()
+ pars.add_many(('a1', 4), ('b', -20.0), ('c1', 3), ('a', 10.0), ('a2', 5),
+ ('b10', 6), ('d', None), ('b01', 8), ('e', 9), ('aa1', 10))
+ return pars
+
+
+@pytest.fixture
+def fitresult():
+ """Return a ModelResult after fitting a randomized Gaussian data set."""
+ x = np.linspace(0, 12, 601)
+ data = gaussian(x, amplitude=36.4, center=6.70, sigma=0.88)
+    data = data + np.random.normal(size=x.size, scale=3.2)
+
+ model = GaussianModel()
+ params = model.make_params(amplitude=50, center=5, sigma=2)
+
+ params['amplitude'].min = 1
+ params['amplitude'].max = 100.0
+ params['sigma'].min = 0
+ params['sigma'].brute_step = 0.001
+
+ result = model.fit(data, params, x=x)
+ return result
+
+
+@pytest.fixture
+def confidence_interval():
+ """Return the result of the confidence interval (ci) calculation."""
+ def residual(pars, x, data=None):
+ argu = (x*pars['decay'])**2
+ shift = pars['shift']
+ if abs(shift) > np.pi/2:
+ shift = shift - np.sign(shift)*np.pi
+ model = pars['amp']*np.sin(shift + x/pars['period']) * np.exp(-argu)
+ if data is None:
+ return model
+ return model - data
+
+ p_true = Parameters()
+ p_true.add_many(('amp', 14.0), ('period', 5.33), ('shift', 0.123),
+ ('decay', 0.010))
+
+ x = np.linspace(0.0, 250.0, 2500)
+ data = residual(p_true, x) + np.random.normal(scale=0.7215, size=x.size)
+
+ fit_params = Parameters()
+ fit_params.add_many(('amp', 13.0), ('period', 2), ('shift', 0.0),
+ ('decay', 0.02))
+
+ mini = Minimizer(residual, fit_params, fcn_args=(x,),
+ fcn_kws={'data': data})
+ out = mini.leastsq()
+ ci = conf_interval(mini, out)
+ return ci
+
+
+def test_alphanumeric_sort(params):
+ """Test alphanumeric sort of the parameters."""
+ sorted_params = sorted(params, key=alphanumeric_sort)
+ expected = ['a', 'a1', 'a2', 'aa1', 'b', 'b01', 'b10', 'c1', 'd', 'e']
+ assert sorted_params == expected
+
+
+test_data_getfloat_attr = [('a', 'value', '10.0000000'),
+ ('b', 'value', '-20.0000000'),
+ ('c1', 'value', '3'),
+ ('d', 'value', '-inf'),
+ ('e', 'non_existent_attr', 'unknown'),
+ ('aa1', 'test', '(20+5j)')]
+
+
+@pytest.mark.parametrize("par, attr, expected", test_data_getfloat_attr)
+def test_getfloat_attr(params, par, attr, expected):
+ """Test getfloat_attr function."""
+ if par == 'aa1':
+ # add an attribute that is not None, float, or int
+ # This will never occur for Parameter values, but this function is
+ # also used on the MinimizerResult/ModelResult where it could happen.
+ params['aa1'].test = 20+5j
+
+ output = getfloat_attr(params[par], attr)
+ assert output == expected
+
+ if par == 'a':
+ assert len(output) == 10 # leading blank for pos values is stripped
+ elif par == 'b':
+ assert len(output) == 11
+ elif par == 'c1':
+ assert len(output) == 1
+
+
+test_data_gformat = [(-1.25, '-1.25000000'), (1.25, ' 1.25000000'),
+ (-1234567890.1234567890, '-1.2346e+09'),
+ (1234567890.1234567890, ' 1.2346e+09'),
+ (12345.67890e150, ' 1.235e+154')]
+
+
+@pytest.mark.parametrize("test_input, expected", test_data_gformat)
+def test_gformat(test_input, expected):
+ """Test gformat function."""
+ output = gformat(test_input)
+ assert output == expected
+
+
+def test_reports_created(fitresult):
+ """Verify that the fit reports are created and all headers are present."""
+ report_headers = ['[[Model]]', '[[Fit Statistics]]', '[[Variables]]',
+ '[[Correlations]] (unreported correlations are < 0.100)']
+
+ report = fitresult.fit_report()
+ assert len(report) > 500
+ for header in report_headers:
+ assert header in report
+
+ report1 = fit_report(fitresult)
+ for header in report_headers[1:]:
+ assert header in report1
+
+ html_params = fitresult.params._repr_html_()
+ assert len(html_params) > 500
+ assert 'brute' in html_params
+ assert 'standard error' in html_params
+ assert 'relative error' in html_params
+
+ html_report = fitresult._repr_html_()
+ assert len(html_report) > 1000
+ for header in report_headers:
+ header_title = header.replace('[', '').replace(']', '').strip()
+ assert header_title in html_report
+
+
+def test_fitreports_init_values(fitresult):
+ """Verify that initial values are displayed as expected."""
+ fitresult.params['sigma'].init_value = None
+ report_split = fitresult.fit_report().split('\n')
+ indx = [i for i, val in enumerate(report_split) if 'sigma' in val][0]
+ assert '(init = ?)' in report_split[indx]
+
+ indx_center = [i for i, val in enumerate(report_split) if
+ 'center:' in val][0]
+ indx_amplitude = [i for i, val in enumerate(report_split) if
+ 'amplitude:' in val][0]
+ for indx, init_val in zip([indx_center, indx_amplitude], [5, 50]):
+ assert f'(init = {init_val})' in report_split[indx]
+
+
+def test_fitreports_min_correl(fitresult):
+ """Verify that only correlation >= min_correl are displayed."""
+ report = fitresult.fit_report(min_correl=0.6)
+    assert '[[Correlations]]' not in report
+
+ html_report = fitresult._repr_html_(min_correl=0.6)
+ assert 'Correlation' not in html_report
+
+
+def test_fitreports_show_correl(fitresult):
+    """Verify that correlations are not shown when show_correl=False."""
+ report = fitresult.fit_report(show_correl=False)
+    assert '[[Correlations]]' not in report
+
+ html_report = fitresult._repr_html_(show_correl=False)
+ assert 'Correlation' not in html_report
+
+
+def test_fitreports_sort_pars(fitresult):
+ """Test sorting of parameters in the fit report."""
+ # not sorted
+ report_split = fitresult.fit_report(sort_pars=False).split('\n')
+ indx_vars = report_split.index('[[Variables]]')
+ first_par = list(fitresult.params.keys())[0]
+ assert first_par in report_split[indx_vars+1]
+
+ # sorted using default alphanumeric sort
+ report_split = fitresult.fit_report(sort_pars=True).split('\n')
+ indx_vars = report_split.index('[[Variables]]')
+ assert 'amplitude' in report_split[indx_vars+1]
+
+ # sorted using custom sorting algorithm: length of variable name
+ def sort_length(s):
+ return len(s)
+
+ report_split = fitresult.fit_report(sort_pars=sort_length).split('\n')
+ indx_vars = report_split.index('[[Variables]]')
+ assert 'fwhm' in report_split[indx_vars+1]
+
+
+def test_correl_table(fitresult, capsys):
+ """Verify that ``correl_table`` is not empty."""
+ table_lines = correl_table(fitresult.params).split('\n')
+ nvarys = fitresult.nvarys
+
+ assert len(table_lines) == nvarys+4
+ assert len(table_lines[5]) > nvarys*10
+
+
+def test_fit_report_correl_table(fitresult, capsys):
+ """Verify that ``correl_table`` is not empty."""
+ out = fitresult.fit_report(correl_mode='table')
+ assert '[[Correlations]]' in out
+ assert '----+' in out
+
+
+def test_report_fit(fitresult, capsys):
+ """Verify that the fit report is printed when using report_fit."""
+ # report_fit with MinimizerResult/ModelResult as argument gives full
+ # output of fitting results (except for [[Model]])
+ report_fit(fitresult)
+ report_headers = ['[[Fit Statistics]]', '[[Variables]]',
+ '[[Correlations]] (unreported correlations are < 0.100)']
+ captured = capsys.readouterr()
+ for header in report_headers:
+ assert header in captured.out
+
+    # report_fit with a Parameters object as argument gives [[Variables]]
+    # and [[Correlations]]
+    report_fit(fitresult.params)
+ report_headers = ['[[Variables]]',
+ '[[Correlations]] (unreported correlations are < 0.100)']
+ captured = capsys.readouterr()
+ for header in report_headers:
+ assert header in captured.out
+
+
+def test_report_leastsq_no_errorbars(fitresult):
+ """Verify correct message when uncertainties could not be estimated."""
+ # general warning is shown
+ fitresult.errorbars = False
+ report = fitresult.fit_report()
+ assert 'Warning: uncertainties could not be estimated:' in report
+
+ # parameter is at initial value
+ fitresult.params['amplitude'].value = 50.0
+ report = fitresult.fit_report()
+ assert 'amplitude: at initial value' in report
+
+ # parameter is at boundary max/min
+ fitresult.params['amplitude'].value = 100.0
+ report = fitresult.fit_report()
+ assert 'amplitude: at boundary' in report
+
+ fitresult.params['amplitude'].value = 1.0
+ report = fitresult.fit_report()
+ assert 'amplitude: at boundary' in report
+
+
+def test_report_no_errorbars_no_numdifftools(fitresult):
+ """Verify message without numdifftools and not using leastsq/least_squares."""
+ fitresult.fit(method='nelder')
+ lmfit.printfuncs.HAS_NUMDIFFTOOLS = False
+ fitresult.errorbars = False
+ report = fitresult.fit_report()
+ msg = 'this fitting method does not natively calculate uncertainties'
+ assert msg in report
+ assert 'numdifftools' in report
+
+
+def test_report_no_errorbars_with_numdifftools_no_init_value(fitresult):
+ """No TypeError for parameters without initial value when no errorbars.
+
+ Verify that for parameters without an init_value the fit_report() function
+ does not raise a TypeError when comparing if a parameter is at its initial
+ value (if HAS_NUMDIFFTOOLS is True and result.errorbars is False).
+
+ See GitHub Issue 578: https://github.com/lmfit/lmfit-py/issues/578
+
+ """
+ fitresult.fit(method='nelder')
+ lmfit.printfuncs.HAS_NUMDIFFTOOLS = True
+ fitresult.errorbars = False
+ fitresult.params['amplitude'].init_value = None
+ report = fitresult.fit_report()
+ assert 'Warning: uncertainties could not be estimated:' in report
+
+
+def test_report_fixed_parameter(fitresult):
+ """Verify that a fixed parameter is shown correctly."""
+ fitresult.params['center'].vary = False
+ report_split = fitresult.fit_report().split('\n')
+ indx = [i for i, val in enumerate(report_split) if 'center' in val][0]
+ assert '(fixed)' in report_split[indx]
+
+
+def test_report_expression_parameter(fitresult):
+ """Verify that a parameter with expression is shown correctly."""
+ report_split = fitresult.fit_report().split('\n')
+ indices = [i for i, val in enumerate(report_split) if
+ 'fwhm' in val or 'height' in val]
+ for indx in indices:
+ assert '==' in report_split[indx]
+
+ html_params = fitresult.params._repr_html_()
+ assert 'expression' in html_params
+
+
+def test_report_modelpars(fitresult):
+ """Verify that model_values are shown when modelpars are given."""
+ model = GaussianModel()
+ params = model.make_params(amplitude=35, center=7, sigma=0.9)
+ report_split = fitresult.fit_report(modelpars=params).split('\n')
+ indices = [i for i, val in enumerate(report_split) if
+ ('sigma:' in val or 'center:' in val or 'amplitude:' in val)]
+ for indx in indices:
+ assert 'model_value' in report_split[indx]
+
+
+def test_report_parvalue_non_numeric(fitresult):
+ """Verify that a non-numeric value is handled gracefully."""
+ fitresult.params['center'].value = None
+ fitresult.params['center'].stderr = None
+ report = fitresult.fit_report()
+ assert len(report) > 50
+
+
+def test_report_zero_value_spercent(fitresult):
+ """Verify that ZeroDivisionError in spercent calc. gives empty string."""
+ fitresult.params['center'].value = 0
+ fitresult.params['center'].stderr = 0.1
+ report_split = fitresult.fit_report().split('\n')
+ indx = [i for i, val in enumerate(report_split) if 'center:' in val][0]
+ assert '%' not in report_split[indx]
+ assert '%' in report_split[indx+1]
+
+ html_params_split = fitresult.params._repr_html_().split('<tr>')
+ indx = [i for i, val in enumerate(html_params_split) if 'center' in val][0]
+ assert '%' not in html_params_split[indx]
+ assert '%' in html_params_split[indx+1]
+
+
+@pytest.mark.skipif(not lmfit.minimizer.HAS_EMCEE, reason="requires emcee v3")
+def test_spercent_html_table():
+ """Regression test for GitHub Issue #768."""
+ np.random.seed(2021)
+ x = np.random.uniform(size=100)
+ y = x + 0.1 * np.random.uniform(size=x.size)
+
+ def res(par, x, y):
+ return y - par['k'] * x + par['b']
+
+ params = lmfit.Parameters()
+ params.add('b', 0, vary=False)
+ params.add('k', 1)
+
+ fitter = lmfit.Minimizer(res, params, fcn_args=(x, y))
+ fit_res = fitter.minimize(method='emcee', steps=5)
+ fitreport_html_table(fit_res)
+
+
+def test_ci_report(confidence_interval):
+ """Verify that the CI report is created when using ci_report."""
+ report = ci_report(confidence_interval)
+ assert len(report) > 250
+
+ for par in confidence_interval.keys():
+ assert par in report
+
+ for interval in ['99.73', '95.45', '68.27', '_BEST_']:
+ assert interval in report
+
+
+def test_report_ci(confidence_interval, capsys):
+ """Verify that the CI report is printed when using report_ci."""
+ report_ci(confidence_interval)
+ captured = capsys.readouterr()
+
+ assert len(captured.out) > 250
+
+ for par in confidence_interval.keys():
+ assert par in captured.out
+
+ for interval in ['99.73', '95.45', '68.27', '_BEST_']:
+ assert interval in captured.out
+
+
+def test_ci_report_with_offset(confidence_interval):
+ """Verify output of CI report when using with_offset."""
+ report_split = ci_report(confidence_interval,
+ with_offset=True).split('\n') # default
+ amp_values = [abs(float(val)) for val in report_split[1].split()[2:]]
+ assert np.all(np.less(np.delete(amp_values, 3), 0.2))
+
+ report_split = ci_report(confidence_interval,
+ with_offset=False).split('\n')
+ amp_values = [float(val) for val in report_split[1].split()[2:]]
+ assert np.all(np.greater(amp_values, 13))
+
+
+@pytest.mark.parametrize("ndigits", [3, 5, 7])
+def test_ci_report_with_ndigits(confidence_interval, ndigits):
+ """Verify output of CI report when specifying ndigits."""
+ report_split = ci_report(confidence_interval, ndigits=ndigits).split('\n')
+ period_values = list(report_split[2].split()[2:])
+ length = [len(val.split('.')[-1]) for val in period_values]
+ assert np.all(np.equal(length, ndigits))
diff --git a/tests/test_shgo.py b/tests/test_shgo.py
new file mode 100644
index 0000000..1504d38
--- /dev/null
+++ b/tests/test_shgo.py
@@ -0,0 +1,137 @@
+"""Tests for the SHGO global minimization algorithm."""
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+import scipy
+from scipy import __version__ as scipy_version
+
+import lmfit
+
+
+def eggholder(x):
+ return (-(x[1] + 47.0) * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
+ - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0)))))
+
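+# (for reference, the known global minimum of the eggholder function is
+# f(512, 404.2319) = -959.6407, which motivates the +/-512 bounds below)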
+
+def eggholder_lmfit(params):
+ x0 = params['x0'].value
+ x1 = params['x1'].value
+
+ return (-(x1 + 47.0) * np.sin(np.sqrt(abs(x0/2.0 + (x1 + 47.0))))
+ - x0 * np.sin(np.sqrt(abs(x0 - (x1 + 47.0)))))
+
+
+def test_shgo_scipy_vs_lmfit():
+ """Test SHGO algorithm in lmfit versus SciPy."""
+ bounds = [(-512, 512), (-512, 512)]
+ result_scipy = scipy.optimize.shgo(eggholder, bounds, n=30,
+ sampling_method='sobol')
+
+ # in SciPy v1.7.0: "sobol was fixed and is now using scipy.stats.qmc.Sobol"
+ # FIXME: clean this up after we require SciPy >= 1.7.0
+ if int(scipy_version.split('.')[1]) < 7:
+ assert len(result_scipy.xl) == 13
+ else:
+ assert len(result_scipy.xl) == 6
+
+ pars = lmfit.Parameters()
+ pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512))
+ mini = lmfit.Minimizer(eggholder_lmfit, pars)
+ result = mini.minimize(method='shgo', n=30, sampling_method='sobol')
+ out_x = np.array([result.params['x0'].value, result.params['x1'].value])
+
+ assert_allclose(result_scipy.fun, result.residual)
+ assert_allclose(result_scipy.funl, result.shgo_funl)
+ assert_allclose(result_scipy.xl, result.shgo_xl)
+ assert_allclose(result.shgo_x, out_x)
+
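+# note: gating on int(scipy_version.split('.')[1]) would misread a future
+# SciPy 2.0; a sturdier sketch (assuming the 'packaging' module, which is
+# present in most test environments):
+#     from packaging.version import Version
+#     SOBOL_FIXED = Version(scipy_version) >= Version('1.7')
+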
+
+def test_shgo_scipy_vs_lmfit_2():
+ """Test SHGO algorithm in lmfit versus SciPy."""
+ bounds = [(-512, 512), (-512, 512)]
+ result_scipy = scipy.optimize.shgo(eggholder, bounds, n=60, iters=5,
+ sampling_method='sobol')
+
+ # in SciPy v1.7.0: "sobol was fixed and is now using scipy.stats.qmc.Sobol"
+ # FIXME: clean this up after we require SciPy >= 1.7.0
+ if int(scipy_version.split('.')[1]) < 7:
+ assert len(result_scipy.xl) == 39
+ else:
+ assert len(result_scipy.xl) == 74
+
+ pars = lmfit.Parameters()
+ pars.add_many(('x0', 0, True, -512, 512), ('x1', 0, True, -512, 512))
+ mini = lmfit.Minimizer(eggholder_lmfit, pars)
+ result = mini.minimize(method='shgo', n=60, iters=5,
+ sampling_method='sobol')
+ assert_allclose(result_scipy.fun, result.residual)
+ assert_allclose(result_scipy.xl, result.shgo_xl)
+ assert_allclose(result_scipy.funl, result.shgo_funl)
+
+
+# correct result for Alpine02 function
+global_optimum = [7.91705268, 4.81584232]
+fglob = -6.12950
+
+
+def test_shgo_simplicial_Alpine02(minimizer_Alpine02):
+ """Test SHGO algorithm on Alpine02 function."""
+ # sampling_method 'simplicial' fails with iters=1
+ out = minimizer_Alpine02.minimize(method='shgo', iters=5)
+ out_x = np.array([out.params['x0'].value, out.params['x1'].value])
+
+ assert_allclose(out.residual, fglob, rtol=1e-5)
+ assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
+ assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
+ assert out.method == 'shgo'
+
+
+def test_shgo_sobol_Alpine02(minimizer_Alpine02):
+ """Test SHGO algorithm on Alpine02 function."""
+ out = minimizer_Alpine02.minimize(method='shgo', sampling_method='sobol')
+ out_x = np.array([out.params['x0'].value, out.params['x1'].value])
+
+ assert_allclose(out.residual, fglob, rtol=1e-5)
+ assert_allclose(min(out_x), min(global_optimum), rtol=1e-3)
+ assert_allclose(max(out_x), max(global_optimum), rtol=1e-3)
+
+ # FIXME: update when SciPy requirement is >= 1.7
+ if int(scipy_version.split('.')[1]) >= 7:
+ assert out.call_kws['n'] is None
+ else:
+ assert out.call_kws['n'] == 100
+
+
+def test_shgo_bounds(minimizer_Alpine02):
+ """Test SHGO algorithm with bounds."""
+ pars_bounds = lmfit.Parameters()
+ pars_bounds.add_many(('x0', 1., True, 5.0, 15.0),
+ ('x1', 1., True, 2.5, 7.5))
+
+ out = minimizer_Alpine02.minimize(params=pars_bounds, method='shgo')
+ assert 5.0 <= out.params['x0'].value <= 15.0
+ assert 2.5 <= out.params['x1'].value <= 7.5
+
+
+def test_shgo_disp_true(minimizer_Alpine02, capsys):
+ """Test SHGO algorithm with disp is True."""
+ kws = {'disp': True}
+ minimizer_Alpine02.minimize(method='shgo', options=kws)
+ captured = capsys.readouterr()
+ assert 'Splitting first generation' in captured.out
+
+
+def test_shgo_local_solver(minimizer_Alpine02):
+ """Test SHGO algorithm with local solver."""
+ min_kws = {'method': 'unknown'}
+ with pytest.raises(KeyError, match=r'unknown'):
+ minimizer_Alpine02.minimize(method='shgo', minimizer_kwargs=min_kws)
diff --git a/tests/test_stepmodel.py b/tests/test_stepmodel.py
new file mode 100644
index 0000000..47f6dd8
--- /dev/null
+++ b/tests/test_stepmodel.py
@@ -0,0 +1,59 @@
+import numpy as np
+
+from lmfit.models import ConstantModel, StepModel
+
+
+def get_data():
+ np.random.seed(2021)
+ x = np.linspace(0, 10, 201)
+ dat = np.ones_like(x)
+ dat[:48] = 0.0
+ dat[48:77] = np.arange(77-48)/(77.0-48)
+ dat = dat + 5e-2*np.random.randn(len(x))
+ dat = 110.2 * dat + 12.0
+ return x, dat
+
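+# get_data() builds a noisy ramp rising from x ~ 2.4 (index 48) to x ~ 3.85
+# (index 77), scaled to an amplitude of ~110 on an offset of ~12; the center
+# and amplitude bounds asserted below follow from that construction.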
+
+def test_stepmodel_linear():
+ x, y = get_data()
+ stepmod = StepModel(form='linear')
+ const = ConstantModel()
+ pars = stepmod.guess(y, x)
+ pars = pars + const.make_params(c=3*y.min())
+ mod = stepmod + const
+
+ out = mod.fit(y, pars, x=x)
+
+ assert out.nfev > 5
+ assert out.nvarys == 4
+ assert out.chisqr > 1
+ assert out.params['c'].value > 3
+ assert out.params['center'].value > 1
+ assert out.params['center'].value < 4
+ assert out.params['sigma'].value > 0.5
+ assert out.params['sigma'].value < 3.5
+ assert out.params['amplitude'].value > 50
+
+
+def test_stepmodel_erf():
+ x, y = get_data()
+    stepmod = StepModel(form='erf')
+ const = ConstantModel()
+ pars = stepmod.guess(y, x)
+ pars = pars + const.make_params(c=3*y.min())
+ mod = stepmod + const
+
+ out = mod.fit(y, pars, x=x)
+
+ assert out.nfev > 5
+ assert out.nvarys == 4
+ assert out.chisqr > 1
+ assert out.params['c'].value > 3
+ assert out.params['center'].value > 1
+ assert out.params['center'].value < 4
+ assert out.params['amplitude'].value > 50
+ assert out.params['sigma'].value > 0.2
+ assert out.params['sigma'].value < 1.5