summaryrefslogtreecommitdiff
path: root/src/silx/math/fit
diff options
context:
space:
mode:
Diffstat (limited to 'src/silx/math/fit')
-rw-r--r--src/silx/math/fit/__init__.py4
-rw-r--r--src/silx/math/fit/bgtheories.py216
-rw-r--r--src/silx/math/fit/fitmanager.py318
-rw-r--r--src/silx/math/fit/fittheories.py931
-rw-r--r--src/silx/math/fit/fittheory.py37
-rw-r--r--src/silx/math/fit/functions.pyx162
-rw-r--r--src/silx/math/fit/functions/include/functions.h1
-rw-r--r--src/silx/math/fit/functions/src/funs.c98
-rw-r--r--src/silx/math/fit/functions_wrapper.pxd6
-rw-r--r--src/silx/math/fit/leastsq.py289
-rw-r--r--src/silx/math/fit/test/test_bgtheories.py60
-rw-r--r--src/silx/math/fit/test/test_filters.py82
-rw-r--r--src/silx/math/fit/test/test_fit.py264
-rw-r--r--src/silx/math/fit/test/test_fitmanager.py183
-rw-r--r--src/silx/math/fit/test/test_functions.py125
-rw-r--r--src/silx/math/fit/test/test_peaks.py492
16 files changed, 2064 insertions, 1204 deletions
diff --git a/src/silx/math/fit/__init__.py b/src/silx/math/fit/__init__.py
index 7dd6d32..da1b03d 100644
--- a/src/silx/math/fit/__init__.py
+++ b/src/silx/math/fit/__init__.py
@@ -27,9 +27,7 @@ __date__ = "22/06/2016"
from .leastsq import leastsq, chisq_alpha_beta
-from .leastsq import \
- CFREE, CPOSITIVE, CQUOTED, CFIXED, \
- CFACTOR, CDELTA, CSUM
+from .leastsq import CFREE, CPOSITIVE, CQUOTED, CFIXED, CFACTOR, CDELTA, CSUM
from .functions import *
from .filters import *
diff --git a/src/silx/math/fit/bgtheories.py b/src/silx/math/fit/bgtheories.py
index d0f4987..e698927 100644
--- a/src/silx/math/fit/bgtheories.py
+++ b/src/silx/math/fit/bgtheories.py
@@ -1,6 +1,6 @@
-#/*##########################################################################
+# /*##########################################################################
#
-# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
+# Copyright (c) 2004-2023 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -69,10 +69,8 @@ __authors__ = ["P. Knobel"]
__license__ = "MIT"
__date__ = "16/01/2017"
-from collections import OrderedDict
import numpy
-from silx.math.fit.filters import strip, snip1d,\
- savitsky_golay
+from silx.math.fit.filters import strip, snip1d, savitsky_golay
from silx.math.fit.fittheory import FitTheory
CONFIG = {
@@ -84,7 +82,7 @@ CONFIG = {
"StripIterations": 5000,
"StripThresholdFactor": 1.0,
"SnipWidth": 16,
- "EstimatePolyOnStrip": True
+ "EstimatePolyOnStrip": True,
}
# to avoid costly computations when parameters stay the same
@@ -115,9 +113,9 @@ def _convert_anchors_to_indices(x):
of indices is empty, return None.
"""
# convert anchor X abscissa to index
- if CONFIG['AnchorsFlag'] and CONFIG['AnchorsList'] is not None:
+ if CONFIG["AnchorsFlag"] and CONFIG["AnchorsList"] is not None:
anchors_indices = []
- for anchor_x in CONFIG['AnchorsList']:
+ for anchor_x in CONFIG["AnchorsList"]:
if anchor_x <= x[0]:
continue
# take the first index where x > anchor_x
@@ -152,12 +150,13 @@ def strip_bg(x, y0, width, niter):
global _BG_OLD_ANCHORS
global _BG_OLD_ANCHORS_FLAG
- parameters_changed =\
- _BG_STRIP_OLDPARS != [width, niter] or\
- _BG_SMOOTH_OLDWIDTH != CONFIG["SmoothingWidth"] or\
- _BG_SMOOTH_OLDFLAG != CONFIG["SmoothingFlag"] or\
- _BG_OLD_ANCHORS_FLAG != CONFIG["AnchorsFlag"] or\
- _BG_OLD_ANCHORS != CONFIG["AnchorsList"]
+ parameters_changed = (
+ _BG_STRIP_OLDPARS != [width, niter]
+ or _BG_SMOOTH_OLDWIDTH != CONFIG["SmoothingWidth"]
+ or _BG_SMOOTH_OLDFLAG != CONFIG["SmoothingFlag"]
+ or _BG_OLD_ANCHORS_FLAG != CONFIG["AnchorsFlag"]
+ or _BG_OLD_ANCHORS != CONFIG["AnchorsList"]
+ )
# same parameters
if not parameters_changed:
@@ -177,11 +176,13 @@ def strip_bg(x, y0, width, niter):
anchors_indices = _convert_anchors_to_indices(x)
- background = strip(y1,
- w=width,
- niterations=niter,
- factor=CONFIG["StripThresholdFactor"],
- anchors=anchors_indices)
+ background = strip(
+ y1,
+ w=width,
+ niterations=niter,
+ factor=CONFIG["StripThresholdFactor"],
+ anchors=anchors_indices,
+ )
_BG_STRIP_OLDBG = background
@@ -198,12 +199,13 @@ def snip_bg(x, y0, width):
global _BG_OLD_ANCHORS
global _BG_OLD_ANCHORS_FLAG
- parameters_changed =\
- _BG_SNIP_OLDWIDTH != width or\
- _BG_SMOOTH_OLDWIDTH != CONFIG["SmoothingWidth"] or\
- _BG_SMOOTH_OLDFLAG != CONFIG["SmoothingFlag"] or\
- _BG_OLD_ANCHORS_FLAG != CONFIG["AnchorsFlag"] or\
- _BG_OLD_ANCHORS != CONFIG["AnchorsList"]
+ parameters_changed = (
+ _BG_SNIP_OLDWIDTH != width
+ or _BG_SMOOTH_OLDWIDTH != CONFIG["SmoothingWidth"]
+ or _BG_SMOOTH_OLDFLAG != CONFIG["SmoothingFlag"]
+ or _BG_OLD_ANCHORS_FLAG != CONFIG["AnchorsFlag"]
+ or _BG_OLD_ANCHORS != CONFIG["AnchorsList"]
+ )
# same parameters
if not parameters_changed:
@@ -230,14 +232,13 @@ def snip_bg(x, y0, width):
previous_anchor = 0
for anchor_index in anchors_indices:
if (anchor_index > previous_anchor) and (anchor_index < len(y1)):
- background[previous_anchor:anchor_index] =\
- snip1d(y1[previous_anchor:anchor_index],
- width)
- previous_anchor = anchor_index
+ background[previous_anchor:anchor_index] = snip1d(
+ y1[previous_anchor:anchor_index], width
+ )
+ previous_anchor = anchor_index
if previous_anchor < len(y1):
- background[previous_anchor:] = snip1d(y1[previous_anchor:],
- width)
+ background[previous_anchor:] = snip1d(y1[previous_anchor:], width)
_BG_SNIP_OLDBG = background
@@ -250,9 +251,7 @@ def estimate_linear(x, y):
Strip peaks, then perform a linear regression.
"""
- bg = strip_bg(x, y,
- width=CONFIG["StripWidth"],
- niter=CONFIG["StripIterations"])
+ bg = strip_bg(x, y, width=CONFIG["StripWidth"], niter=CONFIG["StripIterations"])
n = float(len(bg))
Sy = numpy.sum(bg)
Sx = float(numpy.sum(x))
@@ -278,8 +277,7 @@ def estimate_strip(x, y):
Return parameters as defined in CONFIG dict,
set constraints to FIXED.
"""
- estimated_par = [CONFIG["StripWidth"],
- CONFIG["StripIterations"]]
+ estimated_par = [CONFIG["StripWidth"], CONFIG["StripIterations"]]
constraints = numpy.zeros((len(estimated_par), 3), numpy.float64)
# code = 3: FIXED
constraints[0][0] = 3
@@ -311,46 +309,37 @@ def poly(x, y, *pars):
def estimate_poly(x, y, deg=2):
- """Estimate polynomial coefficients.
-
- """
+ """Estimate polynomial coefficients."""
# extract bg signal with strip, to estimate polynomial on background
if CONFIG["EstimatePolyOnStrip"]:
- y = strip_bg(x, y,
- CONFIG["StripWidth"],
- CONFIG["StripIterations"])
+ y = strip_bg(x, y, CONFIG["StripWidth"], CONFIG["StripIterations"])
pcoeffs = numpy.polyfit(x, y, deg)
cons = numpy.zeros((deg + 1, 3), numpy.float64)
return pcoeffs, cons
def estimate_quadratic_poly(x, y):
- """Estimate quadratic polynomial coefficients.
- """
+ """Estimate quadratic polynomial coefficients."""
return estimate_poly(x, y, deg=2)
def estimate_cubic_poly(x, y):
- """Estimate cubic polynomial coefficients.
- """
+ """Estimate cubic polynomial coefficients."""
return estimate_poly(x, y, deg=3)
def estimate_quartic_poly(x, y):
- """Estimate degree 4 polynomial coefficients.
- """
+ """Estimate degree 4 polynomial coefficients."""
return estimate_poly(x, y, deg=4)
def estimate_quintic_poly(x, y):
- """Estimate degree 5 polynomial coefficients.
- """
+ """Estimate degree 5 polynomial coefficients."""
return estimate_poly(x, y, deg=5)
def configure(**kw):
- """Update the CONFIG dict
- """
+ """Update the CONFIG dict"""
# inspect **kw to find known keys, update them in CONFIG
for key in CONFIG:
if key in kw:
@@ -359,81 +348,112 @@ def configure(**kw):
return CONFIG
-THEORY = OrderedDict(
- (('No Background',
- FitTheory(
+THEORY = dict(
+ (
+ (
+ "No Background",
+ FitTheory(
description="No background function",
function=lambda x, y0: numpy.zeros_like(x),
parameters=[],
- is_background=True)),
- ('Constant',
- FitTheory(
- description='Constant background',
+ is_background=True,
+ ),
+ ),
+ (
+ "Constant",
+ FitTheory(
+ description="Constant background",
function=lambda x, y0, c: c * numpy.ones_like(x),
- parameters=['Constant', ],
+ parameters=[
+ "Constant",
+ ],
estimate=lambda x, y: ([min(y)], [[0, 0, 0]]),
- is_background=True)),
- ('Linear',
- FitTheory(
- description="Linear background, parameters 'Constant' and"
- " 'Slope'",
+ is_background=True,
+ ),
+ ),
+ (
+ "Linear",
+ FitTheory(
+ description="Linear background, parameters 'Constant' and" " 'Slope'",
function=lambda x, y0, a, b: a + b * x,
- parameters=['Constant', 'Slope'],
+ parameters=["Constant", "Slope"],
estimate=estimate_linear,
configure=configure,
- is_background=True)),
- ('Strip',
- FitTheory(
+ is_background=True,
+ ),
+ ),
+ (
+ "Strip",
+ FitTheory(
description="Compute background using a strip filter\n"
- "Parameters 'StripWidth', 'StripIterations'",
+ "Parameters 'StripWidth', 'StripIterations'",
function=strip_bg,
- parameters=['StripWidth', 'StripIterations'],
+ parameters=["StripWidth", "StripIterations"],
estimate=estimate_strip,
configure=configure,
- is_background=True)),
- ('Snip',
- FitTheory(
+ is_background=True,
+ ),
+ ),
+ (
+ "Snip",
+ FitTheory(
description="Compute background using a snip filter\n"
- "Parameter 'SnipWidth'",
+ "Parameter 'SnipWidth'",
function=snip_bg,
- parameters=['SnipWidth'],
+ parameters=["SnipWidth"],
estimate=estimate_snip,
configure=configure,
- is_background=True)),
- ('Degree 2 Polynomial',
- FitTheory(
+ is_background=True,
+ ),
+ ),
+ (
+ "Degree 2 Polynomial",
+ FitTheory(
description="Quadratic polynomial background, Parameters "
- "'a', 'b' and 'c'\ny = a*x^2 + b*x +c",
+ "'a', 'b' and 'c'\ny = a*x^2 + b*x +c",
function=poly,
- parameters=['a', 'b', 'c'],
+ parameters=["a", "b", "c"],
estimate=estimate_quadratic_poly,
configure=configure,
- is_background=True)),
- ('Degree 3 Polynomial',
- FitTheory(
+ is_background=True,
+ ),
+ ),
+ (
+ "Degree 3 Polynomial",
+ FitTheory(
description="Cubic polynomial background, Parameters "
- "'a', 'b', 'c' and 'd'\n"
- "y = a*x^3 + b*x^2 + c*x + d",
+ "'a', 'b', 'c' and 'd'\n"
+ "y = a*x^3 + b*x^2 + c*x + d",
function=poly,
- parameters=['a', 'b', 'c', 'd'],
+ parameters=["a", "b", "c", "d"],
estimate=estimate_cubic_poly,
configure=configure,
- is_background=True)),
- ('Degree 4 Polynomial',
- FitTheory(
+ is_background=True,
+ ),
+ ),
+ (
+ "Degree 4 Polynomial",
+ FitTheory(
description="Quartic polynomial background\n"
- "y = a*x^4 + b*x^3 + c*x^2 + d*x + e",
+ "y = a*x^4 + b*x^3 + c*x^2 + d*x + e",
function=poly,
- parameters=['a', 'b', 'c', 'd', 'e'],
+ parameters=["a", "b", "c", "d", "e"],
estimate=estimate_quartic_poly,
configure=configure,
- is_background=True)),
- ('Degree 5 Polynomial',
- FitTheory(
+ is_background=True,
+ ),
+ ),
+ (
+ "Degree 5 Polynomial",
+ FitTheory(
description="Quintic polynomial background\n"
- "y = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f",
+ "y = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f",
function=poly,
- parameters=['a', 'b', 'c', 'd', 'e', 'f'],
+ parameters=["a", "b", "c", "d", "e", "f"],
estimate=estimate_quintic_poly,
configure=configure,
- is_background=True))))
+ is_background=True,
+ ),
+ ),
+ )
+)
diff --git a/src/silx/math/fit/fitmanager.py b/src/silx/math/fit/fitmanager.py
index cbb1e34..983cbf7 100644
--- a/src/silx/math/fit/fitmanager.py
+++ b/src/silx/math/fit/fitmanager.py
@@ -1,6 +1,6 @@
# /*#########################################################################
#
-# Copyright (c) 2004-2021 European Synchrotron Radiation Facility
+# Copyright (c) 2004-2023 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -39,7 +39,6 @@ This module deals with:
- providing different background models
"""
-from collections import OrderedDict
import logging
import numpy
from numpy.linalg.linalg import LinAlgError
@@ -82,19 +81,19 @@ class FitManager(object):
uncertainties are set to 1.
:type weight_flag: boolean
"""
+
def __init__(self, x=None, y=None, sigmay=None, weight_flag=False):
- """
- """
+ """ """
self.fitconfig = {
- 'WeightFlag': weight_flag,
- 'fitbkg': 'No Background',
- 'fittheory': None,
+ "WeightFlag": weight_flag,
+ "fitbkg": "No Background",
+ "fittheory": None,
# Next few parameters are defined for compatibility with legacy theories
# which take the background as argument for their estimation function
- 'StripWidth': 2,
- 'StripIterations': 5000,
- 'StripThresholdFactor': 1.0,
- 'SmoothingFlag': False
+ "StripWidth": 2,
+ "StripIterations": 5000,
+ "StripThresholdFactor": 1.0,
+ "SmoothingFlag": False,
}
"""Dictionary of fit configuration parameters.
These parameters can be modified using the :meth:`configure` method.
@@ -109,7 +108,7 @@ class FitManager(object):
algorithm (:func:`silx.math.fit.peak_search`)
"""
- self.theories = OrderedDict()
+ self.theories = {}
"""Dictionary of fit theories, defining functions to be fitted
to individual peaks.
@@ -134,7 +133,7 @@ class FitManager(object):
"""Name of currently selected theory. This name matches a key in
:attr:`theories`."""
- self.bgtheories = OrderedDict()
+ self.bgtheories = {}
"""Dictionary of background theories.
See :attr:`theories` for documentation on theories.
@@ -143,7 +142,7 @@ class FitManager(object):
# Load default theories (constant, linear, strip)
self.loadbgtheories(bgtheories)
- self.selectedbg = 'No Background'
+ self.selectedbg = "No Background"
"""Name of currently selected background theory. This name must be
an existing key in :attr:`bgtheories`."""
@@ -220,10 +219,18 @@ class FitManager(object):
"""
self.bgtheories[bgname] = bgtheory
- def addtheory(self, name, theory=None,
- function=None, parameters=None,
- estimate=None, configure=None, derivative=None,
- description=None, pymca_legacy=False):
+ def addtheory(
+ self,
+ name,
+ theory=None,
+ function=None,
+ parameters=None,
+ estimate=None,
+ configure=None,
+ derivative=None,
+ description=None,
+ pymca_legacy=False,
+ ):
"""Add a new theory to dictionary :attr:`theories`.
You can pass a name and a :class:`FitTheory` object as arguments, or
@@ -267,17 +274,26 @@ class FitManager(object):
estimate=estimate,
configure=configure,
derivative=derivative,
- pymca_legacy=pymca_legacy
+ pymca_legacy=pymca_legacy,
)
else:
- raise TypeError("You must supply a FitTheory object or define " +
- "a fit function and its parameters.")
+ raise TypeError(
+ "You must supply a FitTheory object or define "
+ + "a fit function and its parameters."
+ )
- def addbgtheory(self, name, theory=None,
- function=None, parameters=None,
- estimate=None, configure=None,
- derivative=None, description=None):
+ def addbgtheory(
+ self,
+ name,
+ theory=None,
+ function=None,
+ parameters=None,
+ estimate=None,
+ configure=None,
+ derivative=None,
+ description=None,
+ ):
"""Add a new theory to dictionary :attr:`bgtheories`.
You can pass a name and a :class:`FitTheory` object as arguments, or
@@ -314,12 +330,14 @@ class FitManager(object):
estimate=estimate,
configure=configure,
derivative=derivative,
- is_background=True
+ is_background=True,
)
else:
- raise TypeError("You must supply a FitTheory object or define " +
- "a background function and its parameters.")
+ raise TypeError(
+ "You must supply a FitTheory object or define "
+ + "a background function and its parameters."
+ )
def configure(self, **kw):
"""Configure the current theory by filling or updating the
@@ -394,21 +412,22 @@ class FitManager(object):
update a widget displaying a status message.
:return: Estimated parameters
"""
- self.state = 'Estimate in progress'
+ self.state = "Estimate in progress"
self.chisq = None
if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
-
- CONS = {0: 'FREE',
- 1: 'POSITIVE',
- 2: 'QUOTED',
- 3: 'FIXED',
- 4: 'FACTOR',
- 5: 'DELTA',
- 6: 'SUM',
- 7: 'IGNORE'}
+ callback(data={"chisq": self.chisq, "status": self.state})
+
+ CONS = {
+ 0: "FREE",
+ 1: "POSITIVE",
+ 2: "QUOTED",
+ 3: "FIXED",
+ 4: "FACTOR",
+ 5: "DELTA",
+ 6: "SUM",
+ 7: "IGNORE",
+ }
# Filter-out not finite data
xwork = self.xdata[self._finite_mask]
@@ -421,9 +440,9 @@ class FitManager(object):
try:
fun_params, fun_constraints = self.estimate_fun(xwork, ywork)
except LinAlgError:
- self.state = 'Estimate failed'
+ self.state = "Estimate failed"
if callback is not None:
- callback(data={'status': self.state})
+ callback(data={"status": self.state})
raise
# build the names
@@ -446,7 +465,7 @@ class FitManager(object):
xmin = min(xwork)
xmax = max(xwork)
nb_bg_params = len(bg_params)
- for (pindex, pname) in enumerate(self.parameter_names):
+ for pindex, pname in enumerate(self.parameter_names):
# First come background parameters
if pindex < nb_bg_params:
estimation_value = bg_params[pindex]
@@ -471,24 +490,27 @@ class FitManager(object):
cons1 += nb_bg_params
cons2 = fun_constraints[fun_param_index][2]
- self.fit_results.append({'name': pname,
- 'estimation': estimation_value,
- 'group': group_number,
- 'code': constraint_code,
- 'cons1': cons1,
- 'cons2': cons2,
- 'fitresult': 0.0,
- 'sigma': 0.0,
- 'xmin': xmin,
- 'xmax': xmax})
-
- self.state = 'Ready to Fit'
+ self.fit_results.append(
+ {
+ "name": pname,
+ "estimation": estimation_value,
+ "group": group_number,
+ "code": constraint_code,
+ "cons1": cons1,
+ "cons2": cons2,
+ "fitresult": 0.0,
+ "sigma": 0.0,
+ "xmin": xmin,
+ "xmax": xmax,
+ }
+ )
+
+ self.state = "Ready to Fit"
self.chisq = None
self.niter = 0
if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
+ callback(data={"chisq": self.chisq, "status": self.state})
return numpy.append(bg_params, fun_params)
def fit(self):
@@ -522,11 +544,11 @@ class FitManager(object):
paramlist = self.fit_results
active_params = []
for param in paramlist:
- if param['code'] not in ['IGNORE', 7]:
+ if param["code"] not in ["IGNORE", 7]:
if not estimated:
- active_params.append(param['fitresult'])
+ active_params.append(param["fitresult"])
else:
- active_params.append(param['estimation'])
+ active_params.append(param["estimation"])
# Mask x with not finite (support nD x)
finite_mask = numpy.all(numpy.isfinite(x), axis=tuple(range(1, x.ndim)))
@@ -538,7 +560,8 @@ class FitManager(object):
# Create result with same number as elements as x, filling holes with NaNs
result = numpy.full((x.shape[0],), numpy.nan, dtype=numpy.float64)
result[finite_mask] = self.fitfunction(
- numpy.array(x[finite_mask], copy=True), *active_params)
+ numpy.array(x[finite_mask], copy=True), *active_params
+ )
return result
def get_estimation(self):
@@ -595,14 +618,16 @@ class FitManager(object):
:raise: ImportError if theories cannot be imported
"""
from types import ModuleType
+
if isinstance(theories, ModuleType):
theories_module = theories
else:
# if theories is not a module, it must be a string
- string_types = (basestring,) if sys.version_info[0] == 2 else (str,) # noqa
- if not isinstance(theories, string_types):
- raise ImportError("theory must be a python module, a module" +
- "name or a python filename")
+ if not isinstance(theories, str):
+ raise ImportError(
+                "theory must be a python module, a module "
+                + "name or a python filename"
+ )
# if theories is a filename
if os.path.isfile(theories):
sys.path.append(os.path.dirname(theories))
@@ -654,14 +679,16 @@ class FitManager(object):
:raise: ImportError if theories cannot be imported
"""
from types import ModuleType
+
if isinstance(theories, ModuleType):
theories_module = theories
else:
# if theories is not a module, it must be a string
- string_types = (basestring,) if sys.version_info[0] == 2 else (str,) # noqa
- if not isinstance(theories, string_types):
- raise ImportError("theory must be a python module, a module" +
- "name or a python filename")
+ if not isinstance(theories, str):
+ raise ImportError(
+                "theory must be a python module, a module "
+                + "name or a python filename"
+ )
# if theories is a filename
if os.path.isfile(theories):
sys.path.append(os.path.dirname(theories))
@@ -746,10 +773,14 @@ class FitManager(object):
# default weight
if sigmay is None:
self.sigmay0 = None
- self.sigmay = numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
+ self.sigmay = (
+ numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
+ )
else:
self.sigmay0 = numpy.array(sigmay)
- self.sigmay = numpy.array(sigmay) if self.fitconfig["WeightFlag"] else None
+ self.sigmay = (
+ numpy.array(sigmay) if self.fitconfig["WeightFlag"] else None
+ )
# take the data between limits, using boolean array indexing
if (xmin is not None or xmax is not None) and len(self.xdata):
@@ -761,8 +792,11 @@ class FitManager(object):
self.sigmay = self.sigmay[bool_array] if sigmay is not None else None
self._finite_mask = numpy.logical_and(
- numpy.all(numpy.isfinite(self.xdata), axis=tuple(range(1, self.xdata.ndim))),
- numpy.isfinite(self.ydata))
+ numpy.all(
+ numpy.isfinite(self.xdata), axis=tuple(range(1, self.xdata.ndim))
+ ),
+ numpy.isfinite(self.ydata),
+ )
def enableweight(self):
"""This method can be called to set :attr:`sigmay`. If :attr:`sigmay0` was filled with
@@ -770,7 +804,9 @@ class FitManager(object):
Else, use ``sqrt(self.ydata)``.
"""
if self.sigmay0 is None:
- self.sigmay = numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
+ self.sigmay = (
+ numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
+ )
else:
self.sigmay = self.sigmay0
@@ -822,19 +858,18 @@ class FitManager(object):
"""
# self.dataupdate()
- self.state = 'Fit in progress'
+ self.state = "Fit in progress"
self.chisq = None
if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
+ callback(data={"chisq": self.chisq, "status": self.state})
param_val = []
param_constraints = []
# Initial values are set to the ones computed in estimate()
for param in self.fit_results:
- param_val.append(param['estimation'])
- param_constraints.append([param['code'], param['cons1'], param['cons2']])
+ param_val.append(param["estimation"])
+ param_constraints.append([param["code"], param["cons1"], param["cons2"]])
# Filter-out not finite data
ywork = self.ydata[self._finite_mask]
@@ -842,31 +877,34 @@ class FitManager(object):
try:
params, covariance_matrix, infodict = leastsq(
- self.fitfunction, # bg + actual model function
- xwork, ywork, param_val,
- sigma=self.sigmay,
- constraints=param_constraints,
- model_deriv=self.theories[self.selectedtheory].derivative,
- full_output=True, left_derivative=True)
+ self.fitfunction, # bg + actual model function
+ xwork,
+ ywork,
+ param_val,
+ sigma=self.sigmay,
+ constraints=param_constraints,
+ model_deriv=self.theories[self.selectedtheory].derivative,
+ full_output=True,
+ left_derivative=True,
+ )
except LinAlgError:
- self.state = 'Fit failed'
- callback(data={'status': self.state})
+ self.state = "Fit failed"
+ callback(data={"status": self.state})
raise
- sigmas = infodict['uncertainties']
+ sigmas = infodict["uncertainties"]
for i, param in enumerate(self.fit_results):
- if param['code'] != 'IGNORE':
- param['fitresult'] = params[i]
- param['sigma'] = sigmas[i]
+ if param["code"] != "IGNORE":
+ param["fitresult"] = params[i]
+ param["sigma"] = sigmas[i]
self.chisq = infodict["reduced_chisq"]
self.niter = infodict["niter"]
- self.state = 'Ready'
+ self.state = "Ready"
if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
+ callback(data={"chisq": self.chisq, "status": self.state})
return params, sigmas, infodict
@@ -964,7 +1002,7 @@ class FitManager(object):
"""
estimatefunction = self.theories[self.selectedtheory].estimate
- if hasattr(estimatefunction, '__call__'):
+ if hasattr(estimatefunction, "__call__"):
if not self.theories[self.selectedtheory].pymca_legacy:
return estimatefunction(x, y)
else:
@@ -974,59 +1012,76 @@ class FitManager(object):
else:
if self.fitconfig["SmoothingFlag"]:
y = smooth1d(y)
- bg = strip(y,
- w=self.fitconfig["StripWidth"],
- niterations=self.fitconfig["StripIterations"],
- factor=self.fitconfig["StripThresholdFactor"])
+ bg = strip(
+ y,
+ w=self.fitconfig["StripWidth"],
+ niterations=self.fitconfig["StripIterations"],
+ factor=self.fitconfig["StripThresholdFactor"],
+ )
# fitconfig can be filled by user defined config function
- xscaling = self.fitconfig.get('Xscaling', 1.0)
- yscaling = self.fitconfig.get('Yscaling', 1.0)
+ xscaling = self.fitconfig.get("Xscaling", 1.0)
+ yscaling = self.fitconfig.get("Yscaling", 1.0)
return estimatefunction(x, y, bg, xscaling, yscaling)
else:
- raise TypeError("Estimation function in attribute " +
- "theories[%s]" % self.selectedtheory +
- " must be callable.")
+ raise TypeError(
+ "Estimation function in attribute "
+ + "theories[%s]" % self.selectedtheory
+ + " must be callable."
+ )
def _load_legacy_theories(self, theories_module):
"""Load theories from a custom module in the old PyMca format.
See PyMca5.PyMcaMath.fitting.SpecfitFunctions for an example.
"""
- mandatory_attributes = ["THEORY", "PARAMETERS",
- "FUNCTION", "ESTIMATE"]
+ mandatory_attributes = ["THEORY", "PARAMETERS", "FUNCTION", "ESTIMATE"]
err_msg = "Custom fit function file must define: "
err_msg += ", ".join(mandatory_attributes)
for attr in mandatory_attributes:
if not hasattr(theories_module, attr):
raise ImportError(err_msg)
- derivative = theories_module.DERIVATIVE if hasattr(theories_module, "DERIVATIVE") else None
- configure = theories_module.CONFIGURE if hasattr(theories_module, "CONFIGURE") else None
- estimate = theories_module.ESTIMATE if hasattr(theories_module, "ESTIMATE") else None
+ derivative = (
+ theories_module.DERIVATIVE
+ if hasattr(theories_module, "DERIVATIVE")
+ else None
+ )
+ configure = (
+ theories_module.CONFIGURE if hasattr(theories_module, "CONFIGURE") else None
+ )
+ estimate = (
+ theories_module.ESTIMATE if hasattr(theories_module, "ESTIMATE") else None
+ )
if isinstance(theories_module.THEORY, (list, tuple)):
# multiple fit functions
for i in range(len(theories_module.THEORY)):
deriv = derivative[i] if derivative is not None else None
config = configure[i] if configure is not None else None
estim = estimate[i] if estimate is not None else None
- self.addtheory(theories_module.THEORY[i],
- FitTheory(
- theories_module.FUNCTION[i],
- theories_module.PARAMETERS[i],
- estim,
- config,
- deriv,
- pymca_legacy=True))
+ self.addtheory(
+ theories_module.THEORY[i],
+ FitTheory(
+ theories_module.FUNCTION[i],
+ theories_module.PARAMETERS[i],
+ estim,
+ config,
+ deriv,
+ pymca_legacy=True,
+ ),
+ )
else:
# single fit function
- self.addtheory(theories_module.THEORY,
- FitTheory(
- theories_module.FUNCTION,
- theories_module.PARAMETERS,
- estimate,
- configure,
- derivative,
- pymca_legacy=True))
+ self.addtheory(
+ theories_module.THEORY,
+ FitTheory(
+ theories_module.FUNCTION,
+ theories_module.PARAMETERS,
+ estimate,
+ configure,
+ derivative,
+ pymca_legacy=True,
+ ),
+ )
def test():
@@ -1037,9 +1092,7 @@ def test():
# Create synthetic data with a sum of gaussian functions
x = numpy.arange(1000).astype(numpy.float64)
- p = [1000, 100., 250,
- 255, 690., 45,
- 1500, 800.5, 95]
+ p = [1000, 100.0, 250, 255, 690.0, 45, 1500, 800.5, 95]
y = 0.5 * x + 13 + sum_gauss(x, *p)
# Fitting
@@ -1048,9 +1101,9 @@ def test():
# overlapping peaks at x=690 and x=800.5
fit.setdata(x=x, y=y)
fit.loadtheories(fittheories)
- fit.settheory('Gaussians')
+ fit.settheory("Gaussians")
fit.loadbgtheories(bgtheories)
- fit.setbackground('Linear')
+ fit.setbackground("Linear")
fit.estimate()
fit.runfit()
@@ -1058,8 +1111,8 @@ def test():
print("Obtained parameters : ")
dummy_list = []
for param in fit.fit_results:
- print(param['name'], ' = ', param['fitresult'])
- dummy_list.append(param['fitresult'])
+ print(param["name"], " = ", param["fitresult"])
+ dummy_list.append(param["fitresult"])
print("chisq = ", fit.chisq)
# Plot
@@ -1071,6 +1124,7 @@ def test():
try:
from silx.gui import qt
from silx.gui.plot.PlotWindow import PlotWindow
+
app = qt.QApplication([])
pw = PlotWindow(control=True)
pw.addCurve(x, y, "Original")
diff --git a/src/silx/math/fit/fittheories.py b/src/silx/math/fit/fittheories.py
index 76f2478..f20cbf1 100644
--- a/src/silx/math/fit/fittheories.py
+++ b/src/silx/math/fit/fittheories.py
@@ -1,6 +1,6 @@
-#/*##########################################################################
+# /*##########################################################################
#
-# Copyright (c) 2004-2021 European Synchrotron Radiation Facility
+# Copyright (c) 2004-2023 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -59,10 +59,7 @@ a dictionary :const:`THEORY`: with the following structure::
.. note::
- Consider using an OrderedDict instead of a regular dictionary, when
- defining your own theory dictionary, if the order matters to you.
- This will likely be the case if you intend to load a selection of
- functions in a GUI such as :class:`silx.gui.fit.FitManager`.
+ The order of the provided dictionary is taken into account.
Theory names can be customized (e.g. ``gauss, lorentz, splitgauss``…).
@@ -79,7 +76,6 @@ Module members:
---------------
"""
import numpy
-from collections import OrderedDict
import logging
from silx.math.fit import functions
@@ -96,58 +92,59 @@ __date__ = "15/05/2017"
DEFAULT_CONFIG = {
- 'NoConstraintsFlag': False,
- 'PositiveFwhmFlag': True,
- 'PositiveHeightAreaFlag': True,
- 'SameFwhmFlag': False,
- 'QuotedPositionFlag': False, # peak not outside data range
- 'QuotedEtaFlag': False, # force 0 < eta < 1
+ "NoConstraintsFlag": False,
+ "PositiveFwhmFlag": True,
+ "PositiveHeightAreaFlag": True,
+ "SameFwhmFlag": False,
+ "QuotedPositionFlag": False, # peak not outside data range
+ "QuotedEtaFlag": False, # force 0 < eta < 1
# Peak detection
- 'AutoScaling': False,
- 'Yscaling': 1.0,
- 'FwhmPoints': 8,
- 'AutoFwhm': True,
- 'Sensitivity': 2.5,
- 'ForcePeakPresence': True,
+ "AutoScaling": False,
+ "Yscaling": 1.0,
+ "FwhmPoints": 8,
+ "AutoFwhm": True,
+ "Sensitivity": 2.5,
+ "ForcePeakPresence": True,
# Hypermet
- 'HypermetTails': 15,
- 'QuotedFwhmFlag': 0,
- 'MaxFwhm2InputRatio': 1.5,
- 'MinFwhm2InputRatio': 0.4,
+ "HypermetTails": 15,
+ "QuotedFwhmFlag": 0,
+ "MaxFwhm2InputRatio": 1.5,
+ "MinFwhm2InputRatio": 0.4,
# short tail parameters
- 'MinGaussArea4ShortTail': 50000.,
- 'InitialShortTailAreaRatio': 0.050,
- 'MaxShortTailAreaRatio': 0.100,
- 'MinShortTailAreaRatio': 0.0010,
- 'InitialShortTailSlopeRatio': 0.70,
- 'MaxShortTailSlopeRatio': 2.00,
- 'MinShortTailSlopeRatio': 0.50,
+ "MinGaussArea4ShortTail": 50000.0,
+ "InitialShortTailAreaRatio": 0.050,
+ "MaxShortTailAreaRatio": 0.100,
+ "MinShortTailAreaRatio": 0.0010,
+ "InitialShortTailSlopeRatio": 0.70,
+ "MaxShortTailSlopeRatio": 2.00,
+ "MinShortTailSlopeRatio": 0.50,
# long tail parameters
- 'MinGaussArea4LongTail': 1000.0,
- 'InitialLongTailAreaRatio': 0.050,
- 'MaxLongTailAreaRatio': 0.300,
- 'MinLongTailAreaRatio': 0.010,
- 'InitialLongTailSlopeRatio': 20.0,
- 'MaxLongTailSlopeRatio': 50.0,
- 'MinLongTailSlopeRatio': 5.0,
+ "MinGaussArea4LongTail": 1000.0,
+ "InitialLongTailAreaRatio": 0.050,
+ "MaxLongTailAreaRatio": 0.300,
+ "MinLongTailAreaRatio": 0.010,
+ "InitialLongTailSlopeRatio": 20.0,
+ "MaxLongTailSlopeRatio": 50.0,
+ "MinLongTailSlopeRatio": 5.0,
# step tail
- 'MinGaussHeight4StepTail': 5000.,
- 'InitialStepTailHeightRatio': 0.002,
- 'MaxStepTailHeightRatio': 0.0100,
- 'MinStepTailHeightRatio': 0.0001,
+ "MinGaussHeight4StepTail": 5000.0,
+ "InitialStepTailHeightRatio": 0.002,
+ "MaxStepTailHeightRatio": 0.0100,
+ "MinStepTailHeightRatio": 0.0001,
# Hypermet constraints
# position in range [estimated position +- estimated fwhm/2]
- 'HypermetQuotedPositionFlag': True,
- 'DeltaPositionFwhmUnits': 0.5,
- 'SameSlopeRatioFlag': 1,
- 'SameAreaRatioFlag': 1,
+ "HypermetQuotedPositionFlag": True,
+ "DeltaPositionFwhmUnits": 0.5,
+ "SameSlopeRatioFlag": 1,
+ "SameAreaRatioFlag": 1,
# Strip bg removal
- 'StripBackgroundFlag': True,
- 'SmoothingFlag': True,
- 'SmoothingWidth': 5,
- 'StripWidth': 2,
- 'StripIterations': 5000,
- 'StripThresholdFactor': 1.0}
+ "StripBackgroundFlag": True,
+ "SmoothingFlag": True,
+ "SmoothingWidth": 5,
+ "StripWidth": 2,
+ "StripIterations": 5000,
+ "StripThresholdFactor": 1.0,
+}
"""This dictionary defines default configuration parameters that have effects
on fit functions and estimation functions, mainly on fit constraints.
This dictionary is accessible as attribute :attr:`FitTheories.config`,
@@ -168,6 +165,7 @@ CIGNORED = 7
class FitTheories(object):
"""Class wrapping functions from :class:`silx.math.fit.functions`
and providing estimate functions for all of these fit functions."""
+
def __init__(self, config=None):
if config is None:
self.config = DEFAULT_CONFIG
@@ -189,13 +187,18 @@ class FitTheories(object):
For example, 15 can be expressed as ``1111`` in base 2, so a flag of
15 means all terms are active.
"""
- g_term = self.config['HypermetTails'] & 1
- st_term = (self.config['HypermetTails'] >> 1) & 1
- lt_term = (self.config['HypermetTails'] >> 2) & 1
- step_term = (self.config['HypermetTails'] >> 3) & 1
- return functions.sum_ahypermet(x, *pars,
- gaussian_term=g_term, st_term=st_term,
- lt_term=lt_term, step_term=step_term)
+ g_term = self.config["HypermetTails"] & 1
+ st_term = (self.config["HypermetTails"] >> 1) & 1
+ lt_term = (self.config["HypermetTails"] >> 2) & 1
+ step_term = (self.config["HypermetTails"] >> 3) & 1
+ return functions.sum_ahypermet(
+ x,
+ *pars,
+ gaussian_term=g_term,
+ st_term=st_term,
+ lt_term=lt_term,
+ step_term=step_term,
+ )
def poly(self, x, *pars):
"""Order n polynomial.
@@ -208,51 +211,41 @@ class FitTheories(object):
@staticmethod
def estimate_poly(x, y, n=2):
- """Estimate polynomial coefficients for a degree n polynomial.
-
- """
+ """Estimate polynomial coefficients for a degree n polynomial."""
pcoeffs = numpy.polyfit(x, y, n)
constraints = numpy.zeros((n + 1, 3), numpy.float64)
return pcoeffs, constraints
def estimate_quadratic(self, x, y):
- """Estimate quadratic coefficients
-
- """
+ """Estimate quadratic coefficients"""
return self.estimate_poly(x, y, n=2)
def estimate_cubic(self, x, y):
- """Estimate coefficients for a degree 3 polynomial
-
- """
+ """Estimate coefficients for a degree 3 polynomial"""
return self.estimate_poly(x, y, n=3)
def estimate_quartic(self, x, y):
- """Estimate coefficients for a degree 4 polynomial
-
- """
+ """Estimate coefficients for a degree 4 polynomial"""
return self.estimate_poly(x, y, n=4)
def estimate_quintic(self, x, y):
- """Estimate coefficients for a degree 5 polynomial
-
- """
+ """Estimate coefficients for a degree 5 polynomial"""
return self.estimate_poly(x, y, n=5)
def strip_bg(self, y):
"""Return the strip background of y, using parameters from
:attr:`config` dictionary (*StripBackgroundFlag, StripWidth,
StripIterations, StripThresholdFactor*)"""
- remove_strip_bg = self.config.get('StripBackgroundFlag', False)
+ remove_strip_bg = self.config.get("StripBackgroundFlag", False)
if remove_strip_bg:
- if self.config['SmoothingFlag']:
- y = savitsky_golay(y, self.config['SmoothingWidth'])
- strip_width = self.config['StripWidth']
- strip_niterations = self.config['StripIterations']
- strip_thr_factor = self.config['StripThresholdFactor']
- return strip(y, w=strip_width,
- niterations=strip_niterations,
- factor=strip_thr_factor)
+ if self.config["SmoothingFlag"]:
+ y = savitsky_golay(y, self.config["SmoothingWidth"])
+ strip_width = self.config["StripWidth"]
+ strip_niterations = self.config["StripIterations"]
+ strip_thr_factor = self.config["StripThresholdFactor"]
+ return strip(
+ y, w=strip_width, niterations=strip_niterations, factor=strip_thr_factor
+ )
else:
return numpy.zeros_like(y)
@@ -268,7 +261,7 @@ class FitTheories(object):
yy = numpy.array(y, copy=False)
# smooth
- convolution_kernel = numpy.ones(shape=(3,)) / 3.
+ convolution_kernel = numpy.ones(shape=(3,)) / 3.0
ysmooth = numpy.convolve(y, convolution_kernel, mode="same")
# remove zeros
@@ -277,9 +270,9 @@ class FitTheories(object):
ysmooth = ysmooth[idx_array]
# compute scaling factor
- chisq = numpy.mean((yy - ysmooth)**2 / numpy.fabs(yy))
+ chisq = numpy.mean((yy - ysmooth) ** 2 / numpy.fabs(yy))
if chisq > 0:
- return 1. / chisq
+ return 1.0 / chisq
else:
return 1.0
@@ -299,16 +292,22 @@ class FitTheories(object):
# add padding
ysearch = numpy.ones((len(y) + 2 * fwhm,), numpy.float64)
ysearch[0:fwhm] = y[0]
- ysearch[-1:-fwhm - 1:-1] = y[len(y)-1]
- ysearch[fwhm:fwhm + len(y)] = y[:]
+ ysearch[-1 : -fwhm - 1 : -1] = y[len(y) - 1]
+ ysearch[fwhm : fwhm + len(y)] = y[:]
- scaling = self.guess_yscaling(y) if self.config["AutoScaling"] else self.config["Yscaling"]
+ scaling = (
+ self.guess_yscaling(y)
+ if self.config["AutoScaling"]
+ else self.config["Yscaling"]
+ )
if len(ysearch) > 1.5 * fwhm:
- peaks = peak_search(scaling * ysearch,
- fwhm=fwhm, sensitivity=sensitivity)
- return [peak_index - fwhm for peak_index in peaks
- if 0 <= peak_index - fwhm < len(y)]
+ peaks = peak_search(scaling * ysearch, fwhm=fwhm, sensitivity=sensitivity)
+ return [
+ peak_index - fwhm
+ for peak_index in peaks
+ if 0 <= peak_index - fwhm < len(y)
+ ]
else:
return []
@@ -331,32 +330,32 @@ class FitTheories(object):
bg = self.strip_bg(y)
- if self.config['AutoFwhm']:
+ if self.config["AutoFwhm"]:
search_fwhm = guess_fwhm(y)
else:
- search_fwhm = int(float(self.config['FwhmPoints']))
- search_sens = float(self.config['Sensitivity'])
+ search_fwhm = int(float(self.config["FwhmPoints"]))
+ search_sens = float(self.config["Sensitivity"])
if search_fwhm < 3:
_logger.warning("Setting peak fwhm to 3 (lower limit)")
search_fwhm = 3
- self.config['FwhmPoints'] = 3
+ self.config["FwhmPoints"] = 3
if search_sens < 1:
- _logger.warning("Setting peak search sensitivity to 1. " +
- "(lower limit to filter out noise peaks)")
+ _logger.warning(
+ "Setting peak search sensitivity to 1. "
+ + "(lower limit to filter out noise peaks)"
+ )
search_sens = 1
- self.config['Sensitivity'] = 1
+ self.config["Sensitivity"] = 1
npoints = len(y)
# Find indices of peaks in data array
- peaks = self.peak_search(y,
- fwhm=search_fwhm,
- sensitivity=search_sens)
+ peaks = self.peak_search(y, fwhm=search_fwhm, sensitivity=search_sens)
if not len(peaks):
- forcepeak = int(float(self.config.get('ForcePeakPresence', 0)))
+ forcepeak = int(float(self.config.get("ForcePeakPresence", 0)))
if forcepeak:
delta = y - bg
# get index of global maximum
@@ -371,13 +370,11 @@ class FitTheories(object):
peakpos = x[int(peaks[0])]
if abs(peakpos) < 1.0e-16:
peakpos = 0.0
- param = numpy.array(
- [y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])
+ param = numpy.array([y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])
height_largest_peak = param[0]
peak_index = 1
for i in peaks[1:]:
- param2 = numpy.array(
- [y[int(i)] - bg[int(i)], x[int(i)], sig])
+ param2 = numpy.array([y[int(i)] - bg[int(i)], x[int(i)], sig])
param = numpy.concatenate((param, param2))
if param2[0] > height_largest_peak:
height_largest_peak = param2[0]
@@ -391,60 +388,62 @@ class FitTheories(object):
cons = numpy.zeros((len(param), 3), numpy.float64)
# peak height must be positive
- cons[0:len(param):3, 0] = CPOSITIVE
+ cons[0 : len(param) : 3, 0] = CPOSITIVE
# force peaks to stay around their position
- cons[1:len(param):3, 0] = CQUOTED
+ cons[1 : len(param) : 3, 0] = CQUOTED
# set possible peak range to estimated peak +- guessed fwhm
if len(xw) > search_fwhm:
fwhmx = numpy.fabs(xw[int(search_fwhm)] - xw[0])
- cons[1:len(param):3, 1] = param[1:len(param):3] - 0.5 * fwhmx
- cons[1:len(param):3, 2] = param[1:len(param):3] + 0.5 * fwhmx
+ cons[1 : len(param) : 3, 1] = param[1 : len(param) : 3] - 0.5 * fwhmx
+ cons[1 : len(param) : 3, 2] = param[1 : len(param) : 3] + 0.5 * fwhmx
else:
- shape = [max(1, int(x)) for x in (param[1:len(param):3])]
- cons[1:len(param):3, 1] = min(xw) * numpy.ones(
- shape,
- numpy.float64)
- cons[1:len(param):3, 2] = max(xw) * numpy.ones(
- shape,
- numpy.float64)
+ shape = [max(1, int(x)) for x in (param[1 : len(param) : 3])]
+ cons[1 : len(param) : 3, 1] = min(xw) * numpy.ones(shape, numpy.float64)
+ cons[1 : len(param) : 3, 2] = max(xw) * numpy.ones(shape, numpy.float64)
# ensure fwhm is positive
- cons[2:len(param):3, 0] = CPOSITIVE
+ cons[2 : len(param) : 3, 0] = CPOSITIVE
# run a quick iterative fit (4 iterations) to improve
# estimations
- fittedpar, _, _ = leastsq(functions.sum_gauss, xw, yw, param,
- max_iter=4, constraints=cons.tolist(),
- full_output=True)
+ fittedpar, _, _ = leastsq(
+ functions.sum_gauss,
+ xw,
+ yw,
+ param,
+ max_iter=4,
+ constraints=cons.tolist(),
+ full_output=True,
+ )
# set final constraints based on config parameters
cons = numpy.zeros((len(fittedpar), 3), numpy.float64)
peak_index = 0
for i in range(len(peaks)):
# Setup height area constrains
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveHeightAreaFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["PositiveHeightAreaFlag"]:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
peak_index += 1
# Setup position constrains
- if not self.config['NoConstraintsFlag']:
- if self.config['QuotedPositionFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["QuotedPositionFlag"]:
cons[peak_index, 0] = CQUOTED
cons[peak_index, 1] = min(x)
cons[peak_index, 2] = max(x)
peak_index += 1
# Setup positive FWHM constrains
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveFwhmFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["PositiveFwhmFlag"]:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
- if self.config['SameFwhmFlag']:
+ if self.config["SameFwhmFlag"]:
if i != index_largest_peak:
cons[peak_index, 0] = CFACTOR
cons[peak_index, 1] = 3 * index_largest_peak + 2
@@ -475,8 +474,12 @@ class FitTheories(object):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
# Replace height with area in fittedpar
- fittedpar[3 * i] = numpy.sqrt(2 * numpy.pi) * height * fwhm / (
- 2.0 * numpy.sqrt(2 * numpy.log(2)))
+ fittedpar[3 * i] = (
+ numpy.sqrt(2 * numpy.pi)
+ * height
+ * fwhm
+ / (2.0 * numpy.sqrt(2 * numpy.log(2)))
+ )
return fittedpar, cons
def estimate_alorentz(self, x, y):
@@ -501,7 +504,7 @@ class FitTheories(object):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
# Replace height with area in fittedpar
- fittedpar[3 * i] = (height * fwhm * 0.5 * numpy.pi)
+ fittedpar[3 * i] = height * fwhm * 0.5 * numpy.pi
return fittedpar, cons
def estimate_splitgauss(self, x, y):
@@ -548,10 +551,12 @@ class FitTheories(object):
if cons[3 * i + 2, 0] == CFACTOR:
# convert indices of related parameters
# (this happens if SameFwhmFlag == True)
- estimated_constraints[4 * i + 2, 1] = \
+ estimated_constraints[4 * i + 2, 1] = (
int(cons[3 * i + 2, 1] / 3) * 4 + 2
- estimated_constraints[4 * i + 3, 1] = \
+ )
+ estimated_constraints[4 * i + 3, 1] = (
int(cons[3 * i + 2, 1] / 3) * 4 + 3
+ )
return estimated_parameters, estimated_constraints
def estimate_pvoigt(self, x, y):
@@ -580,8 +585,8 @@ class FitTheories(object):
newpar = []
newcons = numpy.zeros((4 * npeaks, 3), numpy.float64)
# find out related parameters proper index
- if not self.config['NoConstraintsFlag']:
- if self.config['SameFwhmFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["SameFwhmFlag"]:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
@@ -611,7 +616,7 @@ class FitTheories(object):
newcons[4 * i + 3, 0] = CFREE
newcons[4 * i + 3, 1] = 0
newcons[4 * i + 3, 2] = 0
- if self.config['QuotedEtaFlag']:
+ if self.config["QuotedEtaFlag"]:
newcons[4 * i + 3, 0] = CQUOTED
newcons[4 * i + 3, 1] = 0.0
newcons[4 * i + 3, 2] = 1.0
@@ -641,8 +646,8 @@ class FitTheories(object):
newpar = []
newcons = numpy.zeros((5 * npeaks, 3), numpy.float64)
# find out related parameters proper index
- if not self.config['NoConstraintsFlag']:
- if self.config['SameFwhmFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["SameFwhmFlag"]:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
@@ -692,12 +697,103 @@ class FitTheories(object):
newcons[5 * i + 4, 0] = CFREE
newcons[5 * i + 4, 1] = 0
newcons[5 * i + 4, 2] = 0
- if self.config['QuotedEtaFlag']:
+ if self.config["QuotedEtaFlag"]:
newcons[5 * i + 4, 0] = CQUOTED
newcons[5 * i + 4, 1] = 0.0
newcons[5 * i + 4, 2] = 1.0
return newpar, newcons
+ def estimate_splitpvoigt2(self, x, y):
+ """Estimation of *Height, Position, FWHM1, FWHM2, eta1, eta2* of peaks, for
+ asymmetric pseudo-Voigt curves.
+
+ This functions uses :meth:`estimate_height_position_fwhm`, then
+ adds an identical FWHM2 parameter and a constant estimation of
+ *eta1* and *eta2* (0.5) to the fit parameters for each peak, and the corresponding
+ constraints.
+
+ Constraint for the eta parameter can be set to QUOTED (0.--1.)
+ by setting :attr:`config`['QuotedEtaFlag'] to ``True``.
+ If this is not the case, the constraint code is set to FREE.
+
+ :param x: Array of abscissa values
+ :param y: Array of ordinate values (``y = f(x)``)
+ :return: Tuple of estimated fit parameters and fit constraints.
+ Parameters to be estimated for each peak are:
+ *Height, Position, FWHM1, FWHM2, eta1, eta2*.
+ """
+ fittedpar, cons = self.estimate_height_position_fwhm(x, y)
+ npeaks = len(fittedpar) // 3
+ newpar = []
+ newcons = numpy.zeros((5 * npeaks, 3), numpy.float64)
+ # find out related parameters proper index
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["SameFwhmFlag"]:
+ j = 0
+ # get the index of the free FWHM
+ for i in range(npeaks):
+ if cons[3 * i + 2, 0] != 4:
+ j = i
+ for i in range(npeaks):
+ if i != j:
+ cons[3 * i + 2, 1] = 4 * j + 2
+ for i in range(npeaks):
+ # height
+ newpar.append(fittedpar[3 * i])
+ # position
+ newpar.append(fittedpar[3 * i + 1])
+ # fwhm1
+ newpar.append(fittedpar[3 * i + 2])
+ # fwhm2 estimate equal to fwhm1
+ newpar.append(fittedpar[3 * i + 2])
+ # eta1
+ newpar.append(0.5)
+ # eta2
+ newpar.append(0.5)
+ # constraint codes
+ # ----------------
+ # height
+ newcons[6 * i, 0] = cons[3 * i, 0]
+ # position
+ newcons[6 * i + 1, 0] = cons[3 * i + 1, 0]
+ # fwhm1
+ newcons[6 * i + 2, 0] = cons[3 * i + 2, 0]
+ # fwhm2
+ newcons[6 * i + 3, 0] = cons[3 * i + 2, 0]
+ # cons 1
+ # ------
+ newcons[6 * i, 1] = cons[3 * i, 1]
+ newcons[6 * i + 1, 1] = cons[3 * i + 1, 1]
+ newcons[6 * i + 2, 1] = cons[3 * i + 2, 1]
+ newcons[6 * i + 3, 1] = cons[3 * i + 2, 1]
+ # cons 2
+ # ------
+ newcons[6 * i, 2] = cons[3 * i, 2]
+ newcons[6 * i + 1, 2] = cons[3 * i + 1, 2]
+ newcons[6 * i + 2, 2] = cons[3 * i + 2, 2]
+ newcons[6 * i + 3, 2] = cons[3 * i + 2, 2]
+
+ if cons[3 * i + 2, 0] == CFACTOR:
+ # fwhm2 constraint depends on fwhm1
+ newcons[6 * i + 3, 1] = newcons[6 * i + 2, 1] + 1
+ # eta1 constraints
+ newcons[6 * i + 4, 0] = CFREE
+ newcons[6 * i + 4, 1] = 0
+ newcons[6 * i + 4, 2] = 0
+ if self.config["QuotedEtaFlag"]:
+ newcons[6 * i + 4, 0] = CQUOTED
+ newcons[6 * i + 4, 1] = 0.0
+ newcons[6 * i + 4, 2] = 1.0
+ # eta2 constraints
+ newcons[6 * i + 5, 0] = CFREE
+ newcons[6 * i + 5, 1] = 0
+ newcons[6 * i + 5, 2] = 0
+ if self.config["QuotedEtaFlag"]:
+ newcons[6 * i + 5, 0] = CQUOTED
+ newcons[6 * i + 5, 1] = 0.0
+ newcons[6 * i + 5, 2] = 1.0
+ return newpar, newcons
+
def estimate_apvoigt(self, x, y):
"""Estimation of *Area, Position, FWHM1, eta* of peaks, for
pseudo-Voigt curves.
@@ -718,9 +814,9 @@ class FitTheories(object):
for i in range(npeaks):
height = fittedpar[4 * i]
fwhm = fittedpar[4 * i + 2]
- fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) +\
- 0.5 * (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
- ) * numpy.sqrt(2 * numpy.pi)
+ fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) + 0.5 * (
+ height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
+ ) * numpy.sqrt(2 * numpy.pi)
return fittedpar, cons
def estimate_ahypermet(self, x, y):
@@ -734,7 +830,7 @@ class FitTheories(object):
*area, position, fwhm, st_area_r, st_slope_r,
lt_area_r, lt_slope_r, step_height_r* .
"""
- yscaling = self.config.get('Yscaling', 1.0)
+ yscaling = self.config.get("Yscaling", 1.0)
if yscaling == 0:
yscaling = 1.0
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
@@ -743,8 +839,8 @@ class FitTheories(object):
newcons = numpy.zeros((8 * npeaks, 3), numpy.float64)
main_peak = 0
# find out related parameters proper index
- if not self.config['NoConstraintsFlag']:
- if self.config['SameFwhmFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["SameFwhmFlag"]:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
@@ -762,8 +858,9 @@ class FitTheories(object):
height = fittedpar[3 * i]
position = fittedpar[3 * i + 1]
fwhm = fittedpar[3 * i + 2]
- area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
- ) * numpy.sqrt(2 * numpy.pi)
+ area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))) * numpy.sqrt(
+ 2 * numpy.pi
+ )
# the gaussian parameters
newpar.append(area)
newpar.append(position)
@@ -774,20 +871,20 @@ class FitTheories(object):
st_term = 1
lt_term = 1
step_term = 1
- if self.config['HypermetTails'] != 0:
- g_term = self.config['HypermetTails'] & 1
- st_term = (self.config['HypermetTails'] >> 1) & 1
- lt_term = (self.config['HypermetTails'] >> 2) & 1
- step_term = (self.config['HypermetTails'] >> 3) & 1
+ if self.config["HypermetTails"] != 0:
+ g_term = self.config["HypermetTails"] & 1
+ st_term = (self.config["HypermetTails"] >> 1) & 1
+ lt_term = (self.config["HypermetTails"] >> 2) & 1
+ step_term = (self.config["HypermetTails"] >> 3) & 1
if g_term == 0:
# fix the gaussian parameters
newcons[8 * i, 0] = CFIXED
newcons[8 * i + 1, 0] = CFIXED
newcons[8 * i + 2, 0] = CFIXED
# the short tail parameters
- if ((area * yscaling) <
- self.config['MinGaussArea4ShortTail']) | \
- (st_term == 0):
+ if ((area * yscaling) < self.config["MinGaussArea4ShortTail"]) | (
+ st_term == 0
+ ):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 3, 0] = CFIXED
@@ -797,18 +894,18 @@ class FitTheories(object):
newcons[8 * i + 4, 1] = 0.0
newcons[8 * i + 4, 2] = 0.0
else:
- newpar.append(self.config['InitialShortTailAreaRatio'])
- newpar.append(self.config['InitialShortTailSlopeRatio'])
+ newpar.append(self.config["InitialShortTailAreaRatio"])
+ newpar.append(self.config["InitialShortTailSlopeRatio"])
newcons[8 * i + 3, 0] = CQUOTED
- newcons[8 * i + 3, 1] = self.config['MinShortTailAreaRatio']
- newcons[8 * i + 3, 2] = self.config['MaxShortTailAreaRatio']
+ newcons[8 * i + 3, 1] = self.config["MinShortTailAreaRatio"]
+ newcons[8 * i + 3, 2] = self.config["MaxShortTailAreaRatio"]
newcons[8 * i + 4, 0] = CQUOTED
- newcons[8 * i + 4, 1] = self.config['MinShortTailSlopeRatio']
- newcons[8 * i + 4, 2] = self.config['MaxShortTailSlopeRatio']
+ newcons[8 * i + 4, 1] = self.config["MinShortTailSlopeRatio"]
+ newcons[8 * i + 4, 2] = self.config["MaxShortTailSlopeRatio"]
# the long tail parameters
- if ((area * yscaling) <
- self.config['MinGaussArea4LongTail']) | \
- (lt_term == 0):
+ if ((area * yscaling) < self.config["MinGaussArea4LongTail"]) | (
+ lt_term == 0
+ ):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 5, 0] = CFIXED
@@ -818,50 +915,50 @@ class FitTheories(object):
newcons[8 * i + 6, 1] = 0.0
newcons[8 * i + 6, 2] = 0.0
else:
- newpar.append(self.config['InitialLongTailAreaRatio'])
- newpar.append(self.config['InitialLongTailSlopeRatio'])
+ newpar.append(self.config["InitialLongTailAreaRatio"])
+ newpar.append(self.config["InitialLongTailSlopeRatio"])
newcons[8 * i + 5, 0] = CQUOTED
- newcons[8 * i + 5, 1] = self.config['MinLongTailAreaRatio']
- newcons[8 * i + 5, 2] = self.config['MaxLongTailAreaRatio']
+ newcons[8 * i + 5, 1] = self.config["MinLongTailAreaRatio"]
+ newcons[8 * i + 5, 2] = self.config["MaxLongTailAreaRatio"]
newcons[8 * i + 6, 0] = CQUOTED
- newcons[8 * i + 6, 1] = self.config['MinLongTailSlopeRatio']
- newcons[8 * i + 6, 2] = self.config['MaxLongTailSlopeRatio']
+ newcons[8 * i + 6, 1] = self.config["MinLongTailSlopeRatio"]
+ newcons[8 * i + 6, 2] = self.config["MaxLongTailSlopeRatio"]
# the step parameters
- if ((height * yscaling) <
- self.config['MinGaussHeight4StepTail']) | \
- (step_term == 0):
+ if ((height * yscaling) < self.config["MinGaussHeight4StepTail"]) | (
+ step_term == 0
+ ):
newpar.append(0.0)
newcons[8 * i + 7, 0] = CFIXED
newcons[8 * i + 7, 1] = 0.0
newcons[8 * i + 7, 2] = 0.0
else:
- newpar.append(self.config['InitialStepTailHeightRatio'])
+ newpar.append(self.config["InitialStepTailHeightRatio"])
newcons[8 * i + 7, 0] = CQUOTED
- newcons[8 * i + 7, 1] = self.config['MinStepTailHeightRatio']
- newcons[8 * i + 7, 2] = self.config['MaxStepTailHeightRatio']
+ newcons[8 * i + 7, 1] = self.config["MinStepTailHeightRatio"]
+ newcons[8 * i + 7, 2] = self.config["MaxStepTailHeightRatio"]
# if self.config['NoConstraintsFlag'] == 1:
# newcons=numpy.zeros((8*npeaks, 3),numpy.float64)
if npeaks > 0:
if g_term:
- if self.config['PositiveHeightAreaFlag']:
+ if self.config["PositiveHeightAreaFlag"]:
for i in range(npeaks):
newcons[8 * i, 0] = CPOSITIVE
- if self.config['PositiveFwhmFlag']:
+ if self.config["PositiveFwhmFlag"]:
for i in range(npeaks):
newcons[8 * i + 2, 0] = CPOSITIVE
- if self.config['SameFwhmFlag']:
+ if self.config["SameFwhmFlag"]:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 2, 0] = CFACTOR
newcons[8 * i + 2, 1] = 8 * main_peak + 2
newcons[8 * i + 2, 2] = 1.0
- if self.config['HypermetQuotedPositionFlag']:
+ if self.config["HypermetQuotedPositionFlag"]:
for i in range(npeaks):
- delta = self.config['DeltaPositionFwhmUnits'] * fwhm
+ delta = self.config["DeltaPositionFwhmUnits"] * fwhm
newcons[8 * i + 1, 0] = CQUOTED
newcons[8 * i + 1, 1] = newpar[8 * i + 1] - delta
newcons[8 * i + 1, 2] = newpar[8 * i + 1] + delta
- if self.config['SameSlopeRatioFlag']:
+ if self.config["SameSlopeRatioFlag"]:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 4, 0] = CFACTOR
@@ -870,7 +967,7 @@ class FitTheories(object):
newcons[8 * i + 6, 0] = CFACTOR
newcons[8 * i + 6, 1] = 8 * main_peak + 6
newcons[8 * i + 6, 2] = 1.0
- if self.config['SameAreaRatioFlag']:
+ if self.config["SameAreaRatioFlag"]:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 3, 0] = CFACTOR
@@ -897,16 +994,15 @@ class FitTheories(object):
"""
crappyfilter = [-0.25, -0.75, 0.0, 0.75, 0.25]
cutoff = len(crappyfilter) // 2
- y_deriv = numpy.convolve(y,
- crappyfilter,
- mode="valid")
+ y_deriv = numpy.convolve(y, crappyfilter, mode="valid")
# make the derivative's peak have the same amplitude as the step
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
fittedpar, newcons = self.estimate_height_position_fwhm(
- x[cutoff:-cutoff], y_deriv)
+ x[cutoff:-cutoff], y_deriv
+ )
data_amplitude = max(y) - min(y)
@@ -914,38 +1010,44 @@ class FitTheories(object):
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
- largest = [data_amplitude,
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
+ largest = [
+ data_amplitude,
+ fittedpar[3 * largest_index + 1],
+ fittedpar[3 * largest_index + 2],
+ ]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
- largest = [data_amplitude,
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
+ largest = [
+ data_amplitude,
+ fittedpar[3 * largest_index + 1],
+ fittedpar[3 * largest_index + 2],
+ ]
else:
# no peak was found
- largest = [data_amplitude, # height
- x[len(x)//2], # center: middle of x range
- self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
+ largest = [
+ data_amplitude, # height
+ x[len(x) // 2], # center: middle of x range
+ self.config["FwhmPoints"] * (x[1] - x[0]),
+ ] # fwhm: default value
# Setup constrains
newcons = numpy.zeros((3, 3), numpy.float64)
- if not self.config['NoConstraintsFlag']:
- # Setup height constrains
- if self.config['PositiveHeightAreaFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ # Setup height constrains
+ if self.config["PositiveHeightAreaFlag"]:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constrains
- if self.config['QuotedPositionFlag']:
+ if self.config["QuotedPositionFlag"]:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constrains
- if self.config['PositiveFwhmFlag']:
+ if self.config["PositiveFwhmFlag"]:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
@@ -984,27 +1086,27 @@ class FitTheories(object):
largest = [height, position, fwhm, beamfwhm]
cons = numpy.zeros((4, 3), numpy.float64)
# Setup constrains
- if not self.config['NoConstraintsFlag']:
+ if not self.config["NoConstraintsFlag"]:
# Setup height constrains
- if self.config['PositiveHeightAreaFlag']:
+ if self.config["PositiveHeightAreaFlag"]:
cons[0, 0] = CPOSITIVE
cons[0, 1] = 0
cons[0, 2] = 0
# Setup position constrains
- if self.config['QuotedPositionFlag']:
+ if self.config["QuotedPositionFlag"]:
cons[1, 0] = CQUOTED
cons[1, 1] = min(x)
cons[1, 2] = max(x)
# Setup positive FWHM constrains
- if self.config['PositiveFwhmFlag']:
+ if self.config["PositiveFwhmFlag"]:
cons[2, 0] = CPOSITIVE
cons[2, 1] = 0
cons[2, 2] = 0
# Setup positive FWHM constrains
- if self.config['PositiveFwhmFlag']:
+ if self.config["PositiveFwhmFlag"]:
cons[3, 0] = CPOSITIVE
cons[3, 1] = 0
cons[3, 2] = 0
@@ -1030,8 +1132,7 @@ class FitTheories(object):
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
- fittedpar, cons = self.estimate_height_position_fwhm(
- x[cutoff:-cutoff], y_deriv)
+ fittedpar, cons = self.estimate_height_position_fwhm(x[cutoff:-cutoff], y_deriv)
# for height, use the data amplitude after removing the background
data_amplitude = max(y) - min(y)
@@ -1040,38 +1141,44 @@ class FitTheories(object):
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
- largest = [data_amplitude,
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
+ largest = [
+ data_amplitude,
+ fittedpar[3 * largest_index + 1],
+ fittedpar[3 * largest_index + 2],
+ ]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
- largest = [fittedpar[3 * largest_index],
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
+ largest = [
+ fittedpar[3 * largest_index],
+ fittedpar[3 * largest_index + 1],
+ fittedpar[3 * largest_index + 2],
+ ]
else:
# no peak was found
- largest = [data_amplitude, # height
- x[len(x)//2], # center: middle of x range
- self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
+ largest = [
+ data_amplitude, # height
+ x[len(x) // 2], # center: middle of x range
+ self.config["FwhmPoints"] * (x[1] - x[0]),
+ ] # fwhm: default value
newcons = numpy.zeros((3, 3), numpy.float64)
# Setup constrains
- if not self.config['NoConstraintsFlag']:
- # Setup height constraints
- if self.config['PositiveHeightAreaFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ # Setup height constraints
+ if self.config["PositiveHeightAreaFlag"]:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constraints
- if self.config['QuotedPositionFlag']:
+ if self.config["QuotedPositionFlag"]:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constraints
- if self.config['PositiveFwhmFlag']:
+ if self.config["PositiveFwhmFlag"]:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
@@ -1096,17 +1203,17 @@ class FitTheories(object):
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
"""
- yscaling = self.config.get('Yscaling', 1.0)
+ yscaling = self.config.get("Yscaling", 1.0)
if yscaling == 0:
yscaling = 1.0
bg = self.strip_bg(y)
- if self.config['AutoFwhm']:
+ if self.config["AutoFwhm"]:
search_fwhm = guess_fwhm(y)
else:
- search_fwhm = int(float(self.config['FwhmPoints']))
- search_sens = float(self.config['Sensitivity'])
+ search_fwhm = int(float(self.config["FwhmPoints"]))
+ search_sens = float(self.config["Sensitivity"])
if search_fwhm < 3:
search_fwhm = 3
@@ -1115,8 +1222,7 @@ class FitTheories(object):
search_sens = 1
if len(y) > 1.5 * search_fwhm:
- peaks = peak_search(yscaling * y, fwhm=search_fwhm,
- sensitivity=search_sens)
+ peaks = peak_search(yscaling * y, fwhm=search_fwhm, sensitivity=search_sens)
else:
peaks = []
npeaks = len(peaks)
@@ -1136,7 +1242,7 @@ class FitTheories(object):
for i in range(npeaks):
height += y[int(peaks[i])] - bg[int(peaks[i])]
if i != npeaks - 1:
- delta += (x[int(peaks[i + 1])] - x[int(peaks[i])])
+ delta += x[int(peaks[i + 1])] - x[int(peaks[i])]
# delta between peaks
if npeaks > 1:
@@ -1160,8 +1266,8 @@ class FitTheories(object):
cons[1, 0] = CFREE
j = 2
# Setup height area constrains
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveHeightAreaFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["PositiveHeightAreaFlag"]:
# POSITIVE = 1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
@@ -1169,8 +1275,8 @@ class FitTheories(object):
j += 1
# Setup position constrains
- if not self.config['NoConstraintsFlag']:
- if self.config['QuotedPositionFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["QuotedPositionFlag"]:
# QUOTED = 2
cons[j, 0] = CQUOTED
cons[j, 1] = min(x)
@@ -1178,8 +1284,8 @@ class FitTheories(object):
j += 1
# Setup positive FWHM constrains
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveFwhmFlag']:
+ if not self.config["NoConstraintsFlag"]:
+ if self.config["PositiveFwhmFlag"]:
# POSITIVE=1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
@@ -1208,127 +1314,223 @@ class FitTheories(object):
self.config[key] = kw[key]
return self.config
+
fitfuns = FitTheories()
-THEORY = OrderedDict((
- ('Gaussians',
- FitTheory(description='Gaussian functions',
- function=functions.sum_gauss,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_height_position_fwhm,
- configure=fitfuns.configure)),
- ('Lorentz',
- FitTheory(description='Lorentzian functions',
- function=functions.sum_lorentz,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_height_position_fwhm,
- configure=fitfuns.configure)),
- ('Area Gaussians',
- FitTheory(description='Gaussian functions (area)',
- function=functions.sum_agauss,
- parameters=('Area', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_agauss,
- configure=fitfuns.configure)),
- ('Area Lorentz',
- FitTheory(description='Lorentzian functions (area)',
- function=functions.sum_alorentz,
- parameters=('Area', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_alorentz,
- configure=fitfuns.configure)),
- ('Pseudo-Voigt Line',
- FitTheory(description='Pseudo-Voigt functions',
- function=functions.sum_pvoigt,
- parameters=('Height', 'Position', 'FWHM', 'Eta'),
- estimate=fitfuns.estimate_pvoigt,
- configure=fitfuns.configure)),
- ('Area Pseudo-Voigt',
- FitTheory(description='Pseudo-Voigt functions (area)',
- function=functions.sum_apvoigt,
- parameters=('Area', 'Position', 'FWHM', 'Eta'),
- estimate=fitfuns.estimate_apvoigt,
- configure=fitfuns.configure)),
- ('Split Gaussian',
- FitTheory(description='Asymmetric gaussian functions',
- function=functions.sum_splitgauss,
- parameters=('Height', 'Position', 'LowFWHM',
- 'HighFWHM'),
- estimate=fitfuns.estimate_splitgauss,
- configure=fitfuns.configure)),
- ('Split Lorentz',
- FitTheory(description='Asymmetric lorentzian functions',
- function=functions.sum_splitlorentz,
- parameters=('Height', 'Position', 'LowFWHM', 'HighFWHM'),
- estimate=fitfuns.estimate_splitgauss,
- configure=fitfuns.configure)),
- ('Split Pseudo-Voigt',
- FitTheory(description='Asymmetric pseudo-Voigt functions',
- function=functions.sum_splitpvoigt,
- parameters=('Height', 'Position', 'LowFWHM',
- 'HighFWHM', 'Eta'),
- estimate=fitfuns.estimate_splitpvoigt,
- configure=fitfuns.configure)),
- ('Step Down',
- FitTheory(description='Step down function',
- function=functions.sum_stepdown,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_stepdown,
- configure=fitfuns.configure)),
- ('Step Up',
- FitTheory(description='Step up function',
- function=functions.sum_stepup,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_stepup,
- configure=fitfuns.configure)),
- ('Slit',
- FitTheory(description='Slit function',
- function=functions.sum_slit,
- parameters=('Height', 'Position', 'FWHM', 'BeamFWHM'),
- estimate=fitfuns.estimate_slit,
- configure=fitfuns.configure)),
- ('Atan',
- FitTheory(description='Arctan step up function',
- function=functions.atan_stepup,
- parameters=('Height', 'Position', 'Width'),
- estimate=fitfuns.estimate_stepup,
- configure=fitfuns.configure)),
- ('Hypermet',
- FitTheory(description='Hypermet functions',
- function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet
- parameters=('G_Area', 'Position', 'FWHM', 'ST_Area',
- 'ST_Slope', 'LT_Area', 'LT_Slope', 'Step_H'),
- estimate=fitfuns.estimate_ahypermet,
- configure=fitfuns.configure)),
- # ('Periodic Gaussians',
- # FitTheory(description='Periodic gaussian functions',
- # function=functions.periodic_gauss,
- # parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),
- # estimate=fitfuns.estimate_periodic_gauss,
- # configure=fitfuns.configure))
- ('Degree 2 Polynomial',
- FitTheory(description='Degree 2 polynomial'
- '\ny = a*x^2 + b*x +c',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c'],
- estimate=fitfuns.estimate_quadratic)),
- ('Degree 3 Polynomial',
- FitTheory(description='Degree 3 polynomial'
- '\ny = a*x^3 + b*x^2 + c*x + d',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c', 'd'],
- estimate=fitfuns.estimate_cubic)),
- ('Degree 4 Polynomial',
- FitTheory(description='Degree 4 polynomial'
- '\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c', 'd', 'e'],
- estimate=fitfuns.estimate_quartic)),
- ('Degree 5 Polynomial',
- FitTheory(description='Degree 5 polynomial'
- '\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c', 'd', 'e', 'f'],
- estimate=fitfuns.estimate_quintic)),
-))
+THEORY = dict(
+ (
+ (
+ "Gaussians",
+ FitTheory(
+ description="Gaussian functions",
+ function=functions.sum_gauss,
+ parameters=("Height", "Position", "FWHM"),
+ estimate=fitfuns.estimate_height_position_fwhm,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Lorentz",
+ FitTheory(
+ description="Lorentzian functions",
+ function=functions.sum_lorentz,
+ parameters=("Height", "Position", "FWHM"),
+ estimate=fitfuns.estimate_height_position_fwhm,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Area Gaussians",
+ FitTheory(
+ description="Gaussian functions (area)",
+ function=functions.sum_agauss,
+ parameters=("Area", "Position", "FWHM"),
+ estimate=fitfuns.estimate_agauss,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Area Lorentz",
+ FitTheory(
+ description="Lorentzian functions (area)",
+ function=functions.sum_alorentz,
+ parameters=("Area", "Position", "FWHM"),
+ estimate=fitfuns.estimate_alorentz,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Pseudo-Voigt Line",
+ FitTheory(
+ description="Pseudo-Voigt functions",
+ function=functions.sum_pvoigt,
+ parameters=("Height", "Position", "FWHM", "Eta"),
+ estimate=fitfuns.estimate_pvoigt,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Area Pseudo-Voigt",
+ FitTheory(
+ description="Pseudo-Voigt functions (area)",
+ function=functions.sum_apvoigt,
+ parameters=("Area", "Position", "FWHM", "Eta"),
+ estimate=fitfuns.estimate_apvoigt,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Split Gaussian",
+ FitTheory(
+ description="Asymmetric gaussian functions",
+ function=functions.sum_splitgauss,
+ parameters=("Height", "Position", "LowFWHM", "HighFWHM"),
+ estimate=fitfuns.estimate_splitgauss,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Split Lorentz",
+ FitTheory(
+ description="Asymmetric lorentzian functions",
+ function=functions.sum_splitlorentz,
+ parameters=("Height", "Position", "LowFWHM", "HighFWHM"),
+ estimate=fitfuns.estimate_splitgauss,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Split Pseudo-Voigt",
+ FitTheory(
+ description="Asymmetric pseudo-Voigt functions",
+ function=functions.sum_splitpvoigt,
+ parameters=("Height", "Position", "LowFWHM", "HighFWHM", "Eta"),
+ estimate=fitfuns.estimate_splitpvoigt,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Split Pseudo-Voigt 2",
+ FitTheory(
+ description="Asymmetric pseudo-Voigt functions",
+ function=functions.sum_splitpvoigt2,
+ parameters=(
+ "Height",
+ "Position",
+ "LowFWHM",
+ "HighFWHM",
+ "LowEta",
+ "HighEta",
+ ),
+ estimate=fitfuns.estimate_splitpvoigt2,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Step Down",
+ FitTheory(
+ description="Step down function",
+ function=functions.sum_stepdown,
+ parameters=("Height", "Position", "FWHM"),
+ estimate=fitfuns.estimate_stepdown,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Step Up",
+ FitTheory(
+ description="Step up function",
+ function=functions.sum_stepup,
+ parameters=("Height", "Position", "FWHM"),
+ estimate=fitfuns.estimate_stepup,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Slit",
+ FitTheory(
+ description="Slit function",
+ function=functions.sum_slit,
+ parameters=("Height", "Position", "FWHM", "BeamFWHM"),
+ estimate=fitfuns.estimate_slit,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Atan",
+ FitTheory(
+ description="Arctan step up function",
+ function=functions.atan_stepup,
+ parameters=("Height", "Position", "Width"),
+ estimate=fitfuns.estimate_stepup,
+ configure=fitfuns.configure,
+ ),
+ ),
+ (
+ "Hypermet",
+ FitTheory(
+ description="Hypermet functions",
+ function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet
+ parameters=(
+ "G_Area",
+ "Position",
+ "FWHM",
+ "ST_Area",
+ "ST_Slope",
+ "LT_Area",
+ "LT_Slope",
+ "Step_H",
+ ),
+ estimate=fitfuns.estimate_ahypermet,
+ configure=fitfuns.configure,
+ ),
+ ),
+ # ('Periodic Gaussians',
+ # FitTheory(description='Periodic gaussian functions',
+ # function=functions.periodic_gauss,
+ # parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),
+ # estimate=fitfuns.estimate_periodic_gauss,
+ # configure=fitfuns.configure))
+ (
+ "Degree 2 Polynomial",
+ FitTheory(
+ description="Degree 2 polynomial" "\ny = a*x^2 + b*x +c",
+ function=fitfuns.poly,
+ parameters=["a", "b", "c"],
+ estimate=fitfuns.estimate_quadratic,
+ ),
+ ),
+ (
+ "Degree 3 Polynomial",
+ FitTheory(
+ description="Degree 3 polynomial" "\ny = a*x^3 + b*x^2 + c*x + d",
+ function=fitfuns.poly,
+ parameters=["a", "b", "c", "d"],
+ estimate=fitfuns.estimate_cubic,
+ ),
+ ),
+ (
+ "Degree 4 Polynomial",
+ FitTheory(
+ description="Degree 4 polynomial"
+ "\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e",
+ function=fitfuns.poly,
+ parameters=["a", "b", "c", "d", "e"],
+ estimate=fitfuns.estimate_quartic,
+ ),
+ ),
+ (
+ "Degree 5 Polynomial",
+ FitTheory(
+ description="Degree 5 polynomial"
+ "\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f",
+ function=fitfuns.poly,
+ parameters=["a", "b", "c", "d", "e", "f"],
+ estimate=fitfuns.estimate_quintic,
+ ),
+ ),
+ )
+)
"""Dictionary of fit theories: fit functions and their associated estimation
function, parameters list, configuration function and description.
"""
@@ -1336,16 +1538,20 @@ function, parameters list, configuration function and description.
def test(a):
from silx.math.fit import fitmanager
+
x = numpy.arange(1000).astype(numpy.float64)
- p = [1500, 100., 50.0,
- 1500, 700., 50.0]
+ p = [1500, 100.0, 50.0, 1500, 700.0, 50.0]
y_synthetic = functions.sum_gauss(x, *p) + 1
fit = fitmanager.FitManager(x, y_synthetic)
- fit.addtheory('Gaussians', functions.sum_gauss, ['Height', 'Position', 'FWHM'],
- a.estimate_height_position_fwhm)
- fit.settheory('Gaussians')
- fit.setbackground('Linear')
+ fit.addtheory(
+ "Gaussians",
+ functions.sum_gauss,
+ ["Height", "Position", "FWHM"],
+ a.estimate_height_position_fwhm,
+ )
+ fit.settheory("Gaussians")
+ fit.setbackground("Linear")
fit.estimate()
fit.runfit()
@@ -1353,12 +1559,13 @@ def test(a):
y_fit = fit.gendata()
print("Fit parameter names: %s" % str(fit.get_names()))
- print("Theoretical parameters: %s" % str(numpy.append([1, 0], p)))
+ print("Theoretical parameters: %s" % str(numpy.append([1, 0], p)))
print("Fitted parameters: %s" % str(fit.get_fitted_parameters()))
try:
from silx.gui import qt
from silx.gui.plot import plot1D
+
app = qt.QApplication([])
# Offset of 1 to see the difference in log scale
diff --git a/src/silx/math/fit/fittheory.py b/src/silx/math/fit/fittheory.py
index ab3ae43..4d2b19b 100644
--- a/src/silx/math/fit/fittheory.py
+++ b/src/silx/math/fit/fittheory.py
@@ -1,4 +1,4 @@
-#/*##########################################################################
+# /*##########################################################################
#
# Copyright (c) 2004-2018 European Synchrotron Radiation Facility
#
@@ -35,19 +35,28 @@ __date__ = "09/08/2016"
class FitTheory(object):
"""This class defines a fit theory, which consists of:
- - a model function, the actual function to be fitted
- - parameters names
- - an estimation function, that return the estimated initial parameters
- that serve as input for :func:`silx.math.fit.leastsq`
- - an optional configuration function, that can be used to modify
- configuration parameters to alter the behavior of the fit function
- and the estimation function
- - an optional derivative function, that replaces the default model
- derivative used in :func:`silx.math.fit.leastsq`
+ - a model function, the actual function to be fitted
+ - parameters names
+ - an estimation function, that return the estimated initial parameters
+ that serve as input for :func:`silx.math.fit.leastsq`
+ - an optional configuration function, that can be used to modify
+ configuration parameters to alter the behavior of the fit function
+ and the estimation function
+ - an optional derivative function, that replaces the default model
+ derivative used in :func:`silx.math.fit.leastsq`
"""
- def __init__(self, function, parameters,
- estimate=None, configure=None, derivative=None,
- description=None, pymca_legacy=False, is_background=False):
+
+ def __init__(
+ self,
+ function,
+ parameters,
+ estimate=None,
+ configure=None,
+ derivative=None,
+ description=None,
+ pymca_legacy=False,
+ is_background=False,
+ ):
"""
:param function function: Actual function. See documentation for
:attr:`function`.
@@ -155,6 +164,6 @@ class FitTheory(object):
"""Default estimate function. Return an array of *ones* as the
initial estimated parameters, and set all constraints to zero
(FREE)"""
- estimated_parameters = [1. for _ in self.parameters]
+ estimated_parameters = [1.0 for _ in self.parameters]
estimated_constraints = [[0, 0, 0] for _ in self.parameters]
return estimated_parameters, estimated_constraints
diff --git a/src/silx/math/fit/functions.pyx b/src/silx/math/fit/functions.pyx
index a69086c..e7102a5 100644
--- a/src/silx/math/fit/functions.pyx
+++ b/src/silx/math/fit/functions.pyx
@@ -33,6 +33,7 @@ List of fit functions:
- :func:`sum_apvoigt`
- :func:`sum_pvoigt`
- :func:`sum_splitpvoigt`
+ - :func:`sum_splitpvoigt2`
- :func:`sum_lorentz`
- :func:`sum_alorentz`
@@ -143,9 +144,7 @@ def sum_gauss(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 3 parameters are required.")
+ _validate_parameters(params, 3)
# ensure float64 (double) type and 1D contiguous data layout in memory
x_c = numpy.array(x,
@@ -191,9 +190,7 @@ def sum_agauss(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 3 parameters are required.")
+ _validate_parameters(params, 3)
x_c = numpy.array(x,
copy=False,
@@ -241,9 +238,7 @@ def sum_fastagauss(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 3 parameters are required.")
+ _validate_parameters(params, 3)
x_c = numpy.array(x,
copy=False,
@@ -290,9 +285,7 @@ def sum_splitgauss(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 4 parameters are required.")
+ _validate_parameters(params, 4)
x_c = numpy.array(x,
copy=False,
@@ -327,7 +320,7 @@ def sum_apvoigt(x, *params):
- *area* is the area underneath both G(x) and L(x)
- *centroid* is the peak x-coordinate for both functions
- *fwhm* is the full-width at half maximum of both functions
- - *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
+ - *eta* is the Lorentzian fraction: PV(x) = eta * L(x) + (1 - eta) * G(x)
:param x: Independent variable where the gaussians are calculated
:type x: numpy.ndarray
@@ -341,9 +334,8 @@ def sum_apvoigt(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
+ _validate_parameters(params, 4)
+
x_c = numpy.array(x,
copy=False,
dtype=numpy.float64,
@@ -377,7 +369,7 @@ def sum_pvoigt(x, *params):
- *height* is the peak amplitude of G(x) and L(x)
- *centroid* is the peak x-coordinate for both functions
- *fwhm* is the full-width at half maximum of both functions
- - *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
+ - *eta* is the Lorentzian fraction: PV(x) = eta * L(x) + (1 - eta) * G(x)
:param x: Independent variable where the gaussians are calculated
:type x: numpy.ndarray
@@ -391,9 +383,7 @@ def sum_pvoigt(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
+ _validate_parameters(params, 4)
x_c = numpy.array(x,
copy=False,
@@ -425,13 +415,13 @@ def sum_splitpvoigt(x, *params):
profile using a linear combination of a Gaussian curve ``G(x)`` and a
Lorentzian curve ``L(x)`` instead of their convolution.
- - *height* is the peak amplitudefor G(x) and L(x)
+ - *height* is the peak amplitude for G(x) and L(x)
- *centroid* is the peak x-coordinate for both functions
- *fwhm1* is the full-width at half maximum of both functions
when ``x < centroid``
- *fwhm2* is the full-width at half maximum of both functions
when ``x > centroid``
- - *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
+ - *eta* is the Lorentzian fraction: PV(x) = eta * L(x) + (1 - eta) * G(x)
:param x: Independent variable where the gaussians are calculated
:type x: numpy.ndarray
@@ -446,9 +436,7 @@ def sum_splitpvoigt(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 5 parameters are required.")
+ _validate_parameters(params, 5)
x_c = numpy.array(x,
copy=False,
@@ -472,6 +460,60 @@ def sum_splitpvoigt(x, *params):
return numpy.asarray(y_c).reshape(x.shape)
+def sum_splitpvoigt2(x, *params):
+ """Return a sum of split pseudo-Voigt functions, defined by *(height,
+ centroid, fwhm1, fwhm2, eta1, eta2)*.
+
+ The pseudo-Voigt profile ``PV(x)`` is an approximation of the Voigt
+ profile using a linear combination of a Gaussian curve ``G(x)`` and a
+ Lorentzian curve ``L(x)`` instead of their convolution.
+
+ - *height* is the peak amplitude for G(x) and L(x)
+ - *centroid* is the peak x-coordinate for both functions
+ - *fwhm1* is the full-width at half maximum of both functions
+ when ``x < centroid``
+ - *fwhm2* is the full-width at half maximum of both functions
+ when ``x > centroid``
+ - *eta1* is the Lorentzian fraction when ``x < centroid``
+ - *eta2* is the Lorentzian fraction when ``x > centroid``
+
+ :param x: Independent variable where the gaussians are calculated
+ :type x: numpy.ndarray
+ :param params: Array of pseudo-Voigt parameters (length must be a multiple
+ of 6):
+ *(height1, centroid1, fwhm11, fwhm21, eta11, eta21,...)*
+ :return: Array of sum of split pseudo-Voigt functions at each ``x``
+ coordinate
+ """
+ cdef:
+ double[::1] x_c
+ double[::1] params_c
+ double[::1] y_c
+
+ _validate_parameters(params, 6)
+
+ x_c = numpy.array(x,
+ copy=False,
+ dtype=numpy.float64,
+ order='C').reshape(-1)
+ params_c = numpy.array(params,
+ copy=False,
+ dtype=numpy.float64,
+ order='C').reshape(-1)
+ y_c = numpy.empty(shape=(x.size,),
+ dtype=numpy.float64)
+
+ status = functions_wrapper.sum_splitpvoigt2(
+ &x_c[0], x.size,
+ &params_c[0], params_c.size,
+ &y_c[0])
+
+ if status:
+ raise IndexError("Wrong number of parameters for function")
+
+ return numpy.asarray(y_c).reshape(x.shape)
+
+
def sum_lorentz(x, *params):
"""Return a sum of Lorentz distributions, also known as Cauchy distribution,
defined by *(height, centroid, fwhm)*.
@@ -493,9 +535,7 @@ def sum_lorentz(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
+ _validate_parameters(params, 3)
x_c = numpy.array(x,
copy=False,
@@ -540,9 +580,7 @@ def sum_alorentz(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
+ _validate_parameters(params, 3)
x_c = numpy.array(x,
copy=False,
@@ -588,9 +626,7 @@ def sum_splitlorentz(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
+ _validate_parameters(params, 4)
x_c = numpy.array(x,
copy=False,
@@ -636,9 +672,8 @@ def sum_stepdown(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
+ _validate_parameters(params, 3)
+
x_c = numpy.array(x,
copy=False,
dtype=numpy.float64,
@@ -684,9 +719,7 @@ def sum_stepup(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
+ _validate_parameters(params, 3)
x_c = numpy.array(x,
copy=False,
@@ -735,9 +768,7 @@ def sum_slit(x, *params):
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
+ _validate_parameters(params, 4)
x_c = numpy.array(x,
copy=False,
@@ -797,9 +828,9 @@ def sum_ahypermet(x, *params,
*(area1, position1, fwhm1, st_area_r1, st_slope_r1, lt_area_r1,
lt_slope_r1, step_height_r1...)*
:param gaussian_term: If ``True``, enable gaussian term. Default ``True``
- :param st_term: If ``True``, enable gaussian term. Default ``True``
- :param lt_term: If ``True``, enable gaussian term. Default ``True``
- :param step_term: If ``True``, enable gaussian term. Default ``True``
+ :param st_term: If ``True``, enable short tail term. Default ``True``
+ :param lt_term: If ``True``, enable long tail term. Default ``True``
+ :param step_term: If ``True``, enable step term. Default ``True``
:return: Array of sum of hypermet functions at each ``x`` coordinate
"""
cdef:
@@ -807,9 +838,7 @@ def sum_ahypermet(x, *params,
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 8 parameters are required.")
+ _validate_parameters(params, 8)
# Sum binary flags to activate various terms of the equation
tail_flags = 1 if gaussian_term else 0
@@ -883,9 +912,9 @@ def sum_fastahypermet(x, *params,
*(area1, position1, fwhm1, st_area_r1, st_slope_r1, lt_area_r1,
lt_slope_r1, step_height_r1...)*
:param gaussian_term: If ``True``, enable gaussian term. Default ``True``
- :param st_term: If ``True``, enable gaussian term. Default ``True``
- :param lt_term: If ``True``, enable gaussian term. Default ``True``
- :param step_term: If ``True``, enable gaussian term. Default ``True``
+ :param st_term: If ``True``, enable short tail term. Default ``True``
+ :param lt_term: If ``True``, enable long tail term. Default ``True``
+ :param step_term: If ``True``, enable step term. Default ``True``
:return: Array of sum of hypermet functions at each ``x`` coordinate
"""
cdef:
@@ -893,9 +922,7 @@ def sum_fastahypermet(x, *params,
double[::1] params_c
double[::1] y_c
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 8 parameters are required.")
+ _validate_parameters(params, 8)
# Sum binary flags to activate various terms of the equation
tail_flags = 1 if gaussian_term else 0
@@ -955,7 +982,7 @@ def atan_stepup(x, a, b, c):
return a * (0.5 + (numpy.arctan((1.0 * x - b) / c) / numpy.pi))
-def periodic_gauss(x, *pars):
+def periodic_gauss(x, *params):
"""
Return a sum of gaussian functions defined by
*(npeaks, delta, height, centroid, fwhm)*,
@@ -968,17 +995,22 @@ def periodic_gauss(x, *pars):
- *fwhm* is the full-width at half maximum for all the gaussians
:param x: Independent variable where the function is calculated
- :param pars: *(npeaks, delta, height, centroid, fwhm)*
+ :param params: *(npeaks, delta, height, centroid, fwhm)*
:return: Sum of ``npeaks`` gaussians
"""
- if not len(pars):
- raise IndexError("No parameters specified. " +
- "At least 5 parameters are required.")
+ _validate_parameters(params, 5)
- newpars = numpy.zeros((pars[0], 3), numpy.float64)
- for i in range(int(pars[0])):
- newpars[i, 0] = pars[2]
- newpars[i, 1] = pars[3] + i * pars[1]
- newpars[:, 2] = pars[4]
+ newpars = numpy.zeros((params[0], 3), numpy.float64)
+ for i in range(int(params[0])):
+ newpars[i, 0] = params[2]
+ newpars[i, 1] = params[3] + i * params[1]
+ newpars[:, 2] = params[4]
return sum_gauss(x, newpars)
+
+
+def _validate_parameters(params, multiple):
+ if len(params) == 0:
+ raise IndexError("No parameters specified.")
+ if len(params) % multiple:
+ raise IndexError(f"The number of parameters should be a multiple of {multiple}.")
diff --git a/src/silx/math/fit/functions/include/functions.h b/src/silx/math/fit/functions/include/functions.h
index de4209b..cf084b2 100644
--- a/src/silx/math/fit/functions/include/functions.h
+++ b/src/silx/math/fit/functions/include/functions.h
@@ -53,6 +53,7 @@ int sum_splitgauss(double* x, int len_x, double* pgauss, int len_pgauss, double*
int sum_apvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y);
int sum_pvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y);
int sum_splitpvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y);
+int sum_splitpvoigt2(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y);
int sum_lorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y);
int sum_alorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y);
diff --git a/src/silx/math/fit/functions/src/funs.c b/src/silx/math/fit/functions/src/funs.c
index aae173f..4b41fce 100644
--- a/src/silx/math/fit/functions/src/funs.c
+++ b/src/silx/math/fit/functions/src/funs.c
@@ -434,7 +434,7 @@ int sum_splitgauss(double* x, int len_x, double* pgauss, int len_pgauss, double*
*area* is the area underneath both G(x) and L(x)
*centroid* is the peak x-coordinate for both functions
*fwhm* is the full-width at half maximum of both functions
- *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
+ *eta* is the Lorentzian fraction: PV(x) = eta * L(x) + (1 - eta) * G(x)
Parameters:
-----------
@@ -504,7 +504,7 @@ int sum_apvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y)
*height* is the peak amplitude of G(x) and L(x)
*centroid* is the peak x-coordinate for both functions
*fwhm* is the full-width at half maximum of both functions
- *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
+ *eta* is the Lorentzian fraction: PV(x) = eta * L(x) + (1 - eta) * G(x)
Parameters:
-----------
@@ -573,7 +573,7 @@ int sum_pvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y)
*centroid* is the peak x-coordinate for both functions
*fwhm1* is the full-width at half maximum of both functions for x < centroid
*fwhm2* is the full-width at half maximum of both functions for x > centroid
- *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
+ *eta* is the Lorentzian fraction: PV(x) = eta * L(x) + (1 - eta) * G(x)
Parameters:
-----------
@@ -650,6 +650,98 @@ int sum_splitpvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double
return(0);
}
+/* sum_splitpvoigt2
+ Sum of split pseudo-Voigt functions, defined by
+ (height, centroid, fwhm1, fwhm2, eta1, eta2).
+
+ The pseudo-Voigt profile PV(x) is an approximation of the Voigt profile
+ using a linear combination of a Gaussian curve G(x) and a Lorentzian curve
+ L(x) instead of their convolution.
+
+ *height* is the peak amplitude of G(x) and L(x)
+ *centroid* is the peak x-coordinate for both functions
+ *fwhm1* is the full-width at half maximum of both functions for x < centroid
+ *fwhm2* is the full-width at half maximum of both functions for x > centroid
+ *eta1* is the Lorentzian fraction for x < centroid
+ *eta2* is the Lorentzian fraction for x > centroid
+
+ Parameters:
+ -----------
+
+ - x: Independent variable where the gaussians are calculated.
+ - len_x: Number of elements in the x array.
+ - pvoigt: Array of Voigt function parameters:
+ (height1, centroid1, fwhm11, fwhm21, eta11, eta21, ...)
+ - len_pvoigt: Number of elements in the pvoigt array. Must be
+ a multiple of 6.
+ - y: Output array. Must have memory allocated for the same number
+ of elements as x (len_x).
+
+*/
+int sum_splitpvoigt2(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y)
+{
+ int i, j;
+ double dhelp, x_minus_centroid, inv_two_sqrt_two_log2, sigma1, sigma2;
+ double height, centroid, fwhm1, fwhm2, eta1, eta2;
+
+ if (test_params(len_pvoigt, 6, "sum_splitpvoigt2", "height, centroid, fwhm1, fwhm2, eta1, eta2")) {
+ return(1);
+ }
+
+ /* Initialize output array */
+ for (j=0; j<len_x; j++) {
+ y[j] = 0.;
+ }
+
+ inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
+
+ for (i=0; i<len_pvoigt/6; i++) {
+ height = pvoigt[6*i];
+ centroid = pvoigt[6*i+1];
+ fwhm1 = pvoigt[6*i+2];
+ fwhm2 = pvoigt[6*i+3];
+ eta1 = pvoigt[6*i+4];
+ eta2 = pvoigt[6*i+5];
+
+ sigma1 = fwhm1 * inv_two_sqrt_two_log2;
+ sigma2 = fwhm2 * inv_two_sqrt_two_log2;
+
+ for (j=0; j<len_x; j++) {
+ x_minus_centroid = (x[j] - centroid);
+
+ /* Use fwhm2 and eta2 when x > centroid */
+ if (x_minus_centroid > 0) {
+ /* Lorentzian term */
+ dhelp = (2.0 * x_minus_centroid) / fwhm2;
+ dhelp = 1.0 + (dhelp * dhelp);
+ y[j] += eta2 * height / dhelp;
+
+ /* Gaussian term */
+ dhelp = x_minus_centroid / sigma2;
+ if (dhelp <= 35) {
+ dhelp = exp(-0.5 * dhelp * dhelp);
+ y[j] += (1 - eta2) * height * dhelp;
+ }
+ }
+ /* Use fwhm1 and eta1 when x < centroid */
+ else {
+ /* Lorentzian term */
+ dhelp = (2.0 * x_minus_centroid) / fwhm1;
+ dhelp = 1.0 + (dhelp * dhelp);
+ y[j] += eta1 * height / dhelp;
+
+ /* Gaussian term */
+ dhelp = x_minus_centroid / sigma1;
+ if (dhelp <= 35) {
+ dhelp = exp(-0.5 * dhelp * dhelp);
+ y[j] += (1 - eta1) * height * dhelp;
+ }
+ }
+ }
+ }
+ return(0);
+}
+
/* sum_lorentz
Sum of Lorentz functions, defined by (height, centroid, fwhm).
diff --git a/src/silx/math/fit/functions_wrapper.pxd b/src/silx/math/fit/functions_wrapper.pxd
index 38de94a..232a14b 100644
--- a/src/silx/math/fit/functions_wrapper.pxd
+++ b/src/silx/math/fit/functions_wrapper.pxd
@@ -102,6 +102,12 @@ cdef extern from "functions.h":
int len_pvoigt,
double* y)
+ int sum_splitpvoigt2(double* x,
+ int len_x,
+ double* pvoigt,
+ int len_pvoigt,
+ double* y)
+
int sum_lorentz(double* x,
int len_x,
double* plorentz,
diff --git a/src/silx/math/fit/leastsq.py b/src/silx/math/fit/leastsq.py
index e49977f..9a1e2ad 100644
--- a/src/silx/math/fit/leastsq.py
+++ b/src/silx/math/fit/leastsq.py
@@ -46,21 +46,31 @@ import copy
_logger = logging.getLogger(__name__)
# codes understood by the routine
-CFREE = 0
-CPOSITIVE = 1
-CQUOTED = 2
-CFIXED = 3
-CFACTOR = 4
-CDELTA = 5
-CSUM = 6
-CIGNORED = 7
-
-def leastsq(model, xdata, ydata, p0, sigma=None,
- constraints=None, model_deriv=None, epsfcn=None,
- deltachi=None, full_output=None,
- check_finite=True,
- left_derivative=False,
- max_iter=100):
+CFREE = 0
+CPOSITIVE = 1
+CQUOTED = 2
+CFIXED = 3
+CFACTOR = 4
+CDELTA = 5
+CSUM = 6
+CIGNORED = 7
+
+
+def leastsq(
+ model,
+ xdata,
+ ydata,
+ p0,
+ sigma=None,
+ constraints=None,
+ model_deriv=None,
+ epsfcn=None,
+ deltachi=None,
+ full_output=None,
+ check_finite=True,
+ left_derivative=False,
+ max_iter=100,
+):
"""
Use non-linear least squares Levenberg-Marquardt algorithm to fit a function, f, to
data with optional constraints on the fitted parameters.
@@ -272,7 +282,9 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
filter_xdata = True
if filter_xdata:
if xdata.size != ydata.size:
- raise ValueError("xdata contains non-finite data that cannot be filtered")
+ raise ValueError(
+ "xdata contains non-finite data that cannot be filtered"
+ )
else:
# we leave the xdata as they where
old_shape = xdata.shape
@@ -324,25 +336,27 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
elif txt in ["IGNORED", "IGNORE"]:
constraints[i][0] = CIGNORED
else:
- #I should raise an exception
+ # I should raise an exception
raise ValueError("Unknown constraint %s" % constraints[i][0])
if constraints[i][0] > 0:
constrained_fit = True
if constrained_fit:
if full_output is None:
- _logger.info("Recommended to set full_output to True when using constraints")
+ _logger.info(
+ "Recommended to set full_output to True when using constraints"
+ )
# Levenberg-Marquardt algorithm
fittedpar = parameters.__copy__()
flambda = 0.001
iiter = max_iter
- #niter = 0
- last_evaluation=None
+ # niter = 0
+ last_evaluation = None
x = xdata
y = ydata
chisq0 = -1
iteration_counter = 0
- while (iiter > 0):
+ while iiter > 0:
weight = weight0
"""
I cannot evaluate the initial chisq here because I do not know
@@ -357,60 +371,67 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
"""
iteration_counter += 1
chisq0, alpha0, beta, internal_output = chisq_alpha_beta(
- model, fittedpar,
- x, y, weight, constraints=constraints,
- model_deriv=model_deriv,
- epsfcn=epsfcn,
- left_derivative=left_derivative,
- last_evaluation=last_evaluation,
- full_output=True)
+ model,
+ fittedpar,
+ x,
+ y,
+ weight,
+ constraints=constraints,
+ model_deriv=model_deriv,
+ epsfcn=epsfcn,
+ left_derivative=left_derivative,
+ last_evaluation=last_evaluation,
+ full_output=True,
+ )
n_free = internal_output["n_free"]
free_index = internal_output["free_index"]
noigno = internal_output["noigno"]
fitparam = internal_output["fitparam"]
function_calls = internal_output["function_calls"]
function_call_counter += function_calls
- #print("chisq0 = ", chisq0, n_free, fittedpar)
- #raise
+ # print("chisq0 = ", chisq0, n_free, fittedpar)
+ # raise
nr, nc = alpha0.shape
flag = 0
- #lastdeltachi = chisq0
+ # lastdeltachi = chisq0
while flag == 0:
alpha = alpha0 * (1.0 + flambda * numpy.identity(nr))
deltapar = numpy.dot(beta, inv(alpha))
if constraints is None:
- newpar = fitparam + deltapar [0]
+ newpar = fitparam + deltapar[0]
else:
newpar = parameters.__copy__()
pwork = numpy.zeros(deltapar.shape, numpy.float64)
for i in range(n_free):
if constraints is None:
- pwork [0] [i] = fitparam [i] + deltapar [0] [i]
- elif constraints [free_index[i]][0] == CFREE:
- pwork [0] [i] = fitparam [i] + deltapar [0] [i]
- elif constraints [free_index[i]][0] == CPOSITIVE:
- #abs method
- pwork [0] [i] = fitparam [i] + deltapar [0] [i]
- #square method
- #pwork [0] [i] = (numpy.sqrt(fitparam [i]) + deltapar [0] [i]) * \
+ pwork[0][i] = fitparam[i] + deltapar[0][i]
+ elif constraints[free_index[i]][0] == CFREE:
+ pwork[0][i] = fitparam[i] + deltapar[0][i]
+ elif constraints[free_index[i]][0] == CPOSITIVE:
+ # abs method
+ pwork[0][i] = fitparam[i] + deltapar[0][i]
+ # square method
+ # pwork [0] [i] = (numpy.sqrt(fitparam [i]) + deltapar [0] [i]) * \
# (numpy.sqrt(fitparam [i]) + deltapar [0] [i])
elif constraints[free_index[i]][0] == CQUOTED:
- pmax = max(constraints[free_index[i]][1],
- constraints[free_index[i]][2])
- pmin = min(constraints[free_index[i]][1],
- constraints[free_index[i]][2])
+ pmax = max(
+ constraints[free_index[i]][1], constraints[free_index[i]][2]
+ )
+ pmin = min(
+ constraints[free_index[i]][1], constraints[free_index[i]][2]
+ )
A = 0.5 * (pmax + pmin)
B = 0.5 * (pmax - pmin)
if B != 0:
- pwork [0] [i] = A + \
- B * numpy.sin(numpy.arcsin((fitparam[i] - A)/B)+ \
- deltapar [0] [i])
+ pwork[0][i] = A + B * numpy.sin(
+ numpy.arcsin((fitparam[i] - A) / B) + deltapar[0][i]
+ )
else:
txt = "Error processing constrained fit\n"
txt += "Parameter limits are %g and %g\n" % (pmin, pmax)
- txt += "A = %g B = %g" % (A, B)
+ txt += "A = %g B = %g" % (A, B)
raise ValueError("Invalid parameter limits")
- newpar[free_index[i]] = pwork [0] [i]
+ newpar[free_index[i]] = pwork[0][i]
newpar = numpy.array(_get_parameters(newpar, constraints))
workpar = numpy.take(newpar, noigno)
yfit = model(x, *workpar)
@@ -422,7 +443,7 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
_logger.warning(msg)
yfit.shape = -1
function_call_counter += 1
- chisq = (weight * pow(y-yfit, 2)).sum()
+ chisq = (weight * pow(y - yfit, 2)).sum()
absdeltachi = chisq0 - chisq
if absdeltachi < 0:
flambda *= 10.0
@@ -440,7 +461,9 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
iiter = 0
elif absdeltachi < numpy.sqrt(epsfcn):
iiter = 0
- _logger.info("Iteration finished due to too small absolute chi decrement")
+ _logger.info(
+ "Iteration finished due to too small absolute chi decrement"
+ )
chisq0 = chisq
flambda = flambda / 10.0
last_evaluation = yfit
@@ -462,13 +485,18 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
new_constraints[idx][1] = 0
new_constraints[idx][2] = 0
chisq, alpha, beta, internal_output = chisq_alpha_beta(
- model, fittedpar,
- x, y, weight, constraints=new_constraints,
- model_deriv=model_deriv,
- epsfcn=epsfcn,
- left_derivative=left_derivative,
- last_evaluation=last_evaluation,
- full_output=True)
+ model,
+ fittedpar,
+ x,
+ y,
+ weight,
+ constraints=new_constraints,
+ model_deriv=model_deriv,
+ epsfcn=epsfcn,
+ left_derivative=left_derivative,
+ last_evaluation=last_evaluation,
+ full_output=True,
+ )
# obtained chisq should be identical to chisq0
try:
cov = inv(alpha)
@@ -478,7 +506,9 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
if cov is not None:
for idx, value in enumerate(flag_special):
if value in [CFIXED, CIGNORED]:
- cov = numpy.insert(numpy.insert(cov, idx, 0, axis=1), idx, 0, axis=0)
+ cov = numpy.insert(
+ numpy.insert(cov, idx, 0, axis=1), idx, 0, axis=0
+ )
cov[idx, idx] = fittedpar[idx] * fittedpar[idx]
if not full_output:
@@ -488,18 +518,32 @@ def leastsq(model, xdata, ydata, p0, sigma=None,
sigmapar = _get_sigma_parameters(fittedpar, sigma0, constraints)
ddict = {}
ddict["chisq"] = chisq0
- ddict["reduced_chisq"] = chisq0 / (len(yfit)-n_free)
+ ddict["reduced_chisq"] = chisq0 / (len(yfit) - n_free)
ddict["covariance"] = cov0
ddict["uncertainties"] = sigmapar
ddict["fvec"] = last_evaluation
ddict["nfev"] = function_call_counter
ddict["niter"] = iteration_counter
- return fittedpar, cov, ddict #, chisq/(len(yfit)-len(sigma0)), sigmapar,niter,lastdeltachi
-
-def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
- model_deriv=None, epsfcn=None, left_derivative=False,
- last_evaluation=None, full_output=False):
-
+ return (
+ fittedpar,
+ cov,
+ ddict,
+ ) # , chisq/(len(yfit)-len(sigma0)), sigmapar,niter,lastdeltachi
+
+
+def chisq_alpha_beta(
+ model,
+ parameters,
+ x,
+ y,
+ weight,
+ constraints=None,
+ model_deriv=None,
+ epsfcn=None,
+ left_derivative=False,
+ last_evaluation=None,
+ full_output=False,
+):
"""
Get chi square, the curvature matrix alpha and the matrix beta according to the input parameters.
If all the parameters are unconstrained, the covariance matrix is the inverse of the alpha matrix.
@@ -597,10 +641,10 @@ def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
epsfcn = numpy.finfo(numpy.float64).eps
else:
epsfcn = max(epsfcn, numpy.finfo(numpy.float64).eps)
- #nr0, nc = data.shape
+ # nr0, nc = data.shape
n_param = len(parameters)
if constraints is None:
- derivfactor = numpy.ones((n_param, ))
+ derivfactor = numpy.ones((n_param,))
n_free = n_param
noigno = numpy.arange(n_param)
free_index = noigno * 1
@@ -615,30 +659,34 @@ def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
if constraints[i][0] != CIGNORED:
noigno.append(i)
if constraints[i][0] == CFREE:
- fitparam.append(parameters [i])
+ fitparam.append(parameters[i])
derivfactor.append(1.0)
free_index.append(i)
n_free += 1
elif constraints[i][0] == CPOSITIVE:
fitparam.append(abs(parameters[i]))
derivfactor.append(1.0)
- #fitparam.append(numpy.sqrt(abs(parameters[i])))
- #derivfactor.append(2.0*numpy.sqrt(abs(parameters[i])))
+ # fitparam.append(numpy.sqrt(abs(parameters[i])))
+ # derivfactor.append(2.0*numpy.sqrt(abs(parameters[i])))
free_index.append(i)
n_free += 1
elif constraints[i][0] == CQUOTED:
pmax = max(constraints[i][1], constraints[i][2])
- pmin =min(constraints[i][1], constraints[i][2])
- if ((pmax-pmin) > 0) & \
- (parameters[i] <= pmax) & \
- (parameters[i] >= pmin):
+ pmin = min(constraints[i][1], constraints[i][2])
+ if (
+ ((pmax - pmin) > 0)
+ & (parameters[i] <= pmax)
+ & (parameters[i] >= pmin)
+ ):
A = 0.5 * (pmax + pmin)
B = 0.5 * (pmax - pmin)
fitparam.append(parameters[i])
- derivfactor.append(B*numpy.cos(numpy.arcsin((parameters[i] - A)/B)))
+ derivfactor.append(
+ B * numpy.cos(numpy.arcsin((parameters[i] - A) / B))
+ )
free_index.append(i)
n_free += 1
- elif (pmax-pmin) > 0:
+ elif (pmax - pmin) > 0:
print("WARNING: Quoted parameter outside boundaries")
print("Initial value = %f" % parameters[i])
print("Limits are %f and %f" % (pmin, pmax))
@@ -646,15 +694,15 @@ def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
fitparam = numpy.array(fitparam, numpy.float64)
alpha = numpy.zeros((n_free, n_free), numpy.float64)
beta = numpy.zeros((1, n_free), numpy.float64)
- #delta = (fitparam + numpy.equal(fitparam, 0.0)) * 0.00001
+ # delta = (fitparam + numpy.equal(fitparam, 0.0)) * 0.00001
delta = (fitparam + numpy.equal(fitparam, 0.0)) * numpy.sqrt(epsfcn)
- nr = y.size
+ nr = y.size
##############
# Prior to each call to the function one has to re-calculate the
# parameters
pwork = parameters.__copy__()
for i in range(n_free):
- pwork [free_index[i]] = fitparam [i]
+ pwork[free_index[i]] = fitparam[i]
if n_free == 0:
raise ValueError("No free parameters to fit")
function_calls = 0
@@ -667,26 +715,26 @@ def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
function_calls += 1
for i in range(n_free):
if model_deriv is None:
- #pwork = parameters.__copy__()
- pwork[free_index[i]] = fitparam [i] + delta [i]
+ # pwork = parameters.__copy__()
+ pwork[free_index[i]] = fitparam[i] + delta[i]
newpar = _get_parameters(pwork.tolist(), constraints)
newpar = numpy.take(newpar, noigno)
f1 = model(x, *newpar)
f1.shape = -1
function_calls += 1
if left_derivative:
- pwork[free_index[i]] = fitparam [i] - delta [i]
+ pwork[free_index[i]] = fitparam[i] - delta[i]
newpar = _get_parameters(pwork.tolist(), constraints)
- newpar=numpy.take(newpar, noigno)
+ newpar = numpy.take(newpar, noigno)
f2 = model(x, *newpar)
function_calls += 1
help0 = (f1 - f2) / (2.0 * delta[i])
else:
help0 = (f1 - f2) / (delta[i])
help0 = help0 * derivfactor[i]
- pwork[free_index[i]] = fitparam [i]
- #removed I resize outside the loop:
- #help0 = numpy.resize(help0, (1, nr))
+ pwork[free_index[i]] = fitparam[i]
+ # removed I resize outside the loop:
+ # help0 = numpy.resize(help0, (1, nr))
else:
help0 = model_deriv(x, pwork, free_index[i])
help0 = help0 * derivfactor[i]
@@ -696,7 +744,7 @@ def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
else:
deriv = numpy.concatenate((deriv, help0), 0)
- #line added to resize outside the loop
+ # line added to resize outside the loop
deriv = numpy.resize(deriv, (n_free, nr))
if last_evaluation is None:
if constraints is None:
@@ -719,7 +767,7 @@ def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
beta = help1
else:
beta = numpy.concatenate((beta, help1), 1)
- help1 = numpy.inner(deriv, weight*derivi)
+ help1 = numpy.inner(deriv, weight * derivi)
if i == 0:
alpha = help1
else:
@@ -752,13 +800,13 @@ def _get_parameters(parameters, constraints):
if constraints is None:
return parameters * 1
newparam = []
- #first I make the free parameters
- #because the quoted ones put troubles
+ # first I make the free parameters
+ # because the quoted ones put troubles
for i in range(len(constraints)):
if constraints[i][0] == CFREE:
newparam.append(parameters[i])
elif constraints[i][0] == CPOSITIVE:
- #newparam.append(parameters[i] * parameters[i])
+ # newparam.append(parameters[i] * parameters[i])
newparam.append(abs(parameters[i]))
elif constraints[i][0] == CQUOTED:
newparam.append(parameters[i])
@@ -779,7 +827,7 @@ def _get_parameters(parameters, constraints):
# using this module
newparam[i] = 0
elif constraints[i][0] == CSUM:
- newparam[i] = constraints[i][2]-newparam[int(constraints[i][1])]
+ newparam[i] = constraints[i][2] - newparam[int(constraints[i][1])]
return newparam
@@ -805,31 +853,31 @@ def _get_sigma_parameters(parameters, sigma0, constraints):
sigma_par = numpy.zeros(parameters.shape, numpy.float64)
for i in range(len(constraints)):
if constraints[i][0] == CFREE:
- sigma_par [i] = sigma0[n_free]
+ sigma_par[i] = sigma0[n_free]
n_free += 1
elif constraints[i][0] == CPOSITIVE:
- #sigma_par [i] = 2.0 * sigma0[n_free]
- sigma_par [i] = sigma0[n_free]
+ # sigma_par [i] = 2.0 * sigma0[n_free]
+ sigma_par[i] = sigma0[n_free]
n_free += 1
elif constraints[i][0] == CQUOTED:
- pmax = max(constraints [i][1], constraints [i][2])
- pmin = min(constraints [i][1], constraints [i][2])
+ pmax = max(constraints[i][1], constraints[i][2])
+ pmin = min(constraints[i][1], constraints[i][2])
# A = 0.5 * (pmax + pmin)
B = 0.5 * (pmax - pmin)
- if (B > 0) & (parameters [i] < pmax) & (parameters [i] > pmin):
- sigma_par [i] = abs(B * numpy.cos(parameters[i]) * sigma0[n_free])
+ if (B > 0) & (parameters[i] < pmax) & (parameters[i] > pmin):
+ sigma_par[i] = abs(B * numpy.cos(parameters[i]) * sigma0[n_free])
n_free += 1
else:
- sigma_par [i] = parameters[i]
+ sigma_par[i] = parameters[i]
elif abs(constraints[i][0]) == CFIXED:
sigma_par[i] = parameters[i]
for i in range(len(constraints)):
if constraints[i][0] == CFACTOR:
- sigma_par [i] = constraints[i][2]*sigma_par[int(constraints[i][1])]
+ sigma_par[i] = constraints[i][2] * sigma_par[int(constraints[i][1])]
elif constraints[i][0] == CDELTA:
- sigma_par [i] = sigma_par[int(constraints[i][1])]
+ sigma_par[i] = sigma_par[int(constraints[i][1])]
elif constraints[i][0] == CSUM:
- sigma_par [i] = sigma_par[int(constraints[i][1])]
+ sigma_par[i] = sigma_par[int(constraints[i][1])]
return sigma_par
@@ -852,24 +900,29 @@ def main(argv=None):
dummy = 2.3548200450309493 * (t - param[3]) / param[4]
return param[0] + param[1] * t + param[2] * myexp(-0.5 * dummy * dummy)
-
def myexp(x):
# put a (bad) filter to avoid over/underflows
# with no python looping
- return numpy.exp(x * numpy.less(abs(x), 250)) -\
- 1.0 * numpy.greater_equal(abs(x), 250)
+ return numpy.exp(x * numpy.less(abs(x), 250)) - 1.0 * numpy.greater_equal(
+ abs(x), 250
+ )
xx = numpy.arange(npoints, dtype=numpy.float64)
- yy = gauss(xx, *[10.5, 2, 1000.0, 20., 15])
+ yy = gauss(xx, *[10.5, 2, 1000.0, 20.0, 15])
sy = numpy.sqrt(abs(yy))
- parameters = [0.0, 1.0, 900.0, 25., 10]
+ parameters = [0.0, 1.0, 900.0, 25.0, 10]
stime = time.time()
- fittedpar, cov, ddict = leastsq(gauss, xx, yy, parameters,
- sigma=sy,
- left_derivative=False,
- full_output=True,
- check_finite=True)
+ fittedpar, cov, ddict = leastsq(
+ gauss,
+ xx,
+ yy,
+ parameters,
+ sigma=sy,
+ left_derivative=False,
+ full_output=True,
+ check_finite=True,
+ )
etime = time.time()
sigmapars = numpy.sqrt(numpy.diag(cov))
print("Took ", etime - stime, "seconds")
@@ -879,22 +932,20 @@ def main(argv=None):
print("Sigma pars = ", sigmapars)
try:
from scipy.optimize import curve_fit as cfit
+
SCIPY = True
except ImportError:
SCIPY = False
if SCIPY:
counter = 0
stime = time.time()
- scipy_fittedpar, scipy_cov = cfit(gauss,
- xx,
- yy,
- parameters,
- sigma=sy)
+ scipy_fittedpar, scipy_cov = cfit(gauss, xx, yy, parameters, sigma=sy)
etime = time.time()
print("Scipy Took ", etime - stime, "seconds")
print("Counter = ", counter)
print("scipy = ", scipy_fittedpar)
print("Sigma = ", numpy.sqrt(numpy.diag(scipy_cov)))
+
if __name__ == "__main__":
main()
diff --git a/src/silx/math/fit/test/test_bgtheories.py b/src/silx/math/fit/test/test_bgtheories.py
index 40f0831..8dd8d81 100644
--- a/src/silx/math/fit/test/test_bgtheories.py
+++ b/src/silx/math/fit/test/test_bgtheories.py
@@ -30,13 +30,13 @@ from silx.math.fit.functions import sum_gauss
class TestBgTheories(unittest.TestCase):
- """
- """
+ """ """
+
def setUp(self):
self.x = numpy.arange(100)
- self.y = 10 + 0.05 * self.x + sum_gauss(self.x, 10., 45., 15.)
+ self.y = 10 + 0.05 * self.x + sum_gauss(self.x, 10.0, 45.0, 15.0)
# add a very narrow high amplitude peak to test strip and snip
- self.y += sum_gauss(self.x, 100., 75., 2.)
+ self.y += sum_gauss(self.x, 100.0, 75.0, 2.0)
self.narrow_peak_index = list(self.x).index(75)
random.seed()
@@ -46,46 +46,47 @@ class TestBgTheories(unittest.TestCase):
def testTheoriesAttrs(self):
for theory_name in bgtheories.THEORY:
self.assertIsInstance(theory_name, str)
- self.assertTrue(hasattr(bgtheories.THEORY[theory_name],
- "function"))
- self.assertTrue(hasattr(bgtheories.THEORY[theory_name].function,
- "__call__"))
+ self.assertTrue(hasattr(bgtheories.THEORY[theory_name], "function"))
+ self.assertTrue(
+ hasattr(bgtheories.THEORY[theory_name].function, "__call__")
+ )
# Ensure legacy functions are not renamed accidentally
self.assertTrue(
- {"No Background", "Constant", "Linear", "Strip", "Snip"}.issubset(
- set(bgtheories.THEORY)))
+ {"No Background", "Constant", "Linear", "Strip", "Snip"}.issubset(
+ set(bgtheories.THEORY)
+ )
+ )
def testNoBg(self):
nobgfun = bgtheories.THEORY["No Background"].function
- self.assertTrue(numpy.array_equal(nobgfun(self.x, self.y),
- numpy.zeros_like(self.x)))
+ self.assertTrue(
+ numpy.array_equal(nobgfun(self.x, self.y), numpy.zeros_like(self.x))
+ )
# default estimate
- self.assertEqual(bgtheories.THEORY["No Background"].estimate(self.x, self.y),
- ([], []))
+ self.assertEqual(
+ bgtheories.THEORY["No Background"].estimate(self.x, self.y), ([], [])
+ )
def testConstant(self):
consfun = bgtheories.THEORY["Constant"].function
c = random.random() * 100
- self.assertTrue(numpy.array_equal(consfun(self.x, self.y, c),
- c * numpy.ones_like(self.x)))
+ self.assertTrue(
+ numpy.array_equal(consfun(self.x, self.y, c), c * numpy.ones_like(self.x))
+ )
# default estimate
esti_par, cons = bgtheories.THEORY["Constant"].estimate(self.x, self.y)
- self.assertEqual(cons,
- [[0, 0, 0]])
- self.assertAlmostEqual(esti_par,
- min(self.y))
+ self.assertEqual(cons, [[0, 0, 0]])
+ self.assertAlmostEqual(esti_par, min(self.y))
def testLinear(self):
linfun = bgtheories.THEORY["Linear"].function
a = random.random() * 100
b = random.random() * 100
- self.assertTrue(numpy.array_equal(linfun(self.x, self.y, a, b),
- a + b * self.x))
+ self.assertTrue(numpy.array_equal(linfun(self.x, self.y, a, b), a + b * self.x))
# default estimate
esti_par, cons = bgtheories.THEORY["Linear"].estimate(self.x, self.y)
- self.assertEqual(cons,
- [[0, 0, 0], [0, 0, 0]])
+ self.assertEqual(cons, [[0, 0, 0], [0, 0, 0]])
self.assertAlmostEqual(esti_par[0], 10, places=3)
self.assertAlmostEqual(esti_par[1], 0.05, places=3)
@@ -108,8 +109,7 @@ class TestBgTheories(unittest.TestCase):
bg = stripfun(self.x, self.y, width, niter)
# assert peak amplitude has been decreased
- self.assertLess(bg[self.narrow_peak_index],
- self.y[self.narrow_peak_index])
+ self.assertLess(bg[self.narrow_peak_index], self.y[self.narrow_peak_index])
# default estimate
for i in anchors_indices:
@@ -138,9 +138,11 @@ class TestBgTheories(unittest.TestCase):
bg = snipfun(self.x, self.y, width)
# assert peak amplitude has been decreased
- self.assertLess(bg[self.narrow_peak_index],
- self.y[self.narrow_peak_index],
- "Snip didn't decrease the peak amplitude.")
+ self.assertLess(
+ bg[self.narrow_peak_index],
+ self.y[self.narrow_peak_index],
+ "Snip didn't decrease the peak amplitude.",
+ )
# anchored data must remain fixed
for i in anchors_indices:
diff --git a/src/silx/math/fit/test/test_filters.py b/src/silx/math/fit/test/test_filters.py
index 5b8b070..645991e 100644
--- a/src/silx/math/fit/test/test_filters.py
+++ b/src/silx/math/fit/test/test_filters.py
@@ -35,35 +35,70 @@ class TestSmooth(unittest.TestCase):
noise and the result of smoothing that signal is less than 5%. We compare
the sum of all samples in each curve.
"""
+
def setUp(self):
x = numpy.arange(5000)
# (height1, center1, fwhm1, beamfwhm...)
- slit_params = (50, 500, 200, 100,
- 50, 600, 80, 30,
- 20, 2000, 150, 150,
- 50, 2250, 110, 100,
- 40, 3000, 50, 10,
- 23, 4980, 250, 20)
+ slit_params = (
+ 50,
+ 500,
+ 200,
+ 100,
+ 50,
+ 600,
+ 80,
+ 30,
+ 20,
+ 2000,
+ 150,
+ 150,
+ 50,
+ 2250,
+ 110,
+ 100,
+ 40,
+ 3000,
+ 50,
+ 10,
+ 23,
+ 4980,
+ 250,
+ 20,
+ )
self.y1 = functions.sum_slit(x, *slit_params)
# 5% noise
- self.y1 = add_relative_noise(self.y1, 5.)
+ self.y1 = add_relative_noise(self.y1, 5.0)
# (height1, center1, fwhm1...)
- step_params = (50, 500, 200,
- 50, 600, 80,
- 20, 2000, 150,
- 50, 2250, 110,
- 40, 3000, 50,
- 23, 4980, 250,)
+ step_params = (
+ 50,
+ 500,
+ 200,
+ 50,
+ 600,
+ 80,
+ 20,
+ 2000,
+ 150,
+ 50,
+ 2250,
+ 110,
+ 40,
+ 3000,
+ 50,
+ 23,
+ 4980,
+ 250,
+ )
self.y2 = functions.sum_stepup(x, *step_params)
# 5% noise
- self.y2 = add_relative_noise(self.y2, 5.)
+ self.y2 = add_relative_noise(self.y2, 5.0)
self.y3 = functions.sum_stepdown(x, *step_params)
# 5% noise
- self.y3 = add_relative_noise(self.y3, 5.)
+ self.y3 = add_relative_noise(self.y3, 5.0)
def tearDown(self):
pass
@@ -76,9 +111,12 @@ class TestSmooth(unittest.TestCase):
# we added +-5% of random noise. The difference must be much lower
# than 5%.
diff = abs(sum(smoothed_y) - sum(y)) / sum(y)
- self.assertLess(diff, 0.05,
- "Difference between data with 5%% noise and " +
- "smoothed data is > 5%% (%f %%)" % (diff * 100))
+ self.assertLess(
+ diff,
+ 0.05,
+ "Difference between data with 5%% noise and "
+ + "smoothed data is > 5%% (%f %%)" % (diff * 100),
+ )
# Try various smoothing levels
npts += 25
@@ -89,8 +127,9 @@ class TestSmooth(unittest.TestCase):
smoothed_y = filters.smooth1d(self.y1)
for i in range(1, len(self.y1) - 1):
- self.assertAlmostEqual(4 * smoothed_y[i],
- self.y1[i-1] + 2 * self.y1[i] + self.y1[i+1])
+ self.assertAlmostEqual(
+ 4 * smoothed_y[i], self.y1[i - 1] + 2 * self.y1[i] + self.y1[i + 1]
+ )
def testSmooth2d(self):
"""Test that a 2D smoothing is the same as two successive and
@@ -117,5 +156,4 @@ class TestSmooth(unittest.TestCase):
for i in range(0, y.shape[0]):
for j in range(0, y.shape[1]):
- self.assertAlmostEqual(smoothed_y[i, j],
- expected_smooth[i, j])
+ self.assertAlmostEqual(smoothed_y[i, j], expected_smooth[i, j])
diff --git a/src/silx/math/fit/test/test_fit.py b/src/silx/math/fit/test/test_fit.py
index 39a04f9..a25a94b 100644
--- a/src/silx/math/fit/test/test_fit.py
+++ b/src/silx/math/fit/test/test_fit.py
@@ -43,6 +43,7 @@ class Test_leastsq(unittest.TestCase):
def setUp(self):
try:
from silx.math.fit import leastsq
+
self.instance = leastsq
except ImportError:
self.instance = None
@@ -50,9 +51,10 @@ class Test_leastsq(unittest.TestCase):
def myexp(x):
# put a (bad) filter to avoid over/underflows
# with no python looping
- with numpy.errstate(invalid='ignore'):
- return numpy.exp(x*numpy.less(abs(x), 250)) - \
- 1.0 * numpy.greater_equal(abs(x), 250)
+ with numpy.errstate(invalid="ignore"):
+ return numpy.exp(
+ x * numpy.less(abs(x), 250)
+ ) - 1.0 * numpy.greater_equal(abs(x), 250)
self.my_exp = myexp
@@ -60,8 +62,8 @@ class Test_leastsq(unittest.TestCase):
params = numpy.array(params, copy=False, dtype=numpy.float64)
result = params[0] + params[1] * x
for i in range(2, len(params), 3):
- p = params[i:(i+3)]
- dummy = 2.3548200450309493*(x - p[1])/p[2]
+ p = params[i : (i + 3)]
+ dummy = 2.3548200450309493 * (x - p[1]) / p[2]
result += p[0] * self.my_exp(-0.5 * dummy * dummy)
return result
@@ -75,17 +77,17 @@ class Test_leastsq(unittest.TestCase):
gaussian_peak = (idx - 2) // 3
gaussian_parameter = (idx - 2) % 3
actual_idx = 2 + 3 * gaussian_peak
- p = params[actual_idx:(actual_idx+3)]
+ p = params[actual_idx : (actual_idx + 3)]
if gaussian_parameter == 0:
return self.gauss(x, *[0, 0, 1.0, p[1], p[2]])
if gaussian_parameter == 1:
tmp = self.gauss(x, *[0, 0, p[0], p[1], p[2]])
- tmp *= 2.3548200450309493*(x - p[1])/p[2]
- return tmp * 2.3548200450309493/p[2]
+ tmp *= 2.3548200450309493 * (x - p[1]) / p[2]
+ return tmp * 2.3548200450309493 / p[2]
if gaussian_parameter == 2:
tmp = self.gauss(x, *[0, 0, p[0], p[1], p[2]])
- tmp *= 2.3548200450309493*(x - p[1])/p[2]
- return tmp * 2.3548200450309493*(x - p[1])/(p[2]*p[2])
+ tmp *= 2.3548200450309493 * (x - p[1]) / p[2]
+ return tmp * 2.3548200450309493 * (x - p[1]) / (p[2] * p[2])
self.gauss_derivative = gauss_derivative
@@ -98,14 +100,15 @@ class Test_leastsq(unittest.TestCase):
self.model_derivative = None
def testImport(self):
- self.assertTrue(self.instance is not None,
- "Cannot import leastsq from silx.math.fit")
+ self.assertTrue(
+ self.instance is not None, "Cannot import leastsq from silx.math.fit"
+ )
def testUnconstrainedFitNoWeight(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.)
+ parameters_actual = [10.5, 2, 1000.0, 20.0, 15]
+ x = numpy.arange(10000.0)
y = self.gauss(x, *parameters_actual)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
+ parameters_estimate = [0.0, 1.0, 900.0, 25.0, 10]
model_function = self.gauss
fittedpar, cov = self.instance(model_function, x, y, parameters_estimate)
@@ -113,32 +116,36 @@ class Test_leastsq(unittest.TestCase):
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
def testUnconstrainedFitWeight(self):
- parameters_actual = [10.5,2,1000.0,20.,15]
- x = numpy.arange(10000.)
+ parameters_actual = [10.5, 2, 1000.0, 20.0, 15]
+ x = numpy.arange(10000.0)
y = self.gauss(x, *parameters_actual)
sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
+ parameters_estimate = [0.0, 1.0, 900.0, 25.0, 10]
model_function = self.gauss
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma)
+ fittedpar, cov = self.instance(
+ model_function, x, y, parameters_estimate, sigma=sigma
+ )
test_condition = numpy.allclose(parameters_actual, fittedpar)
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
def testDerivativeFunction(self):
- parameters_actual = [10.5, 2, 10000.0, 20., 150, 5000, 900., 300]
- x = numpy.arange(10000.)
+ parameters_actual = [10.5, 2, 10000.0, 20.0, 150, 5000, 900.0, 300]
+ x = numpy.arange(10000.0)
y = self.gauss(x, *parameters_actual)
delta = numpy.sqrt(numpy.finfo(numpy.float64).eps)
for i in range(len(parameters_actual)):
@@ -155,44 +162,47 @@ class Test_leastsq(unittest.TestCase):
p[i] = parameters_actual[i] - delta_par
yMinus = self.gauss(x, *p)
numerical_derivative = (yPlus - yMinus) / (2 * delta_par)
- #numerical_derivative = (self.gauss(x, *p) - y) / delta_par
+ # numerical_derivative = (self.gauss(x, *p) - y) / delta_par
p[i] = parameters_actual[i]
derivative = self.gauss_derivative(x, p, i)
diff = numerical_derivative - derivative
- test_condition = numpy.allclose(numerical_derivative,
- derivative, atol=5.0e-6)
+ test_condition = numpy.allclose(
+ numerical_derivative, derivative, atol=5.0e-6
+ )
if not test_condition:
msg = "Error calculating derivative of parameter %d." % i
msg += "\n diff min = %g diff max = %g" % (diff.min(), diff.max())
self.assertTrue(test_condition, msg)
def testConstrainedFit(self):
- CFREE = 0
- CPOSITIVE = 1
- CQUOTED = 2
- CFIXED = 3
- CFACTOR = 4
- CDELTA = 5
- CSUM = 6
- parameters_actual = [10.5, 2, 10000.0, 20., 150, 5000, 900., 300]
- x = numpy.arange(10000.)
+ CFREE = 0
+ CPOSITIVE = 1
+ CQUOTED = 2
+ CFIXED = 3
+ CFACTOR = 4
+ CDELTA = 5
+ CSUM = 6
+ parameters_actual = [10.5, 2, 10000.0, 20.0, 150, 5000, 900.0, 300]
+ x = numpy.arange(10000.0)
y = self.gauss(x, *parameters_actual)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10, 400, 850, 200]
+ parameters_estimate = [0.0, 1.0, 900.0, 25.0, 10, 400, 850, 200]
model_function = self.gauss
model_deriv = self.gauss_derivative
constraints_all_free = [[0, 0, 0]] * len(parameters_actual)
constraints_all_positive = [[1, 0, 0]] * len(parameters_actual)
constraints_delta_position = [[0, 0, 0]] * len(parameters_actual)
constraints_delta_position[6] = [CDELTA, 3, 880]
- constraints_sum_position = constraints_all_positive * 1
+ constraints_sum_position = constraints_all_positive * 1
constraints_sum_position[6] = [CSUM, 3, 920]
constraints_factor = constraints_delta_position * 1
constraints_factor[2] = [CFACTOR, 5, 2]
- constraints_list = [None,
- constraints_all_free,
- constraints_all_positive,
- constraints_delta_position,
- constraints_sum_position]
+ constraints_list = [
+ None,
+ constraints_all_free,
+ constraints_all_positive,
+ constraints_delta_position,
+ constraints_sum_position,
+ ]
# for better code coverage, the warning recommending to set full_output
# to True when using constraints should be shown at least once
@@ -203,152 +213,176 @@ class Test_leastsq(unittest.TestCase):
elif index == 3:
full_output = 0
for model_deriv in [None, self.gauss_derivative]:
- for sigma in [None, numpy.sqrt(y)]:
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- constraints=constraints,
- model_deriv=model_deriv,
- full_output=full_output)[:2]
+ for sigma in [None, numpy.sqrt(y)]:
+ fittedpar, cov = self.instance(
+ model_function,
+ x,
+ y,
+ parameters_estimate,
+ sigma=sigma,
+ constraints=constraints,
+ model_deriv=model_deriv,
+ full_output=full_output,
+ )[:2]
full_output = True
test_condition = numpy.allclose(parameters_actual, fittedpar)
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
def testUnconstrainedFitAnalyticalDerivative(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.)
+ parameters_actual = [10.5, 2, 1000.0, 20.0, 15]
+ x = numpy.arange(10000.0)
y = self.gauss(x, *parameters_actual)
sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
+ parameters_estimate = [0.0, 1.0, 900.0, 25.0, 10]
model_function = self.gauss
model_deriv = self.gauss_derivative
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- model_deriv=model_deriv)
+ fittedpar, cov = self.instance(
+ model_function,
+ x,
+ y,
+ parameters_estimate,
+ sigma=sigma,
+ model_deriv=model_deriv,
+ )
test_condition = numpy.allclose(parameters_actual, fittedpar)
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
@testutils.validate_logging(fitlogger.name, warning=2)
def testBadlyShapedData(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.).reshape(1000, 10)
+ parameters_actual = [10.5, 2, 1000.0, 20.0, 15]
+ x = numpy.arange(10000.0).reshape(1000, 10)
y = self.gauss(x, *parameters_actual)
sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
+ parameters_estimate = [0.0, 1.0, 900.0, 25.0, 10]
model_function = self.gauss
for check_finite in [True, False]:
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=check_finite)
+ fittedpar, cov = self.instance(
+ model_function,
+ x,
+ y,
+ parameters_estimate,
+ sigma=sigma,
+ check_finite=check_finite,
+ )
test_condition = numpy.allclose(parameters_actual, fittedpar)
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
@testutils.validate_logging(fitlogger.name, warning=3)
def testDataWithNaN(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.).reshape(1000, 10)
+ parameters_actual = [10.5, 2, 1000.0, 20.0, 15]
+ x = numpy.arange(10000.0).reshape(1000, 10)
y = self.gauss(x, *parameters_actual)
sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
+ parameters_estimate = [0.0, 1.0, 900.0, 25.0, 10]
model_function = self.gauss
x[500] = numpy.inf
# check default behavior
try:
- self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma)
+ self.instance(model_function, x, y, parameters_estimate, sigma=sigma)
except ValueError:
info = "%s" % sys.exc_info()[1]
self.assertTrue("array must not contain inf" in info)
# check requested behavior
try:
- self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=True)
+ self.instance(
+ model_function,
+ x,
+ y,
+ parameters_estimate,
+ sigma=sigma,
+ check_finite=True,
+ )
except ValueError:
info = "%s" % sys.exc_info()[1]
self.assertTrue("array must not contain inf" in info)
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=False)
+ fittedpar, cov = self.instance(
+ model_function, x, y, parameters_estimate, sigma=sigma, check_finite=False
+ )
test_condition = numpy.allclose(parameters_actual, fittedpar)
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
# testing now with ydata containing NaN
- x = numpy.arange(10000.).reshape(1000, 10)
+ x = numpy.arange(10000.0).reshape(1000, 10)
y[500] = numpy.nan
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=False)
+ fittedpar, cov = self.instance(
+ model_function, x, y, parameters_estimate, sigma=sigma, check_finite=False
+ )
test_condition = numpy.allclose(parameters_actual, fittedpar)
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
# testing now with sigma containing NaN
sigma[300] = numpy.nan
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=False)
+ fittedpar, cov = self.instance(
+ model_function, x, y, parameters_estimate, sigma=sigma, check_finite=False
+ )
test_condition = numpy.allclose(parameters_actual, fittedpar)
if not test_condition:
msg = "Unsuccessfull fit\n"
for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
+ msg += "Expected %g obtained %g\n" % (
+ parameters_actual[i],
+ fittedpar[i],
+ )
self.assertTrue(test_condition, msg)
def testUncertainties(self):
"""Test for validity of uncertainties in returned full-output
dictionary. This is a non-regression test for pull request #197"""
- parameters_actual = [10.5, 2, 1000.0, 20., 15, 2001.0, 30.1, 16]
- x = numpy.arange(10000.)
+ parameters_actual = [10.5, 2, 1000.0, 20.0, 15, 2001.0, 30.1, 16]
+ x = numpy.arange(10000.0)
y = self.gauss(x, *parameters_actual)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10., 1500., 20., 2.0]
+ parameters_estimate = [0.0, 1.0, 900.0, 25.0, 10.0, 1500.0, 20.0, 2.0]
# test that uncertainties are not 0.
- fittedpar, cov, infodict = self.instance(self.gauss, x, y, parameters_estimate,
- full_output=True)
+ fittedpar, cov, infodict = self.instance(
+ self.gauss, x, y, parameters_estimate, full_output=True
+ )
uncertainties = infodict["uncertainties"]
self.assertEqual(len(uncertainties), len(parameters_actual))
self.assertEqual(len(uncertainties), len(fittedpar))
for uncertainty in uncertainties:
- self.assertNotAlmostEqual(uncertainty, 0.)
+ self.assertNotAlmostEqual(uncertainty, 0.0)
# set constraint FIXED for half the parameters.
# This should cause leastsq to return 100% uncertainty.
@@ -361,12 +395,16 @@ class Test_leastsq(unittest.TestCase):
constraints.append([CFIXED, 0, 0])
else:
constraints.append([CFREE, 0, 0])
- fittedpar, cov, infodict = self.instance(self.gauss, x, y, parameters_estimate,
- constraints=constraints,
- full_output=True)
+ fittedpar, cov, infodict = self.instance(
+ self.gauss,
+ x,
+ y,
+ parameters_estimate,
+ constraints=constraints,
+ full_output=True,
+ )
uncertainties = infodict["uncertainties"]
for i in range(len(parameters_estimate)):
if i % 2:
# test that all FIXED parameters have 100% uncertainty
- self.assertAlmostEqual(uncertainties[i],
- parameters_estimate[i])
+ self.assertAlmostEqual(uncertainties[i], parameters_estimate[i])
diff --git a/src/silx/math/fit/test/test_fitmanager.py b/src/silx/math/fit/test/test_fitmanager.py
index cc35ccf..5229df5 100644
--- a/src/silx/math/fit/test/test_fitmanager.py
+++ b/src/silx/math/fit/test/test_fitmanager.py
@@ -1,5 +1,5 @@
# /*##########################################################################
-# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
+# Copyright (C) 2016-2023 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -114,6 +114,7 @@ class TestFitmanager(ParametricTestCase):
"""
Unit tests of multi-peak functions.
"""
+
def setUp(self):
pass
@@ -126,9 +127,7 @@ class TestFitmanager(ParametricTestCase):
# Create synthetic data with a sum of gaussian functions
x = numpy.arange(1000).astype(numpy.float64)
- p = [1000, 100., 250,
- 255, 650., 45,
- 1500, 800.5, 95]
+ p = [1000, 100.0, 250, 255, 650.0, 45, 1500, 800.5, 95]
linear_bg = 2.65 * x + 13
y = linear_bg + sum_gauss(x, *p)
@@ -139,10 +138,10 @@ class TestFitmanager(ParametricTestCase):
x_with_nans[5::15] = numpy.nan
tests = {
- 'all finite': (x, y),
- 'y with NaNs': (x, y_with_nans),
- 'x with NaNs': (x_with_nans, y),
- }
+ "all finite": (x, y),
+ "y with NaNs": (x, y_with_nans),
+ "x with NaNs": (x_with_nans, y),
+ }
for name, (xdata, ydata) in tests.items():
with self.subTest(name=name):
@@ -151,8 +150,8 @@ class TestFitmanager(ParametricTestCase):
fit.setdata(x=xdata, y=ydata)
fit.loadtheories(fittheories)
# Use one of the default fit functions
- fit.settheory('Gaussians')
- fit.setbackground('Linear')
+ fit.settheory("Gaussians")
+ fit.setbackground("Linear")
fit.estimate()
fit.runfit()
@@ -167,19 +166,17 @@ class TestFitmanager(ParametricTestCase):
for i, param in enumerate(fit.fit_results[2:]):
param_number = i // 3 + 1
if i % 3 == 0:
- self.assertEqual(param["name"],
- "Height%d" % param_number)
+ self.assertEqual(param["name"], "Height%d" % param_number)
elif i % 3 == 1:
- self.assertEqual(param["name"],
- "Position%d" % param_number)
+ self.assertEqual(param["name"], "Position%d" % param_number)
elif i % 3 == 2:
- self.assertEqual(param["name"],
- "FWHM%d" % param_number)
+ self.assertEqual(param["name"], "FWHM%d" % param_number)
- self.assertAlmostEqual(param["fitresult"],
- p[i])
- self.assertAlmostEqual(_order_of_magnitude(param["estimation"]),
- _order_of_magnitude(p[i]))
+ self.assertAlmostEqual(param["fitresult"], p[i])
+ self.assertAlmostEqual(
+ _order_of_magnitude(param["estimation"]),
+ _order_of_magnitude(p[i]),
+ )
def testLoadCustomFitFunction(self):
"""Test FitManager using a custom fit function defined in an external
@@ -198,35 +195,29 @@ class TestFitmanager(ParametricTestCase):
# Create a temporary function definition file, and import it
with temp_dir() as tmpDir:
- tmpfile = os.path.join(tmpDir, 'customfun.py')
+ tmpfile = os.path.join(tmpDir, "customfun.py")
# custom_function_definition
fd = open(tmpfile, "w")
fd.write(custom_function_definition)
fd.close()
fit.loadtheories(tmpfile)
- tmpfile_pyc = os.path.join(tmpDir, 'customfun.pyc')
+ tmpfile_pyc = os.path.join(tmpDir, "customfun.pyc")
if os.path.exists(tmpfile_pyc):
os.unlink(tmpfile_pyc)
os.unlink(tmpfile)
- fit.settheory('my fit theory')
+ fit.settheory("my fit theory")
# Test configure
fit.configure(d=4.5)
fit.estimate()
fit.runfit()
- self.assertEqual(fit.fit_results[0]["name"],
- "A1")
- self.assertAlmostEqual(fit.fit_results[0]["fitresult"],
- 1.5)
- self.assertEqual(fit.fit_results[1]["name"],
- "B1")
- self.assertAlmostEqual(fit.fit_results[1]["fitresult"],
- 2.5)
- self.assertEqual(fit.fit_results[2]["name"],
- "C1")
- self.assertAlmostEqual(fit.fit_results[2]["fitresult"],
- 3.5)
+ self.assertEqual(fit.fit_results[0]["name"], "A1")
+ self.assertAlmostEqual(fit.fit_results[0]["fitresult"], 1.5)
+ self.assertEqual(fit.fit_results[1]["name"], "B1")
+ self.assertAlmostEqual(fit.fit_results[1]["fitresult"], 2.5)
+ self.assertEqual(fit.fit_results[2]["name"], "C1")
+ self.assertAlmostEqual(fit.fit_results[2]["fitresult"], 3.5)
def testLoadOldCustomFitFunction(self):
"""Test FitManager using a custom fit function defined in an external
@@ -245,34 +236,28 @@ class TestFitmanager(ParametricTestCase):
# Create a temporary function definition file, and import it
with temp_dir() as tmpDir:
- tmpfile = os.path.join(tmpDir, 'oldcustomfun.py')
+ tmpfile = os.path.join(tmpDir, "oldcustomfun.py")
# custom_function_definition
fd = open(tmpfile, "w")
fd.write(old_custom_function_definition)
fd.close()
fit.loadtheories(tmpfile)
- tmpfile_pyc = os.path.join(tmpDir, 'oldcustomfun.pyc')
+ tmpfile_pyc = os.path.join(tmpDir, "oldcustomfun.pyc")
if os.path.exists(tmpfile_pyc):
os.unlink(tmpfile_pyc)
os.unlink(tmpfile)
- fit.settheory('my fit theory')
+ fit.settheory("my fit theory")
fit.configure(d=4.5)
fit.estimate()
fit.runfit()
- self.assertEqual(fit.fit_results[0]["name"],
- "A1")
- self.assertAlmostEqual(fit.fit_results[0]["fitresult"],
- 1.5)
- self.assertEqual(fit.fit_results[1]["name"],
- "B1")
- self.assertAlmostEqual(fit.fit_results[1]["fitresult"],
- 2.5)
- self.assertEqual(fit.fit_results[2]["name"],
- "C1")
- self.assertAlmostEqual(fit.fit_results[2]["fitresult"],
- 3.5)
+ self.assertEqual(fit.fit_results[0]["name"], "A1")
+ self.assertAlmostEqual(fit.fit_results[0]["fitresult"], 1.5)
+ self.assertEqual(fit.fit_results[1]["name"], "B1")
+ self.assertAlmostEqual(fit.fit_results[1]["fitresult"], 2.5)
+ self.assertEqual(fit.fit_results[2]["name"], "C1")
+ self.assertAlmostEqual(fit.fit_results[2]["fitresult"], 3.5)
def testAddTheory(self, estimate=True):
"""Test FitManager using a custom fit function imported with
@@ -290,19 +275,19 @@ class TestFitmanager(ParametricTestCase):
fit.setdata(x=x, y=y)
# Define and add the fit theory
- CONFIG = {'d': 1.}
+ CONFIG = {"d": 1.0}
def myfun(x_, a_, b_, c_):
- """"Model function"""
- return (a_ * x_**2 + b_ * x_ + c_) / CONFIG['d']
+ """Model function"""
+ return (a_ * x_**2 + b_ * x_ + c_) / CONFIG["d"]
def myesti(x_, y_):
- """"Initial parameters for iterative fit:
+ """Initial parameters for iterative fit:
(a, b, c) = (1, 1, 1)
Constraints all set to 0 (FREE)"""
- return (1., 1., 1.), ((0, 0, 0), (0, 0, 0), (0, 0, 0))
+ return (1.0, 1.0, 1.0), ((0, 0, 0), (0, 0, 0), (0, 0, 0))
- def myconfig(d_=1., **kw):
+ def myconfig(d_=1.0, **kw):
"""This function can modify CONFIG"""
CONFIG["d"] = d_
return CONFIG
@@ -320,41 +305,41 @@ class TestFitmanager(ParametricTestCase):
return delta_fun / delta_par
- fit.addtheory("polynomial",
- FitTheory(function=myfun,
- parameters=["A", "B", "C"],
- estimate=myesti if estimate else None,
- configure=myconfig,
- derivative=myderiv))
-
- fit.settheory('polynomial')
+ fit.addtheory(
+ "polynomial",
+ FitTheory(
+ function=myfun,
+ parameters=["A", "B", "C"],
+ estimate=myesti if estimate else None,
+ configure=myconfig,
+ derivative=myderiv,
+ ),
+ )
+
+ fit.settheory("polynomial")
fit.configure(d_=4.5)
fit.estimate()
params1, sigmas, infodict = fit.runfit()
- self.assertEqual(fit.fit_results[0]["name"],
- "A1")
- self.assertAlmostEqual(fit.fit_results[0]["fitresult"],
- -3.14)
- self.assertEqual(fit.fit_results[1]["name"],
- "B1")
+ self.assertEqual(fit.fit_results[0]["name"], "A1")
+ self.assertAlmostEqual(fit.fit_results[0]["fitresult"], -3.14)
+ self.assertEqual(fit.fit_results[1]["name"], "B1")
# params1[1] is the same as fit.fit_results[1]["fitresult"]
- self.assertAlmostEqual(params1[1],
- 1234.5)
- self.assertEqual(fit.fit_results[2]["name"],
- "C1")
- self.assertAlmostEqual(params1[2],
- 10000)
+ self.assertAlmostEqual(params1[1], 1234.5)
+ self.assertEqual(fit.fit_results[2]["name"], "C1")
+ self.assertAlmostEqual(params1[2], 10000)
# change configuration scaling factor and check that the fit returns
# different values
- fit.configure(d_=5.)
+ fit.configure(d_=5.0)
fit.estimate()
params2, sigmas, infodict = fit.runfit()
for p1, p2 in zip(params1, params2):
- self.assertFalse(numpy.array_equal(p1, p2),
- "Fit parameters are equal even though the " +
- "configuration has been changed")
+ self.assertFalse(
+ numpy.array_equal(p1, p2),
+ "Fit parameters are equal even though the "
+ + "configuration has been changed",
+ )
def testNoEstimate(self):
"""Ensure that the in the absence of the estimation function,
@@ -365,8 +350,10 @@ class TestFitmanager(ParametricTestCase):
def testStep(self):
"""Test fit manager on a step function with a more complex estimate
function than the gaussian (convolution filter)"""
- for theory_name, theory_fun in (('Step Down', sum_stepdown),
- ('Step Up', sum_stepup)):
+ for theory_name, theory_fun in (
+ ("Step Down", sum_stepdown),
+ ("Step Up", sum_stepup),
+ ):
# Create synthetic data with a sum of gaussian functions
x = numpy.arange(1000).astype(numpy.float64)
@@ -381,7 +368,7 @@ class TestFitmanager(ParametricTestCase):
fit.setdata(x=x, y=y)
fit.loadtheories(fittheories)
fit.settheory(theory_name)
- fit.setbackground('Constant')
+ fit.setbackground("Constant")
fit.estimate()
@@ -391,8 +378,10 @@ class TestFitmanager(ParametricTestCase):
self.assertAlmostEqual(params[0], 13, places=5)
for i, param in enumerate(params[1:]):
self.assertAlmostEqual(param, p[i], places=5)
- self.assertAlmostEqual(_order_of_magnitude(fit.fit_results[i+1]["estimation"]),
- _order_of_magnitude(p[i]))
+ self.assertAlmostEqual(
+ _order_of_magnitude(fit.fit_results[i + 1]["estimation"]),
+ _order_of_magnitude(p[i]),
+ )
def quadratic(x, a, b, c):
@@ -405,6 +394,7 @@ def cubic(x, a, b, c, d):
class TestPolynomials(unittest.TestCase):
"""Test polynomial fit theories and fit background"""
+
def setUp(self):
self.x = numpy.arange(100).astype(numpy.float64)
@@ -424,8 +414,7 @@ class TestPolynomials(unittest.TestCase):
fit_params = fm.runfit()[0]
for p, pfit in zip(poly_params + gaussian_params, fit_params):
- self.assertAlmostEqual(p,
- pfit)
+ self.assertAlmostEqual(p, pfit)
def testCubicBg(self):
gaussian_params = [1000, 45, 8]
@@ -442,8 +431,7 @@ class TestPolynomials(unittest.TestCase):
fit_params = fm.runfit()[0]
for p, pfit in zip(poly_params + gaussian_params, fit_params):
- self.assertAlmostEqual(p,
- pfit)
+ self.assertAlmostEqual(p, pfit)
def testQuarticcBg(self):
gaussian_params = [10000, 69, 25]
@@ -460,9 +448,7 @@ class TestPolynomials(unittest.TestCase):
fit_params = fm.runfit()[0]
for p, pfit in zip(poly_params + gaussian_params, fit_params):
- self.assertAlmostEqual(p,
- pfit,
- places=5)
+ self.assertAlmostEqual(p, pfit, places=5)
def _testPoly(self, poly_params, theory, places=5):
p = numpy.poly1d(poly_params)
@@ -480,18 +466,13 @@ class TestPolynomials(unittest.TestCase):
self.assertAlmostEqual(p, pfit, places=places)
def testQuadratic(self):
- self._testPoly([0.05, -2, 3],
- "Degree 2 Polynomial")
+ self._testPoly([0.05, -2, 3], "Degree 2 Polynomial")
def testCubic(self):
- self._testPoly([0.0005, -0.05, 3, -4],
- "Degree 3 Polynomial")
+ self._testPoly([0.0005, -0.05, 3, -4], "Degree 3 Polynomial")
def testQuartic(self):
- self._testPoly([1, -2, 3, -4, -5],
- "Degree 4 Polynomial")
+ self._testPoly([1, -2, 3, -4, -5], "Degree 4 Polynomial")
def testQuintic(self):
- self._testPoly([1, -2, 3, -4, -5, 6],
- "Degree 5 Polynomial",
- places=4)
+ self._testPoly([1, -2, 3, -4, -5, 6], "Degree 5 Polynomial", places=4)
diff --git a/src/silx/math/fit/test/test_functions.py b/src/silx/math/fit/test/test_functions.py
index 71cce8b..525925c 100644
--- a/src/silx/math/fit/test/test_functions.py
+++ b/src/silx/math/fit/test/test_functions.py
@@ -34,36 +34,59 @@ __authors__ = ["P. Knobel"]
__license__ = "MIT"
__date__ = "21/07/2016"
+
class Test_functions(unittest.TestCase):
"""
Unit tests of multi-peak functions.
"""
+
def setUp(self):
self.x = numpy.arange(11)
# height, center, sigma1, sigma2
- (h, c, s1, s2) = (7., 5., 3., 2.1)
+ (h, c, s1, s2) = (7.0, 5.0, 3.0, 2.1)
self.g_params = {
"height": h,
"center": c,
- #"sigma": s,
+ # "sigma": s,
"fwhm1": 2 * math.sqrt(2 * math.log(2)) * s1,
"fwhm2": 2 * math.sqrt(2 * math.log(2)) * s2,
- "area1": h * s1 * math.sqrt(2 * math.pi)
+ "area1": h * s1 * math.sqrt(2 * math.pi),
}
# result of `7 * scipy.signal.gaussian(11, 3)`
self.scipy_gaussian = numpy.array(
- [1.74546546, 2.87778603, 4.24571462, 5.60516182, 6.62171628,
- 7., 6.62171628, 5.60516182, 4.24571462, 2.87778603,
- 1.74546546]
+ [
+ 1.74546546,
+ 2.87778603,
+ 4.24571462,
+ 5.60516182,
+ 6.62171628,
+ 7.0,
+ 6.62171628,
+ 5.60516182,
+ 4.24571462,
+ 2.87778603,
+ 1.74546546,
+ ]
)
# result of:
# numpy.concatenate((7 * scipy.signal.gaussian(11, 3)[0:5],
# 7 * scipy.signal.gaussian(11, 2.1)[5:11]))
self.scipy_asym_gaussian = numpy.array(
- [1.74546546, 2.87778603, 4.24571462, 5.60516182, 6.62171628,
- 7., 6.24968751, 4.44773692, 2.52313452, 1.14093853, 0.41124877]
+ [
+ 1.74546546,
+ 2.87778603,
+ 4.24571462,
+ 5.60516182,
+ 6.62171628,
+ 7.0,
+ 6.24968751,
+ 4.44773692,
+ 2.52313452,
+ 1.14093853,
+ 0.41124877,
+ ]
)
def tearDown(self):
@@ -71,41 +94,48 @@ class Test_functions(unittest.TestCase):
def testGauss(self):
"""Compare sum_gauss with scipy.signals.gaussian"""
- y = functions.sum_gauss(self.x,
- self.g_params["height"],
- self.g_params["center"],
- self.g_params["fwhm1"])
+ y = functions.sum_gauss(
+ self.x,
+ self.g_params["height"],
+ self.g_params["center"],
+ self.g_params["fwhm1"],
+ )
for i in range(11):
self.assertAlmostEqual(y[i], self.scipy_gaussian[i])
def testAGauss(self):
"""Compare sum_agauss with scipy.signals.gaussian"""
- y = functions.sum_agauss(self.x,
- self.g_params["area1"],
- self.g_params["center"],
- self.g_params["fwhm1"])
+ y = functions.sum_agauss(
+ self.x,
+ self.g_params["area1"],
+ self.g_params["center"],
+ self.g_params["fwhm1"],
+ )
for i in range(11):
self.assertAlmostEqual(y[i], self.scipy_gaussian[i])
def testFastAGauss(self):
"""Compare sum_fastagauss with scipy.signals.gaussian
Limit precision to 3 decimal places."""
- y = functions.sum_fastagauss(self.x,
- self.g_params["area1"],
- self.g_params["center"],
- self.g_params["fwhm1"])
+ y = functions.sum_fastagauss(
+ self.x,
+ self.g_params["area1"],
+ self.g_params["center"],
+ self.g_params["fwhm1"],
+ )
for i in range(11):
self.assertAlmostEqual(y[i], self.scipy_gaussian[i], 3)
-
def testSplitGauss(self):
"""Compare sum_splitgauss with scipy.signals.gaussian"""
- y = functions.sum_splitgauss(self.x,
- self.g_params["height"],
- self.g_params["center"],
- self.g_params["fwhm1"],
- self.g_params["fwhm2"])
+ y = functions.sum_splitgauss(
+ self.x,
+ self.g_params["height"],
+ self.g_params["center"],
+ self.g_params["fwhm1"],
+ self.g_params["fwhm2"],
+ )
for i in range(11):
self.assertAlmostEqual(y[i], self.scipy_asym_gaussian[i])
@@ -120,18 +150,14 @@ class Test_functions(unittest.TestCase):
x = [-5, -2, -1.5, -0.6, 0, 0.1, 2, 3]
erfx = functions.erf(x)
for i in range(len(x)):
- self.assertAlmostEqual(erfx[i],
- math.erf(x[i]),
- places=5)
+ self.assertAlmostEqual(erfx[i], math.erf(x[i]), places=5)
# ndarray
x = numpy.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
erfx = functions.erf(x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
- self.assertAlmostEqual(erfx[i, j],
- math.erf(x[i, j]),
- places=5)
+ self.assertAlmostEqual(erfx[i, j], math.erf(x[i, j]), places=5)
def testErfc(self):
"""Compare erf with math.erf"""
@@ -162,15 +188,14 @@ class Test_functions(unittest.TestCase):
for x, y in zip(x0, y0):
self.assertAlmostEqual(
- 11.1 * (0.5 + math.atan((x - 22.2) / 3.33) / math.pi),
- y
+ 11.1 * (0.5 + math.atan((x - 22.2) / 3.33) / math.pi), y
)
def testStepUp(self):
"""sanity check for step up:
- - derivative must be largest around the step center
- - max value must be close to height parameter
+ - derivative must be largest around the step center
+ - max value must be close to height parameter
"""
x0 = numpy.arange(1000)
@@ -187,14 +212,13 @@ class Test_functions(unittest.TestCase):
# Test center position within +- 1 sample of max derivative
index_max_deriv = numpy.argmax(deriv0)
- self.assertLess(abs(index_max_deriv - center),
- 1)
+ self.assertLess(abs(index_max_deriv - center), 1)
def testStepDown(self):
"""sanity check for step down:
- - absolute value of derivative must be largest around the step center
- - max value must be close to height parameter
+ - absolute value of derivative must be largest around the step center
+ - max value must be close to height parameter
"""
x0 = numpy.arange(1000)
@@ -207,18 +231,19 @@ class Test_functions(unittest.TestCase):
self.assertAlmostEqual(max(y0), height, places=1)
self.assertAlmostEqual(min(y0), 0, places=1)
- deriv0 = _numerical_derivative(functions.sum_stepdown, x0, [height, center, fwhm])
+ deriv0 = _numerical_derivative(
+ functions.sum_stepdown, x0, [height, center, fwhm]
+ )
# Test center position within +- 1 sample of max derivative
index_min_deriv = numpy.argmax(-deriv0)
- self.assertLess(abs(index_min_deriv - center),
- 1)
+ self.assertLess(abs(index_min_deriv - center), 1)
def testSlit(self):
"""sanity check for slit:
- - absolute value of derivative must be largest around the step center
- - max value must be close to height parameter
+ - absolute value of derivative must be largest around the step center
+ - max value must be close to height parameter
"""
x0 = numpy.arange(1000)
@@ -231,16 +256,16 @@ class Test_functions(unittest.TestCase):
self.assertAlmostEqual(max(y0), height, places=1)
self.assertAlmostEqual(min(y0), 0, places=1)
- deriv0 = _numerical_derivative(functions.sum_slit, x0, [height, center, fwhm, beamfwhm])
+ deriv0 = _numerical_derivative(
+ functions.sum_slit, x0, [height, center, fwhm, beamfwhm]
+ )
# Test step up center position (center - fwhm/2) within +- 1 sample of max derivative
index_max_deriv = numpy.argmax(deriv0)
- self.assertLess(abs(index_max_deriv - (center - fwhm/2)),
- 1)
+ self.assertLess(abs(index_max_deriv - (center - fwhm / 2)), 1)
# Test step down center position (center + fwhm/2) within +- 1 sample of min derivative
index_min_deriv = numpy.argmin(deriv0)
- self.assertLess(abs(index_min_deriv - (center + fwhm/2)),
- 1)
+ self.assertLess(abs(index_min_deriv - (center + fwhm / 2)), 1)
def _numerical_derivative(f, x, params=[], delta_factor=0.0001):
diff --git a/src/silx/math/fit/test/test_peaks.py b/src/silx/math/fit/test/test_peaks.py
index 23e4061..d6b9db5 100644
--- a/src/silx/math/fit/test/test_peaks.py
+++ b/src/silx/math/fit/test/test_peaks.py
@@ -24,108 +24,414 @@
Tests for peaks module
"""
-import unittest
import numpy
-import math
+import pytest
from silx.math.fit import functions
from silx.math.fit import peaks
-class Test_peak_search(unittest.TestCase):
- """
- Unit tests of peak_search on various types of multi-peak functions.
- """
- def setUp(self):
- self.x = numpy.arange(5000)
- # (height1, center1, fwhm1, ...)
- self.h_c_fwhm = (50, 500, 100,
- 50, 600, 80,
- 20, 2000, 100,
- 50, 2250, 110,
- 40, 3000, 99,
- 23, 4980, 80)
- # (height1, center1, fwhm1, eta1 ...)
- self.h_c_fwhm_eta = (50, 500, 100, 0.4,
- 50, 600, 80, 0.5,
- 20, 2000, 100, 0.6,
- 50, 2250, 110, 0.7,
- 40, 3000, 99, 0.8,
- 23, 4980, 80, 0.3,)
- # (height1, center1, fwhm11, fwhm21, ...)
- self.h_c_fwhm_fwhm = (50, 500, 100, 85,
- 50, 600, 80, 110,
- 20, 2000, 100, 100,
- 50, 2250, 110, 99,
- 40, 3000, 99, 110,
- 23, 4980, 80, 80,)
- # (height1, center1, fwhm11, fwhm21, eta1 ...)
- self.h_c_fwhm_fwhm_eta = (50, 500, 100, 85, 0.4,
- 50, 600, 80, 110, 0.5,
- 20, 2000, 100, 100, 0.6,
- 50, 2250, 110, 99, 0.7,
- 40, 3000, 99, 110, 0.8,
- 23, 4980, 80, 80, 0.3,)
- # (area1, center1, fwhm1, ...)
- self.a_c_fwhm = (2550, 500, 100,
- 2000, 600, 80,
- 500, 2000, 100,
- 4000, 2250, 110,
- 2300, 3000, 99,
- 3333, 4980, 80)
- # (area1, center1, fwhm1, eta1 ...)
- self.a_c_fwhm_eta = (500, 500, 100, 0.4,
- 500, 600, 80, 0.5,
- 200, 2000, 100, 0.6,
- 500, 2250, 110, 0.7,
- 400, 3000, 99, 0.8,
- 230, 4980, 80, 0.3,)
- # (area, position, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r)
- self.hypermet_params = (1000, 500, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 1000, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 2000, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 2350, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 3000, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 4900, 200, 0.2, 100, 0.3, 100, 0.05,)
+_PEAK_PARAMETERS = {
+ "sum_gauss": (
+ 50,
+ 500,
+ 100,
+ 50,
+ 600,
+ 80,
+ 20,
+ 2000,
+ 100,
+ 50,
+ 2250,
+ 110,
+ 40,
+ 3000,
+ 99,
+ 23,
+ 4980,
+ 80,
+ ),
+ "sum_lorentz": (
+ 50,
+ 500,
+ 100,
+ 50,
+ 600,
+ 80,
+ 20,
+ 2000,
+ 100,
+ 50,
+ 2250,
+ 110,
+ 40,
+ 3000,
+ 99,
+ 23,
+ 4980,
+ 80,
+ ),
+ "sum_pvoigt": (
+ 50,
+ 500,
+ 100,
+ 0.4,
+ 50,
+ 600,
+ 80,
+ 0.5,
+ 20,
+ 2000,
+ 100,
+ 0.6,
+ 50,
+ 2250,
+ 110,
+ 0.7,
+ 40,
+ 3000,
+ 99,
+ 0.8,
+ 23,
+ 4980,
+ 80,
+ 0.3,
+ ),
+ "sum_splitgauss": (
+ 50,
+ 500,
+ 100,
+ 85,
+ 50,
+ 600,
+ 80,
+ 110,
+ 20,
+ 2000,
+ 100,
+ 100,
+ 50,
+ 2250,
+ 110,
+ 99,
+ 40,
+ 3000,
+ 99,
+ 110,
+ 23,
+ 4980,
+ 80,
+ 80,
+ ),
+ "sum_splitlorentz": (
+ 50,
+ 500,
+ 100,
+ 85,
+ 50,
+ 600,
+ 80,
+ 110,
+ 20,
+ 2000,
+ 100,
+ 100,
+ 50,
+ 2250,
+ 110,
+ 99,
+ 40,
+ 3000,
+ 99,
+ 110,
+ 23,
+ 4980,
+ 80,
+ 80,
+ ),
+ "sum_splitpvoigt": (
+ 50,
+ 500,
+ 100,
+ 85,
+ 0.4,
+ 50,
+ 600,
+ 80,
+ 110,
+ 0.5,
+ 20,
+ 2000,
+ 100,
+ 100,
+ 0.6,
+ 50,
+ 2250,
+ 110,
+ 99,
+ 0.7,
+ 40,
+ 3000,
+ 99,
+ 110,
+ 0.8,
+ 23,
+ 4980,
+ 80,
+ 80,
+ 0.3,
+ ),
+ "sum_splitpvoigt2": (
+ 50,
+ 500,
+ 100,
+ 85,
+ 0.4,
+ 0.7,
+ 50,
+ 600,
+ 80,
+ 110,
+ 0.5,
+ 0.3,
+ 20,
+ 2000,
+ 100,
+ 100,
+ 0.6,
+ 0.4,
+ 50,
+ 2250,
+ 110,
+ 99,
+ 0.7,
+ 1,
+ 40,
+ 3000,
+ 99,
+ 110,
+ 0.8,
+ 0,
+ 23,
+ 4980,
+ 80,
+ 80,
+ 0.3,
+ 0.5,
+ ),
+ "sum_agauss": (
+ 2550,
+ 500,
+ 100,
+ 2000,
+ 600,
+ 80,
+ 500,
+ 2000,
+ 100,
+ 4000,
+ 2250,
+ 110,
+ 2300,
+ 3000,
+ 99,
+ 3333,
+ 4980,
+ 80,
+ ),
+ "sum_fastagauss": (
+ 2550,
+ 500,
+ 100,
+ 2000,
+ 600,
+ 80,
+ 500,
+ 2000,
+ 100,
+ 4000,
+ 2250,
+ 110,
+ 2300,
+ 3000,
+ 99,
+ 3333,
+ 4980,
+ 80,
+ ),
+ "sum_alorentz": (
+ 2550,
+ 500,
+ 100,
+ 2000,
+ 600,
+ 80,
+ 500,
+ 2000,
+ 100,
+ 4000,
+ 2250,
+ 110,
+ 2300,
+ 3000,
+ 99,
+ 3333,
+ 4980,
+ 80,
+ ),
+ "sum_apvoigt": (
+ 500,
+ 500,
+ 100,
+ 0.4,
+ 500,
+ 600,
+ 80,
+ 0.5,
+ 200,
+ 2000,
+ 100,
+ 0.6,
+ 500,
+ 2250,
+ 110,
+ 0.7,
+ 400,
+ 3000,
+ 99,
+ 0.8,
+ 230,
+ 4980,
+ 80,
+ 0.3,
+ ),
+ "sum_ahypermet": (
+ 1000,
+ 500,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 1000,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 2000,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 2350,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 3000,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 4900,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ ),
+ "sum_fastahypermet": (
+ 1000,
+ 500,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 1000,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 2000,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 2350,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 3000,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ 1000,
+ 4900,
+ 200,
+ 0.2,
+ 100,
+ 0.3,
+ 100,
+ 0.05,
+ ),
+}
- def tearDown(self):
- pass
+@pytest.mark.parametrize("peak_profile", list(_PEAK_PARAMETERS))
+def test_peak_functions(peak_profile):
+ x = numpy.arange(5000)
+ peak_params = _PEAK_PARAMETERS[peak_profile]
+ func = getattr(functions, peak_profile)
- def get_peaks(self, function, params):
- """
+ with pytest.raises(IndexError):
+ func(x)
+ with pytest.raises(IndexError):
+ func(x, *peak_params, 0)
- :param function: Multi-peak function
- :param params: Parameter for this function
- :return: list of (peak, relevance) tuples
- """
- y = function(self.x, *params)
- return peaks.peak_search(y=y, fwhm=100, relevance_info=True)
+ y = func(x, *peak_params)
+ assert x.shape == y.shape
- def testPeakSearch_various_functions(self):
- """Run peak search on a variety of synthetic functions, and
- check that result falls within +-25 samples of the actual peak
- (reasonable delta considering a fwhm of ~100 samples) and effects
- of overlapping peaks)."""
- f_p = ((functions.sum_gauss, self.h_c_fwhm ),
- (functions.sum_lorentz, self.h_c_fwhm),
- (functions.sum_pvoigt, self.h_c_fwhm_eta),
- (functions.sum_splitgauss, self.h_c_fwhm_fwhm),
- (functions.sum_splitlorentz, self.h_c_fwhm_fwhm),
- (functions.sum_splitpvoigt, self.h_c_fwhm_fwhm_eta),
- (functions.sum_agauss, self.a_c_fwhm),
- (functions.sum_fastagauss, self.a_c_fwhm),
- (functions.sum_alorentz, self.a_c_fwhm),
- (functions.sum_apvoigt, self.a_c_fwhm_eta),
- (functions.sum_ahypermet, self.hypermet_params),
- (functions.sum_fastahypermet, self.hypermet_params),)
- for function, params in f_p:
- peaks = self.get_peaks(function, params)
+@pytest.mark.parametrize("peak_profile", list(_PEAK_PARAMETERS))
+def test_peak_search(peak_profile):
+ x = numpy.arange(5000)
+ peak_params = _PEAK_PARAMETERS[peak_profile]
+ func = getattr(functions, peak_profile)
+ y = func(x, *peak_params)
+ estimated_peak_params = peaks.peak_search(y=y, fwhm=100, relevance_info=True)
- self.assertEqual(len(peaks), 6,
- "Wrong number of peaks detected")
-
- for i in range(6):
- theoretical_peak_index = params[i*(len(params)//6) + 1]
- found_peak_index = peaks[i][0]
- self.assertLess(abs(found_peak_index - theoretical_peak_index), 25)
+ assert len(estimated_peak_params) == 6, "Wrong number of peaks detected"
+ for i, (peak_position, *_) in enumerate(estimated_peak_params):
+ theoretical_peak_position = peak_params[i * (len(peak_params) // 6) + 1]
+ assert abs(peak_position - theoretical_peak_position) < 25