Diffstat (limited to 'silx/math/fit')
-rw-r--r--  silx/math/fit/__init__.py  39
-rw-r--r--  silx/math/fit/bgtheories.py  440
-rw-r--r--  silx/math/fit/filters.pyx  416
-rw-r--r--  silx/math/fit/filters/include/filters.h  45
-rw-r--r--  silx/math/fit/filters/src/smoothnd.c  317
-rw-r--r--  silx/math/fit/filters/src/snip1d.c  149
-rw-r--r--  silx/math/fit/filters/src/snip2d.c  96
-rw-r--r--  silx/math/fit/filters/src/snip3d.c  186
-rw-r--r--  silx/math/fit/filters/src/strip.c  118
-rw-r--r--  silx/math/fit/filters_wrapper.pxd  71
-rw-r--r--  silx/math/fit/fitmanager.py  1087
-rw-r--r--  silx/math/fit/fittheories.py  1374
-rw-r--r--  silx/math/fit/fittheory.py  161
-rw-r--r--  silx/math/fit/functions.pyx  985
-rw-r--r--  silx/math/fit/functions/include/functions.h  68
-rw-r--r--  silx/math/fit/functions/src/funs.c  1265
-rw-r--r--  silx/math/fit/functions_wrapper.pxd  170
-rw-r--r--  silx/math/fit/leastsq.py  901
-rw-r--r--  silx/math/fit/peaks.pyx  175
-rw-r--r--  silx/math/fit/peaks/include/peaks.h  32
-rw-r--r--  silx/math/fit/peaks/src/peaks.c  255
-rw-r--r--  silx/math/fit/peaks_wrapper.pxd  41
-rw-r--r--  silx/math/fit/setup.py  85
-rw-r--r--  silx/math/fit/test/__init__.py  46
-rw-r--r--  silx/math/fit/test/test_bgtheories.py  169
-rw-r--r--  silx/math/fit/test/test_filters.py  137
-rw-r--r--  silx/math/fit/test/test_fit.py  387
-rw-r--r--  silx/math/fit/test/test_fitmanager.py  513
-rw-r--r--  silx/math/fit/test/test_functions.py  272
-rw-r--r--  silx/math/fit/test/test_peaks.py  146
30 files changed, 0 insertions, 10146 deletions
diff --git a/silx/math/fit/__init__.py b/silx/math/fit/__init__.py
deleted file mode 100644
index 29e6a9e..0000000
--- a/silx/math/fit/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "22/06/2016"
-
-
-from .leastsq import leastsq, chisq_alpha_beta
-from .leastsq import \
- CFREE, CPOSITIVE, CQUOTED, CFIXED, \
- CFACTOR, CDELTA, CSUM
-
-from .functions import *
-from .filters import *
-from .peaks import peak_search, guess_fwhm
-from .fitmanager import FitManager
-from .fittheory import FitTheory
diff --git a/silx/math/fit/bgtheories.py b/silx/math/fit/bgtheories.py
deleted file mode 100644
index 631c43e..0000000
--- a/silx/math/fit/bgtheories.py
+++ /dev/null
@@ -1,440 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-#
-# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-########################################################################### */
-"""This modules defines a set of background model functions and associated
-estimation functions in a format that can be imported into a
-:class:`silx.math.fit.FitManager` object.
-
-A background function is a function that you want to add to a regular fit
-function prior to fitting the sum of both functions. This is useful, for
-instance, if you need to fit multiple gaussian peaks in an array of
-measured data points when the measurement is polluted by a background signal.
-
-The models include common background models such as a constant value or a
-linear background.
-
-It also includes background computation filters - *strip* and *snip* - that
-can extract a more complex low-curvature background signal from a signal with
-peaks having higher curvatures.
-
-The source code of this module can serve as a template for defining your
-own fit background theories. The minimal skeleton of such a theory definition
-file is::
-
- from silx.math.fit.fittheory import FitTheory
-
- def bgfunction1(x, y0, …):
- bg_signal = …
- return bg_signal
-
- def estimation_function1(x, y):
- …
- estimated_params = …
- constraints = …
- return estimated_params, constraints
-
- THEORY = {
- 'bg_theory_name1': FitTheory(
- description='Description of theory 1',
- function=bgfunction1,
- parameters=('param name 1', 'param name 2', …),
- estimate=estimation_function1,
- configure=configuration_function1,
- derivative=derivative_function1,
- is_background=True),
- 'theory_name_2': …,
- }
-"""
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "16/01/2017"
-
-from collections import OrderedDict
-import numpy
-from silx.math.fit.filters import strip, snip1d,\
- savitsky_golay
-from silx.math.fit.fittheory import FitTheory
-
-CONFIG = {
- "SmoothingFlag": False,
- "SmoothingWidth": 5,
- "AnchorsFlag": False,
- "AnchorsList": [],
- "StripWidth": 2,
- "StripIterations": 5000,
- "StripThresholdFactor": 1.0,
- "SnipWidth": 16,
- "EstimatePolyOnStrip": True
-}
-
-# to avoid costly computations when parameters stay the same
-_BG_STRIP_OLDY = numpy.array([])
-_BG_STRIP_OLDPARS = [0, 0]
-_BG_STRIP_OLDBG = numpy.array([])
-
-_BG_SNIP_OLDY = numpy.array([])
-_BG_SNIP_OLDWIDTH = None
-_BG_SNIP_OLDBG = numpy.array([])
-
-
-_BG_OLD_ANCHORS = []
-_BG_OLD_ANCHORS_FLAG = None
-
-_BG_SMOOTH_OLDWIDTH = None
-_BG_SMOOTH_OLDFLAG = None
-
-
-def _convert_anchors_to_indices(x):
- """Anchors stored in CONFIG["AnchorsList"] are abscissa.
- Convert then to indices (take first index where x >= anchor),
- then return the list of indices.
-
- :param x: Original array of abscissa
- :return: List of indices of anchors in x array.
- If CONFIG['AnchorsFlag'] is False or None, or if the list
- of indices is empty, return None.
- """
- # convert anchor X abscissa to index
- if CONFIG['AnchorsFlag'] and CONFIG['AnchorsList'] is not None:
- anchors_indices = []
- for anchor_x in CONFIG['AnchorsList']:
- if anchor_x <= x[0]:
- continue
-            # take the first index where x >= anchor_x
- indices = numpy.nonzero(x >= anchor_x)[0]
- if len(indices):
- anchors_indices.append(min(indices))
- if not len(anchors_indices):
- anchors_indices = None
- else:
- anchors_indices = None
-
- return anchors_indices
-
-
-def strip_bg(x, y0, width, niter):
- """Extract and return the strip bg from y0.
-
-    Use anchor coordinates in CONFIG["AnchorsList"] if flag
-    CONFIG["AnchorsFlag"] is True. Convert anchors from x coordinates
-    to array indices prior to passing them to silx.math.fit.filters.strip
-
-    :param x: Abscissa array
-    :param y0: Ordinate array (data values at x positions)
-    :param width: strip width
-    :param niter: number of strip iterations
- """
- global _BG_STRIP_OLDY
- global _BG_STRIP_OLDPARS
- global _BG_STRIP_OLDBG
- global _BG_SMOOTH_OLDWIDTH
- global _BG_SMOOTH_OLDFLAG
- global _BG_OLD_ANCHORS
- global _BG_OLD_ANCHORS_FLAG
-
- parameters_changed =\
- _BG_STRIP_OLDPARS != [width, niter] or\
- _BG_SMOOTH_OLDWIDTH != CONFIG["SmoothingWidth"] or\
- _BG_SMOOTH_OLDFLAG != CONFIG["SmoothingFlag"] or\
- _BG_OLD_ANCHORS_FLAG != CONFIG["AnchorsFlag"] or\
- _BG_OLD_ANCHORS != CONFIG["AnchorsList"]
-
- # same parameters
- if not parameters_changed:
- # same data
- if numpy.array_equal(_BG_STRIP_OLDY, y0):
- # same result
- return _BG_STRIP_OLDBG
-
- _BG_STRIP_OLDY = y0
- _BG_STRIP_OLDPARS = [width, niter]
- _BG_SMOOTH_OLDWIDTH = CONFIG["SmoothingWidth"]
- _BG_SMOOTH_OLDFLAG = CONFIG["SmoothingFlag"]
- _BG_OLD_ANCHORS = CONFIG["AnchorsList"]
- _BG_OLD_ANCHORS_FLAG = CONFIG["AnchorsFlag"]
-
- y1 = savitsky_golay(y0, CONFIG["SmoothingWidth"]) if CONFIG["SmoothingFlag"] else y0
-
- anchors_indices = _convert_anchors_to_indices(x)
-
- background = strip(y1,
- w=width,
- niterations=niter,
- factor=CONFIG["StripThresholdFactor"],
- anchors=anchors_indices)
-
- _BG_STRIP_OLDBG = background
-
- return background
-
-
-def snip_bg(x, y0, width):
- """Compute the snip bg for y0"""
- global _BG_SNIP_OLDY
- global _BG_SNIP_OLDWIDTH
- global _BG_SNIP_OLDBG
- global _BG_SMOOTH_OLDWIDTH
- global _BG_SMOOTH_OLDFLAG
- global _BG_OLD_ANCHORS
- global _BG_OLD_ANCHORS_FLAG
-
- parameters_changed =\
- _BG_SNIP_OLDWIDTH != width or\
- _BG_SMOOTH_OLDWIDTH != CONFIG["SmoothingWidth"] or\
- _BG_SMOOTH_OLDFLAG != CONFIG["SmoothingFlag"] or\
- _BG_OLD_ANCHORS_FLAG != CONFIG["AnchorsFlag"] or\
- _BG_OLD_ANCHORS != CONFIG["AnchorsList"]
-
- # same parameters
- if not parameters_changed:
- # same data
-        if numpy.array_equal(_BG_SNIP_OLDY, y0):
- # same result
- return _BG_SNIP_OLDBG
-
- _BG_SNIP_OLDY = y0
- _BG_SNIP_OLDWIDTH = width
- _BG_SMOOTH_OLDWIDTH = CONFIG["SmoothingWidth"]
- _BG_SMOOTH_OLDFLAG = CONFIG["SmoothingFlag"]
- _BG_OLD_ANCHORS = CONFIG["AnchorsList"]
- _BG_OLD_ANCHORS_FLAG = CONFIG["AnchorsFlag"]
-
- y1 = savitsky_golay(y0, CONFIG["SmoothingWidth"]) if CONFIG["SmoothingFlag"] else y0
-
- anchors_indices = _convert_anchors_to_indices(x)
-
- if anchors_indices is None or not len(anchors_indices):
- anchors_indices = [0, len(y1) - 1]
-
- background = numpy.zeros_like(y1)
- previous_anchor = 0
- for anchor_index in anchors_indices:
- if (anchor_index > previous_anchor) and (anchor_index < len(y1)):
- background[previous_anchor:anchor_index] =\
- snip1d(y1[previous_anchor:anchor_index],
- width)
- previous_anchor = anchor_index
-
- if previous_anchor < len(y1):
- background[previous_anchor:] = snip1d(y1[previous_anchor:],
- width)
-
- _BG_SNIP_OLDBG = background
-
- return background
-
-
-def estimate_linear(x, y):
- """
- Estimate the linear parameters (constant, slope) of a y signal.
-
- Strip peaks, then perform a linear regression.
- """
- bg = strip_bg(x, y,
- width=CONFIG["StripWidth"],
- niter=CONFIG["StripIterations"])
- n = float(len(bg))
- Sy = numpy.sum(bg)
- Sx = float(numpy.sum(x))
- Sxx = float(numpy.sum(x * x))
- Sxy = float(numpy.sum(x * bg))
-
- deno = n * Sxx - (Sx * Sx)
- if deno != 0:
- bg = (Sxx * Sy - Sx * Sxy) / deno
- slope = (n * Sxy - Sx * Sy) / deno
- else:
- bg = 0.0
- slope = 0.0
- estimated_par = [bg, slope]
- # code = 0: FREE
- constraints = [[0, 0, 0], [0, 0, 0]]
- return estimated_par, constraints
-
-
-def estimate_strip(x, y):
- """Estimation function for strip parameters.
-
- Return parameters as defined in CONFIG dict,
- set constraints to FIXED.
- """
- estimated_par = [CONFIG["StripWidth"],
- CONFIG["StripIterations"]]
- constraints = numpy.zeros((len(estimated_par), 3), numpy.float64)
- # code = 3: FIXED
- constraints[0][0] = 3
- constraints[1][0] = 3
- return estimated_par, constraints
-
-
-def estimate_snip(x, y):
- """Estimation function for snip parameters.
-
- Return parameters as defined in CONFIG dict,
- set constraints to FIXED.
- """
- estimated_par = [CONFIG["SnipWidth"]]
- constraints = numpy.zeros((len(estimated_par), 3), numpy.float64)
- # code = 3: FIXED
- constraints[0][0] = 3
- return estimated_par, constraints
-
-
-def poly(x, y, *pars):
- """Order n polynomial.
- The order of the polynomial is defined by the number of
- coefficients (``*pars``).
-
- """
- p = numpy.poly1d(pars)
- return p(x)
-
-
-def estimate_poly(x, y, deg=2):
- """Estimate polynomial coefficients.
-
- """
- # extract bg signal with strip, to estimate polynomial on background
- if CONFIG["EstimatePolyOnStrip"]:
- y = strip_bg(x, y,
- CONFIG["StripWidth"],
- CONFIG["StripIterations"])
- pcoeffs = numpy.polyfit(x, y, deg)
- cons = numpy.zeros((deg + 1, 3), numpy.float64)
- return pcoeffs, cons
-
-
-def estimate_quadratic_poly(x, y):
- """Estimate quadratic polynomial coefficients.
- """
- return estimate_poly(x, y, deg=2)
-
-
-def estimate_cubic_poly(x, y):
- """Estimate cubic polynomial coefficients.
- """
- return estimate_poly(x, y, deg=3)
-
-
-def estimate_quartic_poly(x, y):
- """Estimate degree 4 polynomial coefficients.
- """
- return estimate_poly(x, y, deg=4)
-
-
-def estimate_quintic_poly(x, y):
- """Estimate degree 5 polynomial coefficients.
- """
- return estimate_poly(x, y, deg=5)
-
-
-def configure(**kw):
- """Update the CONFIG dict
- """
- # inspect **kw to find known keys, update them in CONFIG
- for key in CONFIG:
- if key in kw:
- CONFIG[key] = kw[key]
-
- return CONFIG
-
-
-THEORY = OrderedDict(
- (('No Background',
- FitTheory(
- description="No background function",
- function=lambda x, y0: numpy.zeros_like(x),
- parameters=[],
- is_background=True)),
- ('Constant',
- FitTheory(
- description='Constant background',
- function=lambda x, y0, c: c * numpy.ones_like(x),
- parameters=['Constant', ],
- estimate=lambda x, y: ([min(y)], [[0, 0, 0]]),
- is_background=True)),
- ('Linear',
- FitTheory(
- description="Linear background, parameters 'Constant' and"
- " 'Slope'",
- function=lambda x, y0, a, b: a + b * x,
- parameters=['Constant', 'Slope'],
- estimate=estimate_linear,
- configure=configure,
- is_background=True)),
- ('Strip',
- FitTheory(
- description="Compute background using a strip filter\n"
- "Parameters 'StripWidth', 'StripIterations'",
- function=strip_bg,
- parameters=['StripWidth', 'StripIterations'],
- estimate=estimate_strip,
- configure=configure,
- is_background=True)),
- ('Snip',
- FitTheory(
- description="Compute background using a snip filter\n"
- "Parameter 'SnipWidth'",
- function=snip_bg,
- parameters=['SnipWidth'],
- estimate=estimate_snip,
- configure=configure,
- is_background=True)),
- ('Degree 2 Polynomial',
- FitTheory(
- description="Quadratic polynomial background, Parameters "
- "'a', 'b' and 'c'\ny = a*x^2 + b*x +c",
- function=poly,
- parameters=['a', 'b', 'c'],
- estimate=estimate_quadratic_poly,
- configure=configure,
- is_background=True)),
- ('Degree 3 Polynomial',
- FitTheory(
- description="Cubic polynomial background, Parameters "
- "'a', 'b', 'c' and 'd'\n"
- "y = a*x^3 + b*x^2 + c*x + d",
- function=poly,
- parameters=['a', 'b', 'c', 'd'],
- estimate=estimate_cubic_poly,
- configure=configure,
- is_background=True)),
- ('Degree 4 Polynomial',
- FitTheory(
- description="Quartic polynomial background\n"
- "y = a*x^4 + b*x^3 + c*x^2 + d*x + e",
- function=poly,
- parameters=['a', 'b', 'c', 'd', 'e'],
- estimate=estimate_quartic_poly,
- configure=configure,
- is_background=True)),
- ('Degree 5 Polynomial',
- FitTheory(
- description="Quaintic polynomial background\n"
- "y = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f",
- function=poly,
- parameters=['a', 'b', 'c', 'd', 'e', 'f'],
- estimate=estimate_quintic_poly,
- configure=configure,
- is_background=True))))
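
For reference, the background helpers defined in the module above can be called directly, as they were exposed before this deletion. The following sketch is illustrative only (synthetic data and arbitrary widths); function names and CONFIG keys are taken from the code above::

    import numpy
    from silx.math.fit import bgtheories

    # synthetic spectrum: two Gaussian-like peaks on a linear slope (illustrative)
    x = numpy.arange(1000, dtype=numpy.float64)
    y = (0.05 * x
         + 100.0 * numpy.exp(-0.5 * ((x - 300.0) / 10.0) ** 2)
         + 60.0 * numpy.exp(-0.5 * ((x - 700.0) / 15.0) ** 2))

    # enable smoothing prior to background extraction (keys from CONFIG)
    bgtheories.configure(SmoothingFlag=True, SmoothingWidth=5)

    strip_background = bgtheories.strip_bg(x, y, width=2, niter=5000)
    snip_background = bgtheories.snip_bg(x, y, width=16)
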
diff --git a/silx/math/fit/filters.pyx b/silx/math/fit/filters.pyx
deleted file mode 100644
index da1f6f5..0000000
--- a/silx/math/fit/filters.pyx
+++ /dev/null
@@ -1,416 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-"""This module provides background extraction functions and smoothing
-functions. These functions are extracted from the PyMca module SpecFitFuns.
-
-Index of background extraction functions:
-------------------------------------------
-
- - :func:`strip`
- - :func:`snip1d`
- - :func:`snip2d`
- - :func:`snip3d`
-
-Smoothing functions:
---------------------
-
- - :func:`savitsky_golay`
- - :func:`smooth1d`
- - :func:`smooth2d`
- - :func:`smooth3d`
-
-References:
------------
-
-.. [Morhac97] Miroslav Morháč et al.
- Background elimination methods for multidimensional coincidence γ-ray spectra.
- Nucl. Instruments and Methods in Physics Research A401 (1997) 113-132.
- https://doi.org/10.1016/S0168-9002(97)01023-1
-
-.. [Ryan88] C.G. Ryan et al.
- SNIP, a statistics-sensitive background treatment for the quantitative analysis of PIXE spectra in geoscience applications.
- Nucl. Instruments and Methods in Physics Research B34 (1988) 396-402*.
- https://doi.org/10.1016/0168-583X(88)90063-8
-
-API documentation:
--------------------
-
-"""
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "15/05/2017"
-
-import logging
-import numpy
-
-_logger = logging.getLogger(__name__)
-
-cimport cython
-cimport silx.math.fit.filters_wrapper as filters_wrapper
-
-
-def strip(data, w=1, niterations=1000, factor=1.0, anchors=None):
- """Extract background from data using the strip algorithm, as explained at
- http://pymca.sourceforge.net/stripbackground.html.
-
- In its simplest implementation it is just as an iterative procedure
- depending on two parameters. These parameters are the strip background
- width ``w``, and the number of iterations. At each iteration, if the
- contents of channel ``i``, ``y(i)``, is above the average of the contents
- of the channels at ``w`` channels of distance, ``y(i-w)`` and
- ``y(i+w)``, ``y(i)`` is replaced by the average.
- At the end of the process we are left with something that resembles a spectrum
- in which the peaks have been stripped.
-
- :param data: Data array
- :type data: numpy.ndarray
- :param w: Strip width
- :param niterations: number of iterations
- :param factor: scaling factor applied to the average of ``y(i-w)`` and
- ``y(i+w)`` before comparing to ``y(i)``
- :param anchors: Array of anchors, indices of points that will not be
- modified during the stripping procedure.
- :return: Data with peaks stripped away
- """
- cdef:
- double[::1] input_c
- double[::1] output
- long[::1] anchors_c
-
- if not isinstance(data, numpy.ndarray):
- if not hasattr(data, "__len__"):
- raise TypeError("data must be a sequence (list, tuple) " +
- "or a numpy array")
- data_shape = (len(data), )
- else:
- data_shape = data.shape
-
- input_c = numpy.array(data,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- output = numpy.empty(shape=(input_c.size,),
- dtype=numpy.float64)
-
- if anchors is not None and len(anchors):
- # numpy.int_ is the same as C long (http://docs.scipy.org/doc/numpy/user/basics.types.html)
- anchors_c = numpy.array(anchors,
- copy=False,
- dtype=numpy.int_,
- order='C')
- len_anchors = anchors_c.size
- else:
- # Make a dummy length-1 array, because if I use shape=(0,) I get the error
- # IndexError: Out of bounds on buffer access (axis 0)
- anchors_c = numpy.empty(shape=(1,),
- dtype=numpy.int_)
- len_anchors = 0
-
-
- status = filters_wrapper.strip(&input_c[0], input_c.size,
- factor, niterations, w,
- &anchors_c[0], len_anchors, &output[0])
-
- return numpy.asarray(output).reshape(data_shape)
-
-
-def snip1d(data, snip_width):
- """Estimate the baseline (background) of a 1D data vector by clipping peaks.
-
- Implementation of the algorithm SNIP in 1D is described in [Morhac97]_.
- The original idea for 1D and the low-statistics-digital-filter (lsdf) comes
- from [Ryan88]_.
-
- :param data: Data array, preferably 1D and of type *numpy.float64*.
-        Otherwise, the data array will be flattened and converted to
-        *dtype=numpy.float64* prior to applying the snip filter.
-    :type data: numpy.ndarray
-    :param snip_width: Width of the snip operator, in number of samples.
-        A sample will be iteratively compared to its neighbors up to a
-        distance of ``snip_width`` samples. This parameter has a direct
-        influence on the speed of the algorithm.
-    :type snip_width: int
- :return: Baseline of the input array, as an array of the same shape.
- :rtype: numpy.ndarray
- """
- cdef:
- double[::1] data_c
-
- if not isinstance(data, numpy.ndarray):
- if not hasattr(data, "__len__"):
- raise TypeError("data must be a sequence (list, tuple) " +
- "or a numpy array")
- data_shape = (len(data), )
- else:
- data_shape = data.shape
-
- data_c = numpy.array(data,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- filters_wrapper.snip1d(&data_c[0], data_c.size, snip_width)
-
- return numpy.asarray(data_c).reshape(data_shape)
-
-
-def snip2d(data, snip_width):
- """Estimate the baseline (background) of a 2D data signal by clipping peaks.
-
- Implementation of the algorithm SNIP in 2D described in [Morhac97]_.
-
- :param data: 2D array
- :type data: numpy.ndarray
-    :param snip_width: Width of the snip operator, in number of samples.
-        A wider snip operator yields a smoother result (lower-frequency
-        peaks are clipped) at the cost of a longer computation time.
-    :type snip_width: int
- :return: Baseline of the input array, as an array of the same shape.
- :rtype: numpy.ndarray
- """
- cdef:
- double[::1] data_c
-
- if not isinstance(data, numpy.ndarray):
- if not hasattr(data, "__len__") or not hasattr(data[0], "__len__"):
- raise TypeError("data must be a 2D sequence (list, tuple) " +
- "or a 2D numpy array")
- nrows = len(data)
- ncolumns = len(data[0])
- data_shape = (len(data), len(data[0]))
-
- else:
- data_shape = data.shape
- nrows = data_shape[0]
- if len(data_shape) == 2:
- ncolumns = data_shape[1]
- else:
- raise TypeError("data array must be 2-dimensional")
-
- data_c = numpy.array(data,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- filters_wrapper.snip2d(&data_c[0], nrows, ncolumns, snip_width)
-
- return numpy.asarray(data_c).reshape(data_shape)
-
-
-def snip3d(data, snip_width):
- """Estimate the baseline (background) of a 3D data signal by clipping peaks.
-
- Implementation of the algorithm SNIP in 3D described in [Morhac97]_.
-
- :param data: 3D array
- :type data: numpy.ndarray
-    :param snip_width: Width of the snip operator, in number of samples.
-        A wider snip operator yields a smoother result (lower-frequency
-        peaks are clipped) at the cost of a longer computation time.
-    :type snip_width: int
-
- :return: Baseline of the input array, as an array of the same shape.
- :rtype: numpy.ndarray
- """
- cdef:
- double[::1] data_c
-
- if not isinstance(data, numpy.ndarray):
- if not hasattr(data, "__len__") or not hasattr(data[0], "__len__") or\
- not hasattr(data[0][0], "__len__"):
- raise TypeError("data must be a 3D sequence (list, tuple) " +
- "or a 3D numpy array")
- nx = len(data)
- ny = len(data[0])
- nz = len(data[0][0])
- data_shape = (len(data), len(data[0]), len(data[0][0]))
- else:
- data_shape = data.shape
- nrows = data_shape[0]
- if len(data_shape) == 3:
- nx = data_shape[0]
- ny = data_shape[1]
- nz = data_shape[2]
- else:
- raise TypeError("data array must be 3-dimensional")
-
- data_c = numpy.array(data,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- filters_wrapper.snip3d(&data_c[0], nx, ny, nz, snip_width)
-
- return numpy.asarray(data_c).reshape(data_shape)
-
-
-def savitsky_golay(data, npoints=5):
- """Smooth a curve using a Savitsky-Golay filter.
-
- :param data: Input data
- :type data: 1D numpy array
-    :param npoints: Size of the smoothing operator, in number of samples.
-        Must be between 3 and 100.
- :return: Smoothed data
- """
- cdef:
- double[::1] data_c
- double[::1] output
-
- data_c = numpy.array(data,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- output = numpy.empty(shape=(data_c.size,),
- dtype=numpy.float64)
-
- status = filters_wrapper.SavitskyGolay(&data_c[0], data_c.size,
- npoints, &output[0])
-
- if status:
- _logger.error("Smoothing failed. Check that npoints is greater " +
- "than 3 and smaller than 100.")
-
- return numpy.asarray(output).reshape(data.shape)
-
-
-def smooth1d(data):
- """Simple smoothing for 1D data.
-
- For a data array :math:`y` of length :math:`n`, the smoothed array
- :math:`ys` is calculated as a weighted average of neighboring samples:
-
- :math:`ys_0 = 0.75 y_0 + 0.25 y_1`
-
- :math:`ys_i = 0.25 (y_{i-1} + 2 y_i + y_{i+1})` for :math:`0 < i < n-1`
-
- :math:`ys_{n-1} = 0.25 y_{n-2} + 0.75 y_{n-1}`
-
-
- :param data: 1D data array
- :type data: numpy.ndarray
- :return: Smoothed data
- :rtype: numpy.ndarray(dtype=numpy.float64)
- """
- cdef:
- double[::1] data_c
-
- if not isinstance(data, numpy.ndarray):
- if not hasattr(data, "__len__"):
- raise TypeError("data must be a sequence (list, tuple) " +
- "or a numpy array")
- data_shape = (len(data), )
- else:
- data_shape = data.shape
-
- data_c = numpy.array(data,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- filters_wrapper.smooth1d(&data_c[0], data_c.size)
-
- return numpy.asarray(data_c).reshape(data_shape)
-
-
-def smooth2d(data):
- """Simple smoothing for 2D data:
-    :func:`smooth1d` is applied successively along both axes
-
- :param data: 2D data array
- :type data: numpy.ndarray
- :return: Smoothed data
- :rtype: numpy.ndarray(dtype=numpy.float64)
- """
- cdef:
- double[::1] data_c
-
- if not isinstance(data, numpy.ndarray):
- if not hasattr(data, "__len__") or not hasattr(data[0], "__len__"):
- raise TypeError("data must be a 2D sequence (list, tuple) " +
- "or a 2D numpy array")
- nrows = len(data)
- ncolumns = len(data[0])
- data_shape = (len(data), len(data[0]))
-
- else:
- data_shape = data.shape
- nrows = data_shape[0]
- if len(data_shape) == 2:
- ncolumns = data_shape[1]
- else:
- raise TypeError("data array must be 2-dimensional")
-
- data_c = numpy.array(data,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- filters_wrapper.smooth2d(&data_c[0], nrows, ncolumns)
-
- return numpy.asarray(data_c).reshape(data_shape)
-
-
-def smooth3d(data):
- """Simple smoothing for 3D data:
-    :func:`smooth2d` is applied on each 2D slice of the data volume, along
-    all 3 axes
-
-    :param data: 3D data array
- :type data: numpy.ndarray
- :return: Smoothed data
- :rtype: numpy.ndarray(dtype=numpy.float64)
- """
- cdef:
- double[::1] data_c
-
- if not isinstance(data, numpy.ndarray):
- if not hasattr(data, "__len__") or not hasattr(data[0], "__len__") or\
- not hasattr(data[0][0], "__len__"):
- raise TypeError("data must be a 3D sequence (list, tuple) " +
- "or a 3D numpy array")
- nx = len(data)
- ny = len(data[0])
- nz = len(data[0][0])
- data_shape = (len(data), len(data[0]), len(data[0][0]))
- else:
- data_shape = data.shape
- nrows = data_shape[0]
- if len(data_shape) == 3:
- nx = data_shape[0]
- ny = data_shape[1]
- nz = data_shape[2]
- else:
- raise TypeError("data array must be 3-dimensional")
-
- data_c = numpy.array(data,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
-
- filters_wrapper.smooth3d(&data_c[0], nx, ny, nz)
-
- return numpy.asarray(data_c).reshape(data_shape)
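
A short usage sketch of the filter functions defined above, as they were exposed before this deletion (parameter values and the synthetic data are illustrative only)::

    import numpy
    from silx.math.fit.filters import savitsky_golay, smooth1d, snip1d, strip

    # noisy synthetic counting data (illustrative)
    y = numpy.random.default_rng(0).poisson(lam=10.0, size=512).astype(numpy.float64)

    smoothed = savitsky_golay(y, npoints=7)            # polynomial smoothing
    lightly_smoothed = smooth1d(y)                     # 3-point weighted average
    baseline_snip = snip1d(smoothed, snip_width=24)    # SNIP baseline estimate
    baseline_strip = strip(smoothed, w=5, niterations=2000,
                           factor=1.0, anchors=None)   # strip baseline estimate
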
diff --git a/silx/math/fit/filters/include/filters.h b/silx/math/fit/filters/include/filters.h
deleted file mode 100644
index 1ee9a95..0000000
--- a/silx/math/fit/filters/include/filters.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-
-#ifndef FITFILTERS_H
-#define FITFILTERS_H
-
-/* Background functions */
-void snip1d(double *data, int size, int width);
-//void snip1d_multiple(double *data, int n_channels, int snip_width, int n_spectra);
-void snip2d(double *data, int nrows, int ncolumns, int width);
-void snip3d(double *data, int nx, int ny, int nz, int width);
-
-int strip(double* input, long len_input, double c, long niter, int deltai,
- long* anchors, long len_anchors, double* output);
-
-/* Smoothing functions */
-
-int SavitskyGolay(double* input, long len_input, int npoints, double* output);
-
-void smooth1d(double *data, int size);
-void smooth2d(double *data, int size0, int size1);
-void smooth3d(double *data, int size0, int size1, int size2);
-
-
-#endif /* #define FITFILTERS_H */
diff --git a/silx/math/fit/filters/src/smoothnd.c b/silx/math/fit/filters/src/smoothnd.c
deleted file mode 100644
index cb96961..0000000
--- a/silx/math/fit/filters/src/smoothnd.c
+++ /dev/null
@@ -1,317 +0,0 @@
-#/*##########################################################################
-#
-# Copyright (c) 2004-2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-
-#define MAX_SAVITSKY_GOLAY_WIDTH 101
-#define MIN_SAVITSKY_GOLAY_WIDTH 3
-
-/* Wrapped functions */
-void smooth1d(double *data, int size);
-void smooth2d(double *data, int size0, int size1);
-void smooth3d(double *data, int size0, int size1, int size2);
-int SavitskyGolay(double* input, long len_input, int npoints, double* output);
-
-/* Internal functions */
-long index2d(long row_idx, long col_idx, long ncols);
-long index3d(long x_idx, long y_idx, long z_idx, long ny, long nz);
-void smooth1d_rows(double *data, long nrows, long ncols);
-void smooth1d_cols(double *data, long nrows, long ncols);
-void smooth1d_x(double *data, long nx, long ny, long nz);
-void smooth1d_y(double *data, long nx, long ny, long nz);
-void smooth1d_z(double *data, long nx, long ny, long nz);
-void smooth2d_yzslice(double *data, long nx, long ny, long nz);
-void smooth2d_xzslice(double *data, long nx, long ny, long nz);
-void smooth2d_xyslice(double *data, long nx, long ny, long nz);
-
-
-/* Simple smoothing of a 1D array */
-void smooth1d(double *data, int size)
-{
- long i;
- double prev_sample;
- double next_sample;
-
- if (size < 3)
- {
- return;
- }
- prev_sample = data[0];
- for (i=0; i<(size-1); i++)
- {
- next_sample = 0.25 * (prev_sample + 2 * data[i] + data[i+1]);
- prev_sample = data[i];
- data[i] = next_sample;
- }
- data[size-1] = 0.25 * prev_sample + 0.75 * data[size-1];
- return;
-}
-
-/* Smoothing of a 2D array*/
-void smooth2d(double *data, int nrows, int ncols)
-{
- /* smooth the first dimension (rows) */
- smooth1d_rows(data, nrows, ncols);
-
- /* smooth the 2nd dimension */
- smooth1d_cols(data, nrows, ncols);
-}
-
-/* Smoothing of a 3D array */
-void smooth3d(double *data, int nx, int ny, int nz)
-{
- smooth2d_xyslice(data, nx, ny, nz);
- smooth2d_xzslice(data, nx, ny, nz);
- smooth2d_yzslice(data, nx, ny, nz);
-}
-
-/* 1D Savitsky-Golay smoothing */
-int SavitskyGolay(double* input, long len_input, int npoints, double* output)
-{
-
- //double dpoints = 5.;
- double coeff[MAX_SAVITSKY_GOLAY_WIDTH];
- int i, j, m;
- double dhelp, den;
- double *data;
-
- memcpy(output, input, len_input * sizeof(double));
-
- if (!(npoints % 2)) npoints +=1;
-
- if((npoints < MIN_SAVITSKY_GOLAY_WIDTH) || (len_input < npoints) || \
- (npoints > MAX_SAVITSKY_GOLAY_WIDTH))
- {
- /* do not smooth data */
- return 1;
- }
-
- /* calculate the coefficients */
- m = (int) (npoints/2);
- den = (double) ((2*m-1) * (2*m+1) * (2*m + 3));
- for (i=0; i<= m; i++){
- coeff[m+i] = (double) (3 * (3*m*m + 3*m - 1 - 5*i*i ));
- coeff[m-i] = coeff[m+i];
- }
-
- /* simple smoothing at the beginning */
- for (j=0; j<=(int)(npoints/3); j++)
- {
- smooth1d(output, m);
- }
-
- /* simple smoothing at the end */
- for (j=0; j<=(int)(npoints/3); j++)
- {
- smooth1d((output+len_input-m-1), m);
- }
-
- /*one does not need the whole spectrum buffer, but code is clearer */
- data = (double *) malloc(len_input * sizeof(double));
- memcpy(data, output, len_input * sizeof(double));
-
- /* the actual SG smoothing in the middle */
- for (i=m; i<(len_input-m); i++){
- dhelp = 0;
- for (j=-m;j<=m;j++) {
- dhelp += coeff[m+j] * (*(data+i+j));
- }
- if(dhelp > 0.0){
- *(output+i) = dhelp / den;
- }
- }
- free(data);
- return (0);
-}
-
-/*********************/
-/* Utility functions */
-/*********************/
-
-long index2d(long row_idx, long col_idx, long ncols)
-{
- return (row_idx*ncols+col_idx);
-}
-
-/* Apply smooth 1d on all rows in a 2D array*/
-void smooth1d_rows(double *data, long nrows, long ncols)
-{
- long row_idx;
-
- for (row_idx=0; row_idx < nrows; row_idx++)
- {
- smooth1d(&data[row_idx * ncols], ncols);
- }
-}
-
-/* Apply smooth 1d on all columns in a 2D array*/
-void smooth1d_cols(double *data, long nrows, long ncols)
-{
- long row_idx, col_idx;
- long this_idx2d, next_idx2d;
- double prev_sample;
- double next_sample;
-
- for (col_idx=0; col_idx < ncols; col_idx++)
- {
- prev_sample = data[index2d(0, col_idx, ncols)];
- for (row_idx=0; row_idx<(nrows-1); row_idx++)
- {
- this_idx2d = index2d(row_idx, col_idx, ncols);
- next_idx2d = index2d(row_idx+1, col_idx, ncols);
-
- next_sample = 0.25 * (prev_sample + \
- 2 * data[this_idx2d] + \
- data[next_idx2d]);
- prev_sample = data[this_idx2d];
- data[this_idx2d] = next_sample;
- }
-
- this_idx2d = index2d(nrows-1, col_idx, ncols);
- data[this_idx2d] = 0.25 * prev_sample + 0.75 * data[this_idx2d];
- }
-}
-
-long index3d(long x_idx, long y_idx, long z_idx, long ny, long nz)
-{
- return ((x_idx*ny + y_idx) * nz + z_idx);
-}
-
-/* Apply smooth 1d along first dimension in a 3D array*/
-void smooth1d_x(double *data, long nx, long ny, long nz)
-{
- long x_idx, y_idx, z_idx;
- long this_idx3d, next_idx3d;
- double prev_sample;
- double next_sample;
-
- for (y_idx=0; y_idx < ny; y_idx++)
- {
- for (z_idx=0; z_idx < nz; z_idx++)
- {
- prev_sample = data[index3d(0, y_idx, z_idx, ny, nz)];
- for (x_idx=0; x_idx<(nx-1); x_idx++)
- {
- this_idx3d = index3d(x_idx, y_idx, z_idx, ny, nz);
- next_idx3d = index3d(x_idx+1, y_idx, z_idx, ny, nz);
-
- next_sample = 0.25 * (prev_sample + \
- 2 * data[this_idx3d] + \
- data[next_idx3d]);
- prev_sample = data[this_idx3d];
- data[this_idx3d] = next_sample;
- }
-
- this_idx3d = index3d(nx-1, y_idx, z_idx, ny, nz);
- data[this_idx3d] = 0.25 * prev_sample + 0.75 * data[this_idx3d];
- }
- }
-}
-
-/* Apply smooth 1d along second dimension in a 3D array*/
-void smooth1d_y(double *data, long nx, long ny, long nz)
-{
- long x_idx, y_idx, z_idx;
- long this_idx3d, next_idx3d;
- double prev_sample;
- double next_sample;
-
- for (x_idx=0; x_idx < nx; x_idx++)
- {
- for (z_idx=0; z_idx < nz; z_idx++)
- {
- prev_sample = data[index3d(x_idx, 0, z_idx, ny, nz)];
- for (y_idx=0; y_idx<(ny-1); y_idx++)
- {
- this_idx3d = index3d(x_idx, y_idx, z_idx, ny, nz);
- next_idx3d = index3d(x_idx, y_idx+1, z_idx, ny, nz);
-
- next_sample = 0.25 * (prev_sample + \
- 2 * data[this_idx3d] + \
- data[next_idx3d]);
- prev_sample = data[this_idx3d];
- data[this_idx3d] = next_sample;
- }
-
- this_idx3d = index3d(x_idx, ny-1, z_idx, ny, nz);
- data[this_idx3d] = 0.25 * prev_sample + 0.75 * data[this_idx3d];
- }
- }
-}
-
-/* Apply smooth 1d along third dimension in a 3D array*/
-void smooth1d_z(double *data, long nx, long ny, long nz)
-{
- long x_idx, y_idx;
- long idx3d_first_sample;
-
- for (x_idx=0; x_idx < nx; x_idx++)
- {
- for (y_idx=0; y_idx < ny; y_idx++)
- {
- idx3d_first_sample = index3d(x_idx, y_idx, 0, ny, nz);
- /*We can use regular 1D smoothing function because z samples
- are contiguous in memory*/
- smooth1d(&data[idx3d_first_sample], nz);
- }
- }
-}
-
-/* 2D smoothing of a YZ slice in a 3D volume*/
-void smooth2d_yzslice(double *data, long nx, long ny, long nz)
-{
- long x_idx;
- long slice_size = ny * nz;
-
- /* a YZ slice is a "normal" 2D array of memory-contiguous data*/
- for (x_idx=0; x_idx < nx; x_idx++)
- {
- smooth2d(&data[x_idx*slice_size], ny, nz);
- }
-}
-
-/* 2D smoothing of a XZ slice in a 3D volume*/
-void smooth2d_xzslice(double *data, long nx, long ny, long nz)
-{
-
- /* smooth along the first dimension */
- smooth1d_x(data, nx, ny, nz);
-
- /* smooth along the third dimension */
- smooth1d_z(data, nx, ny, nz);
-}
-
-/* 2D smoothing of a XY slice in a 3D volume*/
-void smooth2d_xyslice(double *data, long nx, long ny, long nz)
-{
- /* smooth along the first dimension */
- smooth1d_x(data, nx, ny, nz);
-
- /* smooth along the second dimension */
- smooth1d_y(data, nx, ny, nz);
-}
-
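
The in-place recurrence implemented by smooth1d above is equivalent to a symmetric 3-point weighted average of the original samples (prev_sample keeps the pre-update value). A NumPy sketch of that equivalence follows; it is not part of the deleted sources and the name smooth1d_reference is hypothetical::

    import numpy

    def smooth1d_reference(data):
        # ys[i] = 0.25 * (y[i-1] + 2*y[i] + y[i+1]) in the interior,
        # with 0.75/0.25 weights at the two edges, as in the C loop above
        y = numpy.asarray(data, dtype=numpy.float64)
        if y.size < 3:
            return y.copy()
        out = y.copy()
        out[0] = 0.75 * y[0] + 0.25 * y[1]
        out[1:-1] = 0.25 * (y[:-2] + 2.0 * y[1:-1] + y[2:])
        out[-1] = 0.25 * y[-2] + 0.75 * y[-1]
        return out
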
diff --git a/silx/math/fit/filters/src/snip1d.c b/silx/math/fit/filters/src/snip1d.c
deleted file mode 100644
index 994a272..0000000
--- a/silx/math/fit/filters/src/snip1d.c
+++ /dev/null
@@ -1,149 +0,0 @@
-#/*##########################################################################
-# Copyright (c) 2004-2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-/*
- Implementation of the algorithm SNIP in 1D described in
- Miroslav Morhac et al. Nucl. Instruments and Methods in Physics Research A401 (1997) 113-132.
-
- The original idea for 1D and the low-statistics-digital-filter (lsdf) come from
- C.G. Ryan et al. Nucl. Instruments and Methods in Physics Research B34 (1988) 396-402.
-*/
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-
-void lls(double *data, int size);
-void lls_inv(double *data, int size);
-void snip1d(double *data, int n_channels, int snip_width);
-void snip1d_multiple(double *data, int n_channels, int snip_width, int n_spectra);
-void lsdf(double *data, int size, int fwhm, double f, double A, double M, double ratio);
-
-void lls(double *data, int size)
-{
- int i;
- for (i=0; i< size; i++)
- {
- data[i] = log(log(sqrt(data[i]+1.0)+1.0)+1.0);
- }
-}
-
-void lls_inv(double *data, int size)
-{
- int i;
- double tmp;
- for (i=0; i< size; i++)
- {
- /* slightly different than the published formula because
- with the original formula:
-
- tmp = exp(exp(data[i]-1.0)-1.0);
- data[i] = tmp * tmp - 1.0;
-
- one does not recover the original data */
-
- tmp = exp(exp(data[i])-1.0)-1.0;
- data[i] = tmp * tmp - 1.0;
- }
-}
-
-void lsdf(double *data, int size, int fwhm, double f, double A, double M, double ratio)
-{
- int channel, i, j;
- double L, R, S;
- int width;
- double dhelp;
-
- width = (int) (f * fwhm);
- for (channel=width; channel<(size-width); channel++)
- {
- i = width;
- while(i>0)
- {
- L=0;
- R=0;
- for(j=channel-i; j<channel; j++)
- {
- L += data[j];
- }
- for(j=channel+1; j<channel+i; j++)
- {
- R += data[j];
- }
- S = data[channel] + L + R;
- if (S<M)
- {
- data[channel] = S /(2*i+1);
- break;
- }
- dhelp = (R+1)/(L+1);
- if ((dhelp < ratio) && (dhelp > (1/ratio)))
- {
- if (S<(A*sqrt(data[channel])))
- {
- data[channel] = S /(2*i+1);
- break;
- }
- }
- i=i-1;
- }
- }
-}
-
-
-void snip1d(double *data, int n_channels, int snip_width)
-{
- snip1d_multiple(data, n_channels, snip_width, 1);
-}
-
-void snip1d_multiple(double *data, int n_channels, int snip_width, int n_spectra)
-{
- int i;
- int j;
- int p;
- int offset;
- double *w;
-
- i = (int) (0.5 * snip_width);
- /* lsdf(data, size, i, 1.5, 75., 10., 1.3); */
-
- w = (double *) malloc(n_channels * sizeof(double));
-
- for (j=0; j < n_spectra; j++)
- {
- offset = j * n_channels;
- for (p = snip_width; p > 0; p--)
- {
- for (i=p; i<(n_channels - p); i++)
- {
- w[i] = MIN(data[i + offset], 0.5*(data[i + offset - p] + data[ i + offset + p]));
- }
- for (i=p; i<(n_channels - p); i++)
- {
- data[i+offset] = w[i];
- }
- }
- }
- free(w);
-}
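
The SNIP clipping loop above (without the optional lsdf pre-treatment) translates directly to NumPy. A reference sketch for a single spectrum; it is not part of the deleted sources and the name snip1d_reference is hypothetical::

    import numpy

    def snip1d_reference(data, snip_width):
        # for p = snip_width..1, clip each sample against the mean of its
        # two neighbours at distance p, as in snip1d_multiple above
        y = numpy.array(data, dtype=numpy.float64, copy=True)
        n = y.size
        for p in range(snip_width, 0, -1):
            if n <= 2 * p:
                continue
            y[p:n - p] = numpy.minimum(y[p:n - p],
                                       0.5 * (y[:n - 2 * p] + y[2 * p:]))
        return y
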
diff --git a/silx/math/fit/filters/src/snip2d.c b/silx/math/fit/filters/src/snip2d.c
deleted file mode 100644
index 235759c..0000000
--- a/silx/math/fit/filters/src/snip2d.c
+++ /dev/null
@@ -1,96 +0,0 @@
-#/*##########################################################################
-#
-# The PyMca X-Ray Fluorescence Toolkit
-#
-# Copyright (c) 2004-2014 European Synchrotron Radiation Facility
-#
-# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
-# the ESRF by the Software group.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-/*
- Implementation of the algorithm SNIP in 2D described in
- Miroslav Morhac et al. Nucl. Instruments and Methods in Physics Research A401 (1997) 113-132.
-*/
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-
-void lls(double *data, int size);
-void lls_inv(double *data, int size);
-
-void snip2d(double *data, int nrows, int ncolumns, int width)
-{
- int i, j;
- int p;
- int size;
- double *w;
- double P1, P2, P3, P4;
- double S1, S2, S3, S4;
- double dhelp;
- int iminuspxncolumns; /* (i-p) * ncolumns */
- int ixncolumns; /* i * ncolumns */
- int ipluspxncolumns; /* (i+p) * ncolumns */
-
- size = nrows * ncolumns;
- w = (double *) malloc(size * sizeof(double));
-
- for (p=width; p > 0; p--)
- {
- for (i=p; i<(nrows-p); i++)
- {
- iminuspxncolumns = (i-p) * ncolumns;
- ixncolumns = i * ncolumns;
- ipluspxncolumns = (i+p) * ncolumns;
- for (j=p; j<(ncolumns-p); j++)
- {
- P4 = data[ iminuspxncolumns + (j-p)]; /* P4 = data[i-p][j-p] */
- S4 = data[ iminuspxncolumns + j]; /* S4 = data[i-p][j] */
- P2 = data[ iminuspxncolumns + (j+p)]; /* P2 = data[i-p][j+p] */
- S3 = data[ ixncolumns + (j-p)]; /* S3 = data[i][j-p] */
- S2 = data[ ixncolumns + (j+p)]; /* S2 = data[i][j+p] */
- P3 = data[ ipluspxncolumns + (j-p)]; /* P3 = data[i+p][j-p] */
- S1 = data[ ipluspxncolumns + j]; /* S1 = data[i+p][j] */
- P1 = data[ ipluspxncolumns + (j+p)]; /* P1 = data[i+p][j+p] */
- dhelp = 0.5*(P1+P3);
- S1 = MAX(S1, dhelp) - dhelp;
- dhelp = 0.5*(P1+P2);
- S2 = MAX(S2, dhelp) - dhelp;
- dhelp = 0.5*(P3+P4);
- S3 = MAX(S3, dhelp) - dhelp;
- dhelp = 0.5*(P2+P4);
- S4 = MAX(S4, dhelp) - dhelp;
- w[ixncolumns + j] = MIN(data[ixncolumns + j], 0.5 * (S1+S2+S3+S4) + 0.25 * (P1+P2+P3+P4));
- }
- }
- for (i=p; i<(nrows-p); i++)
- {
- ixncolumns = i * ncolumns;
- for (j=p; j<(ncolumns-p); j++)
- {
- data[ixncolumns + j] = w[ixncolumns + j];
- }
- }
- }
- free(w);
-}
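
A direct, loop-for-loop Python translation of the 2D SNIP pass above can serve as a slow reference implementation; it is not part of the deleted sources and the name snip2d_reference is hypothetical::

    import numpy

    def snip2d_reference(data, width):
        # mirrors the C snip2d: clip each interior sample against a combination
        # of its corner (P) and edge (S) neighbours at distance p
        d = numpy.array(data, dtype=numpy.float64, copy=True)
        nrows, ncols = d.shape
        for p in range(width, 0, -1):
            w = d.copy()
            for i in range(p, nrows - p):
                for j in range(p, ncols - p):
                    P4, S4, P2 = d[i - p, j - p], d[i - p, j], d[i - p, j + p]
                    S3, S2 = d[i, j - p], d[i, j + p]
                    P3, S1, P1 = d[i + p, j - p], d[i + p, j], d[i + p, j + p]
                    S1 = max(S1, 0.5 * (P1 + P3)) - 0.5 * (P1 + P3)
                    S2 = max(S2, 0.5 * (P1 + P2)) - 0.5 * (P1 + P2)
                    S3 = max(S3, 0.5 * (P3 + P4)) - 0.5 * (P3 + P4)
                    S4 = max(S4, 0.5 * (P2 + P4)) - 0.5 * (P2 + P4)
                    w[i, j] = min(d[i, j], 0.5 * (S1 + S2 + S3 + S4)
                                  + 0.25 * (P1 + P2 + P3 + P4))
            d[p:nrows - p, p:ncols - p] = w[p:nrows - p, p:ncols - p]
        return d
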
diff --git a/silx/math/fit/filters/src/snip3d.c b/silx/math/fit/filters/src/snip3d.c
deleted file mode 100644
index cf48ee4..0000000
--- a/silx/math/fit/filters/src/snip3d.c
+++ /dev/null
@@ -1,186 +0,0 @@
-#/*##########################################################################
-#
-# The PyMca X-Ray Fluorescence Toolkit
-#
-# Copyright (c) 2004-2014 European Synchrotron Radiation Facility
-#
-# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
-# the ESRF by the Software group.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-/*
- Implementation of the algorithm SNIP in 3D described in
- Miroslav Morhac et al. Nucl. Instruments and Methods in Physics Research A401 (1997) 113-132.
-*/
-
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-
-void lls(double *data, int size);
-void lls_inv(double *data, int size);
-
-void snip3d(double *data, int nx, int ny, int nz, int width)
-{
- int i, j, k;
- int p;
- int size;
- double *w;
- double P1, P2, P3, P4, P5, P6, P7, P8;
- double R1, R2, R3, R4, R5, R6;
- double S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12;
- double dhelp;
- long ioffset;
- long iplus;
- long imin;
- long joffset;
- long jplus;
- long jmin;
-
- size = nx * ny * nz;
- w = (double *) malloc(size * sizeof(double));
-
- for (p=width; p > 0; p--)
- {
- for (i=p; i<(nx-p); i++)
- {
- ioffset = i * ny * nz;
- iplus = (i + p) * ny * nz;
- imin = (i - p) * ny * nz;
- for (j=p; j<(ny-p); j++)
- {
- joffset = j * nz;
- jplus = (j + p) * nz;
- jmin = (j - p) * nz;
- for (k=p; k<(nz-p); k++)
- {
- P1 = data[iplus + jplus + k-p]; /* P1 = data[i+p][j+p][k-p] */
- P2 = data[imin + jplus + k-p]; /* P2 = data[i-p][j+p][k-p] */
- P3 = data[iplus + jmin + k-p]; /* P3 = data[i+p][j-p][k-p] */
- P4 = data[imin + jmin + k-p]; /* P4 = data[i-p][j-p][k-p] */
- P5 = data[iplus + jplus + k+p]; /* P5 = data[i+p][j+p][k+p] */
- P6 = data[imin + jplus + k+p]; /* P6 = data[i-p][j+p][k+p] */
- P7 = data[imin + jmin + k+p]; /* P7 = data[i-p][j-p][k+p] */
- P8 = data[iplus + jmin + k+p]; /* P8 = data[i+p][j-p][k+p] */
-
- S1 = data[iplus + joffset + k-p]; /* S1 = data[i+p][j][k-p] */
- S2 = data[ioffset + jmin + k-p]; /* S2 = data[i][j+p][k-p] */
- S3 = data[imin + joffset + k-p]; /* S3 = data[i-p][j][k-p] */
- S4 = data[ioffset + jmin + k-p]; /* S4 = data[i][j-p][k-p] */
- S5 = data[imin + joffset + k+p]; /* S5 = data[i-p][j][k+p] */
- S6 = data[ioffset + jplus + k+p]; /* S6 = data[i][j+p][k+p] */
- S7 = data[imin + joffset + k+p]; /* S7 = data[i-p][j][k+p] */
- S8 = data[ioffset + jmin + k+p]; /* S8 = data[i][j-p][k+p] */
- S9 = data[imin + jplus + k]; /* S9 = data[i-p][j+p][k] */
- S10 = data[imin + jmin + k]; /* S10 = data[i-p][j-p][k] */
- S11 = data[iplus + jmin + k]; /* S11 = data[i+p][j-p][k] */
- S12 = data[iplus + jplus + k]; /* S12 = data[i+p][j+p][k] */
-
- R1 = data[ioffset + joffset + k-p]; /* R1 = data[i][j][k-p] */
- R2 = data[ioffset + joffset + k+p]; /* R2 = data[i][j][k+p] */
- R3 = data[imin + joffset + k]; /* R3 = data[i-p][j][k] */
- R4 = data[iplus + joffset + k]; /* R4 = data[i+p][j][k] */
- R5 = data[ioffset + jplus + k]; /* R5 = data[i][j+p][k] */
- R6 = data[ioffset + jmin + k]; /* R6 = data[i][j-p][k] */
-
- dhelp = 0.5*(P1+P3);
- S1 = MAX(S1, dhelp) - dhelp;
-
- dhelp = 0.5*(P1+P2);
- S2 = MAX(S2, dhelp) - dhelp;
-
- dhelp = 0.5*(P2+P4);
- S3 = MAX(S3, dhelp) - dhelp;
-
- dhelp = 0.5*(P3+P4);
- S4 = MAX(S4, dhelp) - dhelp;
-
- dhelp = 0.5*(P5+P8); /* Different from paper (P5+P7) but according to drawing */
- S5 = MAX(S5, dhelp) - dhelp;
-
- dhelp = 0.5*(P5+P6);
- S6 = MAX(S6, dhelp) - dhelp;
-
- dhelp = 0.5*(P6+P7); /* Different from paper (P6+P8) but according to drawing */
- S7 = MAX(S7, dhelp) - dhelp;
-
- dhelp = 0.5*(P7+P8);
- S8 = MAX(S8, dhelp) - dhelp;
-
- dhelp = 0.5*(P2+P6);
- S9 = MAX(S9, dhelp) - dhelp;
-
- dhelp = 0.5*(P4+P7); /* Different from paper (P4+P8) but according to drawing */
- S10 = MAX(S10, dhelp) - dhelp;
-
- dhelp = 0.5*(P3+P8); /* Different from paper (P1+P5) but according to drawing */
- S11 = MAX(S11, dhelp) - dhelp;
-
- dhelp = 0.5*(P1+P5); /* Different from paper (P3+P7) but according to drawing */
- S12 = MAX(S12, dhelp) - dhelp;
-
-                    /* The published formulae correspond to having
-                       P7 and P8 interchanged, and S11 and S12 interchanged,
-                       with respect to the published drawing */
-
- dhelp = 0.5 * (S1+S2+S3+S4) + 0.25 * (P1+P2+P3+P4);
- R1 = MAX(R1, dhelp) - dhelp;
-
- dhelp = 0.5 * (S5+S6+S7+S8) + 0.25 * (P5+P6+P7+P8);
- R2 = MAX(R2, dhelp) - dhelp;
-
- dhelp = 0.5 * (S3+S7+S9+S10) + 0.25 * (P2+P4+P6+P7); /* Again same P7 and P8 change */
- R3 = MAX(R3, dhelp) - dhelp;
-
- dhelp = 0.5 * (S1+S5+S11+S12) + 0.25 * (P1+P3+P5+P8); /* Again same P7 and P8 change */
- R4 = MAX(R4, dhelp) - dhelp;
-
- dhelp = 0.5 * (S2+S6+S9+S12) + 0.25 * (P1+P2+P5+P6); /* Again same S11 and S12 change */
- R5 = MAX(R5, dhelp) - dhelp;
-
- dhelp = 0.5 * (S4+S8+S10+S11) + 0.25 * (P3+P4+P7+P8); /* Again same S11 and S12 change */
- R6 = MAX(R6, dhelp) - dhelp;
-
- dhelp = 0.5 * (R1 + R2 + R3 + R4 + R5 + R6) +\
- 0.25 * (S1 + S2 + S3 + S4 + S5 + S6) +\
- 0.25 * (S7 + S8 + S9 + S10 + S11 + S12) +\
- 0.125 * (P1 + P2 + P3 + P4 + P5 + P6 + P7 + P8);
- w[ioffset + joffset + k] = MIN(data[ioffset + joffset + k], dhelp);
- }
- }
- }
- for (i=p; i<(nx-p); i++)
- {
- ioffset = i * ny * nz;
- for (j=p; j<(ny-p); j++)
- {
- joffset = j * nz;
-                for (k=p; k<(nz-p); k++)
- {
- data[ioffset + joffset + k] = w[ioffset + joffset + k];
- }
- }
- }
- }
- free(w);
-}
diff --git a/silx/math/fit/filters/src/strip.c b/silx/math/fit/filters/src/strip.c
deleted file mode 100644
index dec0742..0000000
--- a/silx/math/fit/filters/src/strip.c
+++ /dev/null
@@ -1,118 +0,0 @@
-#/*##########################################################################
-# Copyright (c) 2004-2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-/*
-    This file provides a background strip function, to isolate the low-frequency
-    background signal from a spectrum (so that it can later be subtracted from
-    the signal, leaving only the peaks to be fitted).
-
-    It is adapted from the PyMca source file "SpecFitFuns.c". The main
-    difference from the original code is that this code does not handle the
-    Python wrapping, which is done elsewhere using Cython.
-
- Authors: V.A. Sole, P. Knobel
- License: MIT
- Last modified: 17/06/2016
-*/
-
-#include <string.h>
-
-#include <stdio.h>
-
- /* strip(double* input, long len_input,
-          double c, long niter, int deltai,
-          long* anchors, long len_anchors,
-          double* output)
-
-    The strip background is probably PyMca's most popular background model.
-
-    In its simplest implementation it is just an iterative procedure depending
-    on two parameters: the strip width w (deltai below) and the number of
-    iterations. At each iteration, if the content of channel i, y(i), is above
-    c times the average of the contents of the channels at a distance of w
-    channels, y(i-w) and y(i+w), then y(i) is replaced by that average.
-    At the end of the process we are left with something that resembles a
-    spectrum in which the peaks have been "stripped".
-
-    A short NumPy sketch of this procedure is given after this file's diff.
-
-    Parameters:
-
-    - input: input data array
-    - len_input: number of samples in the input array
-    - c: scaling factor applied to the average of y(i-w) and y(i+w) before
-      comparing it to y(i)
-    - niter: number of iterations
-    - deltai: operator width w (in number of channels)
-    - anchors: array of anchors, indices of points that will not be
-      modified during the stripping procedure
-    - len_anchors: number of anchors
-    - output: output array
-
-*/
-int strip(double* input, long len_input,
- double c, long niter, int deltai,
- long* anchors, long len_anchors,
- double* output)
-{
- long iter_index, array_index, anchor_index, anchor;
- int anchor_nearby_flag;
- double t_mean;
-
- memcpy(output, input, len_input * sizeof(double));
-
- if (deltai <=0) deltai = 1;
-
- if (len_input < (2*deltai+1)) return(-1);
-
- if (len_anchors > 0) {
- for (iter_index = 0; iter_index < niter; iter_index++) {
- for (array_index = deltai; array_index < len_input - deltai; array_index++) {
- /* if index is within +- deltai of an anchor, don't do anything */
- anchor_nearby_flag = 0;
- for (anchor_index=0; anchor_index<len_anchors; anchor_index++)
- {
- anchor = anchors[anchor_index];
- if (array_index > (anchor - deltai) && array_index < (anchor + deltai))
- {
- anchor_nearby_flag = 1;
- break;
- }
- }
- /* skip this array_index index */
- if (anchor_nearby_flag) {
- continue;
- }
-
- t_mean = 0.5 * (input[array_index-deltai] + input[array_index+deltai]);
- if (input[array_index] > (t_mean * c))
- output[array_index] = t_mean;
- }
- memcpy(input, output, len_input * sizeof(double));
- }
- }
- else {
- for (iter_index = 0; iter_index < niter; iter_index++) {
- for (array_index=deltai; array_index < len_input - deltai; array_index++) {
- t_mean = 0.5 * (input[array_index-deltai] + input[array_index+deltai]);
-
- if (input[array_index] > (t_mean * c))
- output[array_index] = t_mean;
- }
- memcpy(input, output, len_input * sizeof(double));
- }
- }
- return(0);
-}
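A minimal NumPy sketch of the iterative procedure described in the strip.c comment above (ignoring anchors, and not claiming to reproduce the removed C code exactly; the helper name strip_sketch is ours):

    import numpy

    def strip_sketch(y, c=1.0, niter=1000, w=1):
        """Illustrative strip background: at each iteration, y[i] is replaced
        by the mean of y[i-w] and y[i+w] whenever it exceeds c times that mean."""
        y = numpy.asarray(y, dtype=numpy.float64).copy()
        for _ in range(niter):
            mean = 0.5 * (y[:-2 * w] + y[2 * w:])   # 0.5 * (y[i-w] + y[i+w])
            center = y[w:-w]
            y[w:-w] = numpy.where(center > c * mean, mean, center)
        return y

    # A peak sitting on a slope is progressively "stripped" down to the slope
    x = numpy.arange(100, dtype=numpy.float64)
    signal = 0.1 * x + 10.0 * numpy.exp(-0.5 * ((x - 50.0) / 3.0) ** 2)
    background = strip_sketch(signal, c=1.0, niter=5000, w=2)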
diff --git a/silx/math/fit/filters_wrapper.pxd b/silx/math/fit/filters_wrapper.pxd
deleted file mode 100644
index e4f7c72..0000000
--- a/silx/math/fit/filters_wrapper.pxd
+++ /dev/null
@@ -1,71 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "22/06/2016"
-
-cimport cython
-
-cdef extern from "filters.h":
- void snip1d(double *data,
- int size,
- int width)
-
- void snip2d(double *data,
- int nrows,
- int ncolumns,
- int width)
-
- void snip3d(double *data,
- int nx,
- int ny,
- int nz,
- int width)
-
- int strip(double* input,
- long len_input,
- double c,
- long niter,
- int deltai,
- long* anchors,
- long len_anchors,
- double* output)
-
- int SavitskyGolay(double* input,
- long len_input,
- int npoints,
- double* output)
-
- void smooth1d(double *data,
- int size)
-
- void smooth2d(double *data,
- int size0,
- int size1)
-
- void smooth3d(double *data,
- int size0,
- int size1,
- int size2)
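The C routines declared in this .pxd are the ones exposed at the Python level by silx.math.fit.filters. As a hedged usage sketch, with argument names taken from the calls visible elsewhere in this changeset (strip(y, w=..., niterations=..., factor=...) in fitmanager.py and fittheories.py, savitsky_golay(y, npoints) and smooth1d(y)):

    import numpy
    from silx.math.fit.filters import savitsky_golay, smooth1d, strip

    x = numpy.arange(1000, dtype=numpy.float64)
    # Synthetic spectrum: linear background plus one gaussian peak
    y = 0.05 * x + 100.0 * numpy.exp(-0.5 * ((x - 400.0) / 20.0) ** 2)

    smoothed_sg = savitsky_golay(y, 5)   # Savitsky-Golay smoothing over 5 points
    smoothed_1d = smooth1d(y)            # simple 1D smoothing
    # Iterative strip background, same parameter values as the FitManager defaults
    background = strip(y, w=2, niterations=5000, factor=1.0)
    net_signal = y - background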
diff --git a/silx/math/fit/fitmanager.py b/silx/math/fit/fitmanager.py
deleted file mode 100644
index b60e073..0000000
--- a/silx/math/fit/fitmanager.py
+++ /dev/null
@@ -1,1087 +0,0 @@
-# coding: utf-8
-# /*#########################################################################
-#
-# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ##########################################################################*/
-"""
-This module provides a tool to perform advanced fitting. The actual fit relies
-on :func:`silx.math.fit.leastsq`.
-
-This module deals with:
-
- - handling of the model functions (using a set of default functions or
- loading custom user functions)
- - handling of estimation functions, which are used to determine the number
- of parameters to be fitted for functions with an unknown number of
- parameters (such as the sum of a variable number of gaussian curves),
- and to find reasonable initial parameters for input to the iterative
- fitting algorithm
- - handling of custom derivative functions that can be passed as a
- parameter to :func:`silx.math.fit.leastsq`
- - providing different background models
-
-"""
-from collections import OrderedDict
-import logging
-import numpy
-from numpy.linalg.linalg import LinAlgError
-import os
-import sys
-
-from .filters import strip, smooth1d
-from .leastsq import leastsq
-from .fittheory import FitTheory
-from . import bgtheories
-
-
-__authors__ = ["V.A. Sole", "P. Knobel"]
-__license__ = "MIT"
-__date__ = "16/01/2017"
-
-_logger = logging.getLogger(__name__)
-
-
-class FitManager(object):
- """
- Fit functions manager
-
- :param x: Abscissa data. If ``None``, :attr:`xdata` is set to
- ``numpy.array([0.0, 1.0, 2.0, ..., len(y)-1])``
- :type x: Sequence or numpy array or None
- :param y: The dependent data ``y = f(x)``. ``y`` must have the same
- shape as ``x`` if ``x`` is not ``None``.
- :type y: Sequence or numpy array or None
- :param sigmay: The uncertainties in the ``ydata`` array. These can be
- used as weights in the least-squares problem, if ``weight_flag``
- is ``True``.
- If ``None``, the uncertainties are assumed to be 1, unless
- ``weight_flag`` is ``True``, in which case the square-root
- of ``y`` is used.
- :type sigmay: Sequence or numpy array or None
- :param weight_flag: If this parameter is ``True`` and ``sigmay``
- uncertainties are not specified, the square root of ``y`` is used
- as weights in the least-squares problem. If ``False``, the
- uncertainties are set to 1.
- :type weight_flag: boolean
- """
- def __init__(self, x=None, y=None, sigmay=None, weight_flag=False):
- """
- """
- self.fitconfig = {
- 'WeightFlag': weight_flag,
- 'fitbkg': 'No Background',
- 'fittheory': None,
- # Next few parameters are defined for compatibility with legacy theories
- # which take the background as argument for their estimation function
- 'StripWidth': 2,
- 'StripIterations': 5000,
- 'StripThresholdFactor': 1.0,
- 'SmoothingFlag': False
- }
- """Dictionary of fit configuration parameters.
- These parameters can be modified using the :meth:`configure` method.
-
- Keys are:
-
- - 'fitbkg': name of the function used for fitting a low frequency
- background signal
- - 'FwhmPoints': default full width at half maximum value for the
- peaks.
- - 'Sensitivity': Sensitivity parameter for the peak detection
- algorithm (:func:`silx.math.fit.peak_search`)
- """
-
- self.theories = OrderedDict()
- """Dictionary of fit theories, defining functions to be fitted
- to individual peaks.
-
- Keys are descriptive theory names (e.g "Gaussians" or "Step up").
- Values are :class:`silx.math.fit.fittheory.FitTheory` objects with
- the following attributes:
-
- - *"function"* is the fit function for an individual peak
- - *"parameters"* is a sequence of parameter names
- - *"estimate"* is the parameter estimation function
- - *"configure"* is the function returning the configuration dict
- for the theory in the format described in the :attr:`fitconfig`
- documentation
- - *"derivative"* (optional) is a custom derivative function, whose
- signature is described in the documentation of
- :func:`silx.math.fit.leastsq.leastsq`
- (``model_deriv(xdata, parameters, index)``).
- - *"description"* is a description string
- """
-
- self.selectedtheory = None
- """Name of currently selected theory. This name matches a key in
- :attr:`theories`."""
-
- self.bgtheories = OrderedDict()
- """Dictionary of background theories.
-
- See :attr:`theories` for documentation on theories.
- """
-
- # Load default theories (constant, linear, strip)
- self.loadbgtheories(bgtheories)
-
- self.selectedbg = 'No Background'
- """Name of currently selected background theory. This name must be
- an existing key in :attr:`bgtheories`."""
-
- self.fit_results = []
- """This list stores detailed information about all fit parameters.
- It is initialized in :meth:`estimate` and completed with final fit
- values in :meth:`runfit`.
-
- Each fit parameter is stored as a dictionary with following fields:
-
- - 'name': Parameter name.
- - 'estimation': Estimated value.
- - 'group': Group number. Group 0 corresponds to the background
- function parameters. Group ``n`` (for ``n>0``) corresponds to
- the fit function parameters for the n-th peak.
- - 'code': Constraint code
-
- - 0 - FREE
- - 1 - POSITIVE
- - 2 - QUOTED
- - 3 - FIXED
- - 4 - FACTOR
- - 5 - DELTA
- - 6 - SUM
-
- - 'cons1':
-
- - Ignored if 'code' is FREE, POSITIVE or FIXED.
- - Min value of the parameter if code is QUOTED
- - Index of fitted parameter to which 'cons2' is related
- if code is FACTOR, DELTA or SUM.
-
- - 'cons2':
-
- - Ignored if 'code' is FREE, POSITIVE or FIXED.
- - Max value of the parameter if QUOTED
- - Factor to apply to related parameter with index 'cons1' if
- 'code' is FACTOR
- - Difference with parameter with index 'cons1' if
- 'code' is DELTA
- - Sum obtained when adding parameter with index 'cons1' if
- 'code' is SUM
-
- - 'fitresult': Fitted value.
- - 'sigma': Standard deviation for the parameter estimate
- - 'xmin': Lower limit of the ``x`` data range on which the fit
- was performed
- - 'xmax': Upper limit of the ``x`` data range on which the fit
- was performed
- """
-
- self.parameter_names = []
- """This list stores all fit parameter names: background function
- parameters and fit function parameters for every peak. It is filled
- in :meth:`estimate`.
-
- It is the responsibility of the estimate function defined in
- :attr:`theories` to determine how many parameters are needed,
- based on how many peaks are detected and how many parameters are needed
- to fit an individual peak.
- """
-
- self.setdata(x, y, sigmay)
-
- ##################
- # Public methods #
- ##################
- def addbackground(self, bgname, bgtheory):
- """Add a new background theory to dictionary :attr:`bgtheories`.
-
- :param bgname: String with the name describing the function
- :param bgtheory: :class:`FitTheory` object
- :type bgtheory: :class:`silx.math.fit.fittheory.FitTheory`
- """
- self.bgtheories[bgname] = bgtheory
-
- def addtheory(self, name, theory=None,
- function=None, parameters=None,
- estimate=None, configure=None, derivative=None,
- description=None, pymca_legacy=False):
- """Add a new theory to dictionary :attr:`theories`.
-
- You can pass a name and a :class:`FitTheory` object as arguments, or
- alternatively provide all arguments necessary to instantiate a new
- :class:`FitTheory` object.
-
- See :meth:`loadtheories` for more information on estimation functions,
- configuration functions and custom derivative functions.
-
- :param name: String with the name describing the function
- :param theory: :class:`FitTheory` object, defining a fit function and
- associated information (estimation function, description…).
- If this parameter is provided, all other parameters, except for
- ``name``, are ignored.
- :type theory: :class:`silx.math.fit.fittheory.FitTheory`
- :param callable function: Mandatory argument if ``theory`` is not provided.
- See documentation for :attr:`silx.math.fit.fittheory.FitTheory.function`.
- :param List[str] parameters: Mandatory argument if ``theory`` is not provided.
- See documentation for :attr:`silx.math.fit.fittheory.FitTheory.parameters`.
- :param callable estimate: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.estimate`
- :param callable configure: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.configure`
- :param callable derivative: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.derivative`
- :param str description: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.description`
- :param bool pymca_legacy: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.pymca_legacy`
- """
- if theory is not None:
- self.theories[name] = theory
-
- elif function is not None and parameters is not None:
- self.theories[name] = FitTheory(
- description=description,
- function=function,
- parameters=parameters,
- estimate=estimate,
- configure=configure,
- derivative=derivative,
- pymca_legacy=pymca_legacy
- )
-
- else:
- raise TypeError("You must supply a FitTheory object or define " +
- "a fit function and its parameters.")
-
- def addbgtheory(self, name, theory=None,
- function=None, parameters=None,
- estimate=None, configure=None,
- derivative=None, description=None):
- """Add a new theory to dictionary :attr:`bgtheories`.
-
- You can pass a name and a :class:`FitTheory` object as arguments, or
- alternatively provide all arguments necessary to instantiate a new
- :class:`FitTheory` object.
-
- :param name: String with the name describing the function
- :param theory: :class:`FitTheory` object, defining a fit function and
- associated information (estimation function, description…).
- If this parameter is provided, all other parameters, except for
- ``name``, are ignored.
- :type theory: :class:`silx.math.fit.fittheory.FitTheory`
- :param function function: Mandatory argument if ``theory`` is not provided.
- See documentation for :attr:`silx.math.fit.fittheory.FitTheory.function`.
- :param list[str] parameters: Mandatory argument if ``theory`` is not provided.
- See documentation for :attr:`silx.math.fit.fittheory.FitTheory.parameters`.
- :param function estimate: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.estimate`
- :param function configure: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.configure`
- :param function derivative: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.derivative`
- :param str description: See documentation for
- :attr:`silx.math.fit.fittheory.FitTheory.description`
- """
- if theory is not None:
- self.bgtheories[name] = theory
-
- elif function is not None and parameters is not None:
- self.bgtheories[name] = FitTheory(
- description=description,
- function=function,
- parameters=parameters,
- estimate=estimate,
- configure=configure,
- derivative=derivative,
- is_background=True
- )
-
- else:
- raise TypeError("You must supply a FitTheory object or define " +
- "a background function and its parameters.")
-
- def configure(self, **kw):
- """Configure the current theory by filling or updating the
- :attr:`fitconfig` dictionary.
- Call the custom configuration function, if any. This allows the user
- to modify the behavior of the custom fit function or the custom
- estimate function.
-
- This method accepts only named parameters. All ``**kw`` parameters
- are expected to be fields of :attr:`fitconfig` to be updated, unless
- they have a special meaning for the custom configuration function
- of the currently selected theory.
-
- This method returns the modified config dictionary returned by the
- custom configuration function.
- """
- # inspect **kw to find known keys, update them in self.fitconfig
- for key in self.fitconfig:
- if key in kw:
- self.fitconfig[key] = kw[key]
-
- # initialize dict with existing config dict
- result = {}
- result.update(self.fitconfig)
-
- if "WeightFlag" in kw:
- if kw["WeightFlag"]:
- self.enableweight()
- else:
- self.disableweight()
-
- if self.selectedtheory is None:
- return result
-
- # Apply custom configuration function
- custom_config_fun = self.theories[self.selectedtheory].configure
- if custom_config_fun is not None:
- result.update(custom_config_fun(**kw))
-
- custom_bg_config_fun = self.bgtheories[self.selectedbg].configure
- if custom_bg_config_fun is not None:
- result.update(custom_bg_config_fun(**kw))
-
- # Update self.fitconfig with custom config
- for key in self.fitconfig:
- if key in result:
- self.fitconfig[key] = result[key]
-
- result.update(self.fitconfig)
- return result
-
- def estimate(self, callback=None):
- """
- Fill :attr:`fit_results` with an estimation of the fit parameters.
-
- At first, the background parameters are estimated, if a background
- model has been specified.
- Then, a custom estimation function related to the model function is
- called.
-
- This process determines the number of needed fit parameters and
- provides an initial estimation for them, to serve as an input for the
- actual iterative fitting performed in :meth:`runfit`.
-
- :param callback: Optional callback function, conforming to the
- signature ``callback(data)`` with ``data`` being a dictionary.
- This callback function is called before and after the estimation
- process, and is given a dictionary containing the values of
- :attr:`state` (``'Estimate in progress'`` or ``'Ready to Fit'``)
- and :attr:`chisq`.
- This is used for instance in :mod:`silx.gui.fit.FitWidget` to
- update a widget displaying a status message.
- :return: Estimated parameters
- """
- self.state = 'Estimate in progress'
- self.chisq = None
-
- if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
-
- CONS = {0: 'FREE',
- 1: 'POSITIVE',
- 2: 'QUOTED',
- 3: 'FIXED',
- 4: 'FACTOR',
- 5: 'DELTA',
- 6: 'SUM',
- 7: 'IGNORE'}
-
- # Filter out non-finite data
- xwork = self.xdata[self._finite_mask]
- ywork = self.ydata[self._finite_mask]
-
- # estimate the background
- bg_params, bg_constraints = self.estimate_bkg(xwork, ywork)
-
- # estimate the function
- try:
- fun_params, fun_constraints = self.estimate_fun(xwork, ywork)
- except LinAlgError:
- self.state = 'Estimate failed'
- if callback is not None:
- callback(data={'status': self.state})
- raise
-
- # build the names
- self.parameter_names = []
-
- for bg_param_name in self.bgtheories[self.selectedbg].parameters:
- self.parameter_names.append(bg_param_name)
-
- fun_param_names = self.theories[self.selectedtheory].parameters
- param_index, peak_index = 0, 0
- while param_index < len(fun_params):
- peak_index += 1
- for fun_param_name in fun_param_names:
- self.parameter_names.append(fun_param_name + "%d" % peak_index)
- param_index += 1
-
- self.fit_results = []
- nb_fun_params_per_group = len(fun_param_names)
- group_number = 0
- xmin = min(xwork)
- xmax = max(xwork)
- nb_bg_params = len(bg_params)
- for (pindex, pname) in enumerate(self.parameter_names):
- # First come background parameters
- if pindex < nb_bg_params:
- estimation_value = bg_params[pindex]
- constraint_code = CONS[int(bg_constraints[pindex][0])]
- cons1 = bg_constraints[pindex][1]
- cons2 = bg_constraints[pindex][2]
- # then come peak function parameters
- else:
- fun_param_index = pindex - nb_bg_params
-
- # increment group_number for each new fitted peak
- if (fun_param_index % nb_fun_params_per_group) == 0:
- group_number += 1
-
- estimation_value = fun_params[fun_param_index]
- constraint_code = CONS[int(fun_constraints[fun_param_index][0])]
- # cons1 is the index of another fit parameter. In the global
- # fit_results, we must adjust the index to account for the bg
- # params added to the start of the list.
- cons1 = fun_constraints[fun_param_index][1]
- if constraint_code in ["FACTOR", "DELTA", "SUM"]:
- cons1 += nb_bg_params
- cons2 = fun_constraints[fun_param_index][2]
-
- self.fit_results.append({'name': pname,
- 'estimation': estimation_value,
- 'group': group_number,
- 'code': constraint_code,
- 'cons1': cons1,
- 'cons2': cons2,
- 'fitresult': 0.0,
- 'sigma': 0.0,
- 'xmin': xmin,
- 'xmax': xmax})
-
- self.state = 'Ready to Fit'
- self.chisq = None
- self.niter = 0
-
- if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
- return numpy.append(bg_params, fun_params)
-
- def fit(self):
- """Convenience method to call :meth:`estimate` followed by :meth:`runfit`.
-
- :return: Output of :meth:`runfit`"""
- self.estimate()
- return self.runfit()
-
- def gendata(self, x=None, paramlist=None, estimated=False):
- """Return a data array using the currently selected fit function
- and the fitted parameters.
-
- :param x: Independent variable where the function is calculated.
- If ``None``, use :attr:`xdata`.
- :param paramlist: List of dictionaries, each dictionary item being a
- fit parameter. The dictionary's format is documented in
- :attr:`fit_results`.
- If ``None`` (default), use parameters from :attr:`fit_results`.
- :param estimated: If *True*, use estimated parameters.
- :return: :meth:`fitfunction` calculated for parameters whose code is
- not set to ``"IGNORE"``.
-
- This calculates :meth:`fitfunction` on `x` data using fit parameters
- from a list of parameter dictionaries, if field ``code`` is not set
- to ``"IGNORE"``.
- """
- x = self.xdata if x is None else numpy.array(x, copy=False)
-
- if paramlist is None:
- paramlist = self.fit_results
- active_params = []
- for param in paramlist:
- if param['code'] not in ['IGNORE', 7]:
- if not estimated:
- active_params.append(param['fitresult'])
- else:
- active_params.append(param['estimation'])
-
- # Mask out non-finite values in x (supports nD x)
- finite_mask = numpy.all(numpy.isfinite(x), axis=tuple(range(1, x.ndim)))
-
- if numpy.all(finite_mask): # All values are finite: fast path
- return self.fitfunction(numpy.array(x, copy=True), *active_params)
-
- else: # Only run fitfunction on finite data and complete result with NaNs
- # Create result with the same number of elements as x, filling holes with NaNs
- result = numpy.full((x.shape[0],), numpy.nan, dtype=numpy.float64)
- result[finite_mask] = self.fitfunction(
- numpy.array(x[finite_mask], copy=True), *active_params)
- return result
-
- def get_estimation(self):
- """Return the list of fit parameter names."""
- if self.state not in ["Ready to fit", "Fit in progress", "Ready"]:
- _logger.warning("get_estimation() called before estimate() completed")
- return [param["estimation"] for param in self.fit_results]
-
- def get_names(self):
- """Return the list of fit parameter estimations."""
- if self.state not in ["Ready to fit", "Fit in progress", "Ready"]:
- msg = "get_names() called before estimate() completed, "
- msg += "names are not populated at this stage"
- _logger.warning(msg)
- return [param["name"] for param in self.fit_results]
-
- def get_fitted_parameters(self):
- """Return the list of fitted parameters."""
- if self.state not in ["Ready"]:
- msg = "get_fitted_parameters() called before runfit() completed, "
- msg += "results are not available a this stage"
- _logger.warning(msg)
- return [param["fitresult"] for param in self.fit_results]
-
- def loadtheories(self, theories):
- """Import user defined fit functions defined in an external Python
- source file, and save them in :attr:`theories`.
-
- An example of such a file can be found in the sources of
- :mod:`silx.math.fit.fittheories`. It must contain a
- dictionary named ``THEORY`` with the following structure::
-
- THEORY = {
- 'theory_name_1':
- FitTheory(description='Description of theory 1',
- function=fitfunction1,
- parameters=('param name 1', 'param name 2', …),
- estimate=estimation_function1,
- configure=configuration_function1,
- derivative=derivative_function1),
- 'theory_name_2':
- FitTheory(…),
- }
-
- See documentation of :mod:`silx.math.fit.fittheories` and
- :mod:`silx.math.fit.fittheory` for more
- information on designing your fit functions file.
-
- This method can also load user defined functions in the legacy
- format used in *PyMca*.
-
- :param theories: Name of python source file, or module containing the
- definition of fit functions.
- :raise: ImportError if theories cannot be imported
- """
- from types import ModuleType
- if isinstance(theories, ModuleType):
- theories_module = theories
- else:
- # if theories is not a module, it must be a string
- string_types = (basestring,) if sys.version_info[0] == 2 else (str,) # noqa
- if not isinstance(theories, string_types):
- raise ImportError("theory must be a python module, a module" +
- "name or a python filename")
- # if theories is a filename
- if os.path.isfile(theories):
- sys.path.append(os.path.dirname(theories))
- f = os.path.basename(os.path.splitext(theories)[0])
- theories_module = __import__(f)
- # if theories is a module name
- else:
- theories_module = __import__(theories)
-
- if hasattr(theories_module, "INIT"):
- theories_module.INIT()
-
- if not hasattr(theories_module, "THEORY"):
- msg = "File %s does not contain a THEORY dictionary" % theories
- raise ImportError(msg)
-
- elif isinstance(theories_module.THEORY, dict):
- # silx format for theory definition
- for theory_name, fittheory in list(theories_module.THEORY.items()):
- self.addtheory(theory_name, fittheory)
- else:
- self._load_legacy_theories(theories_module)
-
- def loadbgtheories(self, theories):
- """Import user defined background functions defined in an external Python
- module (source file), and save them in :attr:`bgtheories`.
-
- An example of such a file can be found in the sources of
- :mod:`silx.math.fit.bgtheories`. It must contain a
- dictionary named ``THEORY`` with the following structure::
-
- THEORY = {
- 'theory_name_1':
- FitTheory(description='Description of theory 1',
- function=fitfunction1,
- parameters=('param name 1', 'param name 2', …),
- estimate=estimation_function1,
- configure=configuration_function1),
- 'theory_name_2':
- FitTheory(…),
- }
-
- See documentation of :mod:`silx.math.fit.bgtheories` and
- :mod:`silx.math.fit.fittheory` for more
- information on designing your background functions file.
-
- :param theories: Module or name of python source file containing the
- definition of background functions.
- :raise: ImportError if theories cannot be imported
- """
- from types import ModuleType
- if isinstance(theories, ModuleType):
- theories_module = theories
- else:
- # if theories is not a module, it must be a string
- string_types = (basestring,) if sys.version_info[0] == 2 else (str,) # noqa
- if not isinstance(theories, string_types):
- raise ImportError("theory must be a python module, a module" +
- "name or a python filename")
- # if theories is a filename
- if os.path.isfile(theories):
- sys.path.append(os.path.dirname(theories))
- f = os.path.basename(os.path.splitext(theories)[0])
- theories_module = __import__(f)
- # if theories is a module name
- else:
- theories_module = __import__(theories)
-
- if hasattr(theories_module, "INIT"):
- theories_module.INIT()
-
- if not hasattr(theories_module, "THEORY"):
- msg = "File %s does not contain a THEORY dictionary" % theories
- raise ImportError(msg)
-
- elif isinstance(theories_module.THEORY, dict):
- # silx format for theory definition
- for theory_name, fittheory in list(theories_module.THEORY.items()):
- self.addbgtheory(theory_name, fittheory)
-
- def setbackground(self, theory):
- """Choose a background type from within :attr:`bgtheories`.
-
- This updates :attr:`selectedbg`.
-
- :param theory: The name of the background to be used.
- :raise: KeyError if ``theory`` is not a key of :attr:`bgtheories`.
- """
- if theory in self.bgtheories:
- self.selectedbg = theory
- else:
- msg = "No theory with name %s in bgtheories.\n" % theory
- msg += "Available theories: %s\n" % self.bgtheories.keys()
- raise KeyError(msg)
-
- # run configure to apply our fitconfig to the selected theory
- # through its custom config function
- self.configure(**self.fitconfig)
-
- def setdata(self, x, y, sigmay=None, xmin=None, xmax=None):
- """Set data attributes:
-
- - ``xdata0``, ``ydata0`` and ``sigmay0`` store the initial data
- and uncertainties. These attributes are not modified after
- initialization.
- - ``xdata``, ``ydata`` and ``sigmay`` store the data after
- removing values where ``xdata < xmin`` or ``xdata > xmax``.
- These attributes may be modified at a later stage by filters.
-
- :param x: Abscissa data. If ``None``, :attr:`xdata` is set to
- ``numpy.array([0.0, 1.0, 2.0, ..., len(y)-1])``
- :type x: Sequence or numpy array or None
- :param y: The dependent data ``y = f(x)``. ``y`` must have the same
- shape as ``x`` if ``x`` is not ``None``.
- :type y: Sequence or numpy array or None
- :param sigmay: The uncertainties in the ``ydata`` array. These are
- used as weights in the least-squares problem.
- If ``None``, the uncertainties are assumed to be 1.
- :type sigmay: Sequence or numpy array or None
- :param xmin: Lower value of x values to use for fitting
- :param xmax: Upper value of x values to use for fitting
- """
- if y is None:
- self.xdata0 = numpy.array([], numpy.float64)
- self.ydata0 = numpy.array([], numpy.float64)
- # self.sigmay0 = numpy.array([], numpy.float64)
- self.xdata = numpy.array([], numpy.float64)
- self.ydata = numpy.array([], numpy.float64)
- # self.sigmay = numpy.array([], numpy.float64)
-
- else:
- self.ydata0 = numpy.array(y)
- self.ydata = numpy.array(y)
- if x is None:
- self.xdata0 = numpy.arange(len(self.ydata0))
- self.xdata = numpy.arange(len(self.ydata0))
- else:
- self.xdata0 = numpy.array(x)
- self.xdata = numpy.array(x)
-
- # default weight
- if sigmay is None:
- self.sigmay0 = None
- self.sigmay = numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
- else:
- self.sigmay0 = numpy.array(sigmay)
- self.sigmay = numpy.array(sigmay) if self.fitconfig["WeightFlag"] else None
-
- # take the data between limits, using boolean array indexing
- if (xmin is not None or xmax is not None) and len(self.xdata):
- xmin = xmin if xmin is not None else min(self.xdata)
- xmax = xmax if xmax is not None else max(self.xdata)
- bool_array = (self.xdata >= xmin) & (self.xdata <= xmax)
- self.xdata = self.xdata[bool_array]
- self.ydata = self.ydata[bool_array]
- self.sigmay = self.sigmay[bool_array] if self.sigmay is not None else None
-
- self._finite_mask = numpy.logical_and(
- numpy.all(numpy.isfinite(self.xdata), axis=tuple(range(1, self.xdata.ndim))),
- numpy.isfinite(self.ydata))
-
- def enableweight(self):
- """This method can be called to set :attr:`sigmay`. If :attr:`sigmay0` was filled with
- actual uncertainties in :meth:`setdata`, use these values.
- Else, use ``sqrt(self.ydata)``.
- """
- if self.sigmay0 is None:
- self.sigmay = numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
- else:
- self.sigmay = self.sigmay0
-
- def disableweight(self):
- """This method can be called to set :attr:`sigmay` equal to ``None``.
- As a result, :func:`leastsq` will consider that the weights in the
- least square problem are 1 for all samples."""
- self.sigmay = None
-
- def settheory(self, theory):
- """Pick a theory from :attr:`theories`.
-
- :param theory: Name of the theory to be used.
- :raise: KeyError if ``theory`` is not a key of :attr:`theories`.
- """
- if theory is None:
- self.selectedtheory = None
- elif theory in self.theories:
- self.selectedtheory = theory
- else:
- msg = "No theory with name %s in theories.\n" % theory
- msg += "Available theories: %s\n" % self.theories.keys()
- raise KeyError(msg)
-
- # run configure to apply our fitconfig to the selected theory
- # through its custom config function
- self.configure(**self.fitconfig)
-
- def runfit(self, callback=None):
- """Run the actual fitting and fill :attr:`fit_results` with fit results.
-
- Before running this method, :attr:`fit_results` must already be
- populated with a list of all parameters and their estimated values.
- For this, run :meth:`estimate` beforehand.
-
- :param callback: Optional callback function, conforming to the
- signature ``callback(data)`` with ``data`` being a dictionary.
- This callback function is called before and after the fitting
- process, and is given a dictionary containing the values of
- :attr:`state` (``'Fit in progress'`` or ``'Ready'``)
- and :attr:`chisq`.
- This is used for instance in :mod:`silx.gui.fit.FitWidget` to
- update a widget displaying a status message.
- :return: Tuple ``(fitted parameters, uncertainties, infodict)``.
- *infodict* is the dictionary returned by
- :func:`silx.math.fit.leastsq` when called with option
- ``full_output=True``. Uncertainties is a sequence of uncertainty
- values associated with each fitted parameter.
- """
- # self.dataupdate()
-
- self.state = 'Fit in progress'
- self.chisq = None
-
- if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
-
- param_val = []
- param_constraints = []
- # Initial values are set to the ones computed in estimate()
- for param in self.fit_results:
- param_val.append(param['estimation'])
- param_constraints.append([param['code'], param['cons1'], param['cons2']])
-
- # Filter out non-finite data
- ywork = self.ydata[self._finite_mask]
- xwork = self.xdata[self._finite_mask]
-
- try:
- params, covariance_matrix, infodict = leastsq(
- self.fitfunction, # bg + actual model function
- xwork, ywork, param_val,
- sigma=self.sigmay,
- constraints=param_constraints,
- model_deriv=self.theories[self.selectedtheory].derivative,
- full_output=True, left_derivative=True)
- except LinAlgError:
- self.state = 'Fit failed'
- if callback is not None:
-     callback(data={'status': self.state})
- raise
-
- sigmas = infodict['uncertainties']
-
- for i, param in enumerate(self.fit_results):
- if param['code'] != 'IGNORE':
- param['fitresult'] = params[i]
- param['sigma'] = sigmas[i]
-
- self.chisq = infodict["reduced_chisq"]
- self.niter = infodict["niter"]
- self.state = 'Ready'
-
- if callback is not None:
- callback(data={'chisq': self.chisq,
- 'status': self.state})
-
- return params, sigmas, infodict
-
- ###################
- # Private methods #
- ###################
- def fitfunction(self, x, *pars):
- """Function to be fitted.
-
- This is the sum of the selected background function plus
- the selected fit model function.
-
- :param x: Independent variable where the function is calculated.
- :param pars: Sequence of all fit parameters. The first few parameters
- are background parameters, then come the peak function parameters.
- :return: Output of the fit function with ``x`` as input and ``pars``
- as fit parameters.
- """
- result = numpy.zeros(numpy.shape(x), numpy.float64)
-
- if self.selectedbg is not None:
- bg_pars_list = self.bgtheories[self.selectedbg].parameters
- nb_bg_pars = len(bg_pars_list)
-
- bgfun = self.bgtheories[self.selectedbg].function
- result += bgfun(x, self.ydata, *pars[0:nb_bg_pars])
- else:
- nb_bg_pars = 0
-
- selectedfun = self.theories[self.selectedtheory].function
- result += selectedfun(x, *pars[nb_bg_pars:])
-
- return result
-
- def estimate_bkg(self, x, y):
- """Estimate background parameters using the function defined in
- the current fit configuration.
-
- To change the selected background model, attribute :attr:`selectedbg`
- must be changed using method :meth:`setbackground`.
-
- The actual background function to be used is
- referenced in :attr:`bgtheories`
-
- :param x: Sequence of x data
- :param y: sequence of y data
- :return: Tuple of two sequences
- ``(estimated_param, constraints)``:
-
- - ``estimated_param`` is a list of estimated values for each
- background parameter.
- - ``constraints`` is a 2D sequence of dimension ``(n_parameters, 3)``
-
- - ``constraints[i][0]``: Constraint code.
- See explanation about codes in :attr:`fit_results`
-
- - ``constraints[i][1]``
- See explanation about 'cons1' in :attr:`fit_results`
- documentation.
-
- - ``constraints[i][2]``
- See explanation about 'cons2' in :attr:`fit_results`
- documentation.
- """
- background_estimate_function = self.bgtheories[self.selectedbg].estimate
- if background_estimate_function is not None:
- return background_estimate_function(x, y)
- else:
- return [], []
-
- def estimate_fun(self, x, y):
- """Estimate fit parameters using the function defined in
- the current fit configuration.
-
- :param x: Sequence of x data
- :param y: sequence of y data
- :return: Tuple of two sequences ``(estimated_param, constraints)``:
-
- - ``estimated_param`` is a list of estimated values for each
- fit parameter.
- - ``constraints`` is a 2D sequence of dimension (n_parameters, 3)
-
- - ``constraints[i][0]``: Constraint code.
- See explanation about codes in :attr:`fit_results`
-
- - ``constraints[i][1]``
- See explanation about 'cons1' in :attr:`fit_results`
- documentation.
-
- - ``constraints[i][2]``
- See explanation about 'cons2' in :attr:`fit_results`
- documentation.
- :raise: ``TypeError`` if estimation function is not callable
-
- """
- estimatefunction = self.theories[self.selectedtheory].estimate
- if hasattr(estimatefunction, '__call__'):
- if not self.theories[self.selectedtheory].pymca_legacy:
- return estimatefunction(x, y)
- else:
- # legacy pymca estimate functions have a different signature
- if self.fitconfig["fitbkg"] == "No Background":
- bg = numpy.zeros_like(y)
- else:
- if self.fitconfig["SmoothingFlag"]:
- y = smooth1d(y)
- bg = strip(y,
- w=self.fitconfig["StripWidth"],
- niterations=self.fitconfig["StripIterations"],
- factor=self.fitconfig["StripThresholdFactor"])
- # fitconfig can be filled by user defined config function
- xscaling = self.fitconfig.get('Xscaling', 1.0)
- yscaling = self.fitconfig.get('Yscaling', 1.0)
- return estimatefunction(x, y, bg, xscaling, yscaling)
- else:
- raise TypeError("Estimation function in attribute " +
- "theories[%s]" % self.selectedtheory +
- " must be callable.")
-
- def _load_legacy_theories(self, theories_module):
- """Load theories from a custom module in the old PyMca format.
-
- See PyMca5.PyMcaMath.fitting.SpecfitFunctions for an example.
- """
- mandatory_attributes = ["THEORY", "PARAMETERS",
- "FUNCTION", "ESTIMATE"]
- err_msg = "Custom fit function file must define: "
- err_msg += ", ".join(mandatory_attributes)
- for attr in mandatory_attributes:
- if not hasattr(theories_module, attr):
- raise ImportError(err_msg)
-
- derivative = theories_module.DERIVATIVE if hasattr(theories_module, "DERIVATIVE") else None
- configure = theories_module.CONFIGURE if hasattr(theories_module, "CONFIGURE") else None
- estimate = theories_module.ESTIMATE if hasattr(theories_module, "ESTIMATE") else None
- if isinstance(theories_module.THEORY, (list, tuple)):
- # multiple fit functions
- for i in range(len(theories_module.THEORY)):
- deriv = derivative[i] if derivative is not None else None
- config = configure[i] if configure is not None else None
- estim = estimate[i] if estimate is not None else None
- self.addtheory(theories_module.THEORY[i],
- FitTheory(
- theories_module.FUNCTION[i],
- theories_module.PARAMETERS[i],
- estim,
- config,
- deriv,
- pymca_legacy=True))
- else:
- # single fit function
- self.addtheory(theories_module.THEORY,
- FitTheory(
- theories_module.FUNCTION,
- theories_module.PARAMETERS,
- estimate,
- configure,
- derivative,
- pymca_legacy=True))
-
-
-def test():
- from .functions import sum_gauss
- from . import fittheories
- from . import bgtheories
-
- # Create synthetic data with a sum of gaussian functions
- x = numpy.arange(1000).astype(numpy.float64)
-
- p = [1000, 100., 250,
- 255, 690., 45,
- 1500, 800.5, 95]
- y = 0.5 * x + 13 + sum_gauss(x, *p)
-
- # Fitting
- fit = FitManager()
- # more sensitivity necessary to resolve
- # overlapping peaks at x=690 and x=800.5
- fit.setdata(x=x, y=y)
- fit.loadtheories(fittheories)
- fit.settheory('Gaussians')
- fit.loadbgtheories(bgtheories)
- fit.setbackground('Linear')
- fit.estimate()
- fit.runfit()
-
- print("Searched parameters = ", p)
- print("Obtained parameters : ")
- dummy_list = []
- for param in fit.fit_results:
- print(param['name'], ' = ', param['fitresult'])
- dummy_list.append(param['fitresult'])
- print("chisq = ", fit.chisq)
-
- # Plot
- constant, slope = dummy_list[:2]
- p1 = dummy_list[2:]
- print(p1)
- y2 = slope * x + constant + sum_gauss(x, *p1)
-
- try:
- from silx.gui import qt
- from silx.gui.plot.PlotWindow import PlotWindow
- app = qt.QApplication([])
- pw = PlotWindow(control=True)
- pw.addCurve(x, y, "Original")
- pw.addCurve(x, y2, "Fit result")
- pw.legendsDockWidget.show()
- pw.show()
- app.exec_()
- except ImportError:
- _logger.warning("Could not import qt to display fit result as curve")
-
-
-if __name__ == "__main__":
- test()
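To complement the test() function above, here is a hedged sketch of registering a user-defined theory with FitManager.addtheory(), including an estimate function returning the ``(estimated_param, constraints)`` pair described in the estimate_fun docstring. The exponential model and the names exp_decay / estimate_exp_decay are illustrative, not part of silx; 'No Background' is assumed to be an available background theory, as the default selectedbg suggests:

    import numpy
    from silx.math.fit import FitManager

    def exp_decay(x, amplitude, rate):
        """Illustrative model: a single decaying exponential."""
        return amplitude * numpy.exp(-rate * x)

    def estimate_exp_decay(x, y):
        """Rough initial values plus constraints: both parameters are forced
        positive (code 1 = CPOSITIVE, see the constraint codes in fit_results)."""
        estimated = [max(y), 1.0 / max(abs(x[-1] - x[0]), 1.0)]
        constraints = numpy.zeros((2, 3), numpy.float64)
        constraints[:, 0] = 1  # CPOSITIVE for both parameters
        return estimated, constraints

    x = numpy.linspace(0.0, 10.0, 500)
    y = 50.0 * numpy.exp(-0.6 * x)

    fit = FitManager(x=x, y=y)
    fit.addtheory("Exponential decay",
                  function=exp_decay,
                  parameters=["amplitude", "rate"],
                  estimate=estimate_exp_decay)
    fit.settheory("Exponential decay")
    fit.setbackground("No Background")  # default background, shown for clarity
    fit.estimate()
    params, sigmas, info = fit.runfit()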
diff --git a/silx/math/fit/fittheories.py b/silx/math/fit/fittheories.py
deleted file mode 100644
index 6b19a38..0000000
--- a/silx/math/fit/fittheories.py
+++ /dev/null
@@ -1,1374 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-#
-# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-########################################################################### */
-"""This modules provides a set of fit functions and associated
-estimation functions in a format that can be imported into a
-:class:`silx.math.fit.FitManager` instance.
-
-These functions are well suited for fitting multiple gaussian shaped peaks
-typically found in spectroscopy data. The estimation functions are designed
-to detect how many peaks are present in the data, and provide an initial
-estimate for their height, their center location and their full-width
-at half maximum (fwhm).
-
-The limitation of these estimation algorithms is that only gaussians having a
-similar fwhm can be detected by the peak search algorithm.
-This *search fwhm* can be defined by the user, if the characteristics of the
-data are known, or it can be automatically estimated based on the fwhm of the
-largest peak in the data.
-
-The source code of this module can serve as template for defining your own
-fit functions.
-
-The functions to be imported by :meth:`FitManager.loadtheories` are defined by
-a dictionary :const:`THEORY` with the following structure::
-
- from silx.math.fit.fittheory import FitTheory
-
- THEORY = {
- 'theory_name_1': FitTheory(
- description='Description of theory 1',
- function=fitfunction1,
- parameters=('param name 1', 'param name 2', …),
- estimate=estimation_function1,
- configure=configuration_function1,
- derivative=derivative_function1),
-
- 'theory_name_2': FitTheory(…),
- }
-
-.. note::
-
- Consider using an OrderedDict instead of a regular dictionary, when
- defining your own theory dictionary, if the order matters to you.
- This will likely be the case if you intend to load a selection of
- functions in a GUI such as :class:`silx.gui.fit.FitWidget`.
-
-Theory names can be customized (e.g. ``gauss, lorentz, splitgauss``…).
-
-The mandatory parameters for :class:`FitTheory` are ``function`` and
-``parameters``.
-
-You can also define an ``INIT`` function that will be executed by
-:meth:`FitManager.loadtheories`.
-
-See the documentation of :class:`silx.math.fit.fittheory.FitTheory`
-for more information.
-
-Module members:
----------------
-"""
-import numpy
-from collections import OrderedDict
-import logging
-
-from silx.math.fit import functions
-from silx.math.fit.peaks import peak_search, guess_fwhm
-from silx.math.fit.filters import strip, savitsky_golay
-from silx.math.fit.leastsq import leastsq
-from silx.math.fit.fittheory import FitTheory
-
-_logger = logging.getLogger(__name__)
-
-__authors__ = ["V.A. Sole", "P. Knobel"]
-__license__ = "MIT"
-__date__ = "15/05/2017"
-
-
-DEFAULT_CONFIG = {
- 'NoConstraintsFlag': False,
- 'PositiveFwhmFlag': True,
- 'PositiveHeightAreaFlag': True,
- 'SameFwhmFlag': False,
- 'QuotedPositionFlag': False, # peak not outside data range
- 'QuotedEtaFlag': False, # force 0 < eta < 1
- # Peak detection
- 'AutoScaling': False,
- 'Yscaling': 1.0,
- 'FwhmPoints': 8,
- 'AutoFwhm': True,
- 'Sensitivity': 2.5,
- 'ForcePeakPresence': True,
- # Hypermet
- 'HypermetTails': 15,
- 'QuotedFwhmFlag': 0,
- 'MaxFwhm2InputRatio': 1.5,
- 'MinFwhm2InputRatio': 0.4,
- # short tail parameters
- 'MinGaussArea4ShortTail': 50000.,
- 'InitialShortTailAreaRatio': 0.050,
- 'MaxShortTailAreaRatio': 0.100,
- 'MinShortTailAreaRatio': 0.0010,
- 'InitialShortTailSlopeRatio': 0.70,
- 'MaxShortTailSlopeRatio': 2.00,
- 'MinShortTailSlopeRatio': 0.50,
- # long tail parameters
- 'MinGaussArea4LongTail': 1000.0,
- 'InitialLongTailAreaRatio': 0.050,
- 'MaxLongTailAreaRatio': 0.300,
- 'MinLongTailAreaRatio': 0.010,
- 'InitialLongTailSlopeRatio': 20.0,
- 'MaxLongTailSlopeRatio': 50.0,
- 'MinLongTailSlopeRatio': 5.0,
- # step tail
- 'MinGaussHeight4StepTail': 5000.,
- 'InitialStepTailHeightRatio': 0.002,
- 'MaxStepTailHeightRatio': 0.0100,
- 'MinStepTailHeightRatio': 0.0001,
- # Hypermet constraints
- # position in range [estimated position +- estimated fwhm/2]
- 'HypermetQuotedPositionFlag': True,
- 'DeltaPositionFwhmUnits': 0.5,
- 'SameSlopeRatioFlag': 1,
- 'SameAreaRatioFlag': 1,
- # Strip bg removal
- 'StripBackgroundFlag': True,
- 'SmoothingFlag': True,
- 'SmoothingWidth': 5,
- 'StripWidth': 2,
- 'StripIterations': 5000,
- 'StripThresholdFactor': 1.0}
-"""This dictionary defines default configuration parameters that have effects
-on fit functions and estimation functions, mainly on fit constraints.
-This dictionary is accessible as attribute :attr:`FitTheories.config`,
-which can be modified by configuration functions defined in
-:const:`CONFIGURE`.
-"""
-
-CFREE = 0
-CPOSITIVE = 1
-CQUOTED = 2
-CFIXED = 3
-CFACTOR = 4
-CDELTA = 5
-CSUM = 6
-CIGNORED = 7
-
-
-class FitTheories(object):
- """Class wrapping functions from :class:`silx.math.fit.functions`
- and providing estimate functions for all of these fit functions."""
- def __init__(self, config=None):
- if config is None:
- self.config = DEFAULT_CONFIG
- else:
- self.config = config
-
- def ahypermet(self, x, *pars):
- """
- Wrapping of :func:`silx.math.fit.functions.sum_ahypermet` without
- the tail flags in the function signature.
-
- Depending on the value of `self.config['HypermetTails']`, one can
- activate or deactivate the various terms of the hypermet function.
-
- `self.config['HypermetTails']` must be an integer between 0 and 15.
- It is a set of 4 binary flags, one for activating each one of the
- hypermet terms: *gaussian function, short tail, long tail, step*.
-
- For example, 15 can be expressed as ``1111`` in base 2, so a flag of
- 15 means all terms are active.
- """
- g_term = self.config['HypermetTails'] & 1
- st_term = (self.config['HypermetTails'] >> 1) & 1
- lt_term = (self.config['HypermetTails'] >> 2) & 1
- step_term = (self.config['HypermetTails'] >> 3) & 1
- return functions.sum_ahypermet(x, *pars,
- gaussian_term=g_term, st_term=st_term,
- lt_term=lt_term, step_term=step_term)
-
- def poly(self, x, *pars):
- """Order n polynomial.
- The order of the polynomial is defined by the number of
- coefficients (``*pars``).
-
- """
- p = numpy.poly1d(pars)
- return p(x)
-
- @staticmethod
- def estimate_poly(x, y, n=2):
- """Estimate polynomial coefficients for a degree n polynomial.
-
- """
- pcoeffs = numpy.polyfit(x, y, n)
- constraints = numpy.zeros((n + 1, 3), numpy.float64)
- return pcoeffs, constraints
-
- def estimate_quadratic(self, x, y):
- """Estimate quadratic coefficients
-
- """
- return self.estimate_poly(x, y, n=2)
-
- def estimate_cubic(self, x, y):
- """Estimate coefficients for a degree 3 polynomial
-
- """
- return self.estimate_poly(x, y, n=3)
-
- def estimate_quartic(self, x, y):
- """Estimate coefficients for a degree 4 polynomial
-
- """
- return self.estimate_poly(x, y, n=4)
-
- def estimate_quintic(self, x, y):
- """Estimate coefficients for a degree 5 polynomial
-
- """
- return self.estimate_poly(x, y, n=5)
-
- def strip_bg(self, y):
- """Return the strip background of y, using parameters from
- :attr:`config` dictionary (*StripBackgroundFlag, StripWidth,
- StripIterations, StripThresholdFactor*)"""
- remove_strip_bg = self.config.get('StripBackgroundFlag', False)
- if remove_strip_bg:
- if self.config['SmoothingFlag']:
- y = savitsky_golay(y, self.config['SmoothingWidth'])
- strip_width = self.config['StripWidth']
- strip_niterations = self.config['StripIterations']
- strip_thr_factor = self.config['StripThresholdFactor']
- return strip(y, w=strip_width,
- niterations=strip_niterations,
- factor=strip_thr_factor)
- else:
- return numpy.zeros_like(y)
-
- def guess_yscaling(self, y):
- """Estimate scaling for y prior to peak search.
- A smoothing filter is applied to y to estimate the noise level
- (chi-squared)
-
- :param y: Data array
- :return: Scaling factor
- """
- # ensure y is an array
- yy = numpy.array(y, copy=False)
-
- # smooth
- convolution_kernel = numpy.ones(shape=(3,)) / 3.
- ysmooth = numpy.convolve(y, convolution_kernel, mode="same")
-
- # remove zeros
- idx_array = numpy.fabs(y) > 0.0
- yy = yy[idx_array]
- ysmooth = ysmooth[idx_array]
-
- # compute scaling factor
- chisq = numpy.mean((yy - ysmooth)**2 / numpy.fabs(yy))
- if chisq > 0:
- return 1. / chisq
- else:
- return 1.0
-
- def peak_search(self, y, fwhm, sensitivity):
- """Search for peaks in y array, after padding the array and
- multiplying its value by a scaling factor.
-
- :param y: 1-D data array
- :param int fwhm: Typical full width at half maximum for peaks,
- in number of points. This parameter is used to discriminate between
- true peaks and background fluctuations.
- :param float sensitivity: Sensitivity parameter. This is a threshold factor
- for peak detection. Only peaks larger than the standard deviation
- of the noise multiplied by this sensitivity parameter are detected.
- :return: List of peak indices
- """
- # add padding
- ysearch = numpy.ones((len(y) + 2 * fwhm,), numpy.float64)
- ysearch[0:fwhm] = y[0]
- ysearch[-1:-fwhm - 1:-1] = y[len(y)-1]
- ysearch[fwhm:fwhm + len(y)] = y[:]
-
- scaling = self.guess_yscaling(y) if self.config["AutoScaling"] else self.config["Yscaling"]
-
- if len(ysearch) > 1.5 * fwhm:
- peaks = peak_search(scaling * ysearch,
- fwhm=fwhm, sensitivity=sensitivity)
- return [peak_index - fwhm for peak_index in peaks
- if 0 <= peak_index - fwhm < len(y)]
- else:
- return []
-
- def estimate_height_position_fwhm(self, x, y):
- """Estimation of *Height, Position, FWHM* of peaks, for gaussian-like
- curves.
-
- This function finds how many parameters are needed, based on the
- number of peaks detected. Then it estimates the fit parameters
- with a few iterations of fitting gaussian functions.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *Height, Position, FWHM*.
- Fit constraints depend on :attr:`config`.
- """
- fittedpar = []
-
- bg = self.strip_bg(y)
-
- if self.config['AutoFwhm']:
- search_fwhm = guess_fwhm(y)
- else:
- search_fwhm = int(float(self.config['FwhmPoints']))
- search_sens = float(self.config['Sensitivity'])
-
- if search_fwhm < 3:
- _logger.warning("Setting peak fwhm to 3 (lower limit)")
- search_fwhm = 3
- self.config['FwhmPoints'] = 3
-
- if search_sens < 1:
- _logger.warning("Setting peak search sensitivity to 1. " +
- "(lower limit to filter out noise peaks)")
- search_sens = 1
- self.config['Sensitivity'] = 1
-
- npoints = len(y)
-
- # Find indices of peaks in data array
- peaks = self.peak_search(y,
- fwhm=search_fwhm,
- sensitivity=search_sens)
-
- if not len(peaks):
- forcepeak = int(float(self.config.get('ForcePeakPresence', 0)))
- if forcepeak:
- delta = y - bg
- # get index of global maximum
- # (first one if several samples are equal to this value)
- peaks = [numpy.nonzero(delta == delta.max())[0][0]]
-
- # Find index of largest peak in peaks array
- index_largest_peak = 0
- if len(peaks) > 0:
- # estimate fwhm as 5 * sampling interval
- sig = 5 * abs(x[npoints - 1] - x[0]) / npoints
- peakpos = x[int(peaks[0])]
- if abs(peakpos) < 1.0e-16:
- peakpos = 0.0
- param = numpy.array(
- [y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])
- height_largest_peak = param[0]
- peak_index = 1
- for i in peaks[1:]:
- param2 = numpy.array(
- [y[int(i)] - bg[int(i)], x[int(i)], sig])
- param = numpy.concatenate((param, param2))
- if param2[0] > height_largest_peak:
- height_largest_peak = param2[0]
- index_largest_peak = peak_index
- peak_index += 1
-
- # Subtract background
- xw = x
- yw = y - bg
-
- cons = numpy.zeros((len(param), 3), numpy.float64)
-
- # peak height must be positive
- cons[0:len(param):3, 0] = CPOSITIVE
- # force peaks to stay around their position
- cons[1:len(param):3, 0] = CQUOTED
-
- # set possible peak range to estimated peak +- guessed fwhm
- if len(xw) > search_fwhm:
- fwhmx = numpy.fabs(xw[int(search_fwhm)] - xw[0])
- cons[1:len(param):3, 1] = param[1:len(param):3] - 0.5 * fwhmx
- cons[1:len(param):3, 2] = param[1:len(param):3] + 0.5 * fwhmx
- else:
- shape = [max(1, int(x)) for x in (param[1:len(param):3])]
- cons[1:len(param):3, 1] = min(xw) * numpy.ones(
- shape,
- numpy.float64)
- cons[1:len(param):3, 2] = max(xw) * numpy.ones(
- shape,
- numpy.float64)
-
- # ensure fwhm is positive
- cons[2:len(param):3, 0] = CPOSITIVE
-
- # run a quick iterative fit (4 iterations) to improve
- # estimations
- fittedpar, _, _ = leastsq(functions.sum_gauss, xw, yw, param,
- max_iter=4, constraints=cons.tolist(),
- full_output=True)
-
- # set final constraints based on config parameters
- cons = numpy.zeros((len(fittedpar), 3), numpy.float64)
- peak_index = 0
- for i in range(len(peaks)):
- # Setup height/area constraints
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveHeightAreaFlag']:
- cons[peak_index, 0] = CPOSITIVE
- cons[peak_index, 1] = 0
- cons[peak_index, 2] = 0
- peak_index += 1
-
- # Setup position constraints
- if not self.config['NoConstraintsFlag']:
- if self.config['QuotedPositionFlag']:
- cons[peak_index, 0] = CQUOTED
- cons[peak_index, 1] = min(x)
- cons[peak_index, 2] = max(x)
- peak_index += 1
-
- # Setup positive FWHM constraints
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveFwhmFlag']:
- cons[peak_index, 0] = CPOSITIVE
- cons[peak_index, 1] = 0
- cons[peak_index, 2] = 0
- if self.config['SameFwhmFlag']:
- if i != index_largest_peak:
- cons[peak_index, 0] = CFACTOR
- cons[peak_index, 1] = 3 * index_largest_peak + 2
- cons[peak_index, 2] = 1.0
- peak_index += 1
-
- return fittedpar, cons
-
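
The three-column rows assembled in this estimator follow the ``(code, cons1, cons2)``
constraint convention of :func:`silx.math.fit.leastsq` (for ``CQUOTED`` the last two
columns are bounds, for ``CFACTOR`` they are the related-parameter index and the factor).
A minimal sketch of passing such constraints directly to ``leastsq``, with illustrative
values only::

    import numpy
    from silx.math.fit.functions import sum_gauss
    from silx.math.fit.leastsq import leastsq, CPOSITIVE, CQUOTED

    x = numpy.linspace(0, 100, 200)
    y = sum_gauss(x, 10., 50., 5.)        # one synthetic peak: height, position, fwhm
    p0 = [8., 45., 4.]                    # rough initial estimate
    constraints = [[CPOSITIVE, 0, 0],     # height must stay positive
                   [CQUOTED, 40., 60.],   # position restricted to [40, 60]
                   [CPOSITIVE, 0, 0]]     # fwhm must stay positive
    fitted, cov, info = leastsq(sum_gauss, x, y, p0,
                                constraints=constraints, full_output=True)
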
- def estimate_agauss(self, x, y):
- """Estimation of *Area, Position, FWHM* of peaks, for gaussian-like
- curves.
-
- This function uses :meth:`estimate_height_position_fwhm`, then
- converts the height parameters to area under the curve with the
- formula ``area = sqrt(2*pi) * height * fwhm / (2 * sqrt(2 * log(2)))``
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *Area, Position, FWHM*.
- Fit constraints depend on :attr:`config`.
- """
- fittedpar, cons = self.estimate_height_position_fwhm(x, y)
- # get the number of found peaks
- npeaks = len(fittedpar) // 3
- for i in range(npeaks):
- height = fittedpar[3 * i]
- fwhm = fittedpar[3 * i + 2]
- # Replace height with area in fittedpar
- fittedpar[3 * i] = numpy.sqrt(2 * numpy.pi) * height * fwhm / (
- 2.0 * numpy.sqrt(2 * numpy.log(2)))
- return fittedpar, cons
-
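
As a quick standalone check of the height-to-area conversion used above (arbitrary
numbers, plain numpy only)::

    import numpy

    height, fwhm = 100.0, 2.0
    sigma = fwhm / (2.0 * numpy.sqrt(2.0 * numpy.log(2.0)))
    area = height * sigma * numpy.sqrt(2.0 * numpy.pi)
    # same value as the expression used in estimate_agauss
    print(area, numpy.sqrt(2 * numpy.pi) * height * fwhm /
                (2.0 * numpy.sqrt(2 * numpy.log(2))))
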
- def estimate_alorentz(self, x, y):
- """Estimation of *Area, Position, FWHM* of peaks, for Lorentzian
- curves.
-
- This function uses :meth:`estimate_height_position_fwhm`, then
- converts the height parameters to area under the curve with the
- formula ``area = height * fwhm * 0.5 * pi``
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *Area, Position, FWHM*.
- Fit constraints depend on :attr:`config`.
- """
- fittedpar, cons = self.estimate_height_position_fwhm(x, y)
- # get the number of found peaks
- npeaks = len(fittedpar) // 3
- for i in range(npeaks):
- height = fittedpar[3 * i]
- fwhm = fittedpar[3 * i + 2]
- # Replace height with area in fittedpar
- fittedpar[3 * i] = (height * fwhm * 0.5 * numpy.pi)
- return fittedpar, cons
-
- def estimate_splitgauss(self, x, y):
- """Estimation of *Height, Position, FWHM1, FWHM2* of peaks, for
- asymmetric gaussian-like curves.
-
- This function uses :meth:`estimate_height_position_fwhm`, then
- adds a second (identical) estimation of FWHM to the fit parameters
- for each peak, and the corresponding constraint.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *Height, Position, FWHM1, FWHM2*.
- Fit constraints depend on :attr:`config`.
- """
- fittedpar, cons = self.estimate_height_position_fwhm(x, y)
- # get the number of found peaks
- npeaks = len(fittedpar) // 3
- estimated_parameters = []
- estimated_constraints = numpy.zeros((4 * npeaks, 3), numpy.float64)
- for i in range(npeaks):
- for j in range(3):
- estimated_parameters.append(fittedpar[3 * i + j])
- # fwhm2 estimate = fwhm1
- estimated_parameters.append(fittedpar[3 * i + 2])
- # height
- estimated_constraints[4 * i, 0] = cons[3 * i, 0]
- estimated_constraints[4 * i, 1] = cons[3 * i, 1]
- estimated_constraints[4 * i, 2] = cons[3 * i, 2]
- # position
- estimated_constraints[4 * i + 1, 0] = cons[3 * i + 1, 0]
- estimated_constraints[4 * i + 1, 1] = cons[3 * i + 1, 1]
- estimated_constraints[4 * i + 1, 2] = cons[3 * i + 1, 2]
- # fwhm1
- estimated_constraints[4 * i + 2, 0] = cons[3 * i + 2, 0]
- estimated_constraints[4 * i + 2, 1] = cons[3 * i + 2, 1]
- estimated_constraints[4 * i + 2, 2] = cons[3 * i + 2, 2]
- # fwhm2
- estimated_constraints[4 * i + 3, 0] = cons[3 * i + 2, 0]
- estimated_constraints[4 * i + 3, 1] = cons[3 * i + 2, 1]
- estimated_constraints[4 * i + 3, 2] = cons[3 * i + 2, 2]
- if cons[3 * i + 2, 0] == CFACTOR:
- # convert indices of related parameters
- # (this happens if SameFwhmFlag == True)
- estimated_constraints[4 * i + 2, 1] = \
- int(cons[3 * i + 2, 1] / 3) * 4 + 2
- estimated_constraints[4 * i + 3, 1] = \
- int(cons[3 * i + 2, 1] / 3) * 4 + 3
- return estimated_parameters, estimated_constraints
-
- def estimate_pvoigt(self, x, y):
- """Estimation of *Height, Position, FWHM, eta* of peaks, for
- pseudo-Voigt curves.
-
- Pseudo-Voigt are a sum of a gaussian curve *G(x)* and a lorentzian
- curve *L(x)* with the same height, center, fwhm parameters:
- ``y(x) = eta * G(x) + (1-eta) * L(x)``
-
- This function uses :meth:`estimate_height_position_fwhm`, then
- adds a constant estimation of *eta* (0.5) to the fit parameters
- for each peak, and the corresponding constraint.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *Height, Position, FWHM, eta*.
- Constraint for the eta parameter can be set to QUOTED (0.--1.)
- by setting :attr:`config`['QuotedEtaFlag'] to ``True``.
- If this is not the case, the constraint code is set to FREE.
- """
- fittedpar, cons = self.estimate_height_position_fwhm(x, y)
- npeaks = len(fittedpar) // 3
- newpar = []
- newcons = numpy.zeros((4 * npeaks, 3), numpy.float64)
- # find out related parameters proper index
- if not self.config['NoConstraintsFlag']:
- if self.config['SameFwhmFlag']:
- j = 0
- # get the index of the free FWHM
- for i in range(npeaks):
- if cons[3 * i + 2, 0] != 4:
- j = i
- for i in range(npeaks):
- if i != j:
- cons[3 * i + 2, 1] = 4 * j + 2
- for i in range(npeaks):
- newpar.append(fittedpar[3 * i])
- newpar.append(fittedpar[3 * i + 1])
- newpar.append(fittedpar[3 * i + 2])
- newpar.append(0.5)
- # height
- newcons[4 * i, 0] = cons[3 * i, 0]
- newcons[4 * i, 1] = cons[3 * i, 1]
- newcons[4 * i, 2] = cons[3 * i, 2]
- # position
- newcons[4 * i + 1, 0] = cons[3 * i + 1, 0]
- newcons[4 * i + 1, 1] = cons[3 * i + 1, 1]
- newcons[4 * i + 1, 2] = cons[3 * i + 1, 2]
- # fwhm
- newcons[4 * i + 2, 0] = cons[3 * i + 2, 0]
- newcons[4 * i + 2, 1] = cons[3 * i + 2, 1]
- newcons[4 * i + 2, 2] = cons[3 * i + 2, 2]
- # Eta constrains
- newcons[4 * i + 3, 0] = CFREE
- newcons[4 * i + 3, 1] = 0
- newcons[4 * i + 3, 2] = 0
- if self.config['QuotedEtaFlag']:
- newcons[4 * i + 3, 0] = CQUOTED
- newcons[4 * i + 3, 1] = 0.0
- newcons[4 * i + 3, 2] = 1.0
- return newpar, newcons
-
- def estimate_splitpvoigt(self, x, y):
- """Estimation of *Height, Position, FWHM1, FWHM2, eta* of peaks, for
- asymmetric pseudo-Voigt curves.
-
- This function uses :meth:`estimate_height_position_fwhm`, then
- adds an identical FWHM2 parameter and a constant estimation of
- *eta* (0.5) to the fit parameters for each peak, and the corresponding
- constraints.
-
- Constraint for the eta parameter can be set to QUOTED (0.--1.)
- by setting :attr:`config`['QuotedEtaFlag'] to ``True``.
- If this is not the case, the constraint code is set to FREE.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *Height, Position, FWHM1, FWHM2, eta*.
- """
- fittedpar, cons = self.estimate_height_position_fwhm(x, y)
- npeaks = len(fittedpar) // 3
- newpar = []
- newcons = numpy.zeros((5 * npeaks, 3), numpy.float64)
- # find out related parameters proper index
- if not self.config['NoConstraintsFlag']:
- if self.config['SameFwhmFlag']:
- j = 0
- # get the index of the free FWHM
- for i in range(npeaks):
- if cons[3 * i + 2, 0] != 4:
- j = i
- for i in range(npeaks):
- if i != j:
- cons[3 * i + 2, 1] = 4 * j + 2
- for i in range(npeaks):
- # height
- newpar.append(fittedpar[3 * i])
- # position
- newpar.append(fittedpar[3 * i + 1])
- # fwhm1
- newpar.append(fittedpar[3 * i + 2])
- # fwhm2 estimate equal to fwhm1
- newpar.append(fittedpar[3 * i + 2])
- # eta
- newpar.append(0.5)
- # constraint codes
- # ----------------
- # height
- newcons[5 * i, 0] = cons[3 * i, 0]
- # position
- newcons[5 * i + 1, 0] = cons[3 * i + 1, 0]
- # fwhm1
- newcons[5 * i + 2, 0] = cons[3 * i + 2, 0]
- # fwhm2
- newcons[5 * i + 3, 0] = cons[3 * i + 2, 0]
- # cons 1
- # ------
- newcons[5 * i, 1] = cons[3 * i, 1]
- newcons[5 * i + 1, 1] = cons[3 * i + 1, 1]
- newcons[5 * i + 2, 1] = cons[3 * i + 2, 1]
- newcons[5 * i + 3, 1] = cons[3 * i + 2, 1]
- # cons 2
- # ------
- newcons[5 * i, 2] = cons[3 * i, 2]
- newcons[5 * i + 1, 2] = cons[3 * i + 1, 2]
- newcons[5 * i + 2, 2] = cons[3 * i + 2, 2]
- newcons[5 * i + 3, 2] = cons[3 * i + 2, 2]
-
- if cons[3 * i + 2, 0] == CFACTOR:
- # fwhm2 constraint depends on fwhm1
- newcons[5 * i + 3, 1] = newcons[5 * i + 2, 1] + 1
- # eta constraints
- newcons[5 * i + 4, 0] = CFREE
- newcons[5 * i + 4, 1] = 0
- newcons[5 * i + 4, 2] = 0
- if self.config['QuotedEtaFlag']:
- newcons[5 * i + 4, 0] = CQUOTED
- newcons[5 * i + 4, 1] = 0.0
- newcons[5 * i + 4, 2] = 1.0
- return newpar, newcons
-
- def estimate_apvoigt(self, x, y):
- """Estimation of *Area, Position, FWHM1, eta* of peaks, for
- pseudo-Voigt curves.
-
- This function uses :meth:`estimate_pvoigt`, then converts the height
- parameter to area.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *Area, Position, FWHM, eta*.
- """
- fittedpar, cons = self.estimate_pvoigt(x, y)
- npeaks = len(fittedpar) // 4
- # Assume 50% of the area is determined by the gaussian and 50% by
- # the Lorentzian.
- for i in range(npeaks):
- height = fittedpar[4 * i]
- fwhm = fittedpar[4 * i + 2]
- fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) +\
- 0.5 * (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
- ) * numpy.sqrt(2 * numpy.pi)
- return fittedpar, cons
-
- def estimate_ahypermet(self, x, y):
- """Estimation of *area, position, fwhm, st_area_r, st_slope_r,
- lt_area_r, lt_slope_r, step_height_r* of peaks, for hypermet curves.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each peak are:
- *area, position, fwhm, st_area_r, st_slope_r,
- lt_area_r, lt_slope_r, step_height_r* .
- """
- yscaling = self.config.get('Yscaling', 1.0)
- if yscaling == 0:
- yscaling = 1.0
- fittedpar, cons = self.estimate_height_position_fwhm(x, y)
- npeaks = len(fittedpar) // 3
- newpar = []
- newcons = numpy.zeros((8 * npeaks, 3), numpy.float64)
- main_peak = 0
- # find out related parameters proper index
- if not self.config['NoConstraintsFlag']:
- if self.config['SameFwhmFlag']:
- j = 0
- # get the index of the free FWHM
- for i in range(npeaks):
- if cons[3 * i + 2, 0] != 4:
- j = i
- for i in range(npeaks):
- if i != j:
- cons[3 * i + 2, 1] = 8 * j + 2
- main_peak = j
- for i in range(npeaks):
- if fittedpar[3 * i] > fittedpar[3 * main_peak]:
- main_peak = i
-
- for i in range(npeaks):
- height = fittedpar[3 * i]
- position = fittedpar[3 * i + 1]
- fwhm = fittedpar[3 * i + 2]
- area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
- ) * numpy.sqrt(2 * numpy.pi)
- # the gaussian parameters
- newpar.append(area)
- newpar.append(position)
- newpar.append(fwhm)
- # print "area, pos , fwhm = ",area,position,fwhm
- # Avoid zero derivatives because of not calculating contribution
- g_term = 1
- st_term = 1
- lt_term = 1
- step_term = 1
- if self.config['HypermetTails'] != 0:
- g_term = self.config['HypermetTails'] & 1
- st_term = (self.config['HypermetTails'] >> 1) & 1
- lt_term = (self.config['HypermetTails'] >> 2) & 1
- step_term = (self.config['HypermetTails'] >> 3) & 1
- if g_term == 0:
- # fix the gaussian parameters
- newcons[8 * i, 0] = CFIXED
- newcons[8 * i + 1, 0] = CFIXED
- newcons[8 * i + 2, 0] = CFIXED
- # the short tail parameters
- if ((area * yscaling) <
- self.config['MinGaussArea4ShortTail']) | \
- (st_term == 0):
- newpar.append(0.0)
- newpar.append(0.0)
- newcons[8 * i + 3, 0] = CFIXED
- newcons[8 * i + 3, 1] = 0.0
- newcons[8 * i + 3, 2] = 0.0
- newcons[8 * i + 4, 0] = CFIXED
- newcons[8 * i + 4, 1] = 0.0
- newcons[8 * i + 4, 2] = 0.0
- else:
- newpar.append(self.config['InitialShortTailAreaRatio'])
- newpar.append(self.config['InitialShortTailSlopeRatio'])
- newcons[8 * i + 3, 0] = CQUOTED
- newcons[8 * i + 3, 1] = self.config['MinShortTailAreaRatio']
- newcons[8 * i + 3, 2] = self.config['MaxShortTailAreaRatio']
- newcons[8 * i + 4, 0] = CQUOTED
- newcons[8 * i + 4, 1] = self.config['MinShortTailSlopeRatio']
- newcons[8 * i + 4, 2] = self.config['MaxShortTailSlopeRatio']
- # the long tail parameters
- if ((area * yscaling) <
- self.config['MinGaussArea4LongTail']) | \
- (lt_term == 0):
- newpar.append(0.0)
- newpar.append(0.0)
- newcons[8 * i + 5, 0] = CFIXED
- newcons[8 * i + 5, 1] = 0.0
- newcons[8 * i + 5, 2] = 0.0
- newcons[8 * i + 6, 0] = CFIXED
- newcons[8 * i + 6, 1] = 0.0
- newcons[8 * i + 6, 2] = 0.0
- else:
- newpar.append(self.config['InitialLongTailAreaRatio'])
- newpar.append(self.config['InitialLongTailSlopeRatio'])
- newcons[8 * i + 5, 0] = CQUOTED
- newcons[8 * i + 5, 1] = self.config['MinLongTailAreaRatio']
- newcons[8 * i + 5, 2] = self.config['MaxLongTailAreaRatio']
- newcons[8 * i + 6, 0] = CQUOTED
- newcons[8 * i + 6, 1] = self.config['MinLongTailSlopeRatio']
- newcons[8 * i + 6, 2] = self.config['MaxLongTailSlopeRatio']
- # the step parameters
- if ((height * yscaling) <
- self.config['MinGaussHeight4StepTail']) | \
- (step_term == 0):
- newpar.append(0.0)
- newcons[8 * i + 7, 0] = CFIXED
- newcons[8 * i + 7, 1] = 0.0
- newcons[8 * i + 7, 2] = 0.0
- else:
- newpar.append(self.config['InitialStepTailHeightRatio'])
- newcons[8 * i + 7, 0] = CQUOTED
- newcons[8 * i + 7, 1] = self.config['MinStepTailHeightRatio']
- newcons[8 * i + 7, 2] = self.config['MaxStepTailHeightRatio']
- # if self.config['NoConstraintsFlag'] == 1:
- # newcons=numpy.zeros((8*npeaks, 3),numpy.float64)
- if npeaks > 0:
- if g_term:
- if self.config['PositiveHeightAreaFlag']:
- for i in range(npeaks):
- newcons[8 * i, 0] = CPOSITIVE
- if self.config['PositiveFwhmFlag']:
- for i in range(npeaks):
- newcons[8 * i + 2, 0] = CPOSITIVE
- if self.config['SameFwhmFlag']:
- for i in range(npeaks):
- if i != main_peak:
- newcons[8 * i + 2, 0] = CFACTOR
- newcons[8 * i + 2, 1] = 8 * main_peak + 2
- newcons[8 * i + 2, 2] = 1.0
- if self.config['HypermetQuotedPositionFlag']:
- for i in range(npeaks):
- delta = self.config['DeltaPositionFwhmUnits'] * fwhm
- newcons[8 * i + 1, 0] = CQUOTED
- newcons[8 * i + 1, 1] = newpar[8 * i + 1] - delta
- newcons[8 * i + 1, 2] = newpar[8 * i + 1] + delta
- if self.config['SameSlopeRatioFlag']:
- for i in range(npeaks):
- if i != main_peak:
- newcons[8 * i + 4, 0] = CFACTOR
- newcons[8 * i + 4, 1] = 8 * main_peak + 4
- newcons[8 * i + 4, 2] = 1.0
- newcons[8 * i + 6, 0] = CFACTOR
- newcons[8 * i + 6, 1] = 8 * main_peak + 6
- newcons[8 * i + 6, 2] = 1.0
- if self.config['SameAreaRatioFlag']:
- for i in range(npeaks):
- if i != main_peak:
- newcons[8 * i + 3, 0] = CFACTOR
- newcons[8 * i + 3, 1] = 8 * main_peak + 3
- newcons[8 * i + 3, 2] = 1.0
- newcons[8 * i + 5, 0] = CFACTOR
- newcons[8 * i + 5, 1] = 8 * main_peak + 5
- newcons[8 * i + 5, 2] = 1.0
- return newpar, newcons
-
- def estimate_stepdown(self, x, y):
- """Estimation of parameters for stepdown curves.
-
- The function estimates gaussian parameters for the derivative of
- the data, takes the largest gaussian peak and uses its estimated
- parameters to define the center of the step and its fwhm. The
- estimated amplitude returned is simply ``max(y) - min(y)``.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each stepdown are:
- *height, centroid, fwhm* .
- """
- crappyfilter = [-0.25, -0.75, 0.0, 0.75, 0.25]
- cutoff = len(crappyfilter) // 2
- y_deriv = numpy.convolve(y,
- crappyfilter,
- mode="valid")
-
- # make the derivative's peak have the same amplitude as the step
- if max(y_deriv) > 0:
- y_deriv = y_deriv * max(y) / max(y_deriv)
-
- fittedpar, newcons = self.estimate_height_position_fwhm(
- x[cutoff:-cutoff], y_deriv)
-
- data_amplitude = max(y) - min(y)
-
- # use parameters from largest gaussian found
- if len(fittedpar):
- npeaks = len(fittedpar) // 3
- largest_index = 0
- largest = [data_amplitude,
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
- for i in range(npeaks):
- if fittedpar[3 * i] > largest[0]:
- largest_index = i
- largest = [data_amplitude,
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
- else:
- # no peak was found
- largest = [data_amplitude, # height
- x[len(x)//2], # center: middle of x range
- self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
-
- # Setup constraints
- newcons = numpy.zeros((3, 3), numpy.float64)
- if not self.config['NoConstraintsFlag']:
- # Setup height constraints
- if self.config['PositiveHeightAreaFlag']:
- newcons[0, 0] = CPOSITIVE
- newcons[0, 1] = 0
- newcons[0, 2] = 0
-
- # Setup position constraints
- if self.config['QuotedPositionFlag']:
- newcons[1, 0] = CQUOTED
- newcons[1, 1] = min(x)
- newcons[1, 2] = max(x)
-
- # Setup positive FWHM constraints
- if self.config['PositiveFwhmFlag']:
- newcons[2, 0] = CPOSITIVE
- newcons[2, 1] = 0
- newcons[2, 2] = 0
-
- return largest, newcons
-
- def estimate_slit(self, x, y):
- """Estimation of parameters for slit curves.
-
- The function estimates stepup and stepdown parameters for the largest
- steps, and uses them for calculating the center (middle between stepup
- and stepdown), the height (maximum amplitude in data), the fwhm
- (distance between the up- and down-step centers) and the beamfwhm
- (average of FWHM for up- and down-step).
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each slit are:
- *height, position, fwhm, beamfwhm* .
- """
- largestup, cons = self.estimate_stepup(x, y)
- largestdown, cons = self.estimate_stepdown(x, y)
- fwhm = numpy.fabs(largestdown[1] - largestup[1])
- beamfwhm = 0.5 * (largestup[2] + largestdown[2])
- beamfwhm = min(beamfwhm, fwhm / 10.0)
- beamfwhm = max(beamfwhm, (max(x) - min(x)) * 3.0 / len(x))
-
- y_minus_bg = y - self.strip_bg(y)
- height = max(y_minus_bg)
-
- i1 = numpy.nonzero(y_minus_bg >= 0.5 * height)[0]
- xx = numpy.take(x, i1)
- position = (xx[0] + xx[-1]) / 2.0
- fwhm = xx[-1] - xx[0]
- largest = [height, position, fwhm, beamfwhm]
- cons = numpy.zeros((4, 3), numpy.float64)
- # Setup constraints
- if not self.config['NoConstraintsFlag']:
- # Setup height constraints
- if self.config['PositiveHeightAreaFlag']:
- cons[0, 0] = CPOSITIVE
- cons[0, 1] = 0
- cons[0, 2] = 0
-
- # Setup position constraints
- if self.config['QuotedPositionFlag']:
- cons[1, 0] = CQUOTED
- cons[1, 1] = min(x)
- cons[1, 2] = max(x)
-
- # Setup positive FWHM constraints
- if self.config['PositiveFwhmFlag']:
- cons[2, 0] = CPOSITIVE
- cons[2, 1] = 0
- cons[2, 2] = 0
-
- # Setup positive beam FWHM constraints
- if self.config['PositiveFwhmFlag']:
- cons[3, 0] = CPOSITIVE
- cons[3, 1] = 0
- cons[3, 2] = 0
- return largest, cons
-
- def estimate_stepup(self, x, y):
- """Estimation of parameters for a single step up curve.
-
- The function estimates gaussian parameters for the derivative of
- the data, takes the largest gaussian peak and uses its estimated
- parameters to define the center of the step and its fwhm. The
- estimated amplitude returned is simply ``max(y) - min(y)``.
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- Parameters to be estimated for each stepup are:
- *height, centroid, fwhm* .
- """
- crappyfilter = [0.25, 0.75, 0.0, -0.75, -0.25]
- cutoff = len(crappyfilter) // 2
- y_deriv = numpy.convolve(y, crappyfilter, mode="valid")
- if max(y_deriv) > 0:
- y_deriv = y_deriv * max(y) / max(y_deriv)
-
- fittedpar, cons = self.estimate_height_position_fwhm(
- x[cutoff:-cutoff], y_deriv)
-
- # for height, use the data amplitude after removing the background
- data_amplitude = max(y) - min(y)
-
- # find params of the largest gaussian found
- if len(fittedpar):
- npeaks = len(fittedpar) // 3
- largest_index = 0
- largest = [data_amplitude,
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
- for i in range(npeaks):
- if fittedpar[3 * i] > largest[0]:
- largest_index = i
- largest = [fittedpar[3 * largest_index],
- fittedpar[3 * largest_index + 1],
- fittedpar[3 * largest_index + 2]]
- else:
- # no peak was found
- largest = [data_amplitude, # height
- x[len(x)//2], # center: middle of x range
- self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
-
- newcons = numpy.zeros((3, 3), numpy.float64)
- # Setup constraints
- if not self.config['NoConstraintsFlag']:
- # Setup height constraints
- if self.config['PositiveHeightAreaFlag']:
- newcons[0, 0] = CPOSITIVE
- newcons[0, 1] = 0
- newcons[0, 2] = 0
-
- # Setup position constraints
- if self.config['QuotedPositionFlag']:
- newcons[1, 0] = CQUOTED
- newcons[1, 1] = min(x)
- newcons[1, 2] = max(x)
-
- # Setup positive FWHM constraints
- if self.config['PositiveFwhmFlag']:
- newcons[2, 0] = CPOSITIVE
- newcons[2, 1] = 0
- newcons[2, 2] = 0
-
- return largest, newcons
-
- def estimate_periodic_gauss(self, x, y):
- """Estimation of parameters for periodic gaussian curves:
- *number of peaks, distance between peaks, height, position of the
- first peak, fwhm*
-
- The function detects all peaks, then computes the parameters in the
- following way:
-
- - *distance*: average of distances between detected peaks
- - *height*: average height of detected peaks
- - *fwhm*: fwhm of the highest peak (in number of samples) if
- field ``'AutoFwhm'`` in :attr:`config` is ``True``, else take
- the default value (field ``'FwhmPoints'`` in :attr:`config`)
-
- :param x: Array of abscissa values
- :param y: Array of ordinate values (``y = f(x)``)
- :return: Tuple of estimated fit parameters and fit constraints.
- """
- yscaling = self.config.get('Yscaling', 1.0)
- if yscaling == 0:
- yscaling = 1.0
-
- bg = self.strip_bg(y)
-
- if self.config['AutoFwhm']:
- search_fwhm = guess_fwhm(y)
- else:
- search_fwhm = int(float(self.config['FwhmPoints']))
- search_sens = float(self.config['Sensitivity'])
-
- if search_fwhm < 3:
- search_fwhm = 3
-
- if search_sens < 1:
- search_sens = 1
-
- if len(y) > 1.5 * search_fwhm:
- peaks = peak_search(yscaling * y, fwhm=search_fwhm,
- sensitivity=search_sens)
- else:
- peaks = []
- npeaks = len(peaks)
- if not npeaks:
- fittedpar = []
- cons = numpy.zeros((len(fittedpar), 3), numpy.float64)
- return fittedpar, cons
-
- fittedpar = [0.0, 0.0, 0.0, 0.0, 0.0]
-
- # The number of peaks
- fittedpar[0] = npeaks
-
- # The separation between peaks in x units
- delta = 0.0
- height = 0.0
- for i in range(npeaks):
- height += y[int(peaks[i])] - bg[int(peaks[i])]
- if i != npeaks - 1:
- delta += (x[int(peaks[i + 1])] - x[int(peaks[i])])
-
- # delta between peaks
- if npeaks > 1:
- fittedpar[1] = delta / (npeaks - 1)
-
- # starting height
- fittedpar[2] = height / npeaks
-
- # position of the first peak
- fittedpar[3] = x[int(peaks[0])]
-
- # Estimate the fwhm
- fittedpar[4] = search_fwhm
-
- # setup constraints
- cons = numpy.zeros((5, 3), numpy.float64)
- cons[0, 0] = CFIXED # the number of gaussians
- if npeaks == 1:
- cons[1, 0] = CFIXED # the delta between peaks
- else:
- cons[1, 0] = CFREE
- j = 2
- # Setup height/area constraints
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveHeightAreaFlag']:
- # POSITIVE = 1
- cons[j, 0] = CPOSITIVE
- cons[j, 1] = 0
- cons[j, 2] = 0
- j += 1
-
- # Setup position constraints
- if not self.config['NoConstraintsFlag']:
- if self.config['QuotedPositionFlag']:
- # QUOTED = 2
- cons[j, 0] = CQUOTED
- cons[j, 1] = min(x)
- cons[j, 2] = max(x)
- j += 1
-
- # Setup positive FWHM constraints
- if not self.config['NoConstraintsFlag']:
- if self.config['PositiveFwhmFlag']:
- # POSITIVE=1
- cons[j, 0] = CPOSITIVE
- cons[j, 1] = 0
- cons[j, 2] = 0
- j += 1
- return fittedpar, cons
-
- def configure(self, **kw):
- """Add new / unknown keyword arguments to :attr:`config`,
- update entries in :attr:`config` if the parameter name is an existing
- key.
-
- :param kw: Dictionary of keyword arguments.
- :return: Configuration dictionary :attr:`config`
- """
- if not kw.keys():
- return self.config
- for key in kw.keys():
- notdone = 1
- # take care of lower / upper case problems ...
- for config_key in self.config.keys():
- if config_key.lower() == key.lower():
- self.config[config_key] = kw[key]
- notdone = 0
- if notdone:
- self.config[key] = kw[key]
- return self.config
-
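
A short usage sketch of the case-insensitive key matching implemented by
:meth:`configure` (the custom option name is purely illustrative; the default
configuration is assumed to contain a ``'FwhmPoints'`` entry, as used throughout
the estimators above)::

    from silx.math.fit.fittheories import FitTheories

    theories = FitTheories()
    theories.configure(fwhmpoints=12)         # matches the existing 'FwhmPoints' key
    theories.configure(MyCustomOption=True)   # unknown keys are simply added
    print(theories.config['FwhmPoints'], theories.config['MyCustomOption'])
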
-fitfuns = FitTheories()
-
-THEORY = OrderedDict((
- ('Gaussians',
- FitTheory(description='Gaussian functions',
- function=functions.sum_gauss,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_height_position_fwhm,
- configure=fitfuns.configure)),
- ('Lorentz',
- FitTheory(description='Lorentzian functions',
- function=functions.sum_lorentz,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_height_position_fwhm,
- configure=fitfuns.configure)),
- ('Area Gaussians',
- FitTheory(description='Gaussian functions (area)',
- function=functions.sum_agauss,
- parameters=('Area', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_agauss,
- configure=fitfuns.configure)),
- ('Area Lorentz',
- FitTheory(description='Lorentzian functions (area)',
- function=functions.sum_alorentz,
- parameters=('Area', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_alorentz,
- configure=fitfuns.configure)),
- ('Pseudo-Voigt Line',
- FitTheory(description='Pseudo-Voigt functions',
- function=functions.sum_pvoigt,
- parameters=('Height', 'Position', 'FWHM', 'Eta'),
- estimate=fitfuns.estimate_pvoigt,
- configure=fitfuns.configure)),
- ('Area Pseudo-Voigt',
- FitTheory(description='Pseudo-Voigt functions (area)',
- function=functions.sum_apvoigt,
- parameters=('Area', 'Position', 'FWHM', 'Eta'),
- estimate=fitfuns.estimate_apvoigt,
- configure=fitfuns.configure)),
- ('Split Gaussian',
- FitTheory(description='Asymmetric gaussian functions',
- function=functions.sum_splitgauss,
- parameters=('Height', 'Position', 'LowFWHM',
- 'HighFWHM'),
- estimate=fitfuns.estimate_splitgauss,
- configure=fitfuns.configure)),
- ('Split Lorentz',
- FitTheory(description='Asymmetric lorentzian functions',
- function=functions.sum_splitlorentz,
- parameters=('Height', 'Position', 'LowFWHM', 'HighFWHM'),
- estimate=fitfuns.estimate_splitgauss,
- configure=fitfuns.configure)),
- ('Split Pseudo-Voigt',
- FitTheory(description='Asymmetric pseudo-Voigt functions',
- function=functions.sum_splitpvoigt,
- parameters=('Height', 'Position', 'LowFWHM',
- 'HighFWHM', 'Eta'),
- estimate=fitfuns.estimate_splitpvoigt,
- configure=fitfuns.configure)),
- ('Step Down',
- FitTheory(description='Step down function',
- function=functions.sum_stepdown,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_stepdown,
- configure=fitfuns.configure)),
- ('Step Up',
- FitTheory(description='Step up function',
- function=functions.sum_stepup,
- parameters=('Height', 'Position', 'FWHM'),
- estimate=fitfuns.estimate_stepup,
- configure=fitfuns.configure)),
- ('Slit',
- FitTheory(description='Slit function',
- function=functions.sum_slit,
- parameters=('Height', 'Position', 'FWHM', 'BeamFWHM'),
- estimate=fitfuns.estimate_slit,
- configure=fitfuns.configure)),
- ('Atan',
- FitTheory(description='Arctan step up function',
- function=functions.atan_stepup,
- parameters=('Height', 'Position', 'Width'),
- estimate=fitfuns.estimate_stepup,
- configure=fitfuns.configure)),
- ('Hypermet',
- FitTheory(description='Hypermet functions',
- function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet
- parameters=('G_Area', 'Position', 'FWHM', 'ST_Area',
- 'ST_Slope', 'LT_Area', 'LT_Slope', 'Step_H'),
- estimate=fitfuns.estimate_ahypermet,
- configure=fitfuns.configure)),
- # ('Periodic Gaussians',
- # FitTheory(description='Periodic gaussian functions',
- # function=functions.periodic_gauss,
- # parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),
- # estimate=fitfuns.estimate_periodic_gauss,
- # configure=fitfuns.configure))
- ('Degree 2 Polynomial',
- FitTheory(description='Degree 2 polynomial'
- '\ny = a*x^2 + b*x + c',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c'],
- estimate=fitfuns.estimate_quadratic)),
- ('Degree 3 Polynomial',
- FitTheory(description='Degree 3 polynomial'
- '\ny = a*x^3 + b*x^2 + c*x + d',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c', 'd'],
- estimate=fitfuns.estimate_cubic)),
- ('Degree 4 Polynomial',
- FitTheory(description='Degree 4 polynomial'
- '\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c', 'd', 'e'],
- estimate=fitfuns.estimate_quartic)),
- ('Degree 5 Polynomial',
- FitTheory(description='Degree 5 polynomial'
- '\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f',
- function=fitfuns.poly,
- parameters=['a', 'b', 'c', 'd', 'e', 'f'],
- estimate=fitfuns.estimate_quintic)),
-))
-"""Dictionary of fit theories: fit functions and their associated estimation
-function, parameters list, configuration function and description.
-"""
-
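
A brief sketch of how one of these theory entries can be consumed from user code
(``'Gaussians'`` is one of the keys defined above)::

    from silx.math.fit import fittheories

    theory = fittheories.THEORY['Gaussians']
    print(theory.description, theory.parameters)
    # theory.function evaluates the model, theory.estimate returns
    # (initial_parameters, constraints) computed from (x, y) data
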
-
-def test(a):
- from silx.math.fit import fitmanager
- x = numpy.arange(1000).astype(numpy.float64)
- p = [1500, 100., 50.0,
- 1500, 700., 50.0]
- y_synthetic = functions.sum_gauss(x, *p) + 1
-
- fit = fitmanager.FitManager(x, y_synthetic)
- fit.addtheory('Gaussians', functions.sum_gauss, ['Height', 'Position', 'FWHM'],
- a.estimate_height_position_fwhm)
- fit.settheory('Gaussians')
- fit.setbackground('Linear')
-
- fit.estimate()
- fit.runfit()
-
- y_fit = fit.gendata()
-
- print("Fit parameter names: %s" % str(fit.get_names()))
- print("Theoretical parameters: %s" % str(numpy.append([1, 0], p)))
- print("Fitted parameters: %s" % str(fit.get_fitted_parameters()))
-
- try:
- from silx.gui import qt
- from silx.gui.plot import plot1D
- app = qt.QApplication([])
-
- # Offset of 1 to see the difference in log scale
- plot1D(x, (y_synthetic + 1, y_fit), "Input data + 1, Fit")
-
- app.exec_()
- except ImportError:
- _logger.warning("Unable to load qt binding, can't plot results.")
-
-
-if __name__ == "__main__":
- test(fitfuns)
diff --git a/silx/math/fit/fittheory.py b/silx/math/fit/fittheory.py
deleted file mode 100644
index fa42e6b..0000000
--- a/silx/math/fit/fittheory.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-#
-# Copyright (c) 2004-2018 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-########################################################################### */
-"""
-This module defines the :class:`FitTheory` object that is used by
-:class:`silx.math.fit.FitManager` to define fit functions and background
-models.
-"""
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "09/08/2016"
-
-
-class FitTheory(object):
- """This class defines a fit theory, which consists of:
-
- - a model function, the actual function to be fitted
- - parameters names
- - an estimation function, that return the estimated initial parameters
- that serve as input for :func:`silx.math.fit.leastsq`
- - an optional configuration function, that can be used to modify
- configuration parameters to alter the behavior of the fit function
- and the estimation function
- - an optional derivative function, that replaces the default model
- derivative used in :func:`silx.math.fit.leastsq`
- """
- def __init__(self, function, parameters,
- estimate=None, configure=None, derivative=None,
- description=None, pymca_legacy=False, is_background=False):
- """
- :param function function: Actual function. See documentation for
- :attr:`function`.
- :param list[str] parameters: List of parameter names for the function.
- See documentation for :attr:`parameters`.
- :param function estimate: Optional estimation function.
- See documentation for :attr:`estimate`
- :param function configure: Optional configuration function.
- See documentation for :attr:`configure`
- :param function derivative: Optional custom derivative function.
- See documentation for :attr:`derivative`
- :param str description: Optional description string.
- See documentation for :attr:`description`
- :param bool pymca_legacy: Flag to indicate that the theory is a PyMca
- legacy theory. See documentation for :attr:`pymca_legacy`
- :param bool is_background: Flag to indicate that the theory is a
- background theory. This has implications regarding the function's
- signature, as explained in the documentation for :attr:`function`.
- """
- self.function = function
- """Regular fit functions must have the signature ``f(x, *params) -> y``,
- where *x* is a 1D array of values for the independent variable,
- *params* are the parameters to be fitted and *y* is the output array
- that we want to have the best fit to a series of data points.
-
- Background functions used by :class:`FitManager` must have a slightly
- different signature: ``f(x, y0, *params) -> bg``, where *y0* is the
- array of original data points and *bg* is the background signal that
- we want to subtract from the data array prior to fitting the regular
- fit function.
-
- The number of parameters must be the same as in :attr:`parameters`, or
- a multiple of this number if the function is defined as a sum of a
- variable number of base functions and if :attr:`estimate` is designed
- to be able to estimate the number of needed base functions.
- """
-
- self.parameters = parameters
- """List of parameters names.
-
- This list can contain the minimum number of parameters, if the
- function takes a variable number of parameters,
- and if the estimation function is responsible for finding the number
- of required parameters """
-
- self.estimate = estimate
- """The estimation function should have the following signature::
-
- f(x, y) -> (estimated_param, constraints)
-
- Parameters:
-
- - ``x`` is a sequence of values for the independent variable
- - ``y`` is a sequence of the same length as ``x`` containing the
- data to be fitted
-
- Return values:
-
- - ``estimated_param`` is a sequence of estimated fit parameters to
- be used as initial values for an iterative fit.
- - ``constraints`` is a sequence of shape *(n, 3)*, where *n* is the
- number of estimated parameters, containing the constraints for each
- parameter to be fitted. See :func:`silx.math.fit.leastsq` for more
- explanations about constraints."""
- if estimate is None:
- self.estimate = self.default_estimate
-
- self.configure = configure
- """The optional configuration function must conform to the signature
- ``f(**kw) -> dict`` (i.e it must accept any named argument and
- return a dictionary).
- It can be used to modify configuration parameters to alter the
- behavior of the fit function and the estimation function."""
-
- self.derivative = derivative
- """The optional derivative function must conform to the signature
- ``model_deriv(xdata, parameters, index)``, where parameters is a
- sequence with the current values of the fitting parameters, index is
- the fitting parameter index for which the derivative has to be
- provided in the supplied array of xdata points."""
-
- self.description = description
- """Optional description string for this particular fit theory."""
-
- self.pymca_legacy = pymca_legacy
- """This attribute can be set to *True* to indicate that the theory
- is a PyMca legacy theory.
-
- This tells :mod:`silx.math.fit.fitmanager` that the signature of
- the estimate function is::
-
- f(x, y, bg, xscaling, yscaling) -> (estimated_param, constraints)
- """
-
- self.is_background = is_background
- """Flag to indicate that the theory is background theory.
-
- A background function is an secondary function that needs to be added
- to the main fit function to better fit the original data.
- If this flag is set to *True*, modules using this theory are informed
- that :attr:`function` has the signature ``f(x, y0, *params) -> bg``,
- instead of the usual fit function signature."""
-
- def default_estimate(self, x=None, y=None, bg=None):
- """Default estimate function. Return an array of *ones* as the
- initial estimated parameters, and set all constraints to zero
- (FREE)"""
- estimated_parameters = [1. for _ in self.parameters]
- estimated_constraints = [[0, 0, 0] for _ in self.parameters]
- return estimated_parameters, estimated_constraints
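
To make the expected signatures concrete, here is a minimal sketch defining a custom
linear theory; the ``linear`` model and its estimator are illustrative only::

    import numpy
    from silx.math.fit.fittheory import FitTheory

    def linear(x, a, b):
        """Model function: f(x, *params) -> y"""
        return a * x + b

    def estimate_linear(x, y):
        """Estimator: f(x, y) -> (initial_params, constraints)"""
        a0 = (y[-1] - y[0]) / (x[-1] - x[0])
        b0 = y[0] - a0 * x[0]
        return [a0, b0], numpy.zeros((2, 3))   # both parameters FREE

    linear_theory = FitTheory(function=linear,
                              parameters=['a', 'b'],
                              estimate=estimate_linear,
                              description='Linear function')
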
diff --git a/silx/math/fit/functions.pyx b/silx/math/fit/functions.pyx
deleted file mode 100644
index 1f78563..0000000
--- a/silx/math/fit/functions.pyx
+++ /dev/null
@@ -1,985 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-"""This module provides fit functions.
-
-List of fit functions:
------------------------
-
- - :func:`sum_gauss`
- - :func:`sum_agauss`
- - :func:`sum_splitgauss`
- - :func:`sum_fastagauss`
-
- - :func:`sum_apvoigt`
- - :func:`sum_pvoigt`
- - :func:`sum_splitpvoigt`
-
- - :func:`sum_lorentz`
- - :func:`sum_alorentz`
- - :func:`sum_splitlorentz`
-
- - :func:`sum_stepdown`
- - :func:`sum_stepup`
- - :func:`sum_slit`
-
- - :func:`sum_ahypermet`
- - :func:`sum_fastahypermet`
-
-Full documentation:
--------------------
-
-"""
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "16/08/2017"
-
-import logging
-import numpy
-
-_logger = logging.getLogger(__name__)
-
-cimport cython
-cimport silx.math.fit.functions_wrapper as functions_wrapper
-
-
-def erf(x):
- """Return the gaussian error function
-
- :param x: Independent variable where the gaussian error function is
- calculated
- :type x: numpy.ndarray or scalar
- :return: Gaussian error function ``y=erf(x)``
- :raise: IndexError if ``x`` is an empty array
- """
- cdef:
- double[::1] x_c
- double[::1] y_c
-
-
- # force list into numpy array
- if not hasattr(x, "shape"):
- x = numpy.asarray(x)
-
- for len_dim in x.shape:
- if len_dim == 0:
- raise IndexError("Cannot compute erf for an empty array")
-
- x_c = numpy.array(x, copy=False, dtype=numpy.float64, order='C').reshape(-1)
- y_c = numpy.empty(shape=(x_c.size,), dtype=numpy.float64)
-
- status = functions_wrapper.erf_array(&x_c[0], x_c.size, &y_c[0])
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def erfc(x):
- """Return the gaussian complementary error function
-
- :param x: Independent variable where the gaussian complementary error
- function is calculated
- :type x: numpy.ndarray or scalar
- :return: Gaussian complementary error function ``y=erfc(x)``
- :rtype: numpy.ndarray
- :raise: IndexError if ``x`` is an empty array
- """
- cdef:
- double[::1] x_c
- double[::1] y_c
-
- # force list into numpy array
- if not hasattr(x, "shape"):
- x = numpy.asarray(x)
-
- for len_dim in x.shape:
- if len_dim == 0:
- raise IndexError("Cannot compute erfc for an empty array")
-
- x_c = numpy.array(x, copy=False, dtype=numpy.float64, order='C').reshape(-1)
- y_c = numpy.empty(shape=(x_c.size,), dtype=numpy.float64)
-
- status = functions_wrapper.erfc_array(&x_c[0], x_c.size, &y_c[0])
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_gauss(x, *params):
- """Return a sum of gaussian functions defined by *(height, centroid, fwhm)*,
- where:
-
- - *height* is the peak amplitude
- - *centroid* is the peak x-coordinate
- - *fwhm* is the full-width at half maximum
-
- :param x: Independent variable where the gaussians are calculated
- :type x: numpy.ndarray
- :param params: Array of gaussian parameters (length must be a multiple
- of 3):
- *(height1, centroid1, fwhm1, height2, centroid2, fwhm2,...)*
- :return: Array of sum of gaussian functions at each ``x`` coordinate.
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 3 parameters are required.")
-
- # ensure float64 (double) type and 1D contiguous data layout in memory
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_gauss(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- # reshape y_c to match original, possibly unusual, data shape
- return numpy.asarray(y_c).reshape(x.shape)
-
-
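
For reference, a short usage sketch; the plain-numpy expression below is the usual
height-form gaussian and is shown only as a cross-check of what the compiled code
is expected to return::

    import numpy
    from silx.math.fit.functions import sum_gauss

    x = numpy.linspace(-10, 10, 201)
    y = sum_gauss(x, 1.0, 0.0, 2.0)     # height=1, centroid=0, fwhm=2

    sigma = 2.0 / (2.0 * numpy.sqrt(2.0 * numpy.log(2.0)))
    y_ref = numpy.exp(-0.5 * (x / sigma) ** 2)
    print("max |y - y_ref| =", numpy.max(numpy.abs(y - y_ref)))
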
-def sum_agauss(x, *params):
- """Return a sum of gaussian functions defined by *(area, centroid, fwhm)*,
- where:
-
- - *area* is the area underneath the peak
- - *centroid* is the peak x-coordinate
- - *fwhm* is the full-width at half maximum
-
- :param x: Independent variable where the gaussians are calculated
- :type x: numpy.ndarray
- :param params: Array of gaussian parameters (length must be a multiple
- of 3):
- *(area1, centroid1, fwhm1, area2, centroid2, fwhm2,...)*
- :return: Array of sum of gaussian functions at each ``x`` coordinate.
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 3 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_agauss(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_fastagauss(x, *params):
- """Return a sum of gaussian functions defined by *(area, centroid, fwhm)*,
- where:
-
- - *area* is the area underneath the peak
- - *centroid* is the peak x-coordinate
- - *fwhm* is the full-width at half maximum
-
- This implementation differs from :func:`sum_agauss` by the usage of a
- lookup table with precalculated exponential values. This might speed up
- the computation for large numbers of individual gaussian functions.
-
- :param x: Independent variable where the gaussians are calculated
- :type x: numpy.ndarray
- :param params: Array of gaussian parameters (length must be a multiple
- of 3):
- *(area1, centroid1, fwhm1, area2, centroid2, fwhm2,...)*
- :return: Array of sum of gaussian functions at each ``x`` coordinate.
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 3 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_fastagauss(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_splitgauss(x, *params):
- """Return a sum of gaussian functions defined by *(area, centroid, fwhm1, fwhm2)*,
- where:
-
- - *height* is the peak amplitude
- - *centroid* is the peak x-coordinate
- - *fwhm1* is the full-width at half maximum for the distribution
- when ``x < centroid``
- - *fwhm2* is the full-width at half maximum for the distribution
- when ``x > centroid``
-
- :param x: Independent variable where the gaussians are calculated
- :type x: numpy.ndarray
- :param params: Array of gaussian parameters (length must be a multiple
- of 4):
- *(height1, centroid1, fwhm11, fwhm21, height2, centroid2, fwhm12, fwhm22,...)*
- :return: Array of sum of split gaussian functions at each ``x`` coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No gaussian parameters specified. " +
- "At least 4 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_splitgauss(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_apvoigt(x, *params):
- """Return a sum of pseudo-Voigt functions, defined by *(area, centroid, fwhm,
- eta)*.
-
- The pseudo-Voigt profile ``PV(x)`` is an approximation of the Voigt
- profile using a linear combination of a Gaussian curve ``G(x)`` and a
- Lorentzian curve ``L(x)`` instead of their convolution.
-
- - *area* is the area underneath both G(x) and L(x)
- - *centroid* is the peak x-coordinate for both functions
- - *fwhm* is the full-width at half maximum of both functions
- - *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
-
- :param x: Independent variable where the pseudo-Voigt functions are calculated
- :type x: numpy.ndarray
- :param params: Array of pseudo-Voigt parameters (length must be a multiple
- of 4):
- *(area1, centroid1, fwhm1, eta1, area2, centroid2, fwhm2, eta2,...)*
- :return: Array of sum of pseudo-Voigt functions at each ``x`` coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_apvoigt(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_pvoigt(x, *params):
- """Return a sum of pseudo-Voigt functions, defined by *(height, centroid,
- fwhm, eta)*.
-
- The pseudo-Voigt profile ``PV(x)`` is an approximation of the Voigt
- profile using a linear combination of a Gaussian curve ``G(x)`` and a
- Lorentzian curve ``L(x)`` instead of their convolution.
-
- - *height* is the peak amplitude of G(x) and L(x)
- - *centroid* is the peak x-coordinate for both functions
- - *fwhm* is the full-width at half maximum of both functions
- - *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
-
- :param x: Independent variable where the pseudo-Voigt functions are calculated
- :type x: numpy.ndarray
- :param params: Array of pseudo-Voigt parameters (length must be a multiple
- of 4):
- *(height1, centroid1, fwhm1, eta1, height2, centroid2, fwhm2, eta2,...)*
- :return: Array of sum of pseudo-Voigt functions at each ``x`` coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_pvoigt(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
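
The linear combination stated in the docstring can be checked against the individual
profiles; a small sketch using the same height, centroid and fwhm for both components::

    import numpy
    from silx.math.fit.functions import sum_pvoigt, sum_gauss, sum_lorentz

    x = numpy.linspace(-5, 5, 101)
    height, centroid, fwhm, eta = 1.0, 0.0, 1.5, 0.3
    pv = sum_pvoigt(x, height, centroid, fwhm, eta)
    mix = (eta * sum_lorentz(x, height, centroid, fwhm) +
           (1 - eta) * sum_gauss(x, height, centroid, fwhm))
    print("max |PV - mix| =", numpy.max(numpy.abs(pv - mix)))
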
-
-def sum_splitpvoigt(x, *params):
- """Return a sum of split pseudo-Voigt functions, defined by *(height,
- centroid, fwhm1, fwhm2, eta)*.
-
- The pseudo-Voigt profile ``PV(x)`` is an approximation of the Voigt
- profile using a linear combination of a Gaussian curve ``G(x)`` and a
- Lorentzian curve ``L(x)`` instead of their convolution.
-
- - *height* is the peak amplitude for G(x) and L(x)
- - *centroid* is the peak x-coordinate for both functions
- - *fwhm1* is the full-width at half maximum of both functions
- when ``x < centroid``
- - *fwhm2* is the full-width at half maximum of both functions
- when ``x > centroid``
- - *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
-
- :param x: Independent variable where the split pseudo-Voigt functions are calculated
- :type x: numpy.ndarray
- :param params: Array of pseudo-Voigt parameters (length must be a multiple
- of 5):
- *(height1, centroid1, fwhm11, fwhm21, eta1,...)*
- :return: Array of sum of split pseudo-Voigt functions at each ``x``
- coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 5 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_splitpvoigt(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_lorentz(x, *params):
- """Return a sum of Lorentz distributions, also known as Cauchy distribution,
- defined by *(height, centroid, fwhm)*.
-
- - *height* is the peak amplitude
- - *centroid* is the peak x-coordinate
- - *fwhm* is the full-width at half maximum
-
- :param x: Independent variable where the Lorentzians are calculated
- :type x: numpy.ndarray
- :param params: Array of Lorentz parameters (length must be a multiple
- of 3):
- *(height1, centroid1, fwhm1,...)*
- :return: Array of sum Lorentz functions at each ``x``
- coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_lorentz(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_alorentz(x, *params):
- """Return a sum of Lorentz distributions, also known as Cauchy distribution,
- defined by *(area, centroid, fwhm)*.
-
- - *area* is the area underneath the peak
- - *centroid* is the peak x-coordinate for both functions
- - *fwhm* is the full-width at half maximum
-
- :param x: Independent variable where the Lorentzians are calculated
- :type x: numpy.ndarray
- :param params: Array of Lorentz parameters (length must be a multiple
- of 3):
- *(area1, centroid1, fwhm1,...)*
- :return: Array of sum of Lorentz functions at each ``x``
- coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_alorentz(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_splitlorentz(x, *params):
- """Return a sum of split Lorentz distributions,
- defined by *(height, centroid, fwhm1, fwhm2)*.
-
- - *height* is the peak amplitude
- - *centroid* is the peak x-coordinate for both functions
- - *fwhm1* is the full-width at half maximum for ``x < centroid``
- - *fwhm2* is the full-width at half maximum for ``x > centroid``
-
- :param x: Independent variable where the split Lorentzians are calculated
- :type x: numpy.ndarray
- :param params: Array of Lorentz parameters (length must be a multiple
- of 4):
- *(height1, centroid1, fwhm11, fwhm21...)*
- :return: Array of sum of Lorentz functions at each ``x``
- coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_splitlorentz(
- &x_c[0], x.size,
- &params_c[0], params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_stepdown(x, *params):
- """Return a sum of stepdown functions.
- defined by *(height, centroid, fwhm)*.
-
- - *height* is the step's amplitude
- - *centroid* is the step's x-coordinate
- - *fwhm* is the full-width at half maximum for the derivative,
- which is a measure of the *sharpness* of the step-down's edge
-
- :param x: Independent variable where the stepdown functions are calculated
- :type x: numpy.ndarray
- :param params: Array of stepdown parameters (length must be a multiple
- of 3):
- *(height1, centroid1, fwhm1,...)*
- :return: Array of sum of stepdown functions at each ``x``
- coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_stepdown(&x_c[0],
- x.size,
- &params_c[0],
- params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_stepup(x, *params):
- """Return a sum of stepup functions.
- defined by *(height, centroid, fwhm)*.
-
- - *height* is the step's amplitude
- - *centroid* is the step's x-coordinate
- - *fwhm* is the full-width at half maximum for the derivative,
- which is a measure of the *sharpness* of the step-up's edge
-
- :param x: Independent variable where the stepup functions are calculated
- :type x: numpy.ndarray
- :param params: Array of stepup parameters (length must be a multiple
- of 3):
- *(height1, centroid1, fwhm1,...)*
- :return: Array of sum of stepup functions at each ``x``
- coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 3 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_stepup(&x_c[0],
- x.size,
- &params_c[0],
- params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_slit(x, *params):
- """Return a sum of slit functions.
- defined by *(height, position, fwhm, beamfwhm)*.
-
- - *height* is the slit's amplitude
- - *position* is the center of the slit's x-coordinate
- - *fwhm* is the full-width at half maximum of the slit
- - *beamfwhm* is the full-width at half maximum of the
- derivative, which is a measure of the *sharpness*
- of the edges of the slit
-
- :param x: Independent variable where the slits are calculated
- :type x: numpy.ndarray
- :param params: Array of slit parameters (length must be a multiple
- of 4):
- *(height1, centroid1, fwhm1, beamfwhm1,...)*
- :return: Array of sum of slit functions at each ``x``
- coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 4 parameters are required.")
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_slit(&x_c[0],
- x.size,
- &params_c[0],
- params_c.size,
- &y_c[0])
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def sum_ahypermet(x, *params,
- gaussian_term=True, st_term=True, lt_term=True, step_term=True):
- """Return a sum of ahypermet functions.
- defined by *(area, position, fwhm, st_area_r, st_slope_r, lt_area_r,
- lt_slope_r, step_height_r)*.
-
- - *area* is the area underneath the gaussian peak
- - *position* is the center of the various peaks and the position of
- the step down
- - *fwhm* is the full-width at half maximum of the terms
- - *st_area_r* is the factor between the gaussian area and the area of the
- short tail term
- - *st_slope_r* is a ratio related to the slope of the short tail
- in the low ``x`` values (the lower, the steeper)
- - *lt_area_r* is the ratio between the gaussian area and the area of the
- long tail term
- - *lt_slope_r* is a ratio related to the slope of the long tail
- in the low ``x`` values (the lower, the steeper)
- - *step_height_r* is the ratio between the height of the step down
- and the gaussian height
-
- A hypermet function is a sum of four functions (terms):
-
- - a gaussian term
- - a long tail term
- - a short tail term
- - a step down term
-
- :param x: Independent variable where the hypermets are calculated
- :type x: numpy.ndarray
- :param params: Array of hypermet parameters (length must be a multiple
- of 8):
- *(area1, position1, fwhm1, st_area_r1, st_slope_r1, lt_area_r1,
- lt_slope_r1, step_height_r1...)*
- :param gaussian_term: If ``True``, enable gaussian term. Default ``True``
- :param st_term: If ``True``, enable short tail term. Default ``True``
- :param lt_term: If ``True``, enable long tail term. Default ``True``
- :param step_term: If ``True``, enable step term. Default ``True``
- :return: Array of sum of hypermet functions at each ``x`` coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 8 parameters are required.")
-
- # Sum binary flags to activate various terms of the equation
- tail_flags = 1 if gaussian_term else 0
- if st_term:
- tail_flags += 2
- if lt_term:
- tail_flags += 4
- if step_term:
- tail_flags += 8
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_ahypermet(&x_c[0],
- x.size,
- &params_c[0],
- params_c.size,
- &y_c[0],
- tail_flags)
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
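For reference, the flag composition performed by this wrapper can be sketched as a small standalone helper (hypothetical, not part of the module); the bit values follow the C convention: 1 for the gaussian term, 2 for the short tail, 4 for the long tail, 8 for the step.

    def compose_tail_flags(gaussian_term=True, st_term=True,
                           lt_term=True, step_term=True):
        # Bit mask expected by the C functions: 1 gaussian, 2 short tail,
        # 4 long tail, 8 step down
        return ((1 if gaussian_term else 0) +
                (2 if st_term else 0) +
                (4 if lt_term else 0) +
                (8 if step_term else 0))

    assert compose_tail_flags() == 15              # all terms enabled
    assert compose_tail_flags(st_term=False,
                              lt_term=False) == 9  # gaussian + step only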
-
-
-def sum_fastahypermet(x, *params,
- gaussian_term=True, st_term=True,
- lt_term=True, step_term=True):
- """Return a sum of hypermet functions defined by *(area, position, fwhm,
- st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r)*.
-
- - *area* is the area underneath the gaussian peak
- - *position* is the center of the various peaks and the position of
- the step down
- - *fwhm* is the full-width at half maximum of the terms
- - *st_area_r* is the factor between the gaussian area and the area of the
- short tail term
- - *st_slope_r* is a parameter related to the slope of the short tail
- in the low ``x`` values (the lower, the steeper)
- - *lt_area_r* is the factor between the gaussian area and the area of the
- long tail term
- - *lt_slope_r* is a parameter related to the slope of the long tail
- in the low ``x`` values (the lower, the steeper)
- - *step_height_r* is the factor between the height of the step down
- and the gaussian height
-
- A hypermet function is a sum of four functions (terms):
-
- - a gaussian term
- - a long tail term
- - a short tail term
- - a step down term
-
- This function differs from :func:`sum_ahypermet` by the use of a lookup
- table for calculating exponentials. This offers better performance when
- calculating many functions for large ``x`` arrays.
-
- :param x: Independent variable where the hypermets are calculated
- :type x: numpy.ndarray
- :param params: Array of hypermet parameters (length must be a multiple
- of 8):
- *(area1, position1, fwhm1, st_area_r1, st_slope_r1, lt_area_r1,
- lt_slope_r1, step_height_r1...)*
- :param gaussian_term: If ``True``, enable gaussian term. Default ``True``
- :param st_term: If ``True``, enable short tail term. Default ``True``
- :param lt_term: If ``True``, enable long tail term. Default ``True``
- :param step_term: If ``True``, enable step term. Default ``True``
- :return: Array of sum of hypermet functions at each ``x`` coordinate
- """
- cdef:
- double[::1] x_c
- double[::1] params_c
- double[::1] y_c
-
- if not len(params):
- raise IndexError("No parameters specified. " +
- "At least 8 parameters are required.")
-
- # Sum binary flags to activate various terms of the equation
- tail_flags = 1 if gaussian_term else 0
- if st_term:
- tail_flags += 2
- if lt_term:
- tail_flags += 4
- if step_term:
- tail_flags += 8
-
- # TODO (maybe):
- # Set flags according to params, to move conditional
- # branches out of the C code.
- # E.g., set st_term = False if any of the st_slope_r params
- # (params[8*i + 4]) is 0, to prevent division by 0. Same thing for
- # lt_slope_r (params[8*i + 6]) and lt_term.
-
- x_c = numpy.array(x,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- params_c = numpy.array(params,
- copy=False,
- dtype=numpy.float64,
- order='C').reshape(-1)
- y_c = numpy.empty(shape=(x.size,),
- dtype=numpy.float64)
-
- status = functions_wrapper.sum_fastahypermet(&x_c[0],
- x.size,
- &params_c[0],
- params_c.size,
- &y_c[0],
- tail_flags)
-
- if status:
- raise IndexError("Wrong number of parameters for function")
-
- return numpy.asarray(y_c).reshape(x.shape)
-
-
-def atan_stepup(x, a, b, c):
- """
- Step up function using an inverse tangent.
-
- :param x: Independent variable where the function is calculated
- :type x: numpy array
- :param a: Height of the step up
- :param b: Center of the step up
- :param c: Parameter related to the slope of the step. A lower ``c``
- value yields a sharper step.
- :return: ``a * (0.5 + (arctan((x - b) / c) / pi))``
- :rtype: numpy array
- """
- if not hasattr(x, "shape"):
- x = numpy.array(x)
- return a * (0.5 + (numpy.arctan((1.0 * x - b) / c) / numpy.pi))
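A minimal usage sketch of the function defined above (values are illustrative): the output rises from roughly 0 towards ``a``, centred at ``b``, with the transition width controlled by ``c``.

    import numpy

    # atan_stepup as defined above
    x = numpy.linspace(-10, 10, 201)
    y = atan_stepup(x, a=2.0, b=1.0, c=0.5)
    # Well below the centre the value is close to 0; well above, close to a
    assert y[0] < 0.05 and abs(y[-1] - 2.0) < 0.05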
-
-
-def periodic_gauss(x, *pars):
- """
- Return a sum of gaussian functions defined by
- *(npeaks, delta, height, centroid, fwhm)*,
- where:
-
- - *npeaks* is the number of gaussian peaks
- - *delta* is the constant distance between 2 peaks
- - *height* is the peak amplitude of all the gaussians
- - *centroid* is the peak x-coordinate of the first gaussian
- - *fwhm* is the full-width at half maximum for all the gaussians
-
- :param x: Independent variable where the function is calculated
- :param pars: *(npeaks, delta, height, centroid, fwhm)*
- :return: Sum of ``npeaks`` gaussians
- """
-
- if not len(pars):
- raise IndexError("No parameters specified. " +
- "At least 5 parameters are required.")
-
- newpars = numpy.zeros((pars[0], 3), numpy.float64)
- for i in range(int(pars[0])):
- newpars[i, 0] = pars[2]
- newpars[i, 1] = pars[3] + i * pars[1]
- newpars[:, 2] = pars[4]
- return sum_gauss(x, newpars)
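A hedged usage sketch of the function defined above: the call below would build five gaussians of equal height and width, the first centred at 20 and the others spaced 15 apart (all numbers are illustrative only).

    import numpy

    # periodic_gauss as defined above
    x = numpy.linspace(0, 100, 1001)
    # npeaks=5, delta=15, height=10, first centroid=20, fwhm=3
    y = periodic_gauss(x, 5, 15.0, 10.0, 20.0, 3.0)
    # Peaks are expected near x = 20, 35, 50, 65, 80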
diff --git a/silx/math/fit/functions/include/functions.h b/silx/math/fit/functions/include/functions.h
deleted file mode 100644
index de4209b..0000000
--- a/silx/math/fit/functions/include/functions.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-
-#ifndef FITFUNCTIONS_H
-#define FITFUNCTIONS_H
-
-/* Helper functions */
-int test_params(int len_params, int len_params_one_function, char* fun_name, char* param_names);
-double myerfc(double x);
-double myerf(double x);
-int erfc_array(double* x, int len_x, double* y);
-int erf_array(double* x, int len_x, double* y);
-
-/* Background functions */
-void snip1d(double *data, int size, int width);
-//void snip1d_multiple(double *data, int n_channels, int snip_width, int n_spectra);
-void snip2d(double *data, int nrows, int ncolumns, int width);
-void snip3d(double *data, int nx, int ny, int nz, int width);
-
-int strip(double* input, long len_input, double c, long niter, int deltai,
- long* anchors, long len_anchors, double* output);
-
-/* Smoothing functions */
-
-int SavitskyGolay(double* input, long len_input, int npoints, double* output);
-
-/* Fit functions */
-int sum_gauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y);
-int sum_agauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y);
-int sum_fastagauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y);
-int sum_splitgauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y);
-
-int sum_apvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y);
-int sum_pvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y);
-int sum_splitpvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y);
-
-int sum_lorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y);
-int sum_alorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y);
-int sum_splitlorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y);
-
-int sum_stepdown(double* x, int len_x, double* pdstep, int len_pdstep, double* y);
-int sum_stepup(double* x, int len_x, double* pustep, int len_pustep, double* y);
-int sum_slit(double* x, int len_x, double* pslit, int len_pslit, double* y);
-
-int sum_ahypermet(double* x, int len_x, double* phypermet, int len_phypermet, double* y, int tail_flags);
-int sum_fastahypermet(double* x, int len_x, double* phypermet, int len_phypermet, double* y, int tail_flags);
-
-#endif /* #define FITFUNCTIONS_H */
diff --git a/silx/math/fit/functions/src/funs.c b/silx/math/fit/functions/src/funs.c
deleted file mode 100644
index aae173f..0000000
--- a/silx/math/fit/functions/src/funs.c
+++ /dev/null
@@ -1,1265 +0,0 @@
-#/*##########################################################################
-# Copyright (c) 2004-2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-/*
- This file provides fit functions.
-
- It is adapted from PyMca source file "SpecFitFuns.c". The main difference
- with the original code is that this code does not handle the python
- wrapping, which is done elsewhere using cython.
-
- Authors: V.A. Sole, P. Knobel
- License: MIT
- Last modified: 17/06/2016
-*/
-#include <math.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include "functions.h"
-
-#ifndef M_PI
-#define M_PI 3.1415926535
-#endif
-
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-
-#if defined(_WIN32)
-#define erf myerf
-#define erfc myerfc
-#endif
-
-#define LOG2 0.69314718055994529
-
-
-int test_params(int len_params,
- int len_params_one_function,
- char* fun_name,
- char* param_names)
-{
- if (len_params % len_params_one_function) {
- printf("[%s]Error: Number of parameters must be a multiple of %d.",
- fun_name, len_params_one_function);
- printf("\nParameters expected for %s: %s\n",
- fun_name, param_names);
- return(1);
- }
- if (len_params == 0) {
- printf("[%s]Error: No parameters specified.", fun_name);
- printf("\nParameters expected for %s: %s\n",
- fun_name, param_names);
- return(1);
- }
- return(0);
-}
-
-/* Complementary error function for a single value*/
-double myerfc(double x)
-{
- double z;
- double t;
- double r;
-
- z=fabs(x);
- t=1.0/(1.0+0.5*z);
- r=t * exp(-z * z - 1.26551223 + t * (1.00002368 + t * (0.3740916 +
- t * (0.09678418 + t * (-0.18628806 + t * (0.27886807 + t * (-1.13520398 +
- t * (1.48851587 + t * (-0.82215223+t*0.17087277)))))))));
- if (x<0)
- r=2.0-r;
- return (r);
-}
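This is the classic rational approximation of erfc. As a sanity check, a direct Python transcription agrees closely with the standard library implementation (sketch, assuming only the standard math module):

    import math

    def nr_erfc(x):
        # Direct transcription of myerfc() above
        z = abs(x)
        t = 1.0 / (1.0 + 0.5 * z)
        r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (0.3740916 +
            t * (0.09678418 + t * (-0.18628806 + t * (0.27886807 + t * (-1.13520398 +
            t * (1.48851587 + t * (-0.82215223 + t * 0.17087277)))))))))
        return 2.0 - r if x < 0 else r

    # The approximation error is typically of the order of 1e-7
    assert abs(nr_erfc(1.5) - math.erfc(1.5)) < 1e-6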
-
-/* Gauss error function for a single value*/
-double myerf(double x)
-{
- return (1.0 - myerfc(x));
-}
-
-/* Gauss error function for an array
- y[i]=erf(x[i])
- returns status code 0
-*/
-int erf_array(double* x, int len_x, double* y)
-{
- int j;
- for (j=0; j<len_x; j++) {
- y[j] = erf(x[j]);
- }
- return(0);
-}
-
-/* Complementary error function for an array
- y[i]=erfc(x[i])
- returns status code 0*/
-int erfc_array(double* x, int len_x, double* y)
-{
- int j;
- for (j=0; j<len_x; j++) {
- y[j] = erfc(x[j]);
- }
- return(0);
-}
-
-/* Use lookup table for fast exp computation */
-double fastexp(double x)
-{
- int expindex;
- static double EXP[5000] = {0.0};
- int i;
-
-/*initialize */
- if (EXP[0] < 1){
- for (i=0;i<5000;i++){
- EXP[i] = exp(-0.01 * i);
- }
- }
-/*calculate*/
- if (x < 0){
- x = -x;
- if (x < 50){
- expindex = (int) (x * 100);
- return EXP[expindex]*(1.0 - (x - 0.01 * expindex)) ;
- }else if (x < 100) {
- expindex = (int) (x * 10);
- return pow(EXP[expindex]*(1.0 - (x - 0.1 * expindex)),10) ;
- }else if (x < 1000){
- expindex = (int) x;
- return pow(EXP[expindex]*(1.0 - (x - expindex)),20) ;
- }else if (x < 10000){
- expindex = (int) (x * 0.1);
- return pow(EXP[expindex]*(1.0 - (x - 10.0 * expindex)),30) ;
- }else{
- return 0;
- }
- }else{
- if (x < 50){
- expindex = (int) (x * 100);
- return 1.0/EXP[expindex]*(1.0 - (x - 0.01 * expindex)) ;
- }else if (x < 100) {
- expindex = (int) (x * 10);
- return pow(EXP[expindex]*(1.0 - (x - 0.1 * expindex)),-10) ;
- }else{
- return exp(x);
- }
- }
-}
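The idea behind fastexp() is a lookup table of exp(-0.01*i) combined with a first-order correction, exp(-d) ~ exp(-0.01*i) * (1 - (d - 0.01*i)). A hypothetical Python sketch of the first branch (0 <= d < 50) illustrates the accuracy trade-off:

    import numpy

    # Table of exp(-0.01 * i), as in the static EXP array above
    _EXP_TABLE = numpy.exp(-0.01 * numpy.arange(5000))

    def fast_negative_exp(d):
        """Approximate exp(-d) for 0 <= d < 50: table lookup plus a
        first-order correction, mirroring the first branch of fastexp()."""
        index = int(d * 100)
        return _EXP_TABLE[index] * (1.0 - (d - 0.01 * index))

    assert abs(fast_negative_exp(2.345) - numpy.exp(-2.345)) < 1e-4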
-
-
-/* sum_gauss
- Sum of gaussian functions, defined by (height, centroid, fwhm)
-
- *height* is the peak amplitude
- *centroid* is the peak x-coordinate
- *fwhm* is the full-width at half maximum
-
- Parameters:
- -----------
-
- - x: Independent variable where the gaussians are calculated.
- - len_x: Number of elements in the x array.
- - pgauss: Array of gaussian parameters:
- (height1, centroid1, fwhm1, height2, centroid2, fwhm2,...)
- - len_pgauss: Number of elements in the pgauss array. Must be
- a multiple of 3.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_gauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y)
-{
- int i, j;
- double dhelp, inv_two_sqrt_two_log2, sigma;
- double fwhm, centroid, height;
-
- if (test_params(len_pgauss, 3, "sum_gauss", "height, centroid, fwhm")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_pgauss/3; i++) {
- height = pgauss[3*i];
- centroid = pgauss[3*i+1];
- fwhm = pgauss[3*i+2];
-
- sigma = fwhm * inv_two_sqrt_two_log2;
-
- for (j=0; j<len_x; j++) {
- dhelp = (x[j] - centroid) / sigma;
- if (dhelp <= 20) {
- y[j] += height * exp (-0.5 * dhelp * dhelp);
- }
- }
- }
- return(0);
-}
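The sigma/FWHM conversion used throughout this file is sigma = fwhm / (2 * sqrt(2 * ln 2)). A hypothetical numpy sketch reproducing the same sum (without the exponent cutoff used above for speed):

    import numpy

    def gauss_sum(x, params):
        # params is a flat sequence (height1, centroid1, fwhm1, ...), as for sum_gauss
        x = numpy.asarray(x, dtype=numpy.float64)
        y = numpy.zeros_like(x)
        sigma_per_fwhm = 1.0 / (2.0 * numpy.sqrt(2.0 * numpy.log(2.0)))
        for height, centroid, fwhm in numpy.asarray(params).reshape(-1, 3):
            sigma = fwhm * sigma_per_fwhm
            y += height * numpy.exp(-0.5 * ((x - centroid) / sigma) ** 2)
        return y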
-
-/* sum_agauss
- Sum of gaussian functions defined by (area, centroid, fwhm)
-
- *area* is the area underneath the peak
- *centroid* is the peak x-coordinate
- *fwhm* is the full-width at half maximum
-
- Parameters:
- -----------
-
- - x: Independent variable where the gaussians are calculated.
- - len_x: Number of elements in the x array.
- - pgauss: Array of gaussian parameters:
- (area1, centroid1, fwhm1, area2, centroid2, fwhm2,...)
- - len_pgauss: Number of elements in the pgauss array. Must be
- a multiple of 3.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_agauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y)
-{
- int i, j;
- double dhelp, height, sqrt2PI, sigma, inv_two_sqrt_two_log2;
- double fwhm, centroid, area;
-
- if (test_params(len_pgauss, 3, "sum_agauss", "area, centroid, fwhm")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
- sqrt2PI = sqrt(2.0*M_PI);
-
- for (i=0; i<len_pgauss/3; i++) {
- area = pgauss[3*i];
- centroid = pgauss[3*i+1];
- fwhm = pgauss[3*i+2];
-
- sigma = fwhm * inv_two_sqrt_two_log2;
- height = area / (sigma * sqrt2PI);
-
- for (j=0; j<len_x; j++) {
- dhelp = (x[j] - centroid)/sigma;
- if (dhelp <= 35) {
- y[j] += height * exp (-0.5 * dhelp * dhelp);
- }
- }
- }
- return(0);
-}
-
-
-/* sum_fastagauss
- Sum of gaussian functions defined by (area, centroid, fwhm).
- This implementation uses a lookup table of precalculated exp values
- and a limited development (exp(-x) = 1 - x for small values of x)
-
- *area* is the area underneath the peak
- *centroid* is the peak x-coordinate
- *fwhm* is the full-width at half maximum
-
- Parameters:
- -----------
-
- - x: Independent variable where the gaussians are calculated.
- - len_x: Number of elements in the x array.
- - pgauss: Array of gaussian parameters:
- (area1, centroid1, fwhm1, area2, centroid2, fwhm2,...)
- - len_pgauss: Number of elements in the pgauss array. Must be
- a multiple of 3.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-
-int sum_fastagauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y)
-{
- int i, j, expindex;
- double dhelp, height, sqrt2PI, sigma, inv_two_sqrt_two_log2;
- double fwhm, centroid, area;
- static double EXP[5000];
-
- if (test_params(len_pgauss, 3, "sum_fastagauss", "area, centroid, fwhm")) {
- return(1);
- }
-
- if (EXP[0] < 1){
- for (i=0; i<5000; i++){
- EXP[i] = exp(-0.01 * i);
- }
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
- sqrt2PI = sqrt(2.0*M_PI);
-
- for (i=0; i<len_pgauss/3; i++) {
- area = pgauss[3*i];
- centroid = pgauss[3*i+1];
- fwhm = pgauss[3*i+2];
-
- sigma = fwhm * inv_two_sqrt_two_log2;
- height = area / (sigma * sqrt2PI);
-
- for (j=0; j<len_x; j++) {
- dhelp = (x[j] - centroid)/sigma;
- if (dhelp <= 15){
- dhelp = 0.5 * dhelp * dhelp;
- if (dhelp < 50){
- expindex = (int) (dhelp * 100);
- y[j] += height * EXP[expindex] * (1.0 - (dhelp - 0.01 * expindex));
- }
- else if (dhelp < 100) {
- expindex = (int) (dhelp * 10);
- y[j] += height * pow(EXP[expindex] * (1.0 - (dhelp - 0.1 * expindex)), 10);
- }
- else if (dhelp < 1000){
- expindex = (int) (dhelp);
- y[j] += height * pow(EXP[expindex] * (1.0 - (dhelp - expindex)), 20);
- }
- }
- }
- }
- return(0);
-}
-
-/* sum_splitgauss
- Sum of split gaussian functions, defined by (height, centroid, fwhm1, fwhm2)
-
- *height* is the peak amplitude
- *centroid* is the peak x-coordinate
- *fwhm1* is the full-width at half maximum of the left half of the curve (x < centroid)
- *fwhm2* is the full-width at half maximum of the right half of the curve (x > centroid)
-
- Parameters:
- -----------
-
- - x: Independent variable where the gaussians are calculated.
- - len_x: Number of elements in the x array.
- - pgauss: Array of gaussian parameters:
- (height1, centroid1, fwhm11, fwhm21, height2, centroid2, fwhm12, fwhm22,...)
- - len_pgauss: Number of elements in the pgauss array. Must be
- a multiple of 4.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_splitgauss(double* x, int len_x, double* pgauss, int len_pgauss, double* y)
-{
- int i, j;
- double dhelp, inv_two_sqrt_two_log2, sigma1, sigma2;
- double fwhm1, fwhm2, centroid, height;
-
- if (test_params(len_pgauss, 4, "sum_splitgauss", "height, centroid, fwhm1, fwhm2")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_pgauss/4; i++) {
- height = pgauss[4*i];
- centroid = pgauss[4*i+1];
- fwhm1 = pgauss[4*i+2];
- fwhm2 = pgauss[4*i+3];
-
- sigma1 = fwhm1 * inv_two_sqrt_two_log2;
- sigma2 = fwhm2 * inv_two_sqrt_two_log2;
-
- for (j=0; j<len_x; j++) {
- dhelp = (x[j] - centroid);
- if (dhelp > 0) {
- /* Use fwhm2 when x > centroid */
- dhelp = dhelp / sigma2;
- }
- else {
- /* Use fwhm1 when x < centroid */
- dhelp = dhelp / sigma1;
- }
-
- if (dhelp <= 20) {
- y[j] += height * exp (-0.5 * dhelp * dhelp);
- }
- }
- }
- return(0);
-}
-
-/* sum_apvoigt
- Sum of pseudo-Voigt functions, defined by (area, centroid, fwhm, eta).
-
- The pseudo-Voigt profile PV(x) is an approximation of the Voigt profile
- using a linear combination of a Gaussian curve G(x) and a Lorentzian curve
- L(x) instead of their convolution.
-
- *area* is the area underneath both G(x) and L(x)
- *centroid* is the peak x-coordinate for both functions
- *fwhm* is the full-width at half maximum of both functions
- *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
-
- Parameters:
- -----------
-
- - x: Independent variable where the pseudo-Voigt functions are calculated.
- - len_x: Number of elements in the x array.
- - pvoigt: Array of Voigt function parameters:
- (area1, centroid1, fwhm1, eta1, area2, centroid2, fwhm2, eta2,...)
- - len_pvoigt: Number of elements in the pvoigt array. Must be
- a multiple of 4.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_apvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y)
-{
- int i, j;
- double dhelp, inv_two_sqrt_two_log2, sqrt2PI, sigma, height;
- double area, centroid, fwhm, eta;
-
- if (test_params(len_pvoigt, 4, "sum_apvoigt", "area, centroid, fwhm, eta")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
- sqrt2PI = sqrt(2.0*M_PI);
-
-
- for (i=0; i<len_pvoigt/4; i++) {
- area = pvoigt[4*i];
- centroid = pvoigt[4*i+1];
- fwhm = pvoigt[4*i+2];
- eta = pvoigt[4*i+3];
-
- sigma = fwhm * inv_two_sqrt_two_log2;
- height = area / (sigma * sqrt2PI);
-
- for (j=0; j<len_x; j++) {
- /* Lorentzian term */
- dhelp = (x[j] - centroid) / (0.5 * fwhm);
- dhelp = 1.0 + (dhelp * dhelp);
- y[j] += eta * (area / (0.5 * M_PI * fwhm * dhelp));
-
- /* Gaussian term */
- dhelp = (x[j] - centroid) / sigma;
- if (dhelp <= 35) {
- y[j] += (1.0 - eta) * height * exp (-0.5 * dhelp * dhelp);
- }
- }
- }
- return(0);
-}
-
-/* sum_pvoigt
- Sum of pseudo-Voigt functions, defined by (height, centroid, fwhm, eta).
-
- The pseudo-Voigt profile PV(x) is an approximation of the Voigt profile
- using a linear combination of a Gaussian curve G(x) and a Lorentzian curve
- L(x) instead of their convolution.
-
- *height* is the peak amplitude of G(x) and L(x)
- *centroid* is the peak x-coordinate for both functions
- *fwhm* is the full-width at half maximum of both functions
- *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
-
- Parameters:
- -----------
-
- - x: Independent variable where the pseudo-Voigt functions are calculated.
- - len_x: Number of elements in the x array.
- - pvoigt: Array of Voigt function parameters:
- (height1, centroid1, fwhm1, eta1, height2, centroid2, fwhm2, eta2,...)
- - len_pvoigt: Number of elements in the pvoigt array. Must be
- a multiple of 4.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_pvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y)
-{
- int i, j;
- double dhelp, inv_two_sqrt_two_log2, sigma;
- double height, centroid, fwhm, eta;
-
- if (test_params(len_pvoigt, 4, "sum_pvoigt", "height, centroid, fwhm, eta")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_pvoigt/4; i++) {
- height = pvoigt[4*i];
- centroid = pvoigt[4*i+1];
- fwhm = pvoigt[4*i+2];
- eta = pvoigt[4*i+3];
-
- sigma = fwhm * inv_two_sqrt_two_log2;
-
- for (j=0; j<len_x; j++) {
- /* Lorentzian term */
- dhelp = (x[j] - centroid) / (0.5 * fwhm);
- dhelp = 1.0 + (dhelp * dhelp);
- y[j] += eta * height / dhelp;
-
- /* Gaussian term */
- dhelp = (x[j] - centroid) / sigma;
- if (dhelp <= 35) {
- y[j] += (1.0 - eta) * height * exp (-0.5 * dhelp * dhelp);
- }
- }
- }
- return(0);
-}
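The pseudo-Voigt here is a linear mix of a Lorentzian and a Gaussian sharing height, centroid and FWHM. A hypothetical numpy sketch of a single profile, matching the loop body above (without the exponent cutoff):

    import numpy

    def pseudo_voigt(x, height, centroid, fwhm, eta):
        # PV(x) = eta * L(x) + (1 - eta) * G(x)
        x = numpy.asarray(x, dtype=numpy.float64)
        lorentz = height / (1.0 + ((x - centroid) / (0.5 * fwhm)) ** 2)
        sigma = fwhm / (2.0 * numpy.sqrt(2.0 * numpy.log(2.0)))
        gauss = height * numpy.exp(-0.5 * ((x - centroid) / sigma) ** 2)
        return eta * lorentz + (1.0 - eta) * gauss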
-
-/* sum_splitpvoigt
- Sum of split pseudo-Voigt functions, defined by
- (height, centroid, fwhm1, fwhm2, eta).
-
- The pseudo-Voigt profile PV(x) is an approximation of the Voigt profile
- using a linear combination of a Gaussian curve G(x) and a Lorentzian curve
- L(x) instead of their convolution.
-
- *height* is the peak amplitude of G(x) and L(x)
- *centroid* is the peak x-coordinate for both functions
- *fwhm1* is the full-width at half maximum of both functions for x < centroid
- *fwhm2* is the full-width at half maximum of both functions for x > centroid
- *eta* is the Lorentz factor: PV(x) = eta * L(x) + (1 - eta) * G(x)
-
- Parameters:
- -----------
-
- - x: Independent variable where the pseudo-Voigt functions are calculated.
- - len_x: Number of elements in the x array.
- - pvoigt: Array of Voigt function parameters:
- (height1, centroid1, fwhm11, fwhm21, eta1, ...)
- - len_pvoigt: Number of elements in the pvoigt array. Must be
- a multiple of 5.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_splitpvoigt(double* x, int len_x, double* pvoigt, int len_pvoigt, double* y)
-{
- int i, j;
- double dhelp, inv_two_sqrt_two_log2, x_minus_centroid, sigma1, sigma2;
- double height, centroid, fwhm1, fwhm2, eta;
-
- if (test_params(len_pvoigt, 5, "sum_splitpvoigt", "height, centroid, fwhm1, fwhm2, eta")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- inv_two_sqrt_two_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_pvoigt/5; i++) {
- height = pvoigt[5*i];
- centroid = pvoigt[5*i+1];
- fwhm1 = pvoigt[5*i+2];
- fwhm2 = pvoigt[5*i+3];
- eta = pvoigt[5*i+4];
-
- sigma1 = fwhm1 * inv_two_sqrt_two_log2;
- sigma2 = fwhm2 * inv_two_sqrt_two_log2;
-
- for (j=0; j<len_x; j++) {
- x_minus_centroid = (x[j] - centroid);
-
- /* Use fwhm2 when x > centroid */
- if (x_minus_centroid > 0) {
- /* Lorentzian term */
- dhelp = x_minus_centroid / (0.5 * fwhm2);
- dhelp = 1.0 + (dhelp * dhelp);
- y[j] += eta * height / dhelp;
-
- /* Gaussian term */
- dhelp = x_minus_centroid / sigma2;
- if (dhelp <= 35) {
- y[j] += (1.0 - eta) * height * exp (-0.5 * dhelp * dhelp);
- }
- }
- /* Use fwhm1 when x < centroid */
- else {
- /* Lorentzian term */
- dhelp = x_minus_centroid / (0.5 * fwhm1);
- dhelp = 1.0 + (dhelp * dhelp);
- y[j] += eta * height / dhelp;
-
- /* Gaussian term */
- dhelp = x_minus_centroid / sigma1;
- if (dhelp <= 35) {
- y[j] += (1.0 - eta) * height * exp (-0.5 * dhelp * dhelp);
- }
- }
- }
- }
- return(0);
-}
-
-/* sum_lorentz
- Sum of Lorentz functions, defined by (height, centroid, fwhm).
-
- *height* is the peak amplitude
- *centroid* is the peak's x-coordinate
- *fwhm* is the full-width at half maximum
-
- Parameters:
- -----------
-
- - x: Independent variable where the Lorentzians are calculated.
- - len_x: Number of elements in the x array.
- - plorentz: Array of lorentz function parameters:
- (height1, centroid1, fwhm1, ...)
- - len_plorentz: Number of elements in the plorentz array. Must be
- a multiple of 3.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_lorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y)
-{
- int i, j;
- double dhelp;
- double height, centroid, fwhm;
-
- if (test_params(len_plorentz, 3, "sum_lorentz", "height, centroid, fwhm")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- for (i=0; i<len_plorentz/3; i++) {
- height = plorentz[3*i];
- centroid = plorentz[3*i+1];
- fwhm = plorentz[3*i+2];
-
- for (j=0; j<len_x; j++) {
- dhelp = (x[j] - centroid) / (0.5 * fwhm);
- dhelp = 1.0 + (dhelp * dhelp);
- y[j] += height / dhelp;
- }
- }
- return(0);
-}
-
-
-/* sum_alorentz
- Sum of Lorentz functions, defined by (area, centroid, fwhm).
-
- *area* is the area underneath the peak
- *centroid* is the peak's x-coordinate
- *fwhm* is the full-width at half maximum
-
- Parameters:
- -----------
-
- - x: Independent variable where the Lorentzians are calculated.
- - len_x: Number of elements in the x array.
- - plorentz: Array of lorentz function parameters:
- (area1, centroid1, fwhm1, ...)
- - len_plorentz: Number of elements in the plorentz array. Must be
- a multiple of 3.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_alorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y)
-{
- int i, j;
- double dhelp;
- double area, centroid, fwhm;
-
- if (test_params(len_plorentz, 3, "sum_alorentz", "area, centroid, fwhm")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- for (i=0; i<len_plorentz/3; i++) {
- area = plorentz[3*i];
- centroid = plorentz[3*i+1];
- fwhm = plorentz[3*i+2];
-
- for (j=0; j<len_x; j++) {
- dhelp = (x[j] - centroid) / (0.5 * fwhm);
- dhelp = 1.0 + (dhelp * dhelp);
- y[j] += area / (0.5 * M_PI * fwhm * dhelp);
- }
- }
- return(0);
-}
-
-
-/* sum_splitlorentz
- Sum of Lorentz functions, defined by (height, centroid, fwhm1, fwhm2).
-
- *height* is the peak amplitude
- *centroid* is the peak's x-coordinate
- *fwhm1* is the full-width at half maximum for x < centroid
- *fwhm2* is the full-width at half maximum for x > centroid
-
- Parameters:
- -----------
-
- - x: Independent variable where the Lorentzians are calculated.
- - len_x: Number of elements in the x array.
- - plorentz: Array of lorentz function parameters:
- (height1, centroid1, fwhm11, fwhm21 ...)
- - len_plorentz: Number of elements in the plorentz array. Must be
- a multiple of 4.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_splitlorentz(double* x, int len_x, double* plorentz, int len_plorentz, double* y)
-{
- int i, j;
- double dhelp;
- double height, centroid, fwhm1, fwhm2;
-
- if (test_params(len_plorentz, 4, "sum_splitlorentz", "height, centroid, fwhm1, fwhm2")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- for (i=0; i<len_plorentz/4; i++) {
- height = plorentz[4*i];
- centroid = plorentz[4*i+1];
- fwhm1 = plorentz[4*i+2];
- fwhm2 = plorentz[4*i+3];
-
- for (j=0; j<len_x; j++) {
- dhelp = (x[j] - centroid);
- if (dhelp>0) {
- dhelp = dhelp / (0.5 * fwhm2);
- }
- else {
- dhelp = dhelp / (0.5 * fwhm1);
- }
- dhelp = 1.0 + (dhelp * dhelp);
- y[j] += height / dhelp;
- }
- }
- return(0);
-}
-
-/* sum_stepdown
- Sum of stepdown functions, defined by (height, centroid, fwhm).
-
- *height* is the step amplitude
- *centroid* is the step's x-coordinate
- *fwhm* is the full-width at half maximum of the derivative
-
- Parameters:
- -----------
-
- - x: Independent variable where the stepdown functions are calculated.
- - len_x: Number of elements in the x array.
- - pdstep: Array of stepdown function parameters:
- (height1, centroid1, fwhm1, ...)
- - len_pdstep: Number of elements in the pdstep array. Must be
- a multiple of 3.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_stepdown(double* x, int len_x, double* pdstep, int len_pdstep, double* y)
-{
- int i, j;
- double dhelp, sqrt2_inv_2_sqrt_two_log2 ;
- double height, centroid, fwhm;
-
- if (test_params(len_pdstep, 3, "sum_stepdown", "height, centroid, fwhm")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- sqrt2_inv_2_sqrt_two_log2 = sqrt(2.0) / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_pdstep/3; i++) {
- height = pdstep[3*i];
- centroid = pdstep[3*i+1];
- fwhm = pdstep[3*i+2];
-
- for (j=0; j<len_x; j++) {
- dhelp = fwhm * sqrt2_inv_2_sqrt_two_log2;
- dhelp = (x[j] - centroid) / dhelp;
- y[j] += height * 0.5 * erfc(dhelp);
- }
- }
- return(0);
-}
-
-/* sum_stepup
- Sum of stepup functions, defined by (height, centroid, fwhm).
-
- *height* is the step amplitude
- *centroid* is the step's x-coordinate
- *fwhm* is the full-width at half maximum of the derivative
-
- Parameters:
- -----------
-
- - x: Independent variable where the stepup functions are calculated.
- - len_x: Number of elements in the x array.
- - pustep: Array of stepup function parameters:
- (height1, centroid1, fwhm1, ...)
- - len_pustep: Number of elements in the pustep array. Must be
- a multiple of 3.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_stepup(double* x, int len_x, double* pustep, int len_pustep, double* y)
-{
- int i, j;
- double dhelp, sqrt2_inv_2_sqrt_two_log2 ;
- double height, centroid, fwhm;
-
- if (test_params(len_pustep, 3, "sum_stepup", "height, centroid, fwhm")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- sqrt2_inv_2_sqrt_two_log2 = sqrt(2.0) / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_pustep/3; i++) {
- height = pustep[3*i];
- centroid = pustep[3*i+1];
- fwhm = pustep[3*i+2];
-
- for (j=0; j<len_x; j++) {
- dhelp = fwhm * sqrt2_inv_2_sqrt_two_log2;
- dhelp = (x[j] - centroid) / dhelp;
- y[j] += height * 0.5 * (1.0 + erf(dhelp));
- }
- }
- return(0);
-}
-
-
-/* sum_slit
- Sum of slit functions, defined by (height, position, fwhm, beamfwhm).
-
- *height* is the slit height
- *position* is the slit's center x-coordinate
- *fwhm* is the full-width at half maximum of the slit
- *beamfwhm* is the full-width at half maximum of derivative's peaks
-
- Parameters:
- -----------
-
- - x: Independent variable where the slit functions are calculated.
- - len_x: Number of elements in the x array.
- - pslit: Array of slit function parameters:
- (height1, centroid1, fwhm1, beamfwhm1 ...)
- - len_pslit: Number of elements in the pslit array. Must be
- a multiple of 4.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_slit(double* x, int len_x, double* pslit, int len_pslit, double* y)
-{
- int i, j;
- double dhelp, dhelp1, dhelp2, sqrt2_inv_2_sqrt_two_log2, centroid1, centroid2;
- double height, position, fwhm, beamfwhm;
-
- if (test_params(len_pslit, 4, "sum_slit", "height, centroid, fwhm, beamfwhm")) {
- return(1);
- }
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- sqrt2_inv_2_sqrt_two_log2 = sqrt(2.0) / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_pslit/4; i++) {
- height = pslit[4*i];
- position = pslit[4*i+1];
- fwhm = pslit[4*i+2];
- beamfwhm = pslit[4*i+3];
-
- centroid1 = position - 0.5 * fwhm;
- centroid2 = position + 0.5 * fwhm;
-
- for (j=0; j<len_x; j++) {
- dhelp = beamfwhm * sqrt2_inv_2_sqrt_two_log2;
- dhelp1 = (x[j] - centroid1) / dhelp;
- dhelp2 = (x[j] - centroid2) / dhelp;
- y[j] += height * 0.25 * (1.0 + erf(dhelp1)) * erfc(dhelp2);
- }
- }
- return(0);
-}
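The slit profile is the product of an erf step-up at position - fwhm/2 and an erfc step-down at position + fwhm/2, both with edge sharpness set by beamfwhm. A hypothetical numpy sketch (assuming scipy is available for the vectorised erf/erfc; the C code uses its own implementations):

    import numpy
    from scipy.special import erf, erfc

    def slit(x, height, position, fwhm, beamfwhm):
        x = numpy.asarray(x, dtype=numpy.float64)
        # Same scaling as the C loop: beamfwhm * sqrt(2) / (2 * sqrt(2 * ln 2))
        w = beamfwhm * numpy.sqrt(2.0) / (2.0 * numpy.sqrt(2.0 * numpy.log(2.0)))
        step_up = 1.0 + erf((x - (position - 0.5 * fwhm)) / w)
        step_down = erfc((x - (position + 0.5 * fwhm)) / w)
        return height * 0.25 * step_up * step_down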
-
-
-/* sum_ahypermet
- Sum of hypermet functions, defined by
- (area, position, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r).
-
- - *area* is the area underneath the gaussian peak
- - *position* is the center of the various peaks and the position of
- the step down
- - *fwhm* is the full-width at half maximum of the terms
- - *st_area_r* is factor between the gaussian area and the area of the
- short tail term
- - *st_slope_r* is a parameter related to the slope of the short tail
- in the low ``x`` values (the lower, the steeper)
- - *lt_area_r* is factor between the gaussian area and the area of the
- long tail term
- - *lt_slope_r* is a parameter related to the slope of the long tail
- in the low ``x`` values (the lower, the steeper)
- - *step_height_r* is the factor between the height of the step down
- and the gaussian height
-
- Parameters:
- -----------
-
- - x: Independent variable where the functions are calculated.
- - len_x: Number of elements in the x array.
- - phypermet: Array of hypermet function parameters:
- *(area1, position1, fwhm1, st_area_r1, st_slope_r1, lt_area_r1,
- lt_slope_r1, step_height_r1, ...)*
- - len_phypermet: Number of elements in the phypermet array. Must be
- a multiple of 8.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
- - tail_flags: sum of binary flags to activate the various terms of the
- function:
-
- - 1 (b0001): Gaussian term
- - 2 (b0010): st term
- - 4 (b0100): lt term
- - 8 (b1000): step term
-
- E.g., to activate all terms of the hypermet, use ``tail_flags = 1 + 2 + 4 + 8 = 15``
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_ahypermet(double* x, int len_x, double* phypermet, int len_phypermet, double* y, int tail_flags)
-{
- int i, j;
- int g_term_flag, st_term_flag, lt_term_flag, step_term_flag;
- double c1, c2, sigma, height, sigma_sqrt2, sqrt2PI, inv_2_sqrt_2_log2, x_minus_position, epsilon;
- double area, position, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r;
-
- if (test_params(len_phypermet, 8, "sum_hypermet",
- "height, centroid, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r")) {
- return(1);
- }
-
- g_term_flag = tail_flags & 1;
- st_term_flag = (tail_flags>>1) & 1;
- lt_term_flag = (tail_flags>>2) & 1;
- step_term_flag = (tail_flags>>3) & 1;
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- /* define epsilon to compare floating point values with 0. */
- epsilon = 0.00000000001;
-
- sqrt2PI= sqrt(2.0 * M_PI);
- inv_2_sqrt_2_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_phypermet/8; i++) {
- area = phypermet[8*i];
- position = phypermet[8*i+1];
- fwhm = phypermet[8*i+2];
- st_area_r = phypermet[8*i+3];
- st_slope_r = phypermet[8*i+4];
- lt_area_r = phypermet[8*i+5];
- lt_slope_r = phypermet[8*i+6];
- step_height_r = phypermet[8*i+7];
-
- sigma = fwhm * inv_2_sqrt_2_log2;
- height = area / (sigma * sqrt2PI);
-
- /* Prevent division by 0 */
- if (sigma == 0) {
- printf("fwhm must not be equal to 0");
- return(1);
- }
- sigma_sqrt2 = sigma * 1.4142135623730950488;
-
- for (j=0; j<len_x; j++) {
- x_minus_position = x[j] - position;
- c2 = (0.5 * x_minus_position * x_minus_position) / (sigma * sigma);
- /* gaussian term */
- if (g_term_flag) {
- y[j] += exp(-c2) * height;
- }
-
- /* st term */
- if (st_term_flag) {
- if (fabs(st_slope_r) > epsilon) {
- c1 = st_area_r * 0.5 * \
- erfc((x_minus_position/sigma_sqrt2) + 0.5 * sigma_sqrt2 / st_slope_r);
- y[j] += ((area * c1) / st_slope_r) * \
- exp(0.5 * (sigma / st_slope_r) * (sigma / st_slope_r) + \
- (x_minus_position / st_slope_r));
- }
- }
-
- /* lt term */
- if (lt_term_flag) {
- if (fabs(lt_slope_r) > epsilon) {
- c1 = lt_area_r * \
- 0.5 * erfc((x_minus_position/sigma_sqrt2) + 0.5 * sigma_sqrt2 / lt_slope_r);
- y[j] += ((area * c1) / lt_slope_r) * \
- exp(0.5 * (sigma / lt_slope_r) * (sigma / lt_slope_r) + \
- (x_minus_position / lt_slope_r));
- }
- }
-
- /* step term flag */
- if (step_term_flag) {
- y[j] += step_height_r * (area / (sigma * sqrt2PI)) * \
- 0.5 * erfc(x_minus_position / sigma_sqrt2);
- }
- }
- }
- return(0);
-}
-
-/* sum_fastahypermet
-
- Sum of hypermet functions, defined by
- (area, position, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r).
-
- - *area* is the area underneath the gaussian peak
- - *position* is the center of the various peaks and the position of
- the step down
- - *fwhm* is the full-width at half maximum of the terms
- - *st_area_r* is factor between the gaussian area and the area of the
- short tail term
- - *st_slope_r* is a parameter related to the slope of the short tail
- in the low ``x`` values (the lower, the steeper)
- - *lt_area_r* is factor between the gaussian area and the area of the
- long tail term
- - *lt_slope_r* is a parameter related to the slope of the long tail
- in the low ``x`` values (the lower, the steeper)
- - *step_height_r* is the factor between the height of the step down
- and the gaussian height
-
- Parameters:
- -----------
-
- - x: Independent variable where the functions are calculated.
- - len_x: Number of elements in the x array.
- - phypermet: Array of hypermet function parameters:
- *(area1, position1, fwhm1, st_area_r1, st_slope_r1, lt_area_r1,
- lt_slope_r1, step_height_r1, ...)*
- - len_phypermet: Number of elements in the phypermet array. Must be
- a multiple of 8.
- - y: Output array. Must have memory allocated for the same number
- of elements as x (len_x).
- - tail_flags: sum of binary flags to activate the various terms of the
- function:
-
- - 1 (b0001): Gaussian term
- - 2 (b0010): st term
- - 4 (b0100): lt term
- - 8 (b1000): step term
-
- E.g., to activate all terms of the hypermet, use ``tail_flags = 1 + 2 + 4 + 8 = 15``
-
- Adapted from PyMca module SpecFitFuns
-*/
-int sum_fastahypermet(double* x, int len_x, double* phypermet, int len_phypermet, double* y, int tail_flags)
-{
- int i, j;
- int g_term_flag, st_term_flag, lt_term_flag, step_term_flag;
- double c1, c2, sigma, height, sigma_sqrt2, sqrt2PI, inv_2_sqrt_2_log2, x_minus_position, epsilon;
- double area, position, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r;
-
- if (test_params(len_phypermet, 8, "sum_hypermet",
- "height, centroid, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r")) {
- return(1);
- }
-
- g_term_flag = tail_flags & 1;
- st_term_flag = (tail_flags>>1) & 1;
- lt_term_flag = (tail_flags>>2) & 1;
- step_term_flag = (tail_flags>>3) & 1;
-
- /* Initialize output array */
- for (j=0; j<len_x; j++) {
- y[j] = 0.;
- }
-
- /* define epsilon to compare floating point values with 0. */
- epsilon = 0.00000000001;
-
- sqrt2PI= sqrt(2.0 * M_PI);
- inv_2_sqrt_2_log2 = 1.0 / (2.0 * sqrt(2.0 * LOG2));
-
- for (i=0; i<len_phypermet/8; i++) {
- area = phypermet[8*i];
- position = phypermet[8*i+1];
- fwhm = phypermet[8*i+2];
- st_area_r = phypermet[8*i+3];
- st_slope_r = phypermet[8*i+4];
- lt_area_r = phypermet[8*i+5];
- lt_slope_r = phypermet[8*i+6];
- step_height_r = phypermet[8*i+7];
-
- sigma = fwhm * inv_2_sqrt_2_log2;
- height = area / (sigma * sqrt2PI);
-
- /* Prevent division by 0 */
- if (sigma == 0) {
- printf("fwhm must not be equal to 0");
- return(1);
- }
- sigma_sqrt2 = sigma * 1.4142135623730950488;
-
- for (j=0; j<len_x; j++) {
- x_minus_position = x[j] - position;
- c2 = (0.5 * x_minus_position * x_minus_position) / (sigma * sigma);
- /* gaussian term */
- if (g_term_flag && c2 < 100) {
- y[j] += fastexp(-c2) * height;
- }
-
- /* st term */
- if (st_term_flag && (fabs(st_slope_r) > epsilon) && (x_minus_position / st_slope_r) <= 612) {
- c1 = st_area_r * 0.5 * \
- erfc((x_minus_position/sigma_sqrt2) + 0.5 * sigma_sqrt2 / st_slope_r);
- y[j] += ((area * c1) / st_slope_r) * \
- fastexp(0.5 * (sigma / st_slope_r) * (sigma / st_slope_r) +\
- (x_minus_position / st_slope_r));
- }
-
- /* lt term */
- if (lt_term_flag && (fabs(lt_slope_r) > epsilon) && (x_minus_position / lt_slope_r) <= 612) {
- c1 = lt_area_r * \
- 0.5 * erfc((x_minus_position/sigma_sqrt2) + 0.5 * sigma_sqrt2 / lt_slope_r);
- y[j] += ((area * c1) / lt_slope_r) * \
- fastexp(0.5 * (sigma / lt_slope_r) * (sigma / lt_slope_r) +\
- (x_minus_position / lt_slope_r));
-
- }
-
- /* step term flag */
- if (step_term_flag) {
- y[j] += step_height_r * (area / (sigma * sqrt2PI)) *\
- 0.5 * erfc(x_minus_position / sigma_sqrt2);
- }
- }
- }
- return(0);
-}
-
-void pileup(double* x, long len_x, double* ret, int input2, double zero, double gain)
-{
- //int input2=0;
- //double zero=0.0;
- //double gain=1.0;
-
- int i, j, k;
- double *px, *pret, *pall;
-
- /* the pointer to the starting position of par data */
- px = x;
- pret = ret;
-
- *pret = 0;
- k = (int )(zero/gain);
- for (i=input2; i<len_x; i++){
- pall = x;
- if ((i+k) >= 0)
- {
- pret = (double *) ret+(i+k);
- for (j=0; j<len_x-i-k ;j++){
- *pret += *px * (*pall);
- pall++;
- pret++;
- }
- }
- px++;
- }
-}
diff --git a/silx/math/fit/functions_wrapper.pxd b/silx/math/fit/functions_wrapper.pxd
deleted file mode 100644
index 780116c..0000000
--- a/silx/math/fit/functions_wrapper.pxd
+++ /dev/null
@@ -1,170 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "14/06/2016"
-
-cimport cython
-
-cdef extern from "functions.h":
- int erfc_array(double* x,
- int len_x,
- double* y)
-
- int erf_array(double* x,
- int len_x,
- double* y);
-
- void snip1d(double *data,
- int size,
- int width)
-
- void snip2d(double *data,
- int nrows,
- int ncolumns,
- int width)
-
- void snip3d(double *data,
- int nx,
- int ny,
- int nz,
- int width)
-
- int strip(double* input,
- long len_input,
- double c,
- long niter,
- int deltai,
- long* anchors,
- long len_anchors,
- double* output)
-
- int sum_gauss(double* x,
- int len_x,
- double* pgauss,
- int len_pgauss,
- double* y)
-
- int sum_agauss(double* x,
- int len_x,
- double* pgauss,
- int len_pgauss,
- double* y)
-
- int sum_fastagauss(double* x,
- int len_x,
- double* pgauss,
- int len_pgauss,
- double* y)
-
- int sum_splitgauss(double* x,
- int len_x,
- double* pgauss,
- int len_pgauss,
- double* y)
-
- int sum_apvoigt(double* x,
- int len_x,
- double* pvoigt,
- int len_pvoigt,
- double* y)
-
- int sum_pvoigt(double* x,
- int len_x,
- double* pvoigt,
- int len_pvoigt,
- double* y)
-
- int sum_splitpvoigt(double* x,
- int len_x,
- double* pvoigt,
- int len_pvoigt,
- double* y)
-
- int sum_lorentz(double* x,
- int len_x,
- double* plorentz,
- int len_plorentz,
- double* y)
-
- int sum_alorentz(double* x,
- int len_x,
- double* plorentz,
- int len_plorentz,
- double* y)
-
- int sum_splitlorentz(double* x,
- int len_x,
- double* plorentz,
- int len_plorentz,
- double* y)
-
- int sum_stepdown(double* x,
- int len_x,
- double* pdstep,
- int len_pdstep,
- double* y)
-
- int sum_stepup(double* x,
- int len_x,
- double* pustep,
- int len_pustep,
- double* y)
-
- int sum_slit(double* x,
- int len_x,
- double* pslit,
- int len_pslit,
- double* y)
-
- int sum_ahypermet(double* x,
- int len_x,
- double* phypermet,
- int len_phypermet,
- double* y,
- int tail_flags)
-
- int sum_fastahypermet(double* x,
- int len_x,
- double* phypermet,
- int len_phypermet,
- double* y,
- int tail_flags)
-
- long seek(long begin_index,
- long end_index,
- long nsamples,
- double fwhm,
- double sensitivity,
- double debug_info,
- long max_npeaks,
- double * data,
- double * peaks,
- double * relevances)
-
- int SavitskyGolay(double* input,
- long len_input,
- int npoints,
- double* output)
diff --git a/silx/math/fit/leastsq.py b/silx/math/fit/leastsq.py
deleted file mode 100644
index 3df1a35..0000000
--- a/silx/math/fit/leastsq.py
+++ /dev/null
@@ -1,901 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-#
-# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-"""
-This module implements a Levenberg-Marquardt algorithm with constraints on the
-fitted parameters, without introducing any dependency other than numpy.
-
-If the scipy dependency is not an issue, and no constraints are applied to the
-fitting parameters, there is no real gain compared to the use of
-scipy.optimize.curve_fit, other than a more conservative calculation of
-uncertainties on fitted parameters.
-
-This module is a refactored version of PyMca Gefit.py module.
-"""
-__authors__ = ["V.A. Sole"]
-__license__ = "MIT"
-__date__ = "15/05/2017"
-__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
-
-import numpy
-from numpy.linalg import inv
-from numpy.linalg.linalg import LinAlgError
-import time
-import logging
-import copy
-
-_logger = logging.getLogger(__name__)
-
-# codes understood by the routine
-CFREE = 0
-CPOSITIVE = 1
-CQUOTED = 2
-CFIXED = 3
-CFACTOR = 4
-CDELTA = 5
-CSUM = 6
-CIGNORED = 7
-
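As an illustrative sketch only (the parameter layout and values are hypothetical), a constraints array built with these codes for a three-parameter fit could look like this; the meaning of the second and third columns is detailed in the docstring below.

    # height: positive; centroid: bounded between 0 and 100; fwhm: fixed
    constraints = [
        [CPOSITIVE, 0, 0],
        [CQUOTED, 0.0, 100.0],
        [CFIXED, 0, 0],
    ]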
-def leastsq(model, xdata, ydata, p0, sigma=None,
- constraints=None, model_deriv=None, epsfcn=None,
- deltachi=None, full_output=None,
- check_finite=True,
- left_derivative=False,
- max_iter=100):
- """
- Use non-linear least squares Levenberg-Marquardt algorithm to fit a function, f, to
- data with optional constraints on the fitted parameters.
-
- Assumes ``ydata = f(xdata, *params) + eps``
-
- :param model: callable
- The model function, f(x, ...). It must take the independent
- variable as the first argument and the parameters to fit as
- separate remaining arguments.
- The returned value is a one dimensional array of floats.
-
- :param xdata: An M-length sequence.
- The independent variable where the data is measured.
-
- :param ydata: An M-length sequence
- The dependent data --- nominally f(xdata, ...)
-
- :param p0: N-length sequence
- Initial guess for the parameters.
-
- :param sigma: None or M-length sequence, optional
- If not None, the uncertainties in the ydata array. These are used as
- weights in the least-squares problem
- i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``
- If None, the uncertainties are assumed to be 1
-
- :param constraints:
- If provided, it is a 2D sequence of dimension (n_parameters, 3) where,
- for each parameter denoted by the index i, the meaning is
-
- - constraints[i][0]
-
- - 0 - Free (CFREE)
- - 1 - Positive (CPOSITIVE)
- - 2 - Quoted (CQUOTED)
- - 3 - Fixed (CFIXED)
- - 4 - Factor (CFACTOR)
- - 5 - Delta (CDELTA)
- - 6 - Sum (CSUM)
-
-
- - constraints[i][1]
-
- - Ignored if constraints[i][0] is 0, 1, 3
- - Min value of the parameter if constraints[i][0] is CQUOTED
- - Index of fitted parameter to which it is related
-
- - constraints[i][2]
-
- - Ignored if constraints[i][0] is 0, 1, 3
- - Max value of the parameter if constraints[i][0] is CQUOTED
- - Factor to apply to related parameter with index constraints[i][1]
- - Difference with parameter with index constraints[i][1]
- - Sum obtained when adding parameter with index constraints[i][1]
- :type constraints: *optional*, None or 2D sequence
-
- :param model_deriv:
-        None (default) or a function providing the derivatives of the fitting function with respect to the fitted parameters.
-        It will be called as model_deriv(xdata, parameters, index), where parameters is a sequence with the current
-        values of the fitting parameters and index is the index of the fitting parameter for which the derivative has
-        to be provided at the supplied xdata points.
- :type model_deriv: *optional*, None or callable
-
-
- :param epsfcn: float
- A variable used in determining a suitable parameter variation when
- calculating the numerical derivatives (for model_deriv=None).
- Normally the actual step length will be sqrt(epsfcn)*x
-        The original Gefit module used epsfcn=1.0e-5, while the default value
-        is now numpy.finfo(numpy.float64).eps, as in scipy.
- :type epsfcn: *optional*, float
-
- :param deltachi: float
-        A variable used to control the minimum change in chisq below which the
-        fitting process is considered not worth continuing. Default is 0.1 %.
- :type deltachi: *optional*, float
-
- :param full_output: bool, optional
-        non-zero to return all optional outputs. The default is None, which will emit a warning in case
-        of a constrained fit if this keyword has not been set.
-
- :param check_finite: bool, optional
-        If True, check that the input arrays do not contain NaNs or infs,
-        and raise a ValueError if they do. Setting this parameter to
-        False will silently ignore NaN values in the input arrays.
- Default is True.
-
- :param left_derivative:
-        This parameter only has an influence if no derivative function
-        is provided. When True, the left and right derivatives of the
-        model are calculated for each fitted parameter, thus doubling the
-        number of function evaluations. Default is False.
-        The original Gefit module always used left_derivative=True.
- :type left_derivative: *optional*, bool
-
- :param max_iter: Maximum number of iterations (default is 100)
-
-    :return: Returns a tuple of length 2 (or 3 if full_output is True) with the content:
-
- ``popt``: array
- Optimal values for the parameters so that the sum of the squared error
- of ``f(xdata, *popt) - ydata`` is minimized
- ``pcov``: 2d array
- If no constraints are applied, this array contains the estimated covariance
- of popt. The diagonal provides the variance of the parameter estimate.
- To compute one standard deviation errors use ``perr = np.sqrt(np.diag(pcov))``.
- If constraints are applied, this array does not contain the estimated covariance of
- the parameters actually used during the fitting process but the uncertainties after
- recalculating the covariance if all the parameters were free.
- To get the actual uncertainties following error propagation of the actually fitted
- parameters one should set full_output to True and access the uncertainties key.
- ``infodict``: dict
- a dictionary of optional outputs with the keys:
-
- ``uncertainties``
- The actual uncertainty on the optimized parameters.
- ``nfev``
- The number of function calls
- ``fvec``
- The function evaluated at the output
- ``niter``
- The number of iterations performed
- ``chisq``
- The chi square ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``
- ``reduced_chisq``
- The chi square ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )`` divided
- by the number of degrees of freedom ``(M - number_of_free_parameters)``
- """
- function_call_counter = 0
- if numpy.isscalar(p0):
- p0 = [p0]
- parameters = numpy.array(p0, dtype=numpy.float64, copy=False)
- if deltachi is None:
- deltachi = 0.001
-
- # NaNs can not be handled
- if check_finite:
- xdata = numpy.asarray_chkfinite(xdata)
- ydata = numpy.asarray_chkfinite(ydata)
- if sigma is not None:
- sigma = numpy.asarray_chkfinite(sigma)
- else:
- sigma = numpy.ones((ydata.shape), dtype=numpy.float64)
- ydata.shape = -1
- sigma.shape = -1
- else:
- ydata = numpy.asarray(ydata)
- xdata = numpy.asarray(xdata)
- ydata.shape = -1
- if sigma is not None:
- sigma = numpy.asarray(sigma)
- else:
- sigma = numpy.ones((ydata.shape), dtype=numpy.float64)
- sigma.shape = -1
- # get rid of NaN in input data
- idx = numpy.isfinite(ydata)
- if False in idx:
- # xdata must have a shape able to be understood by the user function
- # in principle, one should not need to change it, however, if there are
- # points to be excluded, one has to be able to exclude them.
- # We can only hope that the sequence is properly arranged
- if xdata.size == ydata.size:
- if len(xdata.shape) != 1:
- msg = "Need to reshape input xdata."
- _logger.warning(msg)
- xdata.shape = -1
- else:
- raise ValueError("Cannot reshape xdata to deal with NaN in ydata")
- ydata = ydata[idx]
- xdata = xdata[idx]
- sigma = sigma[idx]
- idx = numpy.isfinite(sigma)
- if False in idx:
- # xdata must have a shape able to be understood by the user function
- # in principle, one should not need to change it, however, if there are
- # points to be excluded, one has to be able to exclude them.
- # We can only hope that the sequence is properly arranged
- ydata = ydata[idx]
- xdata = xdata[idx]
- sigma = sigma[idx]
- idx = numpy.isfinite(xdata)
- filter_xdata = False
- if False in idx:
- # What to do?
- try:
- # Let's see if the function is able to deal with non-finite data
- msg = "Checking if function can deal with non-finite data"
- _logger.debug(msg)
- evaluation = model(xdata, *parameters)
- function_call_counter += 1
- if evaluation.shape != ydata.shape:
- if evaluation.size == ydata.size:
- msg = "Supplied function does not return a proper array of floats."
- msg += "\nFunction should be rewritten to return a 1D array of floats."
- msg += "\nTrying to reshape output."
- _logger.warning(msg)
- evaluation.shape = ydata.shape
- if False in numpy.isfinite(evaluation):
- msg = "Supplied function unable to handle non-finite x data"
- msg += "\nAttempting to filter out those x data values."
- _logger.warning(msg)
- filter_xdata = True
- else:
- filter_xdata = False
- evaluation = None
- except:
- # function cannot handle input data
- filter_xdata = True
- if filter_xdata:
- if xdata.size != ydata.size:
- raise ValueError("xdata contains non-finite data that cannot be filtered")
- else:
- # we leave the xdata as they where
- old_shape = xdata.shape
- xdata.shape = ydata.shape
- idx0 = numpy.isfinite(xdata)
- xdata.shape = old_shape
- ydata = ydata[idx0]
- xdata = xdata[idx]
- sigma = sigma[idx0]
- weight = 1.0 / (sigma + numpy.equal(sigma, 0))
- weight0 = weight * weight
-
- nparameters = len(parameters)
-
- if epsfcn is None:
- epsfcn = numpy.finfo(numpy.float64).eps
- else:
- epsfcn = max(epsfcn, numpy.finfo(numpy.float64).eps)
-
- # check if constraints have been passed as text
- constrained_fit = False
- if constraints is not None:
- # make sure we work with a list of lists
- input_constraints = constraints
- tmp_constraints = [None] * len(input_constraints)
- for i in range(nparameters):
- tmp_constraints[i] = list(input_constraints[i])
- constraints = tmp_constraints
- for i in range(nparameters):
- if hasattr(constraints[i][0], "upper"):
- txt = constraints[i][0].upper()
- if txt == "FREE":
- constraints[i][0] = CFREE
- elif txt == "POSITIVE":
- constraints[i][0] = CPOSITIVE
- elif txt == "QUOTED":
- constraints[i][0] = CQUOTED
- elif txt == "FIXED":
- constraints[i][0] = CFIXED
- elif txt == "FACTOR":
- constraints[i][0] = CFACTOR
- constraints[i][1] = int(constraints[i][1])
- elif txt == "DELTA":
- constraints[i][0] = CDELTA
- constraints[i][1] = int(constraints[i][1])
- elif txt == "SUM":
- constraints[i][0] = CSUM
- constraints[i][1] = int(constraints[i][1])
- elif txt in ["IGNORED", "IGNORE"]:
- constraints[i][0] = CIGNORED
- else:
- #I should raise an exception
- raise ValueError("Unknown constraint %s" % constraints[i][0])
- if constraints[i][0] > 0:
- constrained_fit = True
- if constrained_fit:
- if full_output is None:
- _logger.info("Recommended to set full_output to True when using constraints")
-
- # Levenberg-Marquardt algorithm
- fittedpar = parameters.__copy__()
- flambda = 0.001
- iiter = max_iter
- #niter = 0
- last_evaluation=None
- x = xdata
- y = ydata
- chisq0 = -1
- iteration_counter = 0
- while (iiter > 0):
- weight = weight0
- """
-        The initial chisq cannot be evaluated here because it is not known whether
-        some parameters are to be ignored; otherwise it could be done as follows:
- if last_evaluation is None:
- yfit = model(x, *fittedpar)
- last_evaluation = yfit
- chisq0 = (weight * pow(y-yfit, 2)).sum()
- and chisq would not need to be recalculated.
-        Passing last_evaluation assumes that the set of ignored parameters does
-        not change between calls.
- """
- iteration_counter += 1
- chisq0, alpha0, beta, internal_output = chisq_alpha_beta(
- model, fittedpar,
- x, y, weight, constraints=constraints,
- model_deriv=model_deriv,
- epsfcn=epsfcn,
- left_derivative=left_derivative,
- last_evaluation=last_evaluation,
- full_output=True)
- n_free = internal_output["n_free"]
- free_index = internal_output["free_index"]
- noigno = internal_output["noigno"]
- fitparam = internal_output["fitparam"]
- function_calls = internal_output["function_calls"]
- function_call_counter += function_calls
- #print("chisq0 = ", chisq0, n_free, fittedpar)
- #raise
- nr, nc = alpha0.shape
- flag = 0
- #lastdeltachi = chisq0
- while flag == 0:
- alpha = alpha0 * (1.0 + flambda * numpy.identity(nr))
- deltapar = numpy.dot(beta, inv(alpha))
- if constraints is None:
- newpar = fitparam + deltapar [0]
- else:
- newpar = parameters.__copy__()
- pwork = numpy.zeros(deltapar.shape, numpy.float64)
- for i in range(n_free):
- if constraints is None:
- pwork [0] [i] = fitparam [i] + deltapar [0] [i]
- elif constraints [free_index[i]][0] == CFREE:
- pwork [0] [i] = fitparam [i] + deltapar [0] [i]
- elif constraints [free_index[i]][0] == CPOSITIVE:
- #abs method
- pwork [0] [i] = fitparam [i] + deltapar [0] [i]
- #square method
- #pwork [0] [i] = (numpy.sqrt(fitparam [i]) + deltapar [0] [i]) * \
- # (numpy.sqrt(fitparam [i]) + deltapar [0] [i])
- elif constraints[free_index[i]][0] == CQUOTED:
- pmax = max(constraints[free_index[i]][1],
- constraints[free_index[i]][2])
- pmin = min(constraints[free_index[i]][1],
- constraints[free_index[i]][2])
- A = 0.5 * (pmax + pmin)
- B = 0.5 * (pmax - pmin)
- if B != 0:
- pwork [0] [i] = A + \
- B * numpy.sin(numpy.arcsin((fitparam[i] - A)/B)+ \
- deltapar [0] [i])
- else:
- txt = "Error processing constrained fit\n"
- txt += "Parameter limits are %g and %g\n" % (pmin, pmax)
- txt += "A = %g B = %g" % (A, B)
-                        raise ValueError(txt)
- newpar[free_index[i]] = pwork [0] [i]
- newpar = numpy.array(_get_parameters(newpar, constraints))
- workpar = numpy.take(newpar, noigno)
- yfit = model(x, *workpar)
- if last_evaluation is None:
- if len(yfit.shape) > 1:
- msg = "Supplied function does not return a 1D array of floats."
- msg += "\nFunction should be rewritten."
- msg += "\nTrying to reshape output."
- _logger.warning(msg)
- yfit.shape = -1
- function_call_counter += 1
- chisq = (weight * pow(y-yfit, 2)).sum()
- absdeltachi = chisq0 - chisq
- if absdeltachi < 0:
- flambda *= 10.0
- if flambda > 1000:
- flag = 1
- iiter = 0
- else:
- flag = 1
- fittedpar = newpar.__copy__()
- lastdeltachi = 100 * (absdeltachi / (chisq + (chisq == 0)))
- if iteration_counter < 2:
- # ignore any limit, the fit *has* to be improved
- pass
- elif (lastdeltachi) < deltachi:
- iiter = 0
- elif absdeltachi < numpy.sqrt(epsfcn):
- iiter = 0
- _logger.info("Iteration finished due to too small absolute chi decrement")
- chisq0 = chisq
- flambda = flambda / 10.0
- last_evaluation = yfit
- iiter = iiter - 1
- # this is the covariance matrix of the actually fitted parameters
- cov0 = inv(alpha0)
- if constraints is None:
- cov = cov0
- else:
- # yet another call needed with all the parameters being free except those
- # that are FIXED and that will be assigned a 100 % uncertainty.
- new_constraints = copy.deepcopy(constraints)
- flag_special = [0] * len(fittedpar)
- for idx, constraint in enumerate(constraints):
- if constraints[idx][0] in [CFIXED, CIGNORED]:
- flag_special[idx] = constraints[idx][0]
- else:
- new_constraints[idx][0] = CFREE
- new_constraints[idx][1] = 0
- new_constraints[idx][2] = 0
- chisq, alpha, beta, internal_output = chisq_alpha_beta(
- model, fittedpar,
- x, y, weight, constraints=new_constraints,
- model_deriv=model_deriv,
- epsfcn=epsfcn,
- left_derivative=left_derivative,
- last_evaluation=last_evaluation,
- full_output=True)
- # obtained chisq should be identical to chisq0
- try:
- cov = inv(alpha)
- except LinAlgError:
- _logger.critical("Error calculating covariance matrix after successful fit")
- cov = None
- if cov is not None:
- for idx, value in enumerate(flag_special):
- if value in [CFIXED, CIGNORED]:
- cov = numpy.insert(numpy.insert(cov, idx, 0, axis=1), idx, 0, axis=0)
- cov[idx, idx] = fittedpar[idx] * fittedpar[idx]
-
- if not full_output:
- return fittedpar, cov
- else:
- sigma0 = numpy.sqrt(abs(numpy.diag(cov0)))
- sigmapar = _get_sigma_parameters(fittedpar, sigma0, constraints)
- ddict = {}
- ddict["chisq"] = chisq0
- ddict["reduced_chisq"] = chisq0 / (len(yfit)-n_free)
- ddict["covariance"] = cov0
- ddict["uncertainties"] = sigmapar
- ddict["fvec"] = last_evaluation
- ddict["nfev"] = function_call_counter
- ddict["niter"] = iteration_counter
- return fittedpar, cov, ddict #, chisq/(len(yfit)-len(sigma0)), sigmapar,niter,lastdeltachi
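# Editor's note: a minimal usage sketch of the leastsq() function documented above,
# fitting a constant-plus-Gaussian model with one FIXED and one QUOTED parameter.
# The model, data and starting values are illustrative assumptions; only the
# leastsq() signature and the constraint codes come from the docstring above.
import numpy
from silx.math.fit.leastsq import leastsq, CFREE, CFIXED, CQUOTED

def model(x, constant, height, center, fwhm):
    sigma = fwhm / 2.3548200450309493
    return constant + height * numpy.exp(-0.5 * ((x - center) / sigma) ** 2)

x = numpy.arange(200, dtype=numpy.float64)
y = model(x, 5.0, 120.0, 80.0, 12.0)

# One [code, cons1, cons2] row per parameter.
constraints = [
    [CFIXED, 0, 0],          # constant term kept at its starting value
    [CFREE, 0, 0],           # height is free
    [CQUOTED, 60.0, 100.0],  # center restricted to the [60, 100] range
    [CFREE, 0, 0],           # fwhm is free
]

p0 = [5.0, 100.0, 70.0, 10.0]
popt, pcov, info = leastsq(model, x, y, p0,
                           constraints=constraints,
                           full_output=True)
print("fitted parameters:", popt)
print("uncertainties:", info["uncertainties"])
print("chi square:", info["chisq"])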
-
-def chisq_alpha_beta(model, parameters, x, y, weight, constraints=None,
- model_deriv=None, epsfcn=None, left_derivative=False,
- last_evaluation=None, full_output=False):
-
- """
- Get chi square, the curvature matrix alpha and the matrix beta according to the input parameters.
- If all the parameters are unconstrained, the covariance matrix is the inverse of the alpha matrix.
-
- :param model: callable
- The model function, f(x, ...). It must take the independent
- variable as the first argument and the parameters to fit as
- separate remaining arguments.
- The returned value is a one dimensional array of floats.
-
- :param parameters: N-length sequence
- Values of parameters at which function and derivatives are to be calculated.
-
- :param x: An M-length sequence.
- The independent variable where the data is measured.
-
- :param y: An M-length sequence
- The dependent data --- nominally f(xdata, ...)
-
- :param weight: M-length sequence
-        Weights to be applied in the calculation of chi square.
-        As a reminder, ``chisq = np.sum(weight * (model(x, *parameters) - y)**2)``
-
- :param constraints:
- If provided, it is a 2D sequence of dimension (n_parameters, 3) where,
- for each parameter denoted by the index i, the meaning is
-
- - constraints[i][0]
-
- - 0 - Free (CFREE)
- - 1 - Positive (CPOSITIVE)
- - 2 - Quoted (CQUOTED)
- - 3 - Fixed (CFIXED)
- - 4 - Factor (CFACTOR)
- - 5 - Delta (CDELTA)
- - 6 - Sum (CSUM)
-
-
- - constraints[i][1]
-
- - Ignored if constraints[i][0] is 0, 1, 3
- - Min value of the parameter if constraints[i][0] is CQUOTED
- - Index of fitted parameter to which it is related
-
- - constraints[i][2]
-
- - Ignored if constraints[i][0] is 0, 1, 3
- - Max value of the parameter if constraints[i][0] is CQUOTED
- - Factor to apply to related parameter with index constraints[i][1]
- - Difference with parameter with index constraints[i][1]
- - Sum obtained when adding parameter with index constraints[i][1]
- :type constraints: *optional*, None or 2D sequence
-
- :param model_deriv:
-        None (default) or a function providing the derivatives of the fitting function with respect to the fitted parameters.
-        It will be called as model_deriv(xdata, parameters, index), where parameters is a sequence with the current
-        values of the fitting parameters and index is the index of the fitting parameter for which the derivative has
-        to be provided at the supplied xdata points.
- :type model_deriv: *optional*, None or callable
-
-
- :param epsfcn: float
- A variable used in determining a suitable parameter variation when
- calculating the numerical derivatives (for model_deriv=None).
- Normally the actual step length will be sqrt(epsfcn)*x
-        The original Gefit module used epsfcn=1.0e-10, while the default value
-        is now numpy.finfo(numpy.float64).eps, as in scipy.
- :type epsfcn: *optional*, float
-
- :param left_derivative:
-        This parameter only has an influence if no derivative function
-        is provided. When True, the left and right derivatives of the
-        model are calculated for each fitted parameter, thus doubling the
-        number of function evaluations. Default is False.
-        The original Gefit module always used left_derivative=True.
- :type left_derivative: *optional*, bool
-
- :param last_evaluation: An M-length array
- Used for optimization purposes. If supplied, this array will be taken as the result of
- evaluating the function, that is as the result of ``model(x, *parameters)`` thus avoiding
- the evaluation call.
-
- :param full_output: bool, optional
-        If True, return an additional dictionary, used for internal purposes, with the keys:
- ``function_calls``
- The number of model function calls performed.
- ``fitparam``
- A sequence with the actual free parameters
- ``free_index``
- Sequence with the indices of the free parameters in input parameters sequence.
- ``noigno``
- Sequence with the indices of the original parameters considered in the calculations.
- """
- if epsfcn is None:
- epsfcn = numpy.finfo(numpy.float64).eps
- else:
- epsfcn = max(epsfcn, numpy.finfo(numpy.float64).eps)
- #nr0, nc = data.shape
- n_param = len(parameters)
- if constraints is None:
- derivfactor = numpy.ones((n_param, ))
- n_free = n_param
- noigno = numpy.arange(n_param)
- free_index = noigno * 1
- fitparam = parameters * 1
- else:
- n_free = 0
- fitparam = []
- free_index = []
- noigno = []
- derivfactor = []
- for i in range(n_param):
- if constraints[i][0] != CIGNORED:
- noigno.append(i)
- if constraints[i][0] == CFREE:
- fitparam.append(parameters [i])
- derivfactor.append(1.0)
- free_index.append(i)
- n_free += 1
- elif constraints[i][0] == CPOSITIVE:
- fitparam.append(abs(parameters[i]))
- derivfactor.append(1.0)
- #fitparam.append(numpy.sqrt(abs(parameters[i])))
- #derivfactor.append(2.0*numpy.sqrt(abs(parameters[i])))
- free_index.append(i)
- n_free += 1
- elif constraints[i][0] == CQUOTED:
- pmax = max(constraints[i][1], constraints[i][2])
-                    pmin = min(constraints[i][1], constraints[i][2])
- if ((pmax-pmin) > 0) & \
- (parameters[i] <= pmax) & \
- (parameters[i] >= pmin):
- A = 0.5 * (pmax + pmin)
- B = 0.5 * (pmax - pmin)
- fitparam.append(parameters[i])
- derivfactor.append(B*numpy.cos(numpy.arcsin((parameters[i] - A)/B)))
- free_index.append(i)
- n_free += 1
- elif (pmax-pmin) > 0:
- print("WARNING: Quoted parameter outside boundaries")
- print("Initial value = %f" % parameters[i])
- print("Limits are %f and %f" % (pmin, pmax))
- print("Parameter will be kept at its starting value")
- fitparam = numpy.array(fitparam, numpy.float64)
- alpha = numpy.zeros((n_free, n_free), numpy.float64)
- beta = numpy.zeros((1, n_free), numpy.float64)
- #delta = (fitparam + numpy.equal(fitparam, 0.0)) * 0.00001
- delta = (fitparam + numpy.equal(fitparam, 0.0)) * numpy.sqrt(epsfcn)
- nr = y.size
- ##############
- # Prior to each call to the function one has to re-calculate the
- # parameters
- pwork = parameters.__copy__()
- for i in range(n_free):
- pwork [free_index[i]] = fitparam [i]
- if n_free == 0:
- raise ValueError("No free parameters to fit")
- function_calls = 0
- if not left_derivative:
- if last_evaluation is not None:
- f2 = last_evaluation
- else:
- f2 = model(x, *parameters)
- f2.shape = -1
- function_calls += 1
- for i in range(n_free):
- if model_deriv is None:
- #pwork = parameters.__copy__()
- pwork[free_index[i]] = fitparam [i] + delta [i]
- newpar = _get_parameters(pwork.tolist(), constraints)
- newpar = numpy.take(newpar, noigno)
- f1 = model(x, *newpar)
- f1.shape = -1
- function_calls += 1
- if left_derivative:
- pwork[free_index[i]] = fitparam [i] - delta [i]
- newpar = _get_parameters(pwork.tolist(), constraints)
-                newpar = numpy.take(newpar, noigno)
- f2 = model(x, *newpar)
- function_calls += 1
- help0 = (f1 - f2) / (2.0 * delta[i])
- else:
- help0 = (f1 - f2) / (delta[i])
- help0 = help0 * derivfactor[i]
- pwork[free_index[i]] = fitparam [i]
- #removed I resize outside the loop:
- #help0 = numpy.resize(help0, (1, nr))
- else:
- help0 = model_deriv(x, pwork, free_index[i])
- help0 = help0 * derivfactor[i]
-
- if i == 0:
- deriv = help0
- else:
- deriv = numpy.concatenate((deriv, help0), 0)
-
- #line added to resize outside the loop
- deriv = numpy.resize(deriv, (n_free, nr))
- if last_evaluation is None:
- if constraints is None:
- yfit = model(x, *fitparam)
- yfit.shape = -1
- else:
- newpar = _get_parameters(pwork.tolist(), constraints)
- newpar = numpy.take(newpar, noigno)
- yfit = model(x, *newpar)
- yfit.shape = -1
- function_calls += 1
- else:
- yfit = last_evaluation
- deltay = y - yfit
- help0 = weight * deltay
- for i in range(n_free):
- derivi = numpy.resize(deriv[i, :], (1, nr))
- help1 = numpy.resize(numpy.sum((help0 * derivi), 1), (1, 1))
- if i == 0:
- beta = help1
- else:
- beta = numpy.concatenate((beta, help1), 1)
- help1 = numpy.inner(deriv, weight*derivi)
- if i == 0:
- alpha = help1
- else:
- alpha = numpy.concatenate((alpha, help1), 1)
- chisq = (help0 * deltay).sum()
- if full_output:
- ddict = {}
- ddict["n_free"] = n_free
- ddict["free_index"] = free_index
- ddict["noigno"] = noigno
- ddict["fitparam"] = fitparam
- ddict["derivfactor"] = derivfactor
- ddict["function_calls"] = function_calls
- return chisq, alpha, beta, ddict
- else:
- return chisq, alpha, beta
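# Editor's note: a small sketch showing how chisq_alpha_beta() can be called
# directly for an unconstrained model; the straight-line model and the noisy data
# are illustrative assumptions. For a free fit, the covariance matrix of the
# parameters is the inverse of the returned curvature matrix alpha.
import numpy
from numpy.linalg import inv
from silx.math.fit.leastsq import chisq_alpha_beta

def line(x, a, b):
    return a + b * x

x = numpy.linspace(0.0, 10.0, 50)
y = line(x, 1.0, 2.0) + 0.1 * numpy.random.normal(size=x.size)
sigma = 0.1 * numpy.ones_like(y)
weight = 1.0 / (sigma * sigma)        # chisq weights, i.e. 1 / sigma**2

p = numpy.array([0.5, 1.5])           # parameters must be a numpy array
chisq, alpha, beta = chisq_alpha_beta(line, p, x, y, weight)
cov = inv(alpha)                      # valid covariance estimate: no constraints
print("chi square:", chisq, "parameter variances:", numpy.diag(cov))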
-
-
-def _get_parameters(parameters, constraints):
- """
- Apply constraints to input parameters.
-
-    Parameters that do not depend on other parameters are returned unchanged.
-
-    For parameters that depend on other parameters, the returned value is obtained
-    by applying the relation to the parameter to which they are related.
- """
- # 0 = Free 1 = Positive 2 = Quoted
- # 3 = Fixed 4 = Factor 5 = Delta
- if constraints is None:
- return parameters * 1
- newparam = []
- #first I make the free parameters
- #because the quoted ones put troubles
- for i in range(len(constraints)):
- if constraints[i][0] == CFREE:
- newparam.append(parameters[i])
- elif constraints[i][0] == CPOSITIVE:
- #newparam.append(parameters[i] * parameters[i])
- newparam.append(abs(parameters[i]))
- elif constraints[i][0] == CQUOTED:
- newparam.append(parameters[i])
- elif abs(constraints[i][0]) == CFIXED:
- newparam.append(parameters[i])
- else:
- newparam.append(parameters[i])
- for i in range(len(constraints)):
- if constraints[i][0] == CFACTOR:
- newparam[i] = constraints[i][2] * newparam[int(constraints[i][1])]
- elif constraints[i][0] == CDELTA:
- newparam[i] = constraints[i][2] + newparam[int(constraints[i][1])]
- elif constraints[i][0] == CIGNORED:
- # The whole ignored stuff should not be documented because setting
- # a parameter to 0 is not the same as being ignored.
- # Being ignored should imply the parameter is simply not accounted for
- # and should be stripped out of the list of parameters by the program
- # using this module
- newparam[i] = 0
- elif constraints[i][0] == CSUM:
- newparam[i] = constraints[i][2]-newparam[int(constraints[i][1])]
- return newparam
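# Editor's note: an illustrative sketch of how the FACTOR, DELTA and SUM constraint
# codes tie parameters together, using the _get_parameters() helper defined above.
# The parameter values are arbitrary assumptions.
from silx.math.fit.leastsq import (_get_parameters,
                                   CFREE, CFACTOR, CDELTA, CSUM)

params = [10.0, 0.0, 0.0, 0.0]
constraints = [
    [CFREE, 0, 0],       # p0 is free
    [CFACTOR, 0, 2.0],   # p1 = 2.0 * p0
    [CDELTA, 0, 5.0],    # p2 = p0 + 5.0
    [CSUM, 0, 25.0],     # p3 = 25.0 - p0
]
print(_get_parameters(params, constraints))   # -> [10.0, 20.0, 15.0, 15.0]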
-
-
-def _get_sigma_parameters(parameters, sigma0, constraints):
- """
-    Internal function propagating the uncertainty of the actually fitted parameters,
-    and of related parameters, to the final parameters, taking the applied constraints
-    into account.
-
- Parameters
- ----------
- parameters : 1D sequence of length equal to the number of free parameters N
- The parameters actually used in the fitting process.
- sigma0 : 1D sequence of length N
- Uncertainties calculated as the square-root of the diagonal of
- the covariance matrix
- constraints : The set of constraints applied in the fitting process
- """
- # 0 = Free 1 = Positive 2 = Quoted
- # 3 = Fixed 4 = Factor 5 = Delta
- if constraints is None:
- return sigma0
- n_free = 0
- sigma_par = numpy.zeros(parameters.shape, numpy.float64)
- for i in range(len(constraints)):
- if constraints[i][0] == CFREE:
- sigma_par [i] = sigma0[n_free]
- n_free += 1
- elif constraints[i][0] == CPOSITIVE:
- #sigma_par [i] = 2.0 * sigma0[n_free]
- sigma_par [i] = sigma0[n_free]
- n_free += 1
- elif constraints[i][0] == CQUOTED:
- pmax = max(constraints [i][1], constraints [i][2])
- pmin = min(constraints [i][1], constraints [i][2])
- # A = 0.5 * (pmax + pmin)
- B = 0.5 * (pmax - pmin)
- if (B > 0) & (parameters [i] < pmax) & (parameters [i] > pmin):
- sigma_par [i] = abs(B * numpy.cos(parameters[i]) * sigma0[n_free])
- n_free += 1
- else:
- sigma_par [i] = parameters[i]
- elif abs(constraints[i][0]) == CFIXED:
- sigma_par[i] = parameters[i]
- for i in range(len(constraints)):
- if constraints[i][0] == CFACTOR:
- sigma_par [i] = constraints[i][2]*sigma_par[int(constraints[i][1])]
- elif constraints[i][0] == CDELTA:
- sigma_par [i] = sigma_par[int(constraints[i][1])]
- elif constraints[i][0] == CSUM:
- sigma_par [i] = sigma_par[int(constraints[i][1])]
- return sigma_par
-
-
-def main(argv=None):
- if argv is None:
- npoints = 10000
- elif hasattr(argv, "__len__"):
- if len(argv) > 1:
- npoints = int(argv[1])
- else:
- print("Usage:")
- print("fit [npoints]")
- else:
- # expected a number
- npoints = argv
-
- def gauss(t0, *param0):
- param = numpy.array(param0)
- t = numpy.array(t0)
- dummy = 2.3548200450309493 * (t - param[3]) / param[4]
- return param[0] + param[1] * t + param[2] * myexp(-0.5 * dummy * dummy)
-
-
- def myexp(x):
- # put a (bad) filter to avoid over/underflows
- # with no python looping
- return numpy.exp(x * numpy.less(abs(x), 250)) -\
- 1.0 * numpy.greater_equal(abs(x), 250)
-
- xx = numpy.arange(npoints, dtype=numpy.float64)
- yy = gauss(xx, *[10.5, 2, 1000.0, 20., 15])
- sy = numpy.sqrt(abs(yy))
- parameters = [0.0, 1.0, 900.0, 25., 10]
- stime = time.time()
-
- fittedpar, cov, ddict = leastsq(gauss, xx, yy, parameters,
- sigma=sy,
- left_derivative=False,
- full_output=True,
- check_finite=True)
- etime = time.time()
- sigmapars = numpy.sqrt(numpy.diag(cov))
- print("Took ", etime - stime, "seconds")
- print("Function calls = ", ddict["nfev"])
- print("chi square = ", ddict["chisq"])
- print("Fitted pars = ", fittedpar)
- print("Sigma pars = ", sigmapars)
- try:
- from scipy.optimize import curve_fit as cfit
- SCIPY = True
- except ImportError:
- SCIPY = False
- if SCIPY:
- counter = 0
- stime = time.time()
- scipy_fittedpar, scipy_cov = cfit(gauss,
- xx,
- yy,
- parameters,
- sigma=sy)
- etime = time.time()
- print("Scipy Took ", etime - stime, "seconds")
- print("Counter = ", counter)
- print("scipy = ", scipy_fittedpar)
- print("Sigma = ", numpy.sqrt(numpy.diag(scipy_cov)))
-
-if __name__ == "__main__":
- main()
diff --git a/silx/math/fit/peaks.pyx b/silx/math/fit/peaks.pyx
deleted file mode 100644
index a4fce89..0000000
--- a/silx/math/fit/peaks.pyx
+++ /dev/null
@@ -1,175 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-"""This module provides a peak search function and tools related to peak
-analysis.
-"""
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "15/05/2017"
-
-import logging
-import numpy
-
-from silx.math.fit import filters
-
-_logger = logging.getLogger(__name__)
-
-cimport cython
-from libc.stdlib cimport free
-
-cimport silx.math.fit.peaks_wrapper as peaks_wrapper
-
-
-def peak_search(y, fwhm, sensitivity=3.5,
- begin_index=None, end_index=None,
- debug=False, relevance_info=False):
- """Find peaks in a curve.
-
- :param y: Data array
- :type y: numpy.ndarray
- :param fwhm: Estimated full width at half maximum of the typical peaks we
- are interested in (expressed in number of samples)
- :param sensitivity: Threshold factor used for peak detection. Only peaks
- with amplitudes higher than ``σ * sensitivity`` - where ``σ`` is the
- standard deviation of the noise - qualify as peaks.
- :param begin_index: Index of the first sample of the region of interest
- in the ``y`` array. If ``None``, start from the first sample.
- :param end_index: Index of the last sample of the region of interest in
- the ``y`` array. If ``None``, process until the last sample.
- :param debug: If ``True``, print debug messages. Default: ``False``
- :param relevance_info: If ``True``, add a second dimension with relevance
- information to the output array. Default: ``False``
- :return: 1D sequence with indices of peaks in the data
- if ``relevance_info`` is ``False``.
- Else, sequence of ``(peak_index, peak_relevance)`` tuples (one tuple
- per peak).
-    :raise: ``MemoryError`` if the output arrays cannot be allocated or extended
-        to hold all detected peaks.
- """
- cdef:
- int i
- double[::1] y_c
- double* peaks_c
- double* relevances_c
-
- y_c = numpy.array(y,
- copy=True,
- dtype=numpy.float64,
- order='C').reshape(-1)
- if debug:
- debug = 1
- else:
- debug = 0
-
- if begin_index is None:
- begin_index = 0
- if end_index is None:
- end_index = y_c.size - 1
-
- n_peaks = peaks_wrapper.seek(begin_index, end_index, y_c.size,
- fwhm, sensitivity, debug,
- &y_c[0], &peaks_c, &relevances_c)
-
-
- # A negative return value means that peaks were found but not enough
- # memory could be allocated for all
- if n_peaks < 0 and n_peaks != -123456:
- msg = "Before memory allocation error happened, "
- msg += "we found %d peaks.\n" % abs(n_peaks)
- _logger.debug(msg)
- msg = ""
- for i in range(abs(n_peaks)):
- msg += "peak index %f, " % peaks_c[i]
- msg += "relevance %f\n" % relevances_c[i]
- _logger.debug(msg)
- free(peaks_c)
- free(relevances_c)
- raise MemoryError("Failed to reallocate memory for output arrays")
- # Special value -123456 is returned if the initial memory allocation
- # fails, before any search could be performed
- elif n_peaks == -123456:
- raise MemoryError("Failed to allocate initial memory for " +
- "output arrays")
-
- peaks = numpy.empty(shape=(n_peaks,),
- dtype=numpy.float64)
- relevances = numpy.empty(shape=(n_peaks,),
- dtype=numpy.float64)
-
- for i in range(n_peaks):
- peaks[i] = peaks_c[i]
- relevances[i] = relevances_c[i]
-
- free(peaks_c)
- free(relevances_c)
-
- if not relevance_info:
- return peaks
- else:
- return list(zip(peaks, relevances))
-
-
-def guess_fwhm(y):
- """Return the full-width at half maximum for the largest peak in
- the data array.
-
- The algorithm removes the background, then finds a global maximum
- and its corresponding FWHM.
-
-    This value can be used as an initial fit parameter, for instance as input
-    for an iterative fit function.
-
- :param y: Data to be used for guessing the fwhm.
- :return: Estimation of full-width at half maximum, based on fwhm of
- the global maximum.
- """
-    # set a minimum value for the fwhm
- fwhm_min = 4
-
- # remove data background (computed with a strip filter)
- background = filters.strip(y, w=1, niterations=1000)
- yfit = y - background
-
- # basic peak search: find the global maximum
- maximum = max(yfit)
- # find indices of all values == maximum
- idx = numpy.nonzero(yfit == maximum)[0]
- # take the last one (if any)
- if not len(idx):
- return 0
- posindex = idx[-1]
- height = yfit[posindex]
-
- # now find the width of the peak at half maximum
- imin = posindex
- while yfit[imin] > 0.5 * height and imin > 0:
- imin -= 1
- imax = posindex
- while yfit[imax] > 0.5 * height and imax < len(yfit) - 1:
- imax += 1
-
- fwhm = max(imax - imin - 1, fwhm_min)
-
- return fwhm
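# Editor's note: a short usage sketch of the peak_search() and guess_fwhm()
# functions defined above, on a synthetic two-peak spectrum built with sum_gauss()
# (the synthetic peak parameters are illustrative assumptions).
import numpy
from silx.math.fit.functions import sum_gauss
from silx.math.fit.peaks import peak_search, guess_fwhm

x = numpy.arange(1000, dtype=numpy.float64)
# two Gaussians, given as (height, center, fwhm) triplets
y = sum_gauss(x, 100., 300., 30., 60., 700., 50.)

fwhm_guess = guess_fwhm(y)
found = peak_search(y, fwhm=fwhm_guess, sensitivity=3.5, relevance_info=True)
for index, relevance in found:
    print("peak near sample %d (relevance %g)" % (int(index), relevance))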
diff --git a/silx/math/fit/peaks/include/peaks.h b/silx/math/fit/peaks/include/peaks.h
deleted file mode 100644
index bd25d96..0000000
--- a/silx/math/fit/peaks/include/peaks.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-
-#ifndef PEAKS_H
-#define PEAKS_H
-
-/* Peak search functions */
-
-long seek(long begin_index, long end_index, long nsamples, double fwhm, double sensitivity,
- double debug_info, double *data, double **peaks, double **relevances);
-
-#endif /* #define PEAKS_H */
diff --git a/silx/math/fit/peaks/src/peaks.c b/silx/math/fit/peaks/src/peaks.c
deleted file mode 100644
index 65cb4f6..0000000
--- a/silx/math/fit/peaks/src/peaks.c
+++ /dev/null
@@ -1,255 +0,0 @@
-#/*##########################################################################
-# Copyright (c) 2004-2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-#include <math.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include "peaks.h"
-
-
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#define MAX(x, y) (((x) > (y)) ? (x) : (y))
-
-/* Peak search function, adapted from PyMca SpecFitFuns
-
- This uses a convolution with the second-derivative of a gaussian curve, to
- smooth the data.
-
- Arguments:
-
- - begin_index: First index of the region of interest in the input data
- array
- - end_index: Last index of the region of interest in the input data
- array
- - nsamples: Number of samples in the input array
- - fwhm: Full width at half maximum for the gaussian used for smoothing.
-   - sensitivity: threshold factor used for peak detection, relative to the
-     estimated standard deviation of the noise
- - debug_info: If different from 0, print debugging messages
- - data: input array of 1D data
- - peaks: pointer to output array of peak indices
- - relevances: pointer to output array of peak relevances
-*/
-long seek(long begin_index,
- long end_index,
- long nsamples,
- double fwhm,
- double sensitivity,
- double debug_info,
- double *data,
- double **peaks,
- double **relevances)
-{
- /* local variables */
- double *peaks0, *relevances0;
- double *realloc_peaks, *realloc_relevances;
- double sigma, sigma2, sigma4;
- long max_gfactor = 100;
- double gfactor[100];
- long nr_factor;
- double lowthreshold;
- double data2[2];
- double nom;
- double den2;
- long channel1;
- long lld;
- long cch;
- long cfac, cfac2, max_cfac;
- long ihelp1, ihelp2;
- long i;
- long max_npeaks = 100;
- long n_peaks = 0;
- double peakstarted = 0;
-
- peaks0 = malloc(100 * sizeof(double));
- relevances0 = malloc(100 * sizeof(double));
- if (peaks0 == NULL || relevances0 == NULL) {
- printf("Error: failed to allocate memory for peaks array.");
- return(-123456);
- }
- /* Make sure the peaks matrix is filled with zeros */
- for (i=0;i<100;i++){
- peaks0[i] = 0.0;
- relevances0[i] = 0.0;
- }
- /* Output pointers */
- *peaks = peaks0;
- *relevances = relevances0;
-
- /* prepare the calculation of the Gaussian scaling factors */
-
- sigma = fwhm / 2.35482;
- sigma2 = sigma * sigma;
- sigma4 = sigma2 * sigma2;
- lowthreshold = 0.01 / sigma2;
-
- /* calculate the factors until lower threshold reached */
- nr_factor = 0;
- max_cfac = MIN(max_gfactor, ((end_index - begin_index - 2) / 2) - 1);
- for (cfac=0; cfac < max_cfac; cfac++) {
- nr_factor++;
- cfac2 = (cfac+1) * (cfac+1);
- gfactor[cfac] = (sigma2 - cfac2) * exp(-cfac2/(sigma2*2.0)) / sigma4;
-
- if ((gfactor[cfac] < lowthreshold)
- && (gfactor[cfac] > (-lowthreshold))){
- break;
- }
- }
-
- /* What comes now is specific to MCA spectra ... */
- lld = 0;
- while (data[lld] == 0) {
- lld++;
- }
- lld = lld + (int) (0.5 * fwhm);
-
- channel1 = begin_index - nr_factor - 1;
- channel1 = MAX (channel1, lld);
- if(debug_info){
- printf("nrfactor = %ld\n", nr_factor);
- }
- /* calculates smoothed value and variance at begincalc */
- cch = MAX(begin_index, 0);
- nom = data[cch] / sigma2;
- den2 = data[cch] / sigma4;
- for (cfac = 0; cfac < nr_factor; cfac++){
- ihelp1 = cch-cfac;
- if (ihelp1 < 0){
- ihelp1 = 0;
- }
- ihelp2 = cch+cfac;
- if (ihelp2 >= nsamples){
- ihelp2 = nsamples-1;
- }
- nom += gfactor[cfac] * (data[ihelp2] + data[ihelp1]);
- den2 += gfactor[cfac] * gfactor[cfac] *
- (data[ihelp2] + data[ihelp1]);
- }
-
- /* now normalize the smoothed value to the standard deviation */
- if (den2 <= 0.0) {
- data2[1] = 0.0;
- }else{
- data2[1] = nom / sqrt(den2);
- }
- data[0] = data[1];
-
- while (cch <= MIN(end_index,nsamples-2)){
- /* calculate gaussian smoothed values */
- data2[0] = data2[1];
- cch++;
- nom = data[cch]/sigma2;
- den2 = data[cch] / sigma4;
- for (cfac = 1; cfac < nr_factor; cfac++){
- ihelp1 = cch-cfac;
- if (ihelp1 < 0){
- ihelp1 = 0;
- }
- ihelp2 = cch+cfac;
- if (ihelp2 >= nsamples){
- ihelp2 = nsamples-1;
- }
- nom += gfactor[cfac-1] * (data[ihelp2] + data[ihelp1]);
- den2 += gfactor[cfac-1] * gfactor[cfac-1] *
- (data[ihelp2] + data[ihelp1]);
- }
- /* now normalize the smoothed value to the standard deviation */
- if (den2 <= 0) {
- data2[1] = 0;
- }else{
- data2[1] = nom / sqrt(den2);
- }
- /* look if the current point falls in a peak */
- if (data2[1] > sensitivity) {
- if(peakstarted == 0){
- if (data2[1] > data2[0]){
- /* this second test is to prevent a peak from outside
- the region from being detected at the beginning of the search */
- peakstarted=1;
- }
- }
- /* there is a peak */
- if (debug_info){
- printf("At cch = %ld y[cch] = %g\n", cch, data[cch]);
- printf("data2[0] = %g\n", data2[0]);
- printf("data2[1] = %g\n", data2[1]);
- printf("sensitivity = %g\n", sensitivity);
- }
- if(peakstarted == 1){
- /* look for the top of the peak */
- if (data2[1] < data2[0]) {
- /* we are close to the top of the peak */
- if (debug_info){
- printf("we are close to the top of the peak\n");
- }
- if (n_peaks == max_npeaks) {
- max_npeaks = max_npeaks + 100;
- realloc_peaks = realloc(peaks0, max_npeaks * sizeof(double));
- realloc_relevances = realloc(relevances0, max_npeaks * sizeof(double));
- if (realloc_peaks == NULL || realloc_relevances == NULL) {
- printf("Error: failed to extend memory for peaks array.");
- *peaks = peaks0;
- *relevances = relevances0;
- return(-n_peaks);
- }
- else {
- peaks0 = realloc_peaks;
- relevances0 = realloc_relevances;
- }
- }
- peaks0[n_peaks] = cch-1;
- relevances0[n_peaks] = data2[0];
- n_peaks++;
- peakstarted=2;
- }
- }
- /* Doublet case */
- if(peakstarted == 2){
- if ((cch-peaks0[n_peaks-1]) > 0.6 * fwhm) {
- if (data2[1] > data2[0]){
- if(debug_info){
- printf("We may have a doublet\n");
- }
- peakstarted=1;
- }
- }
- }
- }else{
- if (peakstarted==1){
- /* We were on a peak but we did not find the top */
- if(debug_info){
- printf("We were on a peak but we did not find the top\n");
- }
- }
- peakstarted=0;
- }
- }
- if(debug_info){
- for (i=0;i< n_peaks;i++){
- printf("Peak %ld found at ",i+1);
- printf("index %g with y = %g\n", peaks0[i],data[(long ) peaks0[i]]);
- }
- }
- *peaks = peaks0;
- *relevances = relevances0;
- return (n_peaks);
-}
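# Editor's note: a numpy sketch of the Gaussian second-derivative factors computed
# by seek() above (the gfactor[] array), to make the C loop easier to follow. It
# only mirrors the factor computation; it is not a replacement for the peak search.
import numpy

def gaussian_2nd_derivative_factors(fwhm, max_factors=100):
    sigma = fwhm / 2.35482
    sigma2 = sigma * sigma
    sigma4 = sigma2 * sigma2
    lowthreshold = 0.01 / sigma2
    factors = []
    for c in range(1, max_factors + 1):
        g = (sigma2 - c * c) * numpy.exp(-c * c / (2.0 * sigma2)) / sigma4
        factors.append(g)
        if abs(g) < lowthreshold:   # stop once the factor becomes negligible
            break
    return numpy.array(factors)

print(gaussian_2nd_derivative_factors(fwhm=10.0)[:5])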
diff --git a/silx/math/fit/peaks_wrapper.pxd b/silx/math/fit/peaks_wrapper.pxd
deleted file mode 100644
index 4c77dc6..0000000
--- a/silx/math/fit/peaks_wrapper.pxd
+++ /dev/null
@@ -1,41 +0,0 @@
-# coding: utf-8
-#/*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-#############################################################################*/
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "22/06/2016"
-
-cimport cython
-
-cdef extern from "peaks.h":
- long seek(long begin_index,
- long end_index,
- long nsamples,
- double fwhm,
- double sensitivity,
- double debug_info,
- double * data,
- double ** peaks,
- double ** relevances)
-
diff --git a/silx/math/fit/setup.py b/silx/math/fit/setup.py
deleted file mode 100644
index 649387f..0000000
--- a/silx/math/fit/setup.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "22/06/2016"
-
-
-import os.path
-
-from numpy.distutils.misc_util import Configuration
-
-
-def configuration(parent_package='', top_path=None):
- config = Configuration('fit', parent_package, top_path)
- config.add_subpackage('test')
-
- # =====================================
- # fit functions
- # =====================================
- fun_src = [os.path.join('functions', "src", "funs.c"),
- "functions.pyx"]
- fun_inc = [os.path.join('functions', 'include')]
-
- config.add_extension('functions',
- sources=fun_src,
- include_dirs=fun_inc,
- language='c')
-
- # =====================================
- # fit filters
- # =====================================
- filt_src = [os.path.join('filters', "src", srcf)
- for srcf in ["smoothnd.c", "snip1d.c",
- "snip2d.c", "snip3d.c", "strip.c"]]
- filt_src.append("filters.pyx")
- filt_inc = [os.path.join('filters', 'include')]
-
- config.add_extension('filters',
- sources=filt_src,
- include_dirs=filt_inc,
- language='c')
-
- # =====================================
- # peaks
- # =====================================
- peaks_src = [os.path.join('peaks', "src", "peaks.c"),
- "peaks.pyx"]
- peaks_inc = [os.path.join('peaks', 'include')]
-
- config.add_extension('peaks',
- sources=peaks_src,
- include_dirs=peaks_inc,
- language='c')
- # =====================================
- # =====================================
- return config
-
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
-
- setup(configuration=configuration)
diff --git a/silx/math/fit/test/__init__.py b/silx/math/fit/test/__init__.py
deleted file mode 100644
index d3d8ce8..0000000
--- a/silx/math/fit/test/__init__.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "22/06/2016"
-
-import unittest
-
-from .test_fit import suite as test_curve_fit
-from .test_functions import suite as test_fitfuns
-from .test_filters import suite as test_fitfilters
-from .test_peaks import suite as test_peaks
-from .test_fitmanager import suite as test_fitmanager
-from .test_bgtheories import suite as test_bgtheories
-
-
-def suite():
- test_suite = unittest.TestSuite()
- test_suite.addTest(test_curve_fit())
- test_suite.addTest(test_fitfuns())
- test_suite.addTest(test_fitfilters())
- test_suite.addTest(test_peaks())
- test_suite.addTest(test_fitmanager())
- test_suite.addTest(test_bgtheories())
- return test_suite
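# Editor's note: a minimal sketch showing how the aggregated suite() above can be
# run directly with the standard unittest text runner.
import unittest
from silx.math.fit.test import suite

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite())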
diff --git a/silx/math/fit/test/test_bgtheories.py b/silx/math/fit/test/test_bgtheories.py
deleted file mode 100644
index e9fea37..0000000
--- a/silx/math/fit/test/test_bgtheories.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-import copy
-import unittest
-import numpy
-import random
-
-from silx.math.fit import bgtheories
-from silx.math.fit.functions import sum_gauss
-
-
-class TestBgTheories(unittest.TestCase):
- """
- """
- def setUp(self):
- self.x = numpy.arange(100)
- self.y = 10 + 0.05 * self.x + sum_gauss(self.x, 10., 45., 15.)
- # add a very narrow high amplitude peak to test strip and snip
- self.y += sum_gauss(self.x, 100., 75., 2.)
- self.narrow_peak_index = list(self.x).index(75)
- random.seed()
-
- def tearDown(self):
- pass
-
- def testTheoriesAttrs(self):
- for theory_name in bgtheories.THEORY:
- self.assertIsInstance(theory_name, str)
- self.assertTrue(hasattr(bgtheories.THEORY[theory_name],
- "function"))
- self.assertTrue(hasattr(bgtheories.THEORY[theory_name].function,
- "__call__"))
- # Ensure legacy functions are not renamed accidentally
- self.assertTrue(
- {"No Background", "Constant", "Linear", "Strip", "Snip"}.issubset(
- set(bgtheories.THEORY)))
-
- def testNoBg(self):
- nobgfun = bgtheories.THEORY["No Background"].function
- self.assertTrue(numpy.array_equal(nobgfun(self.x, self.y),
- numpy.zeros_like(self.x)))
- # default estimate
- self.assertEqual(bgtheories.THEORY["No Background"].estimate(self.x, self.y),
- ([], []))
-
- def testConstant(self):
- consfun = bgtheories.THEORY["Constant"].function
- c = random.random() * 100
- self.assertTrue(numpy.array_equal(consfun(self.x, self.y, c),
- c * numpy.ones_like(self.x)))
- # default estimate
- esti_par, cons = bgtheories.THEORY["Constant"].estimate(self.x, self.y)
- self.assertEqual(cons,
- [[0, 0, 0]])
- self.assertAlmostEqual(esti_par,
- min(self.y))
-
- def testLinear(self):
- linfun = bgtheories.THEORY["Linear"].function
- a = random.random() * 100
- b = random.random() * 100
- self.assertTrue(numpy.array_equal(linfun(self.x, self.y, a, b),
- a + b * self.x))
- # default estimate
- esti_par, cons = bgtheories.THEORY["Linear"].estimate(self.x, self.y)
-
- self.assertEqual(cons,
- [[0, 0, 0], [0, 0, 0]])
- self.assertAlmostEqual(esti_par[0], 10, places=3)
- self.assertAlmostEqual(esti_par[1], 0.05, places=3)
-
- def testStrip(self):
- stripfun = bgtheories.THEORY["Strip"].function
- anchors = sorted(random.sample(list(self.x), 4))
- anchors_indices = [list(self.x).index(a) for a in anchors]
-
- # we really want to strip away the narrow peak
- anchors_indices_copy = copy.deepcopy(anchors_indices)
- for idx in anchors_indices_copy:
- if abs(idx - self.narrow_peak_index) < 5:
- anchors_indices.remove(idx)
- anchors.remove(self.x[idx])
-
- width = 2
- niter = 1000
- bgtheories.THEORY["Strip"].configure(AnchorsList=anchors, AnchorsFlag=True)
-
- bg = stripfun(self.x, self.y, width, niter)
-
- # assert peak amplitude has been decreased
- self.assertLess(bg[self.narrow_peak_index],
- self.y[self.narrow_peak_index])
-
-        # anchored data must remain fixed
- for i in anchors_indices:
- self.assertEqual(bg[i], self.y[i])
-
- # estimated parameters are equal to the default ones in the config dict
- bgtheories.THEORY["Strip"].configure(StripWidth=7, StripIterations=8)
- esti_par, cons = bgtheories.THEORY["Strip"].estimate(self.x, self.y)
- self.assertTrue(numpy.array_equal(cons, [[3, 0, 0], [3, 0, 0]]))
- self.assertEqual(esti_par, [7, 8])
-
- def testSnip(self):
- snipfun = bgtheories.THEORY["Snip"].function
- anchors = sorted(random.sample(list(self.x), 4))
- anchors_indices = [list(self.x).index(a) for a in anchors]
-
- # we want to strip away the narrow peak, so remove nearby anchors
- anchors_indices_copy = copy.deepcopy(anchors_indices)
- for idx in anchors_indices_copy:
- if abs(idx - self.narrow_peak_index) < 5:
- anchors_indices.remove(idx)
- anchors.remove(self.x[idx])
-
- width = 16
- bgtheories.THEORY["Snip"].configure(AnchorsList=anchors, AnchorsFlag=True)
- bg = snipfun(self.x, self.y, width)
-
- # assert peak amplitude has been decreased
- self.assertLess(bg[self.narrow_peak_index],
- self.y[self.narrow_peak_index],
- "Snip didn't decrease the peak amplitude.")
-
- # anchored data must remain fixed
- for i in anchors_indices:
- self.assertEqual(bg[i], self.y[i])
-
- # estimated parameters are equal to the default ones in the config dict
- bgtheories.THEORY["Snip"].configure(SnipWidth=7)
- esti_par, cons = bgtheories.THEORY["Snip"].estimate(self.x, self.y)
- self.assertTrue(numpy.array_equal(cons, [[3, 0, 0]]))
- self.assertEqual(esti_par, [7])
-
-
-test_cases = (TestBgTheories,)
-
-
-def suite():
- loader = unittest.defaultTestLoader
- test_suite = unittest.TestSuite()
- for test_class in test_cases:
- tests = loader.loadTestsFromTestCase(test_class)
- test_suite.addTests(tests)
- return test_suite
-
-if __name__ == '__main__':
- unittest.main(defaultTest="suite")
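# Editor's note: a short sketch, based on the tests above, showing how the Strip
# and Snip background theories can be evaluated directly on a synthetic spectrum.
# The spectrum parameters are illustrative assumptions.
import numpy
from silx.math.fit import bgtheories
from silx.math.fit.functions import sum_gauss

x = numpy.arange(100, dtype=numpy.float64)
y = 10 + 0.05 * x + sum_gauss(x, 10., 45., 15.) + sum_gauss(x, 100., 75., 2.)

strip_bg = bgtheories.THEORY["Strip"].function(x, y, 2, 1000)   # width, iterations
snip_bg = bgtheories.THEORY["Snip"].function(x, y, 16)          # width
print("strip background under the narrow peak:", strip_bg[75])
print("snip background under the narrow peak:", snip_bg[75])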
diff --git a/silx/math/fit/test/test_filters.py b/silx/math/fit/test/test_filters.py
deleted file mode 100644
index 078b998..0000000
--- a/silx/math/fit/test/test_filters.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-import numpy
-import unittest
-from silx.math.fit import filters
-from silx.math.fit import functions
-from silx.test.utils import add_relative_noise
-
-
-class TestSmooth(unittest.TestCase):
- """
- Unit tests of smoothing functions.
-
- Test that the difference between a synthetic curve with 5% added random
- noise and the result of smoothing that signal is less than 5%. We compare
- the sum of all samples in each curve.
- """
- def setUp(self):
- x = numpy.arange(5000)
- # (height1, center1, fwhm1, beamfwhm...)
- slit_params = (50, 500, 200, 100,
- 50, 600, 80, 30,
- 20, 2000, 150, 150,
- 50, 2250, 110, 100,
- 40, 3000, 50, 10,
- 23, 4980, 250, 20)
-
- self.y1 = functions.sum_slit(x, *slit_params)
- # 5% noise
- self.y1 = add_relative_noise(self.y1, 5.)
-
- # (height1, center1, fwhm1...)
- step_params = (50, 500, 200,
- 50, 600, 80,
- 20, 2000, 150,
- 50, 2250, 110,
- 40, 3000, 50,
- 23, 4980, 250,)
-
- self.y2 = functions.sum_stepup(x, *step_params)
- # 5% noise
- self.y2 = add_relative_noise(self.y2, 5.)
-
- self.y3 = functions.sum_stepdown(x, *step_params)
- # 5% noise
- self.y3 = add_relative_noise(self.y3, 5.)
-
- def tearDown(self):
- pass
-
- def testSavitskyGolay(self):
- npts = 25
- for y in [self.y1, self.y2, self.y3]:
- smoothed_y = filters.savitsky_golay(y, npoints=npts)
-
- # we added +-5% of random noise. The difference must be much lower
- # than 5%.
- diff = abs(sum(smoothed_y) - sum(y)) / sum(y)
- self.assertLess(diff, 0.05,
- "Difference between data with 5%% noise and " +
- "smoothed data is > 5%% (%f %%)" % (diff * 100))
-
- # Try various smoothing levels
- npts += 25
-
- def testSmooth1d(self):
- """Test the 1D smoothing against the formula
- ys[i] = (y[i-1] + 2 * y[i] + y[i+1]) / 4 (for 1 < i < n-1)"""
- smoothed_y = filters.smooth1d(self.y1)
-
- for i in range(1, len(self.y1) - 1):
- self.assertAlmostEqual(4 * smoothed_y[i],
- self.y1[i-1] + 2 * self.y1[i] + self.y1[i+1])
-
- def testSmooth2d(self):
- """Test that a 2D smoothing is the same as two successive and
- orthogonal 1D smoothings"""
- x = numpy.arange(10000)
-
- noise = 2 * numpy.random.random(10000) - 1
- noise *= 0.05
- y = x * (1 + noise)
-
- y.shape = (100, 100)
-
- smoothed_y = filters.smooth2d(y)
-
- intermediate_smooth = numpy.zeros_like(y)
- expected_smooth = numpy.zeros_like(y)
- # smooth along first dimension
- for i in range(0, y.shape[0]):
- intermediate_smooth[i, :] = filters.smooth1d(y[i, :])
-
- # smooth along second dimension
- for j in range(0, y.shape[1]):
- expected_smooth[:, j] = filters.smooth1d(intermediate_smooth[:, j])
-
- for i in range(0, y.shape[0]):
- for j in range(0, y.shape[1]):
- self.assertAlmostEqual(smoothed_y[i, j],
- expected_smooth[i, j])
-
-
-test_cases = (TestSmooth,)
-
-
-def suite():
- loader = unittest.defaultTestLoader
- test_suite = unittest.TestSuite()
- for test_class in test_cases:
- tests = loader.loadTestsFromTestCase(test_class)
- test_suite.addTests(tests)
- return test_suite
-
-if __name__ == '__main__':
- unittest.main(defaultTest="suite")
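As a side note, the three-point average verified by testSmooth1d, ys[i] = (y[i-1] + 2*y[i] + y[i+1]) / 4, can be written with plain numpy as in the sketch below; leaving the first and last samples unchanged is a simplifying assumption of this sketch, only interior points match the test.

import numpy

def smooth1d_reference(y):
    """Three-point smoothing: ys[i] = (y[i-1] + 2*y[i] + y[i+1]) / 4.

    Interior points only; edge samples are copied unchanged (assumption
    of this sketch, not necessarily the silx edge handling).
    """
    y = numpy.asarray(y, dtype=numpy.float64)
    ys = y.copy()
    ys[1:-1] = (y[:-2] + 2.0 * y[1:-1] + y[2:]) / 4.0
    return ys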
diff --git a/silx/math/fit/test/test_fit.py b/silx/math/fit/test/test_fit.py
deleted file mode 100644
index 3fdf394..0000000
--- a/silx/math/fit/test/test_fit.py
+++ /dev/null
@@ -1,387 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-"""
-Nominal tests of the leastsq function.
-"""
-
-import unittest
-
-import numpy
-import sys
-
-from silx.utils import testutils
-from silx.math.fit.leastsq import _logger as fitlogger
-
-
-class Test_leastsq(unittest.TestCase):
- """
- Unit tests of the leastsq function.
- """
-
- ndims = None
-
- def setUp(self):
- try:
- from silx.math.fit import leastsq
- self.instance = leastsq
- except ImportError:
- self.instance = None
-
- def myexp(x):
- # put a (bad) filter to avoid over/underflows
- # with no python looping
- return numpy.exp(x*numpy.less(abs(x), 250)) - \
- 1.0 * numpy.greater_equal(abs(x), 250)
-
- self.my_exp = myexp
-
- def gauss(x, *params):
- params = numpy.array(params, copy=False, dtype=numpy.float64)
- result = params[0] + params[1] * x
- for i in range(2, len(params), 3):
- p = params[i:(i+3)]
- dummy = 2.3548200450309493*(x - p[1])/p[2]
- result += p[0] * self.my_exp(-0.5 * dummy * dummy)
- return result
-
- self.gauss = gauss
-
- def gauss_derivative(x, params, idx):
- if idx == 0:
- return numpy.ones(len(x), numpy.float64)
- if idx == 1:
- return x
- gaussian_peak = (idx - 2) // 3
- gaussian_parameter = (idx - 2) % 3
- actual_idx = 2 + 3 * gaussian_peak
- p = params[actual_idx:(actual_idx+3)]
- if gaussian_parameter == 0:
- return self.gauss(x, *[0, 0, 1.0, p[1], p[2]])
- if gaussian_parameter == 1:
- tmp = self.gauss(x, *[0, 0, p[0], p[1], p[2]])
- tmp *= 2.3548200450309493*(x - p[1])/p[2]
- return tmp * 2.3548200450309493/p[2]
- if gaussian_parameter == 2:
- tmp = self.gauss(x, *[0, 0, p[0], p[1], p[2]])
- tmp *= 2.3548200450309493*(x - p[1])/p[2]
- return tmp * 2.3548200450309493*(x - p[1])/(p[2]*p[2])
-
- self.gauss_derivative = gauss_derivative
-
- def tearDown(self):
- self.instance = None
- self.gauss = None
- self.gauss_derivative = None
- self.my_exp = None
- self.model_function = None
- self.model_derivative = None
-
- def testImport(self):
- self.assertTrue(self.instance is not None,
- "Cannot import leastsq from silx.math.fit")
-
- def testUnconstrainedFitNoWeight(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.)
- y = self.gauss(x, *parameters_actual)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
- model_function = self.gauss
-
- fittedpar, cov = self.instance(model_function, x, y, parameters_estimate)
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- def testUnconstrainedFitWeight(self):
- parameters_actual = [10.5,2,1000.0,20.,15]
- x = numpy.arange(10000.)
- y = self.gauss(x, *parameters_actual)
- sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
- model_function = self.gauss
-
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma)
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- def testDerivativeFunction(self):
- parameters_actual = [10.5, 2, 10000.0, 20., 150, 5000, 900., 300]
- x = numpy.arange(10000.)
- y = self.gauss(x, *parameters_actual)
- delta = numpy.sqrt(numpy.finfo(numpy.float64).eps)
- for i in range(len(parameters_actual)):
- p = parameters_actual * 1
- if p[i] == 0:
- delta_par = delta
- else:
- delta_par = p[i] * delta
- if i > 2:
- p[0] = 0.0
- p[1] = 0.0
- p[i] += delta_par
- yPlus = self.gauss(x, *p)
- p[i] = parameters_actual[i] - delta_par
- yMinus = self.gauss(x, *p)
- numerical_derivative = (yPlus - yMinus) / (2 * delta_par)
- #numerical_derivative = (self.gauss(x, *p) - y) / delta_par
- p[i] = parameters_actual[i]
- derivative = self.gauss_derivative(x, p, i)
- diff = numerical_derivative - derivative
- test_condition = numpy.allclose(numerical_derivative,
- derivative, atol=5.0e-6)
- if not test_condition:
- msg = "Error calculating derivative of parameter %d." % i
- msg += "\n diff min = %g diff max = %g" % (diff.min(), diff.max())
- self.assertTrue(test_condition, msg)
-
- def testConstrainedFit(self):
- CFREE = 0
- CPOSITIVE = 1
- CQUOTED = 2
- CFIXED = 3
- CFACTOR = 4
- CDELTA = 5
- CSUM = 6
- parameters_actual = [10.5, 2, 10000.0, 20., 150, 5000, 900., 300]
- x = numpy.arange(10000.)
- y = self.gauss(x, *parameters_actual)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10, 400, 850, 200]
- model_function = self.gauss
- model_deriv = self.gauss_derivative
- constraints_all_free = [[0, 0, 0]] * len(parameters_actual)
- constraints_all_positive = [[1, 0, 0]] * len(parameters_actual)
- constraints_delta_position = [[0, 0, 0]] * len(parameters_actual)
- constraints_delta_position[6] = [CDELTA, 3, 880]
- constraints_sum_position = constraints_all_positive * 1
- constraints_sum_position[6] = [CSUM, 3, 920]
- constraints_factor = constraints_delta_position * 1
- constraints_factor[2] = [CFACTOR, 5, 2]
- constraints_list = [None,
- constraints_all_free,
- constraints_all_positive,
- constraints_delta_position,
- constraints_sum_position]
-
- # for better code coverage, the warning recommending to set full_output
- # to True when using constraints should be shown at least once
- full_output = True
- for index, constraints in enumerate(constraints_list):
- if index == 2:
- full_output = None
- elif index == 3:
- full_output = 0
- for model_deriv in [None, self.gauss_derivative]:
- for sigma in [None, numpy.sqrt(y)]:
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- constraints=constraints,
- model_deriv=model_deriv,
- full_output=full_output)[:2]
- full_output = True
-
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- def testUnconstrainedFitAnalyticalDerivative(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.)
- y = self.gauss(x, *parameters_actual)
- sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
- model_function = self.gauss
- model_deriv = self.gauss_derivative
-
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- model_deriv=model_deriv)
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- @testutils.test_logging(fitlogger.name, warning=2)
- def testBadlyShapedData(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.).reshape(1000, 10)
- y = self.gauss(x, *parameters_actual)
- sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
- model_function = self.gauss
-
- for check_finite in [True, False]:
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=check_finite)
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- @testutils.test_logging(fitlogger.name, warning=3)
- def testDataWithNaN(self):
- parameters_actual = [10.5, 2, 1000.0, 20., 15]
- x = numpy.arange(10000.).reshape(1000, 10)
- y = self.gauss(x, *parameters_actual)
- sigma = numpy.sqrt(y)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10]
- model_function = self.gauss
- x[500] = numpy.inf
- # check default behavior
- try:
- self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma)
- except ValueError:
- info = "%s" % sys.exc_info()[1]
- self.assertTrue("array must not contain inf" in info)
-
- # check requested behavior
- try:
- self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=True)
- except ValueError:
- info = "%s" % sys.exc_info()[1]
- self.assertTrue("array must not contain inf" in info)
-
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=False)
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- # testing now with ydata containing NaN
- x = numpy.arange(10000.).reshape(1000, 10)
- y[500] = numpy.nan
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=False)
-
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- # testing now with sigma containing NaN
- sigma[300] = numpy.nan
- fittedpar, cov = self.instance(model_function, x, y,
- parameters_estimate,
- sigma=sigma,
- check_finite=False)
- test_condition = numpy.allclose(parameters_actual, fittedpar)
- if not test_condition:
- msg = "Unsuccessfull fit\n"
- for i in range(len(fittedpar)):
- msg += "Expected %g obtained %g\n" % (parameters_actual[i],
- fittedpar[i])
- self.assertTrue(test_condition, msg)
-
- def testUncertainties(self):
- """Test for validity of uncertainties in returned full-output
- dictionary. This is a non-regression test for pull request #197"""
- parameters_actual = [10.5, 2, 1000.0, 20., 15, 2001.0, 30.1, 16]
- x = numpy.arange(10000.)
- y = self.gauss(x, *parameters_actual)
- parameters_estimate = [0.0, 1.0, 900.0, 25., 10., 1500., 20., 2.0]
-
- # test that uncertainties are not 0.
- fittedpar, cov, infodict = self.instance(self.gauss, x, y, parameters_estimate,
- full_output=True)
- uncertainties = infodict["uncertainties"]
- self.assertEqual(len(uncertainties), len(parameters_actual))
- self.assertEqual(len(uncertainties), len(fittedpar))
- for uncertainty in uncertainties:
- self.assertNotAlmostEqual(uncertainty, 0.)
-
- # set constraint FIXED for half the parameters.
- # This should cause leastsq to return 100% uncertainty.
- parameters_estimate = [10.6, 2.1, 1000.1, 20.1, 15.1, 2001.1, 30.2, 16.1]
- CFIXED = 3
- CFREE = 0
- constraints = []
- for i in range(len(parameters_estimate)):
- if i % 2:
- constraints.append([CFIXED, 0, 0])
- else:
- constraints.append([CFREE, 0, 0])
- fittedpar, cov, infodict = self.instance(self.gauss, x, y, parameters_estimate,
- constraints=constraints,
- full_output=True)
- uncertainties = infodict["uncertainties"]
- for i in range(len(parameters_estimate)):
- if i % 2:
- # test that all FIXED parameters have 100% uncertainty
- self.assertAlmostEqual(uncertainties[i],
- parameters_estimate[i])
-
-
-test_cases = (Test_leastsq,)
-
-def suite():
- loader = unittest.defaultTestLoader
- test_suite = unittest.TestSuite()
- for test_class in test_cases:
- tests = loader.loadTestsFromTestCase(test_class)
- test_suite.addTests(tests)
- return test_suite
-
-
-if __name__ == '__main__':
- unittest.main(defaultTest="suite")
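For context, a minimal sketch of a constrained leastsq call in the style of the tests above; the quadratic model, the data and the constraint choice are hypothetical.

import numpy
from silx.math.fit import leastsq, CFREE, CFIXED

def model(x, a, b, c):
    return a * x ** 2 + b * x + c

x = numpy.arange(100, dtype=numpy.float64)
y = model(x, 0.5, -2.0, 3.0)

p0 = [1.0, 1.0, 3.0]
# keep the constant term fixed at its initial value, fit a and b
constraints = [[CFREE, 0, 0], [CFREE, 0, 0], [CFIXED, 0, 0]]
fitted, cov, info = leastsq(model, x, y, p0,
                            constraints=constraints, full_output=True)
print(fitted, info["uncertainties"])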
diff --git a/silx/math/fit/test/test_fitmanager.py b/silx/math/fit/test/test_fitmanager.py
deleted file mode 100644
index acac242..0000000
--- a/silx/math/fit/test/test_fitmanager.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-"""
-Tests for fitmanager module
-"""
-
-import unittest
-import numpy
-import os.path
-
-from silx.math.fit import fitmanager
-from silx.math.fit import fittheories
-from silx.math.fit import bgtheories
-from silx.math.fit.fittheory import FitTheory
-from silx.math.fit.functions import sum_gauss, sum_stepdown, sum_stepup
-
-from silx.utils.testutils import ParametricTestCase
-from silx.test.utils import temp_dir
-
-custom_function_definition = """
-import copy
-from silx.math.fit.fittheory import FitTheory
-
-CONFIG = {'d': 1.}
-
-def myfun(x, a, b, c):
- "Model function"
- return (a * x**2 + b * x + c) / CONFIG['d']
-
-def myesti(x, y):
- "Initial parameters for iterative fit (a, b, c) = (1, 1, 1)"
- return (1., 1., 1.), ((0, 0, 0), (0, 0, 0), (0, 0, 0))
-
-def myconfig(d=1., **kw):
- "This function can modify CONFIG"
- CONFIG["d"] = d
- return CONFIG
-
-def myderiv(x, parameters, index):
- "Custom derivative (does not work, causes singular matrix)"
- pars_plus = copy.copy(parameters)
- pars_plus[index] *= 1.0001
-
- pars_minus = parameters
- pars_minus[index] *= copy.copy(0.9999)
-
- delta_fun = myfun(x, *pars_plus) - myfun(x, *pars_minus)
- delta_par = parameters[index] * 0.0001 * 2
-
- return delta_fun / delta_par
-
-THEORY = {
- 'my fit theory':
- FitTheory(function=myfun,
- parameters=('A', 'B', 'C'),
- estimate=myesti,
- configure=myconfig,
- derivative=myderiv)
-}
-
-"""
-
-old_custom_function_definition = """
-CONFIG = {'d': 1.0}
-
-def myfun(x, a, b, c):
- "Model function"
- return (a * x**2 + b * x + c) / CONFIG['d']
-
-def myesti(x, y, bg, xscaling, yscaling):
- "Initial parameters for iterative fit (a, b, c) = (1, 1, 1)"
- return (1., 1., 1.), ((0, 0, 0), (0, 0, 0), (0, 0, 0))
-
-def myconfig(**kw):
- "Update or complete CONFIG dictionary"
- for key in kw:
- CONFIG[key] = kw[key]
- return CONFIG
-
-THEORY = ['my fit theory']
-PARAMETERS = [('A', 'B', 'C')]
-FUNCTION = [myfun]
-ESTIMATE = [myesti]
-CONFIGURE = [myconfig]
-
-"""
-
-
-def _order_of_magnitude(x):
- return numpy.log10(x).round()
-
-
-class TestFitmanager(ParametricTestCase):
- """
- Unit tests of the FitManager class.
- """
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- def testFitManager(self):
- """Test fit manager on synthetic data using a gaussian function
- and a linear background"""
- # Create synthetic data with a sum of gaussian functions
- x = numpy.arange(1000).astype(numpy.float64)
-
- p = [1000, 100., 250,
- 255, 650., 45,
- 1500, 800.5, 95]
- linear_bg = 2.65 * x + 13
- y = linear_bg + sum_gauss(x, *p)
-
- y_with_nans = numpy.array(y)
- y_with_nans[::10] = numpy.nan
-
- x_with_nans = numpy.array(x)
- x_with_nans[5::15] = numpy.nan
-
- tests = {
- 'all finite': (x, y),
- 'y with NaNs': (x, y_with_nans),
- 'x with NaNs': (x_with_nans, y),
- }
-
- for name, (xdata, ydata) in tests.items():
- with self.subTest(name=name):
- # Fitting
- fit = fitmanager.FitManager()
- fit.setdata(x=xdata, y=ydata)
- fit.loadtheories(fittheories)
- # Use one of the default fit functions
- fit.settheory('Gaussians')
- fit.setbackground('Linear')
- fit.estimate()
- fit.runfit()
-
- # check the fitted parameters stored in fit.fit_results
-
- # first 2 parameters are related to the linear background
- self.assertEqual(fit.fit_results[0]["name"], "Constant")
- self.assertAlmostEqual(fit.fit_results[0]["fitresult"], 13)
- self.assertEqual(fit.fit_results[1]["name"], "Slope")
- self.assertAlmostEqual(fit.fit_results[1]["fitresult"], 2.65)
-
- for i, param in enumerate(fit.fit_results[2:]):
- param_number = i // 3 + 1
- if i % 3 == 0:
- self.assertEqual(param["name"],
- "Height%d" % param_number)
- elif i % 3 == 1:
- self.assertEqual(param["name"],
- "Position%d" % param_number)
- elif i % 3 == 2:
- self.assertEqual(param["name"],
- "FWHM%d" % param_number)
-
- self.assertAlmostEqual(param["fitresult"],
- p[i])
- self.assertAlmostEqual(_order_of_magnitude(param["estimation"]),
- _order_of_magnitude(p[i]))
-
- def testLoadCustomFitFunction(self):
- """Test FitManager using a custom fit function defined in an external
- file and imported with FitManager.loadtheories"""
- # Create synthetic data with a polynomial function
- x = numpy.arange(100).astype(numpy.float64)
-
- # a, b, c are the fit parameters
- # d is a known scaling parameter that is set using configure()
- a, b, c, d = 1.5, 2.5, 3.5, 4.5
- y = (a * x**2 + b * x + c) / d
-
- # Fitting
- fit = fitmanager.FitManager()
- fit.setdata(x=x, y=y)
-
- # Create a temporary function definition file, and import it
- with temp_dir() as tmpDir:
- tmpfile = os.path.join(tmpDir, 'customfun.py')
- # custom_function_definition
- fd = open(tmpfile, "w")
- fd.write(custom_function_definition)
- fd.close()
- fit.loadtheories(tmpfile)
- tmpfile_pyc = os.path.join(tmpDir, 'customfun.pyc')
- if os.path.exists(tmpfile_pyc):
- os.unlink(tmpfile_pyc)
- os.unlink(tmpfile)
-
- fit.settheory('my fit theory')
- # Test configure
- fit.configure(d=4.5)
- fit.estimate()
- fit.runfit()
-
- self.assertEqual(fit.fit_results[0]["name"],
- "A1")
- self.assertAlmostEqual(fit.fit_results[0]["fitresult"],
- 1.5)
- self.assertEqual(fit.fit_results[1]["name"],
- "B1")
- self.assertAlmostEqual(fit.fit_results[1]["fitresult"],
- 2.5)
- self.assertEqual(fit.fit_results[2]["name"],
- "C1")
- self.assertAlmostEqual(fit.fit_results[2]["fitresult"],
- 3.5)
-
- def testLoadOldCustomFitFunction(self):
- """Test FitManager using a custom fit function defined in an external
- file and imported with FitManager.loadtheories (legacy PyMca format)"""
- # Create synthetic data with a polynomial function
- x = numpy.arange(100).astype(numpy.float64)
-
- # a, b, c are the fit parameters
- # d is a known scaling parameter that is set using configure()
- a, b, c, d = 1.5, 2.5, 3.5, 4.5
- y = (a * x**2 + b * x + c) / d
-
- # Fitting
- fit = fitmanager.FitManager()
- fit.setdata(x=x, y=y)
-
- # Create a temporary function definition file, and import it
- with temp_dir() as tmpDir:
- tmpfile = os.path.join(tmpDir, 'oldcustomfun.py')
- # custom_function_definition
- fd = open(tmpfile, "w")
- fd.write(old_custom_function_definition)
- fd.close()
- fit.loadtheories(tmpfile)
- tmpfile_pyc = os.path.join(tmpDir, 'oldcustomfun.pyc')
- if os.path.exists(tmpfile_pyc):
- os.unlink(tmpfile_pyc)
- os.unlink(tmpfile)
-
- fit.settheory('my fit theory')
- fit.configure(d=4.5)
- fit.estimate()
- fit.runfit()
-
- self.assertEqual(fit.fit_results[0]["name"],
- "A1")
- self.assertAlmostEqual(fit.fit_results[0]["fitresult"],
- 1.5)
- self.assertEqual(fit.fit_results[1]["name"],
- "B1")
- self.assertAlmostEqual(fit.fit_results[1]["fitresult"],
- 2.5)
- self.assertEqual(fit.fit_results[2]["name"],
- "C1")
- self.assertAlmostEqual(fit.fit_results[2]["fitresult"],
- 3.5)
-
- def testAddTheory(self, estimate=True):
- """Test FitManager using a custom fit function imported with
- FitManager.addtheory"""
- # Create synthetic data with a polynomial function
- x = numpy.arange(100).astype(numpy.float64)
-
- # a, b, c are the fit parameters
- # d is a known scaling parameter that is set using configure()
- a, b, c, d = -3.14, 1234.5, 10000, 4.5
- y = (a * x**2 + b * x + c) / d
-
- # Fitting
- fit = fitmanager.FitManager()
- fit.setdata(x=x, y=y)
-
- # Define and add the fit theory
- CONFIG = {'d': 1.}
-
- def myfun(x_, a_, b_, c_):
- """"Model function"""
- return (a_ * x_**2 + b_ * x_ + c_) / CONFIG['d']
-
- def myesti(x_, y_):
- """"Initial parameters for iterative fit:
- (a, b, c) = (1, 1, 1)
- Constraints all set to 0 (FREE)"""
- return (1., 1., 1.), ((0, 0, 0), (0, 0, 0), (0, 0, 0))
-
- def myconfig(d_=1., **kw):
- """This function can modify CONFIG"""
- CONFIG["d"] = d_
- return CONFIG
-
- def myderiv(x_, parameters, index):
- """Custom derivative"""
- pars_plus = numpy.array(parameters, copy=True)
- pars_plus[index] *= 1.001
-
- pars_minus = numpy.array(parameters, copy=True)
- pars_minus[index] *= 0.999
-
- delta_fun = myfun(x_, *pars_plus) - myfun(x_, *pars_minus)
- delta_par = parameters[index] * 0.001 * 2
-
- return delta_fun / delta_par
-
- fit.addtheory("polynomial",
- FitTheory(function=myfun,
- parameters=["A", "B", "C"],
- estimate=myesti if estimate else None,
- configure=myconfig,
- derivative=myderiv))
-
- fit.settheory('polynomial')
- fit.configure(d_=4.5)
- fit.estimate()
- params1, sigmas, infodict = fit.runfit()
-
- self.assertEqual(fit.fit_results[0]["name"],
- "A1")
- self.assertAlmostEqual(fit.fit_results[0]["fitresult"],
- -3.14)
- self.assertEqual(fit.fit_results[1]["name"],
- "B1")
- # params1[1] is the same as fit.fit_results[1]["fitresult"]
- self.assertAlmostEqual(params1[1],
- 1234.5)
- self.assertEqual(fit.fit_results[2]["name"],
- "C1")
- self.assertAlmostEqual(params1[2],
- 10000)
-
- # change configuration scaling factor and check that the fit returns
- # different values
- fit.configure(d_=5.)
- fit.estimate()
- params2, sigmas, infodict = fit.runfit()
- for p1, p2 in zip(params1, params2):
- self.assertFalse(numpy.array_equal(p1, p2),
- "Fit parameters are equal even though the " +
- "configuration has been changed")
-
- def testNoEstimate(self):
- """Ensure that the in the absence of the estimation function,
- the default estimation function :meth:`FitTheory.default_estimate`
- is used."""
- self.testAddTheory(estimate=False)
-
- def testStep(self):
- """Test fit manager on a step function with a more complex estimate
- function than the gaussian (convolution filter)"""
- for theory_name, theory_fun in (('Step Down', sum_stepdown),
- ('Step Up', sum_stepup)):
- # Create synthetic data with a step function and a constant background
- x = numpy.arange(1000).astype(numpy.float64)
-
- # ('Height', 'Position', 'FWHM')
- p = [1000, 439, 250]
-
- constantbg = 13
- y = theory_fun(x, *p) + constantbg
-
- # Fitting
- fit = fitmanager.FitManager()
- fit.setdata(x=x, y=y)
- fit.loadtheories(fittheories)
- fit.settheory(theory_name)
- fit.setbackground('Constant')
-
- fit.estimate()
-
- params, sigmas, infodict = fit.runfit()
-
- # first parameter is the constant background
- self.assertAlmostEqual(params[0], 13, places=5)
- for i, param in enumerate(params[1:]):
- self.assertAlmostEqual(param, p[i], places=5)
- self.assertAlmostEqual(_order_of_magnitude(fit.fit_results[i+1]["estimation"]),
- _order_of_magnitude(p[i]))
-
-
-def quadratic(x, a, b, c):
- return a * x**2 + b * x + c
-
-
-def cubic(x, a, b, c, d):
- return a * x**3 + b * x**2 + c * x + d
-
-
-class TestPolynomials(unittest.TestCase):
- """Test polynomial fit theories and fit background"""
- def setUp(self):
- self.x = numpy.arange(100).astype(numpy.float64)
-
- def testQuadraticBg(self):
- gaussian_params = [100, 45, 8]
- poly_params = [0.05, -2, 3]
- p = numpy.poly1d(poly_params)
-
- y = p(self.x) + sum_gauss(self.x, *gaussian_params)
-
- fm = fitmanager.FitManager(self.x, y)
- fm.loadbgtheories(bgtheories)
- fm.loadtheories(fittheories)
- fm.settheory("Gaussians")
- fm.setbackground("Degree 2 Polynomial")
- esti_params = fm.estimate()
- fit_params = fm.runfit()[0]
-
- for p, pfit in zip(poly_params + gaussian_params, fit_params):
- self.assertAlmostEqual(p,
- pfit)
-
- def testCubicBg(self):
- gaussian_params = [1000, 45, 8]
- poly_params = [0.0005, -0.05, 3, -4]
- p = numpy.poly1d(poly_params)
-
- y = p(self.x) + sum_gauss(self.x, *gaussian_params)
-
- fm = fitmanager.FitManager(self.x, y)
- fm.loadtheories(fittheories)
- fm.settheory("Gaussians")
- fm.setbackground("Degree 3 Polynomial")
- esti_params = fm.estimate()
- fit_params = fm.runfit()[0]
-
- for p, pfit in zip(poly_params + gaussian_params, fit_params):
- self.assertAlmostEqual(p,
- pfit)
-
- def testQuarticBg(self):
- gaussian_params = [10000, 69, 25]
- poly_params = [5e-10, 0.0005, 0.005, 2, 4]
- p = numpy.poly1d(poly_params)
-
- y = p(self.x) + sum_gauss(self.x, *gaussian_params)
-
- fm = fitmanager.FitManager(self.x, y)
- fm.loadtheories(fittheories)
- fm.settheory("Gaussians")
- fm.setbackground("Degree 4 Polynomial")
- esti_params = fm.estimate()
- fit_params = fm.runfit()[0]
-
- for p, pfit in zip(poly_params + gaussian_params, fit_params):
- self.assertAlmostEqual(p,
- pfit,
- places=5)
-
- def _testPoly(self, poly_params, theory, places=5):
- p = numpy.poly1d(poly_params)
-
- y = p(self.x)
-
- fm = fitmanager.FitManager(self.x, y)
- fm.loadbgtheories(bgtheories)
- fm.loadtheories(fittheories)
- fm.settheory(theory)
- esti_params = fm.estimate()
- fit_params = fm.runfit()[0]
-
- for p, pfit in zip(poly_params, fit_params):
- self.assertAlmostEqual(p, pfit, places=places)
-
- def testQuadratic(self):
- self._testPoly([0.05, -2, 3],
- "Degree 2 Polynomial")
-
- def testCubic(self):
- self._testPoly([0.0005, -0.05, 3, -4],
- "Degree 3 Polynomial")
-
- def testQuartic(self):
- self._testPoly([1, -2, 3, -4, -5],
- "Degree 4 Polynomial")
-
- def testQuintic(self):
- self._testPoly([1, -2, 3, -4, -5, 6],
- "Degree 5 Polynomial",
- places=4)
-
-
-test_cases = (TestFitmanager, TestPolynomials)
-
-
-def suite():
- loader = unittest.defaultTestLoader
- test_suite = unittest.TestSuite()
- for test_class in test_cases:
- tests = loader.loadTestsFromTestCase(test_class)
- test_suite.addTests(tests)
- return test_suite
-
-if __name__ == '__main__':
- unittest.main(defaultTest="suite")
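For reference, the FitManager workflow repeated by the deleted tests above (setdata, loadtheories, settheory, setbackground, estimate, runfit), condensed into a minimal sketch; the synthetic data below is hypothetical.

import numpy
from silx.math.fit import fitmanager, fittheories
from silx.math.fit.functions import sum_gauss

x = numpy.arange(1000, dtype=numpy.float64)
y = 2.65 * x + 13 + sum_gauss(x, 1000, 100., 250)   # linear background + one gaussian

fit = fitmanager.FitManager()
fit.setdata(x=x, y=y)
fit.loadtheories(fittheories)
fit.settheory('Gaussians')
fit.setbackground('Linear')
fit.estimate()
params, sigmas, infodict = fit.runfit()
# fit.fit_results holds one dict per parameter (name, fitresult, estimation, ...)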
diff --git a/silx/math/fit/test/test_functions.py b/silx/math/fit/test/test_functions.py
deleted file mode 100644
index ce7dbd6..0000000
--- a/silx/math/fit/test/test_functions.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-"""
-Tests for functions module
-"""
-
-import unittest
-import numpy
-import math
-
-from silx.math.fit import functions
-
-__authors__ = ["P. Knobel"]
-__license__ = "MIT"
-__date__ = "21/07/2016"
-
-class Test_functions(unittest.TestCase):
- """
- Unit tests of multi-peak functions.
- """
- def setUp(self):
- self.x = numpy.arange(11)
-
- # height, center, sigma1, sigma2
- (h, c, s1, s2) = (7., 5., 3., 2.1)
- self.g_params = {
- "height": h,
- "center": c,
- #"sigma": s,
- "fwhm1": 2 * math.sqrt(2 * math.log(2)) * s1,
- "fwhm2": 2 * math.sqrt(2 * math.log(2)) * s2,
- "area1": h * s1 * math.sqrt(2 * math.pi)
- }
- # result of `7 * scipy.signal.gaussian(11, 3)`
- self.scipy_gaussian = numpy.array(
- [1.74546546, 2.87778603, 4.24571462, 5.60516182, 6.62171628,
- 7., 6.62171628, 5.60516182, 4.24571462, 2.87778603,
- 1.74546546]
- )
-
- # result of:
- # numpy.concatenate((7 * scipy.signal.gaussian(11, 3)[0:5],
- # 7 * scipy.signal.gaussian(11, 2.1)[5:11]))
- self.scipy_asym_gaussian = numpy.array(
- [1.74546546, 2.87778603, 4.24571462, 5.60516182, 6.62171628,
- 7., 6.24968751, 4.44773692, 2.52313452, 1.14093853, 0.41124877]
- )
-
- def tearDown(self):
- pass
-
- def testGauss(self):
- """Compare sum_gauss with scipy.signals.gaussian"""
- y = functions.sum_gauss(self.x,
- self.g_params["height"],
- self.g_params["center"],
- self.g_params["fwhm1"])
-
- for i in range(11):
- self.assertAlmostEqual(y[i], self.scipy_gaussian[i])
-
- def testAGauss(self):
- """Compare sum_agauss with scipy.signals.gaussian"""
- y = functions.sum_agauss(self.x,
- self.g_params["area1"],
- self.g_params["center"],
- self.g_params["fwhm1"])
- for i in range(11):
- self.assertAlmostEqual(y[i], self.scipy_gaussian[i])
-
- def testFastAGauss(self):
- """Compare sum_fastagauss with scipy.signals.gaussian
- Limit precision to 3 decimal places."""
- y = functions.sum_fastagauss(self.x,
- self.g_params["area1"],
- self.g_params["center"],
- self.g_params["fwhm1"])
- for i in range(11):
- self.assertAlmostEqual(y[i], self.scipy_gaussian[i], 3)
-
-
- def testSplitGauss(self):
- """Compare sum_splitgauss with scipy.signals.gaussian"""
- y = functions.sum_splitgauss(self.x,
- self.g_params["height"],
- self.g_params["center"],
- self.g_params["fwhm1"],
- self.g_params["fwhm2"])
- for i in range(11):
- self.assertAlmostEqual(y[i], self.scipy_asym_gaussian[i])
-
- def testErf(self):
- """Compare erf with math.erf"""
- # scalars
- self.assertAlmostEqual(functions.erf(0.14), math.erf(0.14), places=5)
- self.assertAlmostEqual(functions.erf(0), math.erf(0), places=5)
- self.assertAlmostEqual(functions.erf(-0.74), math.erf(-0.74), places=5)
-
- # lists
- x = [-5, -2, -1.5, -0.6, 0, 0.1, 2, 3]
- erfx = functions.erf(x)
- for i in range(len(x)):
- self.assertAlmostEqual(erfx[i],
- math.erf(x[i]),
- places=5)
-
- # ndarray
- x = numpy.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
- erfx = functions.erf(x)
- for i in range(x.shape[0]):
- for j in range(x.shape[1]):
- self.assertAlmostEqual(erfx[i, j],
- math.erf(x[i, j]),
- places=5)
-
- def testErfc(self):
- """Compare erf with math.erf"""
- # scalars
- self.assertAlmostEqual(functions.erfc(0.14), math.erfc(0.14), places=5)
- self.assertAlmostEqual(functions.erfc(0), math.erfc(0), places=5)
- self.assertAlmostEqual(functions.erfc(-0.74), math.erfc(-0.74), places=5)
-
- # lists
- x = [-5, -2, -1.5, -0.6, 0, 0.1, 2, 3]
- erfcx = functions.erfc(x)
- for i in range(len(x)):
- self.assertAlmostEqual(erfcx[i], math.erfc(x[i]), places=5)
-
- # ndarray
- x = numpy.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
- erfcx = functions.erfc(x)
- for i in range(x.shape[0]):
- for j in range(x.shape[1]):
- self.assertAlmostEqual(erfcx[i, j], math.erfc(x[i, j]), places=5)
-
- def testAtanStepUp(self):
- """Compare atan_stepup with math.atan
-
- atan_stepup(x, a, b, c) = a * (0.5 + (arctan((x - b) / c) / pi))"""
- x0 = numpy.arange(100) / 6.33
- y0 = functions.atan_stepup(x0, 11.1, 22.2, 3.33)
-
- for x, y in zip(x0, y0):
- self.assertAlmostEqual(
- 11.1 * (0.5 + math.atan((x - 22.2) / 3.33) / math.pi),
- y
- )
-
- def testStepUp(self):
- """sanity check for step up:
-
- - derivative must be largest around the step center
- - max value must be close to height parameter
-
- """
- x0 = numpy.arange(1000)
- center = 444
- height = 1234
- fwhm = 210
- y0 = functions.sum_stepup(x0, height, center, fwhm)
-
- self.assertLess(max(y0), height)
- self.assertAlmostEqual(max(y0), height, places=1)
- self.assertAlmostEqual(min(y0), 0, places=1)
-
- deriv0 = _numerical_derivative(functions.sum_stepup, x0, [height, center, fwhm])
-
- # Test center position within +- 1 sample of max derivative
- index_max_deriv = numpy.argmax(deriv0)
- self.assertLess(abs(index_max_deriv - center),
- 1)
-
- def testStepDown(self):
- """sanity check for step down:
-
- - absolute value of derivative must be largest around the step center
- - max value must be close to height parameter
-
- """
- x0 = numpy.arange(1000)
- center = 444
- height = 1234
- fwhm = 210
- y0 = functions.sum_stepdown(x0, height, center, fwhm)
-
- self.assertLess(max(y0), height)
- self.assertAlmostEqual(max(y0), height, places=1)
- self.assertAlmostEqual(min(y0), 0, places=1)
-
- deriv0 = _numerical_derivative(functions.sum_stepdown, x0, [height, center, fwhm])
-
- # Test center position within +- 1 sample of max derivative
- index_min_deriv = numpy.argmax(-deriv0)
- self.assertLess(abs(index_min_deriv - center),
- 1)
-
- def testSlit(self):
- """sanity check for slit:
-
- - absolute value of derivative must be largest around the step center
- - max value must be close to height parameter
-
- """
- x0 = numpy.arange(1000)
- center = 444
- height = 1234
- fwhm = 210
- beamfwhm = 30
- y0 = functions.sum_slit(x0, height, center, fwhm, beamfwhm)
-
- self.assertAlmostEqual(max(y0), height, places=1)
- self.assertAlmostEqual(min(y0), 0, places=1)
-
- deriv0 = _numerical_derivative(functions.sum_slit, x0, [height, center, fwhm, beamfwhm])
-
- # Test step up center position (center - fwhm/2) within +- 1 sample of max derivative
- index_max_deriv = numpy.argmax(deriv0)
- self.assertLess(abs(index_max_deriv - (center - fwhm/2)),
- 1)
- # Test step down center position (center + fwhm/2) within +- 1 sample of min derivative
- index_min_deriv = numpy.argmin(deriv0)
- self.assertLess(abs(index_min_deriv - (center + fwhm/2)),
- 1)
-
-
-def _numerical_derivative(f, x, params=[], delta_factor=0.0001):
- """Compute the numerical derivative of ``f`` for all values of ``x``.
-
- :param f: function
- :param x: Array of evenly spaced abscissa values
- :param params: list of additional parameters
- :return: Array of derivative values
- """
- deltax = (x[1] - x[0]) * delta_factor
- y_plus = f(x + deltax, *params)
- y_minus = f(x - deltax, *params)
-
- return (y_plus - y_minus) / (2 * deltax)
-
-test_cases = (Test_functions,)
-
-def suite():
- loader = unittest.defaultTestLoader
- test_suite = unittest.TestSuite()
- for test_class in test_cases:
- tests = loader.loadTestsFromTestCase(test_class)
- test_suite.addTests(tests)
- return test_suite
-
-if __name__ == '__main__':
- unittest.main(defaultTest="suite")
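As a side note, the parameter conversions used in setUp above rely on the standard Gaussian relations fwhm = 2*sqrt(2*ln 2)*sigma and area = height*sigma*sqrt(2*pi); a quick worked example with the same sigma and height values:

import math

sigma, height = 3.0, 7.0
fwhm = 2 * math.sqrt(2 * math.log(2)) * sigma      # ~7.0645 (2.3548... * sigma)
area = height * sigma * math.sqrt(2 * math.pi)     # ~52.64
print(fwhm, area)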
diff --git a/silx/math/fit/test/test_peaks.py b/silx/math/fit/test/test_peaks.py
deleted file mode 100644
index 17eb75d..0000000
--- a/silx/math/fit/test/test_peaks.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# coding: utf-8
-# /*##########################################################################
-# Copyright (C) 2016 European Synchrotron Radiation Facility
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-#
-# ############################################################################*/
-"""
-Tests for peaks module
-"""
-
-import unittest
-import numpy
-import math
-
-from silx.math.fit import functions
-from silx.math.fit import peaks
-
-class Test_peak_search(unittest.TestCase):
- """
- Unit tests of peak_search on various types of multi-peak functions.
- """
- def setUp(self):
- self.x = numpy.arange(5000)
- # (height1, center1, fwhm1, ...)
- self.h_c_fwhm = (50, 500, 100,
- 50, 600, 80,
- 20, 2000, 100,
- 50, 2250, 110,
- 40, 3000, 99,
- 23, 4980, 80)
- # (height1, center1, fwhm1, eta1 ...)
- self.h_c_fwhm_eta = (50, 500, 100, 0.4,
- 50, 600, 80, 0.5,
- 20, 2000, 100, 0.6,
- 50, 2250, 110, 0.7,
- 40, 3000, 99, 0.8,
- 23, 4980, 80, 0.3,)
- # (height1, center1, fwhm11, fwhm21, ...)
- self.h_c_fwhm_fwhm = (50, 500, 100, 85,
- 50, 600, 80, 110,
- 20, 2000, 100, 100,
- 50, 2250, 110, 99,
- 40, 3000, 99, 110,
- 23, 4980, 80, 80,)
- # (height1, center1, fwhm11, fwhm21, eta1 ...)
- self.h_c_fwhm_fwhm_eta = (50, 500, 100, 85, 0.4,
- 50, 600, 80, 110, 0.5,
- 20, 2000, 100, 100, 0.6,
- 50, 2250, 110, 99, 0.7,
- 40, 3000, 99, 110, 0.8,
- 23, 4980, 80, 80, 0.3,)
- # (area1, center1, fwhm1, ...)
- self.a_c_fwhm = (2550, 500, 100,
- 2000, 600, 80,
- 500, 2000, 100,
- 4000, 2250, 110,
- 2300, 3000, 99,
- 3333, 4980, 80)
- # (area1, center1, fwhm1, eta1 ...)
- self.a_c_fwhm_eta = (500, 500, 100, 0.4,
- 500, 600, 80, 0.5,
- 200, 2000, 100, 0.6,
- 500, 2250, 110, 0.7,
- 400, 3000, 99, 0.8,
- 230, 4980, 80, 0.3,)
- # (area, position, fwhm, st_area_r, st_slope_r, lt_area_r, lt_slope_r, step_height_r)
- self.hypermet_params = (1000, 500, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 1000, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 2000, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 2350, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 3000, 200, 0.2, 100, 0.3, 100, 0.05,
- 1000, 4900, 200, 0.2, 100, 0.3, 100, 0.05,)
-
-
- def tearDown(self):
- pass
-
- def get_peaks(self, function, params):
- """
-
- :param function: Multi-peak function
- :param params: Parameters for this function
- :return: list of (peak, relevance) tuples
- """
- y = function(self.x, *params)
- return peaks.peak_search(y=y, fwhm=100, relevance_info=True)
-
- def testPeakSearch_various_functions(self):
- """Run peak search on a variety of synthetic functions, and
- check that the result falls within +-25 samples of the actual peak
- (a reasonable delta, considering a fwhm of ~100 samples and the
- effects of overlapping peaks)."""
- f_p = ((functions.sum_gauss, self.h_c_fwhm ),
- (functions.sum_lorentz, self.h_c_fwhm),
- (functions.sum_pvoigt, self.h_c_fwhm_eta),
- (functions.sum_splitgauss, self.h_c_fwhm_fwhm),
- (functions.sum_splitlorentz, self.h_c_fwhm_fwhm),
- (functions.sum_splitpvoigt, self.h_c_fwhm_fwhm_eta),
- (functions.sum_agauss, self.a_c_fwhm),
- (functions.sum_fastagauss, self.a_c_fwhm),
- (functions.sum_alorentz, self.a_c_fwhm),
- (functions.sum_apvoigt, self.a_c_fwhm_eta),
- (functions.sum_ahypermet, self.hypermet_params),
- (functions.sum_fastahypermet, self.hypermet_params),)
-
- for function, params in f_p:
- peaks = self.get_peaks(function, params)
-
- self.assertEqual(len(peaks), 6,
- "Wrong number of peaks detected")
-
- for i in range(6):
- theoretical_peak_index = params[i*(len(params)//6) + 1]
- found_peak_index = peaks[i][0]
- self.assertLess(abs(found_peak_index - theoretical_peak_index), 25)
-
-
-test_cases = (Test_peak_search,)
-
-def suite():
- loader = unittest.defaultTestLoader
- test_suite = unittest.TestSuite()
- for test_class in test_cases:
- tests = loader.loadTestsFromTestCase(test_class)
- test_suite.addTests(tests)
- return test_suite
-
-if __name__ == '__main__':
- unittest.main(defaultTest="suite")
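Finally, a minimal usage sketch of peak_search as exercised by the test above; the two-peak synthetic signal is hypothetical.

import numpy
from silx.math.fit import functions, peaks

x = numpy.arange(5000)
y = functions.sum_gauss(x, 50, 500, 100,    # height, center, fwhm
                        20, 2000, 100)

# with relevance_info=True, each result is a (peak_index, relevance) pair
for peak_index, relevance in peaks.peak_search(y=y, fwhm=100, relevance_info=True):
    print(peak_index, relevance)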